xref: /linux/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c (revision bf5802238dc181b1f7375d358af1d01cd72d1c11)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "i40e.h"
5 #include "i40e_lan_hmc.h"
6 #include "i40e_virtchnl_pf.h"
7 
8 /*********************notification routines***********************/
9 
10 /**
11  * i40e_vc_vf_broadcast
12  * @pf: pointer to the PF structure
13  * @v_opcode: operation code
14  * @v_retval: return value
15  * @msg: pointer to the msg buffer
16  * @msglen: msg length
17  *
18  * send a message to all VFs on a given PF
19  **/
20 static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
21 				 enum virtchnl_ops v_opcode,
22 				 int v_retval, u8 *msg,
23 				 u16 msglen)
24 {
25 	struct i40e_hw *hw = &pf->hw;
26 	struct i40e_vf *vf = pf->vf;
27 	int i;
28 
29 	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
30 		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
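		/* The admin queue addresses VFs by absolute id, i.e. the
		 * VF's index offset by this function's vf_base_id.
		 */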
		/* Not all VFs are enabled, so skip those that are not */
32 		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
33 		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
34 			continue;
35 
36 		/* Ignore return value on purpose - a given VF may fail, but
37 		 * we need to keep going and send to all of them
38 		 */
39 		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
40 				       msg, msglen, NULL);
41 	}
42 }
43 
/**
 * i40e_vc_link_speed2mbps - convert i40e_aq_link_speed to Mbps
 * @link_speed: the speed to convert
 *
 * Return the speed as a direct value in Mbps.
 **/
51 static u32
52 i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
53 {
54 	switch (link_speed) {
55 	case I40E_LINK_SPEED_100MB:
56 		return SPEED_100;
57 	case I40E_LINK_SPEED_1GB:
58 		return SPEED_1000;
59 	case I40E_LINK_SPEED_2_5GB:
60 		return SPEED_2500;
61 	case I40E_LINK_SPEED_5GB:
62 		return SPEED_5000;
63 	case I40E_LINK_SPEED_10GB:
64 		return SPEED_10000;
65 	case I40E_LINK_SPEED_20GB:
66 		return SPEED_20000;
67 	case I40E_LINK_SPEED_25GB:
68 		return SPEED_25000;
69 	case I40E_LINK_SPEED_40GB:
70 		return SPEED_40000;
71 	case I40E_LINK_SPEED_UNKNOWN:
72 		return SPEED_UNKNOWN;
73 	}
74 	return SPEED_UNKNOWN;
75 }
76 
77 /**
78  * i40e_set_vf_link_state
79  * @vf: pointer to the VF structure
80  * @pfe: pointer to PF event structure
81  * @ls: pointer to link status structure
82  *
83  * set a link state on a single vf
84  **/
85 static void i40e_set_vf_link_state(struct i40e_vf *vf,
86 				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
87 {
88 	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;
89 
90 	if (vf->link_forced)
91 		link_status = vf->link_up;
92 
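	/* VFs that negotiated VIRTCHNL_VF_CAP_ADV_LINK_SPEED expect the link
	 * speed as a plain Mbps value; legacy VFs get the virtchnl
	 * link-speed enum instead.
	 */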
93 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
94 		pfe->event_data.link_event_adv.link_speed = link_status ?
95 			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
96 		pfe->event_data.link_event_adv.link_status = link_status;
97 	} else {
98 		pfe->event_data.link_event.link_speed = link_status ?
99 			i40e_virtchnl_link_speed(ls->link_speed) : 0;
100 		pfe->event_data.link_event.link_status = link_status;
101 	}
102 }
103 
104 /**
105  * i40e_vc_notify_vf_link_state
106  * @vf: pointer to the VF structure
107  *
108  * send a link status message to a single VF
109  **/
110 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
111 {
112 	struct virtchnl_pf_event pfe;
113 	struct i40e_pf *pf = vf->pf;
114 	struct i40e_hw *hw = &pf->hw;
115 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
116 	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
117 
118 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
119 	pfe.severity = PF_EVENT_SEVERITY_INFO;
120 
121 	i40e_set_vf_link_state(vf, &pfe, ls);
122 
123 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
124 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
125 }
126 
127 /**
128  * i40e_vc_notify_link_state
129  * @pf: pointer to the PF structure
130  *
131  * send a link status message to all VFs on a given PF
132  **/
133 void i40e_vc_notify_link_state(struct i40e_pf *pf)
134 {
135 	int i;
136 
137 	for (i = 0; i < pf->num_alloc_vfs; i++)
138 		i40e_vc_notify_vf_link_state(&pf->vf[i]);
139 }
140 
141 /**
142  * i40e_vc_notify_reset
143  * @pf: pointer to the PF structure
144  *
145  * indicate a pending reset to all VFs on a given PF
146  **/
147 void i40e_vc_notify_reset(struct i40e_pf *pf)
148 {
149 	struct virtchnl_pf_event pfe;
150 
151 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
152 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
153 	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
154 			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
155 }
156 
157 #ifdef CONFIG_PCI_IOV
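/**
 * i40e_restore_all_vfs_msi_state
 * @pdev: PF PCI device
 *
 * Restore the saved MSI/MSI-X state of every VF that belongs to this PF.
 **/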
158 void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
159 {
160 	u16 vf_id;
161 	u16 pos;
162 
163 	/* Continue only if this is a PF */
164 	if (!pdev->is_physfn)
165 		return;
166 
167 	if (!pci_num_vf(pdev))
168 		return;
169 
170 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
171 	if (pos) {
172 		struct pci_dev *vf_dev = NULL;
173 
174 		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
175 		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
176 			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
177 				pci_restore_msi_state(vf_dev);
178 		}
179 	}
180 }
181 #endif /* CONFIG_PCI_IOV */
182 
183 /**
184  * i40e_vc_notify_vf_reset
185  * @vf: pointer to the VF structure
186  *
187  * indicate a pending reset to the given VF
188  **/
189 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
190 {
191 	struct virtchnl_pf_event pfe;
192 	int abs_vf_id;
193 
194 	/* validate the request */
195 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
196 		return;
197 
	/* verify the VF is in the init or active state before proceeding */
199 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
200 	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
201 		return;
202 
203 	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
204 
205 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
206 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
207 	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
208 			       0, (u8 *)&pfe,
209 			       sizeof(struct virtchnl_pf_event), NULL);
210 }
211 /***********************misc routines*****************************/
212 
213 /**
214  * i40e_vc_reset_vf
215  * @vf: pointer to the VF info
 * @notify_vf: whether to notify the VF about the reset
 *
 * Reset VF handler.
218  **/
219 static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
220 {
221 	struct i40e_pf *pf = vf->pf;
222 	int i;
223 
224 	if (notify_vf)
225 		i40e_vc_notify_vf_reset(vf);
226 
	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give it a reasonable amount of time and print a message if
	 * we fail to ensure a reset.
	 */
232 	for (i = 0; i < 20; i++) {
		/* If the PF is releasing its VFs, resetting a VF is
		 * impossible, so bail out.
		 */
236 		if (test_bit(__I40E_VFS_RELEASING, pf->state))
237 			return;
238 		if (i40e_reset_vf(vf, false))
239 			return;
240 		usleep_range(10000, 20000);
241 	}
242 
243 	if (notify_vf)
244 		dev_warn(&vf->pf->pdev->dev,
245 			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
246 			 vf->vf_id);
247 	else
248 		dev_dbg(&vf->pf->pdev->dev,
249 			"Failed to initiate reset for VF %d after 200 milliseconds\n",
250 			vf->vf_id);
251 }
252 
253 /**
254  * i40e_vc_isvalid_vsi_id
255  * @vf: pointer to the VF info
256  * @vsi_id: VF relative VSI id
257  *
258  * check for the valid VSI id
259  **/
260 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
261 {
262 	struct i40e_pf *pf = vf->pf;
263 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
264 
265 	return (vsi && (vsi->vf_id == vf->vf_id));
266 }
267 
268 /**
269  * i40e_vc_isvalid_queue_id
270  * @vf: pointer to the VF info
271  * @vsi_id: vsi id
272  * @qid: vsi relative queue id
273  *
274  * check for the valid queue id
275  **/
276 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
277 					    u16 qid)
278 {
279 	struct i40e_pf *pf = vf->pf;
280 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
281 
282 	return (vsi && (qid < vsi->alloc_queue_pairs));
283 }
284 
285 /**
286  * i40e_vc_isvalid_vector_id
287  * @vf: pointer to the VF info
288  * @vector_id: VF relative vector id
289  *
290  * check for the valid vector id
291  **/
292 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
293 {
294 	struct i40e_pf *pf = vf->pf;
295 
296 	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
297 }
298 
299 /***********************vf resource mgmt routines*****************/
300 
301 /**
302  * i40e_vc_get_pf_queue_id
303  * @vf: pointer to the VF info
304  * @vsi_id: id of VSI as provided by the FW
305  * @vsi_queue_id: vsi relative queue id
306  *
307  * return PF relative queue id
308  **/
309 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
310 				   u8 vsi_queue_id)
311 {
312 	struct i40e_pf *pf = vf->pf;
313 	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
314 	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
315 
316 	if (!vsi)
317 		return pf_queue_id;
318 
319 	if (le16_to_cpu(vsi->info.mapping_flags) &
320 	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
321 		pf_queue_id =
322 			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
323 	else
324 		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
325 			      vsi_queue_id;
326 
327 	return pf_queue_id;
328 }
329 
330 /**
331  * i40e_get_real_pf_qid
332  * @vf: pointer to the VF info
333  * @vsi_id: vsi id
334  * @queue_id: queue number
335  *
336  * wrapper function to get pf_queue_id handling ADq code as well
337  **/
338 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
339 {
340 	int i;
341 
	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (1 to 16) as its
		 * own, they may actually belong to different VSIs (up to 4).
		 * We need to find out which queues belong to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* make the queue id relative to the VSI that
			 * backs it
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}
358 
359 	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
360 }
361 
362 /**
363  * i40e_config_irq_link_list
364  * @vf: pointer to the VF info
365  * @vsi_id: id of VSI as given by the FW
366  * @vecmap: irq map info
367  *
368  * configure irq link list from the map
369  **/
370 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
371 				      struct virtchnl_vector_map *vecmap)
372 {
373 	unsigned long linklistmap = 0, tempmap;
374 	struct i40e_pf *pf = vf->pf;
375 	struct i40e_hw *hw = &pf->hw;
376 	u16 vsi_queue_id, pf_queue_id;
377 	enum i40e_queue_type qtype;
378 	u16 next_q, vector_id, size;
379 	u32 reg, reg_idx;
380 	u16 itr_idx = 0;
381 
382 	vector_id = vecmap->vector_id;
	/* Set up the list head: vector 0 uses the dedicated LNKLST0
	 * register, while vectors 1..N-1 use LNKLSTN, which is laid out in
	 * per-VF blocks of (num_msix_vectors_vf - 1) entries.
	 */
384 	if (0 == vector_id)
385 		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
386 	else
387 		reg_idx = I40E_VPINT_LNKLSTN(
388 		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
389 		     (vector_id - 1));
390 
391 	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
392 		/* Special case - No queues mapped on this vector */
393 		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
394 		goto irq_list_done;
395 	}
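	/* Build a combined bitmap in which each VSI queue pair occupies
	 * I40E_VIRTCHNL_SUPPORTED_QTYPES consecutive bits: an even bit for
	 * the Rx queue and the following odd bit for the Tx queue. Walking
	 * the set bits in order yields the linked list of queues to program
	 * for this vector.
	 */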
396 	tempmap = vecmap->rxq_map;
397 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
398 		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
399 				    vsi_queue_id));
400 	}
401 
402 	tempmap = vecmap->txq_map;
403 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
404 		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
405 				     vsi_queue_id + 1));
406 	}
407 
408 	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
409 	next_q = find_first_bit(&linklistmap, size);
410 	if (unlikely(next_q == size))
411 		goto irq_list_done;
412 
413 	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
414 	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
415 	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
416 	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
417 
418 	wr32(hw, reg_idx, reg);
419 
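	/* The head register now points at the first queue. Each queue's
	 * QINT_RQCTL/QINT_TQCTL register chains to the next queue through
	 * its NEXTQ_INDX/NEXTQ_TYPE fields, and the final entry is
	 * terminated with I40E_QUEUE_END_OF_LIST.
	 */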
420 	while (next_q < size) {
421 		switch (qtype) {
422 		case I40E_QUEUE_TYPE_RX:
423 			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
424 			itr_idx = vecmap->rxitr_idx;
425 			break;
426 		case I40E_QUEUE_TYPE_TX:
427 			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
428 			itr_idx = vecmap->txitr_idx;
429 			break;
430 		default:
431 			break;
432 		}
433 
434 		next_q = find_next_bit(&linklistmap, size, next_q + 1);
435 		if (next_q < size) {
436 			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
437 			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
438 			pf_queue_id = i40e_get_real_pf_qid(vf,
439 							   vsi_id,
440 							   vsi_queue_id);
441 		} else {
442 			pf_queue_id = I40E_QUEUE_END_OF_LIST;
443 			qtype = 0;
444 		}
445 
446 		/* format for the RQCTL & TQCTL regs is same */
447 		reg = (vector_id) |
448 		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
449 		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
450 		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
451 		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
452 		wr32(hw, reg_idx, reg);
453 	}
454 
	/* If the VF is running in polling mode and uses interrupt zero,
	 * we need to disable auto-masking of interrupt zero for VFs.
	 */
458 	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
459 	    (vector_id == 0)) {
460 		reg = rd32(hw, I40E_GLINT_CTL);
461 		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
462 			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
463 			wr32(hw, I40E_GLINT_CTL, reg);
464 		}
465 	}
466 
467 irq_list_done:
468 	i40e_flush(hw);
469 }
470 
471 /**
472  * i40e_release_rdma_qvlist
 * @vf: pointer to the VF
 *
 * Release the RDMA queue-vector list, unlinking each CEQ from its
 * vector's interrupt linked list.
475  **/
476 static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
477 {
478 	struct i40e_pf *pf = vf->pf;
479 	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
480 	u32 msix_vf;
481 	u32 i;
482 
483 	if (!vf->qvlist_info)
484 		return;
485 
486 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
487 	for (i = 0; i < qvlist_info->num_vectors; i++) {
488 		struct virtchnl_rdma_qv_info *qv_info;
489 		u32 next_q_index, next_q_type;
490 		struct i40e_hw *hw = &pf->hw;
491 		u32 v_idx, reg_idx, reg;
492 
493 		qv_info = &qvlist_info->qv_info[i];
494 		if (!qv_info)
495 			continue;
496 		v_idx = qv_info->v_idx;
497 		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
498 			/* Figure out the queue after CEQ and make that the
499 			 * first queue.
500 			 */
501 			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
502 			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
503 			next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK,
504 						 reg);
505 			next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK,
506 						reg);
507 
508 			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
509 			reg = (next_q_index &
510 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
511 			       (next_q_type <<
512 			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
513 
514 			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
515 		}
516 	}
517 	kfree(vf->qvlist_info);
518 	vf->qvlist_info = NULL;
519 }
520 
521 /**
522  * i40e_config_rdma_qvlist
523  * @vf: pointer to the VF info
524  * @qvlist_info: queue and vector list
525  *
526  * Return 0 on success or < 0 on error
527  **/
528 static int
529 i40e_config_rdma_qvlist(struct i40e_vf *vf,
530 			struct virtchnl_rdma_qvlist_info *qvlist_info)
531 {
532 	struct i40e_pf *pf = vf->pf;
533 	struct i40e_hw *hw = &pf->hw;
534 	struct virtchnl_rdma_qv_info *qv_info;
535 	u32 v_idx, i, reg_idx, reg;
536 	u32 next_q_idx, next_q_type;
537 	size_t size;
538 	u32 msix_vf;
539 	int ret = 0;
540 
541 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
542 
543 	if (qvlist_info->num_vectors > msix_vf) {
544 		dev_warn(&pf->pdev->dev,
545 			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
546 			 qvlist_info->num_vectors,
547 			 msix_vf);
548 		ret = -EINVAL;
549 		goto err_out;
550 	}
551 
552 	kfree(vf->qvlist_info);
553 	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
554 				    qvlist_info->num_vectors);
555 	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
556 	if (!vf->qvlist_info) {
557 		ret = -ENOMEM;
558 		goto err_out;
559 	}
560 	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
561 
562 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
563 	for (i = 0; i < qvlist_info->num_vectors; i++) {
564 		qv_info = &qvlist_info->qv_info[i];
565 		if (!qv_info)
566 			continue;
567 
568 		/* Validate vector id belongs to this vf */
569 		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
570 			ret = -EINVAL;
571 			goto err_free;
572 		}
573 
574 		v_idx = qv_info->v_idx;
575 
576 		vf->qvlist_info->qv_info[i] = *qv_info;
577 
578 		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
579 		/* We might be sharing the interrupt, so get the first queue
580 		 * index and type, push it down the list by adding the new
581 		 * queue on top. Also link it with the new queue in CEQCTL.
582 		 */
583 		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
584 		next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK,
585 				       reg);
586 		next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK,
587 					reg);
588 
589 		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
590 			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
591 			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
592 			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
593 			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
594 			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
595 			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
596 			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
597 
598 			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
599 			reg = (qv_info->ceq_idx &
600 			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
601 			       (I40E_QUEUE_TYPE_PE_CEQ <<
602 			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
603 			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
604 		}
605 
606 		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
607 			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
608 			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
609 			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
610 
611 			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
612 		}
613 	}
614 
615 	return 0;
616 err_free:
617 	kfree(vf->qvlist_info);
618 	vf->qvlist_info = NULL;
619 err_out:
620 	return ret;
621 }
622 
623 /**
624  * i40e_config_vsi_tx_queue
625  * @vf: pointer to the VF info
626  * @vsi_id: id of VSI as provided by the FW
627  * @vsi_queue_id: vsi relative queue index
628  * @info: config. info
629  *
630  * configure tx queue
631  **/
632 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
633 				    u16 vsi_queue_id,
634 				    struct virtchnl_txq_info *info)
635 {
636 	struct i40e_pf *pf = vf->pf;
637 	struct i40e_hw *hw = &pf->hw;
638 	struct i40e_hmc_obj_txq tx_ctx;
639 	struct i40e_vsi *vsi;
640 	u16 pf_queue_id;
641 	u32 qtx_ctl;
642 	int ret = 0;
643 
644 	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
645 		ret = -ENOENT;
646 		goto error_context;
647 	}
648 	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
649 	vsi = i40e_find_vsi_from_id(pf, vsi_id);
650 	if (!vsi) {
651 		ret = -ENOENT;
652 		goto error_context;
653 	}
654 
655 	/* clear the context structure first */
656 	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
657 
658 	/* only set the required fields */
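	/* the HMC context stores the ring base address in 128-byte units */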
659 	tx_ctx.base = info->dma_ring_addr / 128;
660 	tx_ctx.qlen = info->ring_len;
661 	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
662 	tx_ctx.rdylist_act = 0;
663 	tx_ctx.head_wb_ena = info->headwb_enabled;
664 	tx_ctx.head_wb_addr = info->dma_headwb_addr;
665 
666 	/* clear the context in the HMC */
667 	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
668 	if (ret) {
669 		dev_err(&pf->pdev->dev,
670 			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
671 			pf_queue_id, ret);
672 		ret = -ENOENT;
673 		goto error_context;
674 	}
675 
676 	/* set the context in the HMC */
677 	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
678 	if (ret) {
679 		dev_err(&pf->pdev->dev,
680 			"Failed to set VF LAN Tx queue context %d error: %d\n",
681 			pf_queue_id, ret);
682 		ret = -ENOENT;
683 		goto error_context;
684 	}
685 
686 	/* associate this queue with the PCI VF function */
687 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
688 	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
689 	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
690 			      vf->vf_id + hw->func_caps.vf_base_id);
691 	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
692 	i40e_flush(hw);
693 
694 error_context:
695 	return ret;
696 }
697 
698 /**
699  * i40e_config_vsi_rx_queue
700  * @vf: pointer to the VF info
701  * @vsi_id: id of VSI  as provided by the FW
702  * @vsi_queue_id: vsi relative queue index
703  * @info: config. info
704  *
705  * configure rx queue
706  **/
707 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
708 				    u16 vsi_queue_id,
709 				    struct virtchnl_rxq_info *info)
710 {
711 	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
712 	struct i40e_pf *pf = vf->pf;
713 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
714 	struct i40e_hw *hw = &pf->hw;
715 	struct i40e_hmc_obj_rxq rx_ctx;
716 	int ret = 0;
717 
718 	/* clear the context structure first */
719 	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
720 
721 	/* only set the required fields */
722 	rx_ctx.base = info->dma_ring_addr / 128;
723 	rx_ctx.qlen = info->ring_len;
724 
725 	if (info->splithdr_enabled) {
726 		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
727 				  I40E_RX_SPLIT_IP      |
728 				  I40E_RX_SPLIT_TCP_UDP |
729 				  I40E_RX_SPLIT_SCTP;
730 		/* header length validation */
731 		if (info->hdr_size > ((2 * 1024) - 64)) {
732 			ret = -EINVAL;
733 			goto error_param;
734 		}
735 		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
736 
737 		/* set split mode 10b */
738 		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
739 	}
740 
741 	/* databuffer length validation */
742 	if (info->databuffer_size > ((16 * 1024) - 128)) {
743 		ret = -EINVAL;
744 		goto error_param;
745 	}
746 	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
747 
748 	/* max pkt. length validation */
749 	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
750 		ret = -EINVAL;
751 		goto error_param;
752 	}
753 	rx_ctx.rxmax = info->max_pkt_size;
754 
755 	/* if port VLAN is configured increase the max packet size */
756 	if (vsi->info.pvid)
757 		rx_ctx.rxmax += VLAN_HLEN;
758 
759 	/* enable 32bytes desc always */
760 	rx_ctx.dsize = 1;
761 
762 	/* default values */
763 	rx_ctx.lrxqthresh = 1;
764 	rx_ctx.crcstrip = 1;
765 	rx_ctx.prefena = 1;
766 	rx_ctx.l2tsel = 1;
767 
768 	/* clear the context in the HMC */
769 	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
770 	if (ret) {
771 		dev_err(&pf->pdev->dev,
772 			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
773 			pf_queue_id, ret);
774 		ret = -ENOENT;
775 		goto error_param;
776 	}
777 
778 	/* set the context in the HMC */
779 	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
780 	if (ret) {
781 		dev_err(&pf->pdev->dev,
782 			"Failed to set VF LAN Rx queue context %d error: %d\n",
783 			pf_queue_id, ret);
784 		ret = -ENOENT;
785 		goto error_param;
786 	}
787 
788 error_param:
789 	return ret;
790 }
791 
792 /**
793  * i40e_alloc_vsi_res
794  * @vf: pointer to the VF info
795  * @idx: VSI index, applies only for ADq mode, zero otherwise
796  *
797  * alloc VF vsi context & resources
798  **/
799 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
800 {
801 	struct i40e_mac_filter *f = NULL;
802 	struct i40e_pf *pf = vf->pf;
803 	struct i40e_vsi *vsi;
804 	u64 max_tx_rate = 0;
805 	int ret = 0;
806 
807 	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
808 			     vf->vf_id);
809 
810 	if (!vsi) {
811 		dev_err(&pf->pdev->dev,
812 			"add vsi failed for VF %d, aq_err %d\n",
813 			vf->vf_id, pf->hw.aq.asq_last_status);
814 		ret = -ENOENT;
815 		goto error_alloc_vsi_res;
816 	}
817 
818 	if (!idx) {
819 		u64 hena = i40e_pf_get_default_rss_hena(pf);
820 		u8 broadcast[ETH_ALEN];
821 
822 		vf->lan_vsi_idx = vsi->idx;
823 		vf->lan_vsi_id = vsi->id;
		/* If a port VLAN was configured and the VF driver was then
		 * removed, the VSI's port VLAN configuration was destroyed.
		 * Check for a port VLAN and restore the VSI configuration
		 * if needed.
		 */
830 		if (vf->port_vlan_id)
831 			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
832 
833 		spin_lock_bh(&vsi->mac_filter_hash_lock);
834 		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
835 			f = i40e_add_mac_filter(vsi,
836 						vf->default_lan_addr.addr);
837 			if (!f)
838 				dev_info(&pf->pdev->dev,
839 					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
841 		}
842 		eth_broadcast_addr(broadcast);
843 		f = i40e_add_mac_filter(vsi, broadcast);
844 		if (!f)
845 			dev_info(&pf->pdev->dev,
846 				 "Could not allocate VF broadcast filter\n");
847 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
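		/* the 64-bit RSS hash-enable word is split across two 32-bit
		 * VFQF_HENA1 registers
		 */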
848 		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
849 		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
850 		/* program mac filter only for VF VSI */
851 		ret = i40e_sync_vsi_filters(vsi);
852 		if (ret)
853 			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
854 	}
855 
	/* for ADq, store the VSI index and id; don't apply the MAC filter */
857 	if (vf->adq_enabled) {
858 		vf->ch[idx].vsi_idx = vsi->idx;
859 		vf->ch[idx].vsi_id = vsi->id;
860 	}
861 
	/* Set VF bandwidth if specified */
	if (vf->tx_rate)
		max_tx_rate = vf->tx_rate;
	else if (vf->ch[idx].max_tx_rate)
		max_tx_rate = vf->ch[idx].max_tx_rate;
868 
869 	if (max_tx_rate) {
870 		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
871 		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
872 						  max_tx_rate, 0, NULL);
873 		if (ret)
874 			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
875 				vf->vf_id, ret);
876 	}
877 
878 error_alloc_vsi_res:
879 	return ret;
880 }
881 
882 /**
883  * i40e_map_pf_queues_to_vsi
884  * @vf: pointer to the VF info
885  *
886  * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
887  * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
888  **/
889 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
890 {
891 	struct i40e_pf *pf = vf->pf;
892 	struct i40e_hw *hw = &pf->hw;
893 	u32 reg, num_tc = 1; /* VF has at least one traffic class */
894 	u16 vsi_id, qps;
895 	int i, j;
896 
897 	if (vf->adq_enabled)
898 		num_tc = vf->num_tc;
899 
900 	for (i = 0; i < num_tc; i++) {
901 		if (vf->adq_enabled) {
902 			qps = vf->ch[i].num_qps;
903 			vsi_id =  vf->ch[i].vsi_id;
904 		} else {
905 			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
906 			vsi_id = vf->lan_vsi_id;
907 		}
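		/* Each VSILAN_QTABLE register carries two PF queue ids, one
		 * per 16-bit half; 0x7FF in a slot marks it as unused (end
		 * of list).
		 */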
908 
909 		for (j = 0; j < 7; j++) {
910 			if (j * 2 >= qps) {
911 				/* end of list */
912 				reg = 0x07FF07FF;
913 			} else {
914 				u16 qid = i40e_vc_get_pf_queue_id(vf,
915 								  vsi_id,
916 								  j * 2);
917 				reg = qid;
918 				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
919 							      (j * 2) + 1);
920 				reg |= qid << 16;
921 			}
922 			i40e_write_rx_ctl(hw,
923 					  I40E_VSILAN_QTABLE(j, vsi_id),
924 					  reg);
925 		}
926 	}
927 }
928 
929 /**
930  * i40e_map_pf_to_vf_queues
931  * @vf: pointer to the VF info
932  *
933  * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
934  * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
935  **/
936 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
937 {
938 	struct i40e_pf *pf = vf->pf;
939 	struct i40e_hw *hw = &pf->hw;
940 	u32 reg, total_qps = 0;
941 	u32 qps, num_tc = 1; /* VF has at least one traffic class */
942 	u16 vsi_id, qid;
943 	int i, j;
944 
945 	if (vf->adq_enabled)
946 		num_tc = vf->num_tc;
947 
948 	for (i = 0; i < num_tc; i++) {
949 		if (vf->adq_enabled) {
950 			qps = vf->ch[i].num_qps;
951 			vsi_id =  vf->ch[i].vsi_id;
952 		} else {
953 			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
954 			vsi_id = vf->lan_vsi_id;
955 		}
956 
957 		for (j = 0; j < qps; j++) {
958 			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
959 
960 			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
961 			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
962 			     reg);
963 			total_qps++;
964 		}
965 	}
966 }
967 
968 /**
969  * i40e_enable_vf_mappings
970  * @vf: pointer to the VF info
971  *
972  * enable VF mappings
973  **/
974 static void i40e_enable_vf_mappings(struct i40e_vf *vf)
975 {
976 	struct i40e_pf *pf = vf->pf;
977 	struct i40e_hw *hw = &pf->hw;
978 	u32 reg;
979 
980 	/* Tell the hardware we're using noncontiguous mapping. HW requires
981 	 * that VF queues be mapped using this method, even when they are
982 	 * contiguous in real life
983 	 */
984 	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
985 			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
986 
987 	/* enable VF vplan_qtable mappings */
988 	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
989 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
990 
991 	i40e_map_pf_to_vf_queues(vf);
992 	i40e_map_pf_queues_to_vsi(vf);
993 
994 	i40e_flush(hw);
995 }
996 
997 /**
998  * i40e_disable_vf_mappings
999  * @vf: pointer to the VF info
1000  *
1001  * disable VF mappings
1002  **/
1003 static void i40e_disable_vf_mappings(struct i40e_vf *vf)
1004 {
1005 	struct i40e_pf *pf = vf->pf;
1006 	struct i40e_hw *hw = &pf->hw;
1007 	int i;
1008 
1009 	/* disable qp mappings */
1010 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
1011 	for (i = 0; i < I40E_MAX_VSI_QP; i++)
1012 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
1013 		     I40E_QUEUE_END_OF_LIST);
1014 	i40e_flush(hw);
1015 }
1016 
1017 /**
1018  * i40e_free_vf_res
1019  * @vf: pointer to the VF info
1020  *
1021  * free VF resources
1022  **/
1023 static void i40e_free_vf_res(struct i40e_vf *vf)
1024 {
1025 	struct i40e_pf *pf = vf->pf;
1026 	struct i40e_hw *hw = &pf->hw;
1027 	u32 reg_idx, reg;
1028 	int i, j, msix_vf;
1029 
1030 	/* Start by disabling VF's configuration API to prevent the OS from
1031 	 * accessing the VF's VSI after it's freed / invalidated.
1032 	 */
1033 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1034 
	/* It's possible the VF had requested more queues than the default so
1036 	 * do the accounting here when we're about to free them.
1037 	 */
1038 	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
1039 		pf->queues_left += vf->num_queue_pairs -
1040 				   I40E_DEFAULT_QUEUES_PER_VF;
1041 	}
1042 
1043 	/* free vsi & disconnect it from the parent uplink */
1044 	if (vf->lan_vsi_idx) {
1045 		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
1046 		vf->lan_vsi_idx = 0;
1047 		vf->lan_vsi_id = 0;
1048 	}
1049 
1050 	/* do the accounting and remove additional ADq VSI's */
1051 	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
1052 		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 has already been released, so
			 * don't release it again; only clear its values in
			 * the structure variables.
			 */
1057 			if (j)
1058 				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
1059 			vf->ch[j].vsi_idx = 0;
1060 			vf->ch[j].vsi_id = 0;
1061 		}
1062 	}
1063 	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
1064 
1065 	/* disable interrupts so the VF starts in a known state */
1066 	for (i = 0; i < msix_vf; i++) {
1067 		/* format is same for both registers */
1068 		if (0 == i)
1069 			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
1070 		else
1071 			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
1072 						      (vf->vf_id))
1073 						     + (i - 1));
1074 		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1075 		i40e_flush(hw);
1076 	}
1077 
1078 	/* clear the irq settings */
1079 	for (i = 0; i < msix_vf; i++) {
1080 		/* format is same for both registers */
1081 		if (0 == i)
1082 			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
1083 		else
1084 			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
1085 						      (vf->vf_id))
1086 						     + (i - 1));
1087 		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
1088 		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
1089 		wr32(hw, reg_idx, reg);
1090 		i40e_flush(hw);
1091 	}
1092 	/* reset some of the state variables keeping track of the resources */
1093 	vf->num_queue_pairs = 0;
1094 	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1095 	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1096 }
1097 
1098 /**
1099  * i40e_alloc_vf_res
1100  * @vf: pointer to the VF info
1101  *
1102  * allocate VF resources
1103  **/
1104 static int i40e_alloc_vf_res(struct i40e_vf *vf)
1105 {
1106 	struct i40e_pf *pf = vf->pf;
1107 	int total_queue_pairs = 0;
1108 	int ret, idx;
1109 
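	/* A VF may claim its default queue allotment (already accounted for
	 * in queues_left) plus whatever is still unassigned in HW.
	 */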
1110 	if (vf->num_req_queues &&
1111 	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1112 		pf->num_vf_qps = vf->num_req_queues;
1113 	else
1114 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1115 
1116 	/* allocate hw vsi context & associated resources */
1117 	ret = i40e_alloc_vsi_res(vf, 0);
1118 	if (ret)
1119 		goto error_alloc;
1120 	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1121 
1122 	/* allocate additional VSIs based on tc information for ADq */
1123 	if (vf->adq_enabled) {
1124 		if (pf->queues_left >=
1125 		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1126 			/* TC 0 always belongs to VF VSI */
1127 			for (idx = 1; idx < vf->num_tc; idx++) {
1128 				ret = i40e_alloc_vsi_res(vf, idx);
1129 				if (ret)
1130 					goto error_alloc;
1131 			}
1132 			/* send correct number of queues */
1133 			total_queue_pairs = I40E_MAX_VF_QUEUES;
1134 		} else {
1135 			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1136 				 vf->vf_id);
1137 			vf->adq_enabled = false;
1138 		}
1139 	}
1140 
1141 	/* We account for each VF to get a default number of queue pairs.  If
1142 	 * the VF has now requested more, we need to account for that to make
1143 	 * certain we never request more queues than we actually have left in
1144 	 * HW.
1145 	 */
1146 	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1147 		pf->queues_left -=
1148 			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1149 
1150 	if (vf->trusted)
1151 		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1152 	else
1153 		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1154 
1155 	/* store the total qps number for the runtime
1156 	 * VF req validation
1157 	 */
1158 	vf->num_queue_pairs = total_queue_pairs;
1159 
1160 	/* VF is now completely initialized */
1161 	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1162 
1163 error_alloc:
1164 	if (ret)
1165 		i40e_free_vf_res(vf);
1166 
1167 	return ret;
1168 }
1169 
1170 #define VF_DEVICE_STATUS 0xAA
1171 #define VF_TRANS_PENDING_MASK 0x20
1172 /**
1173  * i40e_quiesce_vf_pci
1174  * @vf: pointer to the VF structure
1175  *
1176  * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1177  * if the transactions never clear.
1178  **/
1179 static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1180 {
1181 	struct i40e_pf *pf = vf->pf;
1182 	struct i40e_hw *hw = &pf->hw;
1183 	int vf_abs_id, i;
1184 	u32 reg;
1185 
1186 	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1187 
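	/* Read the VF's PCI config space through the PF_PCI_CIAA/CIAD
	 * indirect interface: write the config offset (VF_DEVICE_STATUS)
	 * and VF number to CIAA, then poll the Transactions Pending bit
	 * (VF_TRANS_PENDING_MASK) through CIAD until it clears.
	 */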
1188 	wr32(hw, I40E_PF_PCI_CIAA,
1189 	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1190 	for (i = 0; i < 100; i++) {
1191 		reg = rd32(hw, I40E_PF_PCI_CIAD);
1192 		if ((reg & VF_TRANS_PENDING_MASK) == 0)
1193 			return 0;
1194 		udelay(1);
1195 	}
1196 	return -EIO;
1197 }
1198 
1199 /**
1200  * __i40e_getnum_vf_vsi_vlan_filters
1201  * @vsi: pointer to the vsi
1202  *
1203  * called to get the number of VLANs offloaded on this VF
1204  **/
1205 static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1206 {
1207 	struct i40e_mac_filter *f;
1208 	u16 num_vlans = 0, bkt;
1209 
1210 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1211 		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1212 			num_vlans++;
1213 	}
1214 
1215 	return num_vlans;
1216 }
1217 
1218 /**
1219  * i40e_getnum_vf_vsi_vlan_filters
1220  * @vsi: pointer to the vsi
1221  *
1222  * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
1223  **/
1224 static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1225 {
1226 	int num_vlans;
1227 
1228 	spin_lock_bh(&vsi->mac_filter_hash_lock);
1229 	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1230 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1231 
1232 	return num_vlans;
1233 }
1234 
1235 /**
1236  * i40e_get_vlan_list_sync
1237  * @vsi: pointer to the VSI
1238  * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
1239  * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
1240  *             This array is allocated here, but has to be freed in caller.
1241  *
1242  * Called to get number of VLANs and VLAN list present in mac_filter_hash.
1243  **/
1244 static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
1245 				    s16 **vlan_list)
1246 {
1247 	struct i40e_mac_filter *f;
1248 	int i = 0;
1249 	int bkt;
1250 
1251 	spin_lock_bh(&vsi->mac_filter_hash_lock);
1252 	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
1253 	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
1254 	if (!(*vlan_list))
1255 		goto err;
1256 
1257 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1258 		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1259 			continue;
1260 		(*vlan_list)[i++] = f->vlan;
1261 	}
1262 err:
1263 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
1264 }
1265 
1266 /**
1267  * i40e_set_vsi_promisc
1268  * @vf: pointer to the VF struct
1269  * @seid: VSI number
1270  * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
1271  *                for a given VLAN
1272  * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
1273  *                  for a given VLAN
1274  * @vl: List of VLANs - apply filter for given VLANs
1275  * @num_vlans: Number of elements in @vl
1276  **/
1277 static int
1278 i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
1279 		     bool unicast_enable, s16 *vl, u16 num_vlans)
1280 {
1281 	struct i40e_pf *pf = vf->pf;
1282 	struct i40e_hw *hw = &pf->hw;
1283 	int aq_ret, aq_tmp = 0;
1284 	int i;
1285 
1286 	/* No VLAN to set promisc on, set on VSI */
1287 	if (!num_vlans || !vl) {
1288 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
1289 							       multi_enable,
1290 							       NULL);
1291 		if (aq_ret) {
1292 			int aq_err = pf->hw.aq.asq_last_status;
1293 
1294 			dev_err(&pf->pdev->dev,
1295 				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1296 				vf->vf_id,
1297 				ERR_PTR(aq_ret),
1298 				i40e_aq_str(&pf->hw, aq_err));
1299 
1300 			return aq_ret;
1301 		}
1302 
1303 		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
1304 							     unicast_enable,
1305 							     NULL, true);
1306 
1307 		if (aq_ret) {
1308 			int aq_err = pf->hw.aq.asq_last_status;
1309 
1310 			dev_err(&pf->pdev->dev,
1311 				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1312 				vf->vf_id,
1313 				ERR_PTR(aq_ret),
1314 				i40e_aq_str(&pf->hw, aq_err));
1315 		}
1316 
1317 		return aq_ret;
1318 	}
1319 
1320 	for (i = 0; i < num_vlans; i++) {
1321 		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
1322 							    multi_enable,
1323 							    vl[i], NULL);
1324 		if (aq_ret) {
1325 			int aq_err = pf->hw.aq.asq_last_status;
1326 
1327 			dev_err(&pf->pdev->dev,
1328 				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
1329 				vf->vf_id,
1330 				ERR_PTR(aq_ret),
1331 				i40e_aq_str(&pf->hw, aq_err));
1332 
1333 			if (!aq_tmp)
1334 				aq_tmp = aq_ret;
1335 		}
1336 
1337 		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
1338 							    unicast_enable,
1339 							    vl[i], NULL);
1340 		if (aq_ret) {
1341 			int aq_err = pf->hw.aq.asq_last_status;
1342 
1343 			dev_err(&pf->pdev->dev,
1344 				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
1345 				vf->vf_id,
1346 				ERR_PTR(aq_ret),
1347 				i40e_aq_str(&pf->hw, aq_err));
1348 
1349 			if (!aq_tmp)
1350 				aq_tmp = aq_ret;
1351 		}
1352 	}
1353 
1354 	if (aq_tmp)
1355 		aq_ret = aq_tmp;
1356 
1357 	return aq_ret;
1358 }
1359 
1360 /**
1361  * i40e_config_vf_promiscuous_mode
1362  * @vf: pointer to the VF info
1363  * @vsi_id: VSI id
1364  * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
1365  * @alluni: set MAC L2 layer unicast promiscuous enable/disable
1366  *
1367  * Called from the VF to configure the promiscuous mode of
1368  * VF vsis and from the VF reset path to reset promiscuous mode.
1369  **/
1370 static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1371 					   u16 vsi_id,
1372 					   bool allmulti,
1373 					   bool alluni)
1374 {
1375 	struct i40e_pf *pf = vf->pf;
1376 	struct i40e_vsi *vsi;
1377 	int aq_ret = 0;
1378 	u16 num_vlans;
1379 	s16 *vl;
1380 
1381 	vsi = i40e_find_vsi_from_id(pf, vsi_id);
1382 	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1383 		return -EINVAL;
1384 
1385 	if (vf->port_vlan_id) {
1386 		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
1387 					      alluni, &vf->port_vlan_id, 1);
1388 		return aq_ret;
1389 	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1390 		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
1391 
1392 		if (!vl)
1393 			return -ENOMEM;
1394 
1395 		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1396 					      vl, num_vlans);
1397 		kfree(vl);
1398 		return aq_ret;
1399 	}
1400 
1401 	/* no VLANs to set on, set on VSI */
1402 	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1403 				      NULL, 0);
1404 	return aq_ret;
1405 }
1406 
1407 /**
1408  * i40e_sync_vfr_reset
1409  * @hw: pointer to hw struct
1410  * @vf_id: VF identifier
1411  *
 * Before triggering a hardware reset, we need to know that no other process
 * has reserved the hardware for any reset operations. This check is done by
1414  * examining the status of the RSTAT1 register used to signal the reset.
1415  **/
1416 static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
1417 {
1418 	u32 reg;
1419 	int i;
1420 
1421 	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
1422 		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
1423 			   I40E_VFINT_ICR0_ADMINQ_MASK;
1424 		if (reg)
1425 			return 0;
1426 
1427 		usleep_range(100, 200);
1428 	}
1429 
1430 	return -EAGAIN;
1431 }
1432 
1433 /**
1434  * i40e_trigger_vf_reset
1435  * @vf: pointer to the VF structure
1436  * @flr: VFLR was issued or not
1437  *
1438  * Trigger hardware to start a reset for a particular VF. Expects the caller
1439  * to wait the proper amount of time to allow hardware to reset the VF before
1440  * it cleans up and restores VF functionality.
1441  **/
1442 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1443 {
1444 	struct i40e_pf *pf = vf->pf;
1445 	struct i40e_hw *hw = &pf->hw;
1446 	u32 reg, reg_idx, bit_idx;
1447 	bool vf_active;
1448 	u32 radq;
1449 
1450 	/* warn the VF */
1451 	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1452 
1453 	/* Disable VF's configuration API during reset. The flag is re-enabled
1454 	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1455 	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give any VF config functions that may still
	 * be running at this point some time to finish.
1458 	 */
1459 	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1460 
1461 	/* In the case of a VFLR, the HW has already reset the VF and we
1462 	 * just need to clean up, so don't hit the VFRTRIG register.
1463 	 */
1464 	if (!flr) {
1465 		/* Sync VFR reset before trigger next one */
1466 		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
1467 			    I40E_VFINT_ICR0_ADMINQ_MASK;
1468 		if (vf_active && !radq)
			/* wait for the VF driver to finish its reset */
			if (i40e_sync_vfr_reset(hw, vf->vf_id))
				dev_info(&pf->pdev->dev,
					 "Reset VF %d never finished\n",
					 vf->vf_id);
1474 
		/* Reset the VF using the VPGEN_VFRTRIG reg. This also sets
		 * the reset-in-progress state in the RSTAT1 register.
		 */
1478 		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1479 		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1480 		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1481 		i40e_flush(hw);
1482 	}
1483 	/* clear the VFLR bit in GLGEN_VFLRSTAT */
1484 	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1485 	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
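	/* GLGEN_VFLRSTAT is a bitmap with 32 VFs per register, indexed by
	 * absolute VF id; writing the bit clears the VFLR indication.
	 */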
1486 	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1487 	i40e_flush(hw);
1488 
1489 	if (i40e_quiesce_vf_pci(vf))
1490 		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1491 			vf->vf_id);
1492 }
1493 
1494 /**
1495  * i40e_cleanup_reset_vf
1496  * @vf: pointer to the VF structure
1497  *
1498  * Cleanup a VF after the hardware reset is finished. Expects the caller to
1499  * have verified whether the reset is finished properly, and ensure the
1500  * minimum amount of wait time has passed.
1501  **/
1502 static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1503 {
1504 	struct i40e_pf *pf = vf->pf;
1505 	struct i40e_hw *hw = &pf->hw;
1506 	u32 reg;
1507 
1508 	/* disable promisc modes in case they were enabled */
1509 	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1510 
1511 	/* free VF resources to begin resetting the VSI state */
1512 	i40e_free_vf_res(vf);
1513 
1514 	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1515 	 * By doing this we allow HW to access VF memory at any point. If we
1516 	 * did it any sooner, HW could access memory while it was being freed
1517 	 * in i40e_free_vf_res(), causing an IOMMU fault.
1518 	 *
1519 	 * On the other hand, this needs to be done ASAP, because the VF driver
1520 	 * is waiting for this to happen and may report a timeout. It's
1521 	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1522 	 * it.
1523 	 */
1524 	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1525 	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1526 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1527 
1528 	/* reallocate VF resources to finish resetting the VSI state */
1529 	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
1532 		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1533 		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1534 		/* Do not notify the client during VF init */
1535 		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1536 					&vf->vf_states))
1537 			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1538 		vf->num_vlan = 0;
1539 	}
1540 
1541 	/* Tell the VF driver the reset is done. This needs to be done only
1542 	 * after VF has been fully initialized, because the VF driver may
1543 	 * request resources immediately after setting this flag.
1544 	 */
1545 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1546 }
1547 
1548 /**
1549  * i40e_reset_vf
1550  * @vf: pointer to the VF structure
1551  * @flr: VFLR was issued or not
1552  *
1553  * Returns true if the VF is in reset, resets successfully, or resets
1554  * are disabled and false otherwise.
1555  **/
1556 bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1557 {
1558 	struct i40e_pf *pf = vf->pf;
1559 	struct i40e_hw *hw = &pf->hw;
1560 	bool rsd = false;
1561 	u32 reg;
1562 	int i;
1563 
1564 	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
1565 		return true;
1566 
1567 	/* Bail out if VFs are disabled. */
1568 	if (test_bit(__I40E_VF_DISABLE, pf->state))
1569 		return true;
1570 
1571 	/* If VF is being reset already we don't need to continue. */
1572 	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1573 		return true;
1574 
1575 	i40e_trigger_vf_reset(vf, flr);
1576 
1577 	/* poll VPGEN_VFRSTAT reg to make sure
1578 	 * that reset is complete
1579 	 */
1580 	for (i = 0; i < 10; i++) {
1581 		/* VF reset requires driver to first reset the VF and then
1582 		 * poll the status register to make sure that the reset
1583 		 * completed successfully. Due to internal HW FIFO flushes,
1584 		 * we must wait 10ms before the register will be valid.
1585 		 */
1586 		usleep_range(10000, 20000);
1587 		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1588 		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1589 			rsd = true;
1590 			break;
1591 		}
1592 	}
1593 
1594 	if (flr)
1595 		usleep_range(10000, 20000);
1596 
1597 	if (!rsd)
1598 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1599 			vf->vf_id);
1600 	usleep_range(10000, 20000);
1601 
1602 	/* On initial reset, we don't have any queues to disable */
1603 	if (vf->lan_vsi_idx != 0)
1604 		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1605 
1606 	i40e_cleanup_reset_vf(vf);
1607 
1608 	i40e_flush(hw);
1609 	usleep_range(20000, 40000);
1610 	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
1611 
1612 	return true;
1613 }
1614 
1615 /**
1616  * i40e_reset_all_vfs
1617  * @pf: pointer to the PF structure
1618  * @flr: VFLR was issued or not
1619  *
1620  * Reset all allocated VFs in one go. First, tell the hardware to reset each
1621  * VF, then do all the waiting in one chunk, and finally finish restoring each
1622  * VF after the wait. This is useful during PF routines which need to reset
1623  * all VFs, as otherwise it must perform these resets in a serialized fashion.
1624  *
1625  * Returns true if any VFs were reset, and false otherwise.
1626  **/
1627 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1628 {
1629 	struct i40e_hw *hw = &pf->hw;
1630 	struct i40e_vf *vf;
1631 	int i, v;
1632 	u32 reg;
1633 
1634 	/* If we don't have any VFs, then there is nothing to reset */
1635 	if (!pf->num_alloc_vfs)
1636 		return false;
1637 
1638 	/* If VFs have been disabled, there is no need to reset */
1639 	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1640 		return false;
1641 
1642 	/* Begin reset on all VFs at once */
1643 	for (v = 0; v < pf->num_alloc_vfs; v++) {
1644 		vf = &pf->vf[v];
1645 		/* If VF is being reset no need to trigger reset again */
1646 		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
1647 			i40e_trigger_vf_reset(&pf->vf[v], flr);
1648 	}
1649 
1650 	/* HW requires some time to make sure it can flush the FIFO for a VF
1651 	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1652 	 * sequence to make sure that it has completed. We'll keep track of
1653 	 * the VFs using a simple iterator that increments once that VF has
1654 	 * finished resetting.
1655 	 */
1656 	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1657 		usleep_range(10000, 20000);
1658 
		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
1662 		while (v < pf->num_alloc_vfs) {
1663 			vf = &pf->vf[v];
1664 			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
1665 				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1666 				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1667 					break;
1668 			}
1669 
1670 			/* If the current VF has finished resetting, move on
1671 			 * to the next VF in sequence.
1672 			 */
1673 			v++;
1674 		}
1675 	}
1676 
1677 	if (flr)
1678 		usleep_range(10000, 20000);
1679 
1680 	/* Display a warning if at least one VF didn't manage to reset in
1681 	 * time, but continue on with the operation.
1682 	 */
1683 	if (v < pf->num_alloc_vfs)
1684 		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1685 			pf->vf[v].vf_id);
1686 	usleep_range(10000, 20000);
1687 
1688 	/* Begin disabling all the rings associated with VFs, but do not wait
1689 	 * between each VF.
1690 	 */
1691 	for (v = 0; v < pf->num_alloc_vfs; v++) {
1692 		/* On initial reset, we don't have any queues to disable */
1693 		if (pf->vf[v].lan_vsi_idx == 0)
1694 			continue;
1695 
		/* If the VF is being reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
			continue;
1699 
1700 		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1701 	}
1702 
1703 	/* Now that we've notified HW to disable all of the VF rings, wait
1704 	 * until they finish.
1705 	 */
1706 	for (v = 0; v < pf->num_alloc_vfs; v++) {
1707 		/* On initial reset, we don't have any queues to disable */
1708 		if (pf->vf[v].lan_vsi_idx == 0)
1709 			continue;
1710 
		/* If the VF is being reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
			continue;
1714 
1715 		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1716 	}
1717 
	/* HW may need up to 50 ms to finish disabling the Rx queues. We
1719 	 * minimize the wait by delaying only once for all VFs.
1720 	 */
1721 	mdelay(50);
1722 
1723 	/* Finish the reset on each VF */
1724 	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* If the VF is being reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &pf->vf[v].vf_states))
			continue;
1728 
1729 		i40e_cleanup_reset_vf(&pf->vf[v]);
1730 	}
1731 
1732 	i40e_flush(hw);
1733 	usleep_range(20000, 40000);
1734 	clear_bit(__I40E_VF_DISABLE, pf->state);
1735 
1736 	return true;
1737 }
1738 
1739 /**
1740  * i40e_free_vfs
1741  * @pf: pointer to the PF structure
1742  *
1743  * free VF resources
1744  **/
1745 void i40e_free_vfs(struct i40e_pf *pf)
1746 {
1747 	struct i40e_hw *hw = &pf->hw;
1748 	u32 reg_idx, bit_idx;
1749 	int i, tmp, vf_id;
1750 
1751 	if (!pf->vf)
1752 		return;
1753 
1754 	set_bit(__I40E_VFS_RELEASING, pf->state);
1755 	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1756 		usleep_range(1000, 2000);
1757 
1758 	i40e_notify_client_of_vf_enable(pf, 0);
1759 
1760 	/* Disable IOV before freeing resources. This lets any VF drivers
1761 	 * running in the host get themselves cleaned up before we yank
1762 	 * the carpet out from underneath their feet.
1763 	 */
1764 	if (!pci_vfs_assigned(pf->pdev))
1765 		pci_disable_sriov(pf->pdev);
1766 	else
1767 		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1768 
1769 	/* Amortize wait time by stopping all VFs at the same time */
1770 	for (i = 0; i < pf->num_alloc_vfs; i++) {
1771 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1772 			continue;
1773 
1774 		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1775 	}
1776 
1777 	for (i = 0; i < pf->num_alloc_vfs; i++) {
1778 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1779 			continue;
1780 
1781 		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1782 	}
1783 
1784 	/* free up VF resources */
1785 	tmp = pf->num_alloc_vfs;
1786 	pf->num_alloc_vfs = 0;
1787 	for (i = 0; i < tmp; i++) {
1788 		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1789 			i40e_free_vf_res(&pf->vf[i]);
1790 		/* disable qp mappings */
1791 		i40e_disable_vf_mappings(&pf->vf[i]);
1792 	}
1793 
1794 	kfree(pf->vf);
1795 	pf->vf = NULL;
1796 
1797 	/* This check is for when the driver is unloaded while VFs are
1798 	 * assigned. Setting the number of VFs to 0 through sysfs is caught
1799 	 * before this function ever gets called.
1800 	 */
1801 	if (!pci_vfs_assigned(pf->pdev)) {
1802 		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1803 		 * work correctly when SR-IOV gets re-enabled.
1804 		 */
1805 		for (vf_id = 0; vf_id < tmp; vf_id++) {
1806 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1807 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1808 			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1809 		}
1810 	}
1811 	clear_bit(__I40E_VF_DISABLE, pf->state);
1812 	clear_bit(__I40E_VFS_RELEASING, pf->state);
1813 }
1814 
1815 #ifdef CONFIG_PCI_IOV
1816 /**
1817  * i40e_alloc_vfs
1818  * @pf: pointer to the PF structure
1819  * @num_alloc_vfs: number of VFs to allocate
1820  *
1821  * allocate VF resources
1822  **/
1823 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1824 {
1825 	struct i40e_vf *vfs;
1826 	int i, ret = 0;
1827 
1828 	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1829 	i40e_irq_dynamic_disable_icr0(pf);
1830 
1831 	/* Check to see if we're just allocating resources for extant VFs */
1832 	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1833 		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1834 		if (ret) {
1835 			clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
1836 			pf->num_alloc_vfs = 0;
1837 			goto err_iov;
1838 		}
1839 	}
1840 	/* allocate memory */
1841 	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1842 	if (!vfs) {
1843 		ret = -ENOMEM;
1844 		goto err_alloc;
1845 	}
1846 	pf->vf = vfs;
1847 
1848 	/* apply default profile */
1849 	for (i = 0; i < num_alloc_vfs; i++) {
1850 		vfs[i].pf = pf;
1851 		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1852 		vfs[i].vf_id = i;
1853 
1854 		/* assign default capabilities */
1855 		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1856 		vfs[i].spoofchk = true;
1857 
1858 		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1859 
1860 	}
1861 	pf->num_alloc_vfs = num_alloc_vfs;
1862 
1863 	/* VF resources get allocated during reset */
1864 	i40e_reset_all_vfs(pf, false);
1865 
1866 	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1867 
1868 err_alloc:
1869 	if (ret)
1870 		i40e_free_vfs(pf);
1871 err_iov:
1872 	/* Re-enable interrupt 0. */
1873 	i40e_irq_dynamic_enable_icr0(pf);
1874 	return ret;
1875 }
1876 
1877 #endif
1878 /**
1879  * i40e_pci_sriov_enable
1880  * @pdev: pointer to a pci_dev structure
1881  * @num_vfs: number of VFs to allocate
1882  *
1883  * Enable or change the number of VFs
1884  **/
1885 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1886 {
1887 #ifdef CONFIG_PCI_IOV
1888 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1889 	int pre_existing_vfs = pci_num_vf(pdev);
1890 	int err = 0;
1891 
1892 	if (test_bit(__I40E_TESTING, pf->state)) {
1893 		dev_warn(&pdev->dev,
1894 			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1895 		err = -EPERM;
1896 		goto err_out;
1897 	}
1898 
1899 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1900 		i40e_free_vfs(pf);
1901 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1902 		goto out;
1903 
1904 	if (num_vfs > pf->num_req_vfs) {
1905 		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1906 			 num_vfs, pf->num_req_vfs);
1907 		err = -EPERM;
1908 		goto err_out;
1909 	}
1910 
1911 	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1912 	err = i40e_alloc_vfs(pf, num_vfs);
1913 	if (err) {
1914 		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1915 		goto err_out;
1916 	}
1917 
1918 out:
1919 	return num_vfs;
1920 
1921 err_out:
1922 	return err;
1923 #endif
1924 	return 0;
1925 }
1926 
1927 /**
1928  * i40e_pci_sriov_configure
1929  * @pdev: pointer to a pci_dev structure
1930  * @num_vfs: number of VFs to allocate
1931  *
1932  * Enable or change the number of VFs. Called when the user updates the number
1933  * of VFs in sysfs.
1934  **/
1935 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1936 {
1937 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1938 	int ret = 0;
1939 
1940 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1941 		dev_warn(&pdev->dev, "Unable to configure VFs, another operation is pending.\n");
1942 		return -EAGAIN;
1943 	}
1944 
1945 	if (num_vfs) {
1946 		if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
1947 			set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
1948 			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1949 		}
1950 		ret = i40e_pci_sriov_enable(pdev, num_vfs);
1951 		goto sriov_configure_out;
1952 	}
1953 
1954 	if (!pci_vfs_assigned(pf->pdev)) {
1955 		i40e_free_vfs(pf);
1956 		clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
1957 		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
1958 	} else {
1959 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1960 		ret = -EINVAL;
1961 		goto sriov_configure_out;
1962 	}
1963 sriov_configure_out:
1964 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
1965 	return ret;
1966 }
1967 
1968 /***********************virtual channel routines******************/
1969 
1970 /**
1971  * i40e_vc_send_msg_to_vf
1972  * @vf: pointer to the VF info
1973  * @v_opcode: virtual channel opcode
1974  * @v_retval: virtual channel return value
1975  * @msg: pointer to the msg buffer
1976  * @msglen: msg length
1977  *
1978  * send msg to VF
1979  **/
1980 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1981 				  u32 v_retval, u8 *msg, u16 msglen)
1982 {
1983 	struct i40e_pf *pf;
1984 	struct i40e_hw *hw;
1985 	int abs_vf_id;
1986 	int aq_ret;
1987 
1988 	/* validate the request */
1989 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1990 		return -EINVAL;
1991 
1992 	pf = vf->pf;
1993 	hw = &pf->hw;
1994 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1995 
1996 	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1997 					msg, msglen, NULL);
1998 	if (aq_ret) {
1999 		dev_info(&pf->pdev->dev,
2000 			 "Unable to send the message to VF %d aq_err %d\n",
2001 			 vf->vf_id, pf->hw.aq.asq_last_status);
2002 		return -EIO;
2003 	}
2004 
2005 	return 0;
2006 }
2007 
2008 /**
2009  * i40e_vc_send_resp_to_vf
2010  * @vf: pointer to the VF info
2011  * @opcode: operation code
2012  * @retval: return value
2013  *
2014  * send resp msg to VF
2015  **/
2016 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
2017 				   enum virtchnl_ops opcode,
2018 				   int retval)
2019 {
2020 	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
2021 }
2022 
2023 /**
2024  * i40e_sync_vf_state
2025  * @vf: pointer to the VF info
2026  * @state: VF state
2027  *
2028  * Called while handling a VF message to wait out a potential in-progress
2029  * VF reset, so the request is serviced against a settled VF state
2030  **/
2031 static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
2032 {
2033 	int i;
2034 
2035 	/* Some messages can only be handled while the VF is in the required
2036 	 * state. That state bit may be cleared while a VF reset is in
2037 	 * progress, so wait until the end of the reset before deciding
2038 	 * whether the request message can be handled.
2039 	 */
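	/* Each retry below sleeps 10-20 ms, so a wait count of e.g. 20
	 * bounds the total wait at roughly 200-400 ms before giving up.
	 */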
2040 	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
2041 		if (test_bit(state, &vf->vf_states))
2042 			return true;
2043 		usleep_range(10000, 20000);
2044 	}
2045 
2046 	return test_bit(state, &vf->vf_states);
2047 }
2048 
2049 /**
2050  * i40e_vc_get_version_msg
2051  * @vf: pointer to the VF info
2052  * @msg: pointer to the msg buffer
2053  *
2054  * called from the VF to request the API version used by the PF
2055  **/
2056 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
2057 {
2058 	struct virtchnl_version_info info = {
2059 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2060 	};
2061 
2062 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
2063 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2064 	if (VF_IS_V10(&vf->vf_ver))
2065 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2066 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2067 				      0, (u8 *)&info,
2068 				      sizeof(struct virtchnl_version_info));
2069 }
2070 
2071 /**
2072  * i40e_del_qch - delete all the additional VSIs created as a part of ADq
2073  * @vf: pointer to VF structure
2074  **/
2075 static void i40e_del_qch(struct i40e_vf *vf)
2076 {
2077 	struct i40e_pf *pf = vf->pf;
2078 	int i;
2079 
2080 	/* The first element in the array belongs to the primary VF VSI, which
2081 	 * we shouldn't delete. We should, however, delete the remaining VSIs.
2082 	 */
2083 	for (i = 1; i < vf->num_tc; i++) {
2084 		if (vf->ch[i].vsi_idx) {
2085 			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
2086 			vf->ch[i].vsi_idx = 0;
2087 			vf->ch[i].vsi_id = 0;
2088 		}
2089 	}
2090 }
2091 
2092 /**
2093  * i40e_vc_get_max_frame_size
2094  * @vf: pointer to the VF
2095  *
2096  * Max frame size is determined based on the current port's max frame size and
2097  * whether a port VLAN is configured on this VF. The VF is not aware whether
2098  * it's in a port VLAN so the PF needs to account for this in max frame size
2099  * it's in a port VLAN, so the PF needs to account for this both in max
2100  * frame size checks and when sending the max frame size to the VF.
2101 static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
2102 {
2103 	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
2104 
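	/* The PF inserts the port VLAN tag, so the VF loses VLAN_HLEN (4)
	 * bytes of frame budget, e.g. 9728 -> 9724 on a jumbo-enabled port.
	 */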
2105 	if (vf->port_vlan_id)
2106 		max_frame_size -= VLAN_HLEN;
2107 
2108 	return max_frame_size;
2109 }
2110 
2111 /**
2112  * i40e_vc_get_vf_resources_msg
2113  * @vf: pointer to the VF info
2114  * @msg: pointer to the msg buffer
2115  *
2116  * called from the VF to request its resources
2117  **/
2118 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
2119 {
2120 	struct virtchnl_vf_resource *vfres = NULL;
2121 	struct i40e_pf *pf = vf->pf;
2122 	struct i40e_vsi *vsi;
2123 	int num_vsis = 1;
2124 	int aq_ret = 0;
2125 	size_t len = 0;
2126 	int ret;
2127 
2128 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
2129 		aq_ret = -EINVAL;
2130 		goto err;
2131 	}
2132 
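	/* vfres ends in a flexible array of per-VSI resources; with
	 * num_vsis == 1 this works out to sizeof(*vfres) plus space for
	 * one struct virtchnl_vsi_resource.
	 */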
2133 	len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
2134 	vfres = kzalloc(len, GFP_KERNEL);
2135 	if (!vfres) {
2136 		aq_ret = -ENOMEM;
2137 		len = 0;
2138 		goto err;
2139 	}
2140 	if (VF_IS_V11(&vf->vf_ver))
2141 		vf->driver_caps = *(u32 *)msg;
2142 	else
2143 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2144 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
2145 				  VIRTCHNL_VF_OFFLOAD_VLAN;
2146 
2147 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2148 	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2149 	vsi = pf->vsi[vf->lan_vsi_idx];
2150 	if (!vsi->info.pvid)
2151 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2152 
2153 	if (i40e_vf_client_capable(pf, vf->vf_id) &&
2154 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
2155 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
2156 		set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2157 	} else {
2158 		clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
2159 	}
2160 
2161 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2162 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2163 	} else {
2164 		if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
2165 		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
2166 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2167 		else
2168 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2169 	}
2170 
2171 	if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) {
2172 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2173 			vfres->vf_cap_flags |=
2174 				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2175 	}
2176 
2177 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2178 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2179 
2180 	if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) &&
2181 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
2182 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2183 
2184 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
2185 		if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
2186 			dev_err(&pf->pdev->dev,
2187 				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
2188 				 vf->vf_id);
2189 			aq_ret = -EINVAL;
2190 			goto err;
2191 		}
2192 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2193 	}
2194 
2195 	if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) {
2196 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2197 			vfres->vf_cap_flags |=
2198 					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2199 	}
2200 
2201 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2202 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2203 
2204 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2205 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2206 
2207 	vfres->num_vsis = num_vsis;
2208 	vfres->num_queue_pairs = vf->num_queue_pairs;
2209 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2210 	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2211 	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2212 	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
2213 
2214 	if (vf->lan_vsi_idx) {
2215 		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2216 		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2217 		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2218 		/* VFs only use TC 0 */
2219 		vfres->vsi_res[0].qset_handle
2220 					  = le16_to_cpu(vsi->info.qs_handle[0]);
2221 		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
2222 			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2223 			eth_zero_addr(vf->default_lan_addr.addr);
2224 		}
2225 		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2226 				vf->default_lan_addr.addr);
2227 	}
2228 	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2229 
2230 err:
2231 	/* send the response back to the VF */
2232 	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2233 				     aq_ret, (u8 *)vfres, len);
2234 
2235 	kfree(vfres);
2236 	return ret;
2237 }
2238 
2239 /**
2240  * i40e_vc_config_promiscuous_mode_msg
2241  * @vf: pointer to the VF info
2242  * @msg: pointer to the msg buffer
2243  *
2244  * called from the VF to configure the promiscuous mode of
2245  * VF vsis
2246  **/
2247 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2248 {
2249 	struct virtchnl_promisc_info *info =
2250 	    (struct virtchnl_promisc_info *)msg;
2251 	struct i40e_pf *pf = vf->pf;
2252 	bool allmulti = false;
2253 	bool alluni = false;
2254 	int aq_ret = 0;
2255 
2256 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2257 		aq_ret = -EINVAL;
2258 		goto err_out;
2259 	}
2260 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2261 		dev_err(&pf->pdev->dev,
2262 			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
2263 			vf->vf_id);
2264 
2265 		/* Lie to the VF on purpose, because this is an error we can
2266 		 * ignore. Unprivileged VF is not a virtual channel error.
2267 		 */
2268 		aq_ret = 0;
2269 		goto err_out;
2270 	}
2271 
2272 	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2273 		aq_ret = -EINVAL;
2274 		goto err_out;
2275 	}
2276 
2277 	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2278 		aq_ret = -EINVAL;
2279 		goto err_out;
2280 	}
2281 
2282 	/* Multicast promiscuous handling */
2283 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2284 		allmulti = true;
2285 
2286 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2287 		alluni = true;
2288 	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2289 						 alluni);
2290 	if (aq_ret)
2291 		goto err_out;
2292 
2293 	if (allmulti) {
2294 		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2295 				      &vf->vf_states))
2296 			dev_info(&pf->pdev->dev,
2297 				 "VF %d successfully set multicast promiscuous mode\n",
2298 				 vf->vf_id);
2299 	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2300 				      &vf->vf_states))
2301 		dev_info(&pf->pdev->dev,
2302 			 "VF %d successfully unset multicast promiscuous mode\n",
2303 			 vf->vf_id);
2304 
2305 	if (alluni) {
2306 		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2307 				      &vf->vf_states))
2308 			dev_info(&pf->pdev->dev,
2309 				 "VF %d successfully set unicast promiscuous mode\n",
2310 				 vf->vf_id);
2311 	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2312 				      &vf->vf_states))
2313 		dev_info(&pf->pdev->dev,
2314 			 "VF %d successfully unset unicast promiscuous mode\n",
2315 			 vf->vf_id);
2316 
2317 err_out:
2318 	/* send the response to the VF */
2319 	return i40e_vc_send_resp_to_vf(vf,
2320 				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2321 				       aq_ret);
2322 }
2323 
2324 /**
2325  * i40e_vc_config_queues_msg
2326  * @vf: pointer to the VF info
2327  * @msg: pointer to the msg buffer
2328  *
2329  * called from the VF to configure the rx/tx
2330  * queues
2331  **/
2332 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2333 {
2334 	struct virtchnl_vsi_queue_config_info *qci =
2335 	    (struct virtchnl_vsi_queue_config_info *)msg;
2336 	struct virtchnl_queue_pair_info *qpi;
2337 	u16 vsi_id, vsi_queue_id = 0;
2338 	struct i40e_pf *pf = vf->pf;
2339 	int i, j = 0, idx = 0;
2340 	struct i40e_vsi *vsi;
2341 	u16 num_qps_all = 0;
2342 	int aq_ret = 0;
2343 
2344 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2345 		aq_ret = -EINVAL;
2346 		goto error_param;
2347 	}
2348 
2349 	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2350 		aq_ret = -EINVAL;
2351 		goto error_param;
2352 	}
2353 
2354 	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2355 		aq_ret = -EINVAL;
2356 		goto error_param;
2357 	}
2358 
2359 	if (vf->adq_enabled) {
2360 		for (i = 0; i < vf->num_tc; i++)
2361 			num_qps_all += vf->ch[i].num_qps;
2362 		if (num_qps_all != qci->num_queue_pairs) {
2363 			aq_ret = -EINVAL;
2364 			goto error_param;
2365 		}
2366 	}
2367 
2368 	vsi_id = qci->vsi_id;
2369 
2370 	for (i = 0; i < qci->num_queue_pairs; i++) {
2371 		qpi = &qci->qpair[i];
2372 
2373 		if (!vf->adq_enabled) {
2374 			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2375 						      qpi->txq.queue_id)) {
2376 				aq_ret = -EINVAL;
2377 				goto error_param;
2378 			}
2379 
2380 			vsi_queue_id = qpi->txq.queue_id;
2381 
2382 			if (qpi->txq.vsi_id != qci->vsi_id ||
2383 			    qpi->rxq.vsi_id != qci->vsi_id ||
2384 			    qpi->rxq.queue_id != vsi_queue_id) {
2385 				aq_ret = -EINVAL;
2386 				goto error_param;
2387 			}
2388 		}
2389 
2390 		if (vf->adq_enabled) {
2391 			if (idx >= ARRAY_SIZE(vf->ch)) {
2392 				aq_ret = -ENODEV;
2393 				goto error_param;
2394 			}
2395 			vsi_id = vf->ch[idx].vsi_id;
2396 		}
2397 
2398 		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2399 					     &qpi->rxq) ||
2400 		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2401 					     &qpi->txq)) {
2402 			aq_ret = -EINVAL;
2403 			goto error_param;
2404 		}
2405 
2406 		/* For ADq there can be up to 4 VSIs with a max of 4 queues
2407 		 * each. The VF does not know about these additional VSIs;
2408 		 * all it cares about is its own queues. The PF assigns these
2409 		 * queues to the appropriate VSIs based on the TC mapping.
2410 		 */
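		/* For example (hypothetical split): with ch[0].num_qps = 2
		 * and ch[1].num_qps = 4, VF queue pairs 0-1 become queues
		 * 0-1 of the first channel VSI and pairs 2-5 become queues
		 * 0-3 of the second.
		 */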
2411 		if (vf->adq_enabled) {
2412 			if (idx >= ARRAY_SIZE(vf->ch)) {
2413 				aq_ret = -ENODEV;
2414 				goto error_param;
2415 			}
2416 			if (j == (vf->ch[idx].num_qps - 1)) {
2417 				idx++;
2418 				j = 0; /* resetting the queue count */
2419 				vsi_queue_id = 0;
2420 			} else {
2421 				j++;
2422 				vsi_queue_id++;
2423 			}
2424 		}
2425 	}
2426 	/* set vsi num_queue_pairs in use to num configured by VF */
2427 	if (!vf->adq_enabled) {
2428 		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2429 			qci->num_queue_pairs;
2430 	} else {
2431 		for (i = 0; i < vf->num_tc; i++) {
2432 			vsi = pf->vsi[vf->ch[i].vsi_idx];
2433 			vsi->num_queue_pairs = vf->ch[i].num_qps;
2434 
2435 			if (i40e_update_adq_vsi_queues(vsi, i)) {
2436 				aq_ret = -EIO;
2437 				goto error_param;
2438 			}
2439 		}
2440 	}
2441 
2442 error_param:
2443 	/* send the response to the VF */
2444 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2445 				       aq_ret);
2446 }
2447 
2448 /**
2449  * i40e_validate_queue_map - check queue map is valid
2450  * @vf: the VF structure pointer
2451  * @vsi_id: vsi id
2452  * @queuemap: Tx or Rx queue map
2453  *
2454  * check if Tx or Rx queue map is valid
2455  **/
2456 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2457 				   unsigned long queuemap)
2458 {
2459 	u16 vsi_queue_id, queue_id;
2460 
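	/* Under ADq the VF-relative queue id spans the channel VSIs: e.g.
	 * with the default of 4 queues per channel, VF queue 10 maps to
	 * channel 2, local queue 2.
	 */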
2461 	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2462 		if (vf->adq_enabled) {
2463 			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2464 			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2465 		} else {
2466 			queue_id = vsi_queue_id;
2467 		}
2468 
2469 		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2470 			return -EINVAL;
2471 	}
2472 
2473 	return 0;
2474 }
2475 
2476 /**
2477  * i40e_vc_config_irq_map_msg
2478  * @vf: pointer to the VF info
2479  * @msg: pointer to the msg buffer
2480  *
2481  * called from the VF to configure the irq to
2482  * queue map
2483  **/
2484 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2485 {
2486 	struct virtchnl_irq_map_info *irqmap_info =
2487 	    (struct virtchnl_irq_map_info *)msg;
2488 	struct virtchnl_vector_map *map;
2489 	int aq_ret = 0;
2490 	u16 vsi_id;
2491 	int i;
2492 
2493 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2494 		aq_ret = -EINVAL;
2495 		goto error_param;
2496 	}
2497 
2498 	if (irqmap_info->num_vectors >
2499 	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
2500 		aq_ret = -EINVAL;
2501 		goto error_param;
2502 	}
2503 
2504 	for (i = 0; i < irqmap_info->num_vectors; i++) {
2505 		map = &irqmap_info->vecmap[i];
2506 		/* validate msg params */
2507 		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2508 		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2509 			aq_ret = -EINVAL;
2510 			goto error_param;
2511 		}
2512 		vsi_id = map->vsi_id;
2513 
2514 		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2515 			aq_ret = -EINVAL;
2516 			goto error_param;
2517 		}
2518 
2519 		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2520 			aq_ret = -EINVAL;
2521 			goto error_param;
2522 		}
2523 
2524 		i40e_config_irq_link_list(vf, vsi_id, map);
2525 	}
2526 error_param:
2527 	/* send the response to the VF */
2528 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2529 				       aq_ret);
2530 }
2531 
2532 /**
2533  * i40e_ctrl_vf_tx_rings
2534  * @vsi: the SRIOV VSI being configured
2535  * @q_map: bit map of the queues to be enabled
2536  * @enable: start or stop the queue
2537  **/
2538 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2539 				 bool enable)
2540 {
2541 	struct i40e_pf *pf = vsi->back;
2542 	int ret = 0;
2543 	u16 q_id;
2544 
2545 	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2546 		ret = i40e_control_wait_tx_q(vsi->seid, pf,
2547 					     vsi->base_queue + q_id,
2548 					     false /* is xdp */, enable);
2549 		if (ret)
2550 			break;
2551 	}
2552 	return ret;
2553 }
2554 
2555 /**
2556  * i40e_ctrl_vf_rx_rings
2557  * @vsi: the SRIOV VSI being configured
2558  * @q_map: bit map of the queues to be enabled
2559  * @enable: start or stop the queue
2560  **/
2561 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2562 				 bool enable)
2563 {
2564 	struct i40e_pf *pf = vsi->back;
2565 	int ret = 0;
2566 	u16 q_id;
2567 
2568 	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2569 		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2570 					     enable);
2571 		if (ret)
2572 			break;
2573 	}
2574 	return ret;
2575 }
2576 
2577 /**
2578  * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2579  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2580  *
2581  * Returns true if validation was successful, else false.
2582  */
2583 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2584 {
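	/* e.g. rx_queues = 0x3 selects queues 0 and 1; an empty selection
	 * or any bit at or above I40E_MAX_VF_QUEUES is rejected.
	 */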
2585 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2586 	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2587 	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2588 		return false;
2589 
2590 	return true;
2591 }
2592 
2593 /**
2594  * i40e_vc_enable_queues_msg
2595  * @vf: pointer to the VF info
2596  * @msg: pointer to the msg buffer
2597  *
2598  * called from the VF to enable all or specific queue(s)
2599  **/
2600 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2601 {
2602 	struct virtchnl_queue_select *vqs =
2603 	    (struct virtchnl_queue_select *)msg;
2604 	struct i40e_pf *pf = vf->pf;
2605 	int aq_ret = 0;
2606 	int i;
2607 
2608 	if (vf->is_disabled_from_host) {
2609 		aq_ret = -EPERM;
2610 		dev_info(&pf->pdev->dev,
2611 			 "Admin has disabled VF %d, will not enable queues\n",
2612 			 vf->vf_id);
2613 		goto error_param;
2614 	}
2615 
2616 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2617 		aq_ret = -EINVAL;
2618 		goto error_param;
2619 	}
2620 
2621 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2622 		aq_ret = -EINVAL;
2623 		goto error_param;
2624 	}
2625 
2626 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2627 		aq_ret = -EINVAL;
2628 		goto error_param;
2629 	}
2630 
2631 	/* Use the queue bit map sent by the VF */
2632 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2633 				  true)) {
2634 		aq_ret = -EIO;
2635 		goto error_param;
2636 	}
2637 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2638 				  true)) {
2639 		aq_ret = -EIO;
2640 		goto error_param;
2641 	}
2642 
2643 	/* need to start the rings for additional ADq VSIs as well */
2644 	if (vf->adq_enabled) {
2645 		/* zero belongs to LAN VSI */
2646 		for (i = 1; i < vf->num_tc; i++) {
2647 			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2648 				aq_ret = -EIO;
2649 		}
2650 	}
2651 
2652 error_param:
2653 	/* send the response to the VF */
2654 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2655 				       aq_ret);
2656 }
2657 
2658 /**
2659  * i40e_vc_disable_queues_msg
2660  * @vf: pointer to the VF info
2661  * @msg: pointer to the msg buffer
2662  *
2663  * called from the VF to disable all or specific
2664  * queue(s)
2665  **/
2666 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2667 {
2668 	struct virtchnl_queue_select *vqs =
2669 	    (struct virtchnl_queue_select *)msg;
2670 	struct i40e_pf *pf = vf->pf;
2671 	int aq_ret = 0;
2672 
2673 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2674 		aq_ret = -EINVAL;
2675 		goto error_param;
2676 	}
2677 
2678 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2679 		aq_ret = -EINVAL;
2680 		goto error_param;
2681 	}
2682 
2683 	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2684 		aq_ret = -EINVAL;
2685 		goto error_param;
2686 	}
2687 
2688 	/* Use the queue bit map sent by the VF */
2689 	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2690 				  false)) {
2691 		aq_ret = -EIO;
2692 		goto error_param;
2693 	}
2694 	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2695 				  false)) {
2696 		aq_ret = -EIO;
2697 		goto error_param;
2698 	}
2699 error_param:
2700 	/* send the response to the VF */
2701 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2702 				       aq_ret);
2703 }
2704 
2705 /**
2706  * i40e_check_enough_queue - check for a large enough run of free queues
2707  * @vf: pointer to the VF info
2708  * @needed: the number of queue pairs needed
2709  *
2710  * Returns the base index of a suitable queue range, or negative for error
2711  **/
2712 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2713 {
2714 	unsigned int i, cur_queues, more, pool_size;
2715 	struct i40e_lump_tracking *pile;
2716 	struct i40e_pf *pf = vf->pf;
2717 	struct i40e_vsi *vsi;
2718 
2719 	vsi = pf->vsi[vf->lan_vsi_idx];
2720 	cur_queues = vsi->alloc_queue_pairs;
2721 
2722 	/* if the currently allocated queues are already enough */
2723 	if (cur_queues >= needed)
2724 		return vsi->base_queue;
2725 
2726 	pile = pf->qp_pile;
2727 	if (cur_queues > 0) {
2728 		/* Some queues are already allocated, so just check whether
2729 		 * there are enough free entries immediately behind the
2730 		 * existing allocation.
2731 		 */
2732 		more = needed - cur_queues;
2733 		for (i = vsi->base_queue + cur_queues;
2734 			i < pile->num_entries; i++) {
2735 			if (pile->list[i] & I40E_PILE_VALID_BIT)
2736 				break;
2737 
2738 			if (more-- == 1)
2739 				/* there is enough */
2740 				return vsi->base_queue;
2741 		}
2742 	}
2743 
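	/* Otherwise scan the whole pile for a contiguous run of 'needed'
	 * free entries, restarting the count whenever an in-use entry
	 * (I40E_PILE_VALID_BIT) breaks the run.
	 */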
2744 	pool_size = 0;
2745 	for (i = 0; i < pile->num_entries; i++) {
2746 		if (pile->list[i] & I40E_PILE_VALID_BIT) {
2747 			pool_size = 0;
2748 			continue;
2749 		}
2750 		if (needed <= ++pool_size)
2751 			/* there is enough */
2752 			return i;
2753 	}
2754 
2755 	return -ENOMEM;
2756 }
2757 
2758 /**
2759  * i40e_vc_request_queues_msg
2760  * @vf: pointer to the VF info
2761  * @msg: pointer to the msg buffer
2762  *
2763  * VFs get a default number of queues but can use this message to request a
2764  * different number.  If the request is successful, the PF will reset the VF
2765  * and return 0.  If unsuccessful, the PF will send a message informing the
2766  * VF of the number of available queues and return the result of that send.
2767  **/
2768 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2769 {
2770 	struct virtchnl_vf_res_request *vfres =
2771 		(struct virtchnl_vf_res_request *)msg;
2772 	u16 req_pairs = vfres->num_queue_pairs;
2773 	u8 cur_pairs = vf->num_queue_pairs;
2774 	struct i40e_pf *pf = vf->pf;
2775 
2776 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2777 		return -EINVAL;
2778 
2779 	if (req_pairs > I40E_MAX_VF_QUEUES) {
2780 		dev_err(&pf->pdev->dev,
2781 			"VF %d tried to request more than %d queues.\n",
2782 			vf->vf_id,
2783 			I40E_MAX_VF_QUEUES);
2784 		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2785 	} else if (req_pairs - cur_pairs > pf->queues_left) {
2786 		dev_warn(&pf->pdev->dev,
2787 			 "VF %d requested %d more queues, but only %d left.\n",
2788 			 vf->vf_id,
2789 			 req_pairs - cur_pairs,
2790 			 pf->queues_left);
2791 		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2792 	} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2793 		dev_warn(&pf->pdev->dev,
2794 			 "VF %d requested %d more queues, but there are not enough available.\n",
2795 			 vf->vf_id,
2796 			 req_pairs - cur_pairs);
2797 		vfres->num_queue_pairs = cur_pairs;
2798 	} else {
2799 		/* successful request */
2800 		vf->num_req_queues = req_pairs;
2801 		i40e_vc_reset_vf(vf, true);
2802 		return 0;
2803 	}
2804 
2805 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2806 				      (u8 *)vfres, sizeof(*vfres));
2807 }
2808 
2809 /**
2810  * i40e_vc_get_stats_msg
2811  * @vf: pointer to the VF info
2812  * @msg: pointer to the msg buffer
2813  *
2814  * called from the VF to get vsi stats
2815  **/
2816 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2817 {
2818 	struct virtchnl_queue_select *vqs =
2819 	    (struct virtchnl_queue_select *)msg;
2820 	struct i40e_pf *pf = vf->pf;
2821 	struct i40e_eth_stats stats;
2822 	int aq_ret = 0;
2823 	struct i40e_vsi *vsi;
2824 
2825 	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2826 
2827 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2828 		aq_ret = -EINVAL;
2829 		goto error_param;
2830 	}
2831 
2832 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2833 		aq_ret = -EINVAL;
2834 		goto error_param;
2835 	}
2836 
2837 	vsi = pf->vsi[vf->lan_vsi_idx];
2838 	if (!vsi) {
2839 		aq_ret = -EINVAL;
2840 		goto error_param;
2841 	}
2842 	i40e_update_eth_stats(vsi);
2843 	stats = vsi->eth_stats;
2844 
2845 error_param:
2846 	/* send the response back to the VF */
2847 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2848 				      (u8 *)&stats, sizeof(stats));
2849 }
2850 
2851 #define I40E_MAX_MACVLAN_PER_HW 3072
2852 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
2853 	(num_ports))
2854 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2855  * program: 16 for multicast, 1 for its own MAC, 1 for broadcast
2856  */
2857 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2858 #define I40E_VC_MAX_VLAN_PER_VF 16
2859 
2860 #define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports)		\
2861 ({	typeof(vf_num) vf_num_ = (vf_num);				\
2862 	typeof(num_ports) num_ports_ = (num_ports);			\
2863 	((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ *		\
2864 	I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) +			\
2865 	I40E_VC_MAX_MAC_ADDR_PER_VF; })
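/* Worked example (illustrative numbers): with 2 ports and 32 VFs the per-PF
 * pool is 3072 / 2 = 1536 filters; untrusted VFs reserve 32 * 18 = 576 of
 * those, leaving (1536 - 576) / 32 + 18 = 48 filters per trusted VF.
 */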
2866 /**
2867  * i40e_check_vf_permission
2868  * @vf: pointer to the VF info
2869  * @al: MAC address list from virtchnl
2870  *
2871  * Check that the given list of MAC addresses is allowed. Will return -EPERM
2872  * if any address in the list is not valid. Checks the following conditions:
2873  *
2874  * 1) broadcast and zero addresses are never valid
2875  * 2) unicast addresses are not allowed if the VMM has administratively set
2876  *    the VF MAC address, unless the VF is marked as privileged.
2877  * 3) There is enough space to add all the addresses.
2878  *
2879  * Note that to guarantee consistency, it is expected this function be called
2880  * while holding the mac_filter_hash_lock, as otherwise the current number of
2881  * addresses might not be accurate.
2882  **/
2883 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2884 					   struct virtchnl_ether_addr_list *al)
2885 {
2886 	struct i40e_pf *pf = vf->pf;
2887 	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2888 	struct i40e_hw *hw = &pf->hw;
2889 	int mac2add_cnt = 0;
2890 	int i;
2891 
2892 	for (i = 0; i < al->num_elements; i++) {
2893 		struct i40e_mac_filter *f;
2894 		u8 *addr = al->list[i].addr;
2895 
2896 		if (is_broadcast_ether_addr(addr) ||
2897 		    is_zero_ether_addr(addr)) {
2898 			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2899 				addr);
2900 			return -EINVAL;
2901 		}
2902 
2903 		/* If the host VMM administrator has set the VF MAC address
2904 		 * administratively via the ndo_set_vf_mac command then deny
2905 		 * permission to the VF to add or delete unicast MAC addresses.
2906 		 * Unless the VF is privileged and then it can do whatever.
2907 		 * The VF may request to set the MAC address filter already
2908 		 * assigned to it so do not return an error in that case.
2909 		 */
2910 		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2911 		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2912 		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2913 			dev_err(&pf->pdev->dev,
2914 				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2915 			return -EPERM;
2916 		}
2917 
2918 		/* count filters that really will be added */
2919 		f = i40e_find_mac(vsi, addr);
2920 		if (!f)
2921 			++mac2add_cnt;
2922 	}
2923 
2924 	/* If this VF is not privileged, then we can't add more than a limited
2925 	 * number of addresses. Check to make sure that the additions do not
2926 	 * push us over the limit.
2927 	 */
2928 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2929 		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2930 		    I40E_VC_MAX_MAC_ADDR_PER_VF) {
2931 			dev_err(&pf->pdev->dev,
2932 				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2933 			return -EPERM;
2934 		}
2935 	/* If this VF is trusted, it can use more resources than untrusted.
2936 	 * However, to ensure that every trusted VF has an appropriate number
2937 	 * of resources, divide the whole pool of resources per port and then
2938 	 * across all VFs.
2939 	 */
2940 	} else {
2941 		if ((i40e_count_filters(vsi) + mac2add_cnt) >
2942 		    I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
2943 						       hw->num_ports)) {
2944 			dev_err(&pf->pdev->dev,
2945 				"Cannot add more MAC addresses, trusted VF exhausted its resources\n");
2946 			return -EPERM;
2947 		}
2948 	}
2949 	return 0;
2950 }
2951 
2952 /**
2953  * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
2954  * @vc_ether_addr: used to extract the type
2955  **/
2956 static u8
2957 i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
2958 {
2959 	return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
2960 }
2961 
2962 /**
2963  * i40e_is_vc_addr_legacy
2964  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2965  *
2966  * check if the MAC address is from an older VF
2967  **/
2968 static bool
2969 i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
2970 {
2971 	return i40e_vc_ether_addr_type(vc_ether_addr) ==
2972 		VIRTCHNL_ETHER_ADDR_LEGACY;
2973 }
2974 
2975 /**
2976  * i40e_is_vc_addr_primary
2977  * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
2978  *
2979  * check if the MAC address is the VF's primary MAC
2980  * This function should only be called when the MAC address in
2981  * virtchnl_ether_addr is a valid unicast MAC
2982  **/
2983 static bool
2984 i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
2985 {
2986 	return i40e_vc_ether_addr_type(vc_ether_addr) ==
2987 		VIRTCHNL_ETHER_ADDR_PRIMARY;
2988 }
2989 
2990 /**
2991  * i40e_update_vf_mac_addr
2992  * @vf: VF to update
2993  * @vc_ether_addr: structure from VIRTCHNL with MAC to add
2994  *
2995  * update the VF's cached hardware MAC if allowed
2996  **/
2997 static void
2998 i40e_update_vf_mac_addr(struct i40e_vf *vf,
2999 			struct virtchnl_ether_addr *vc_ether_addr)
3000 {
3001 	u8 *mac_addr = vc_ether_addr->addr;
3002 
3003 	if (!is_valid_ether_addr(mac_addr))
3004 		return;
3005 
3006 	/* If the request to add a MAC filter is a primary request, update the
3007 	 * cached default MAC address with the requested one. If it is a legacy
3008 	 * request, only update the default MAC if the current one is empty.
3009 	 */
3010 	if (i40e_is_vc_addr_primary(vc_ether_addr)) {
3011 		ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3012 	} else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
3013 		if (is_zero_ether_addr(vf->default_lan_addr.addr))
3014 			ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
3015 	}
3016 }
3017 
3018 /**
3019  * i40e_vc_add_mac_addr_msg
3020  * @vf: pointer to the VF info
3021  * @msg: pointer to the msg buffer
3022  *
3023  * add guest mac address filter
3024  **/
3025 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3026 {
3027 	struct virtchnl_ether_addr_list *al =
3028 	    (struct virtchnl_ether_addr_list *)msg;
3029 	struct i40e_pf *pf = vf->pf;
3030 	struct i40e_vsi *vsi = NULL;
3031 	int ret = 0;
3032 	int i;
3033 
3034 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3035 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3036 		ret = -EINVAL;
3037 		goto error_param;
3038 	}
3039 
3040 	vsi = pf->vsi[vf->lan_vsi_idx];
3041 
3042 	/* Lock once, because every function called inside the for loop accesses
3043 	 * the VSI's MAC filter list, which must be protected by the same lock.
3044 	 */
3045 	spin_lock_bh(&vsi->mac_filter_hash_lock);
3046 
3047 	ret = i40e_check_vf_permission(vf, al);
3048 	if (ret) {
3049 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3050 		goto error_param;
3051 	}
3052 
3053 	/* add new addresses to the list */
3054 	for (i = 0; i < al->num_elements; i++) {
3055 		struct i40e_mac_filter *f;
3056 
3057 		f = i40e_find_mac(vsi, al->list[i].addr);
3058 		if (!f) {
3059 			f = i40e_add_mac_filter(vsi, al->list[i].addr);
3060 
3061 			if (!f) {
3062 				dev_err(&pf->pdev->dev,
3063 					"Unable to add MAC filter %pM for VF %d\n",
3064 					al->list[i].addr, vf->vf_id);
3065 				ret = -EINVAL;
3066 				spin_unlock_bh(&vsi->mac_filter_hash_lock);
3067 				goto error_param;
3068 			}
3069 		}
3070 		i40e_update_vf_mac_addr(vf, &al->list[i]);
3071 	}
3072 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3073 
3074 	/* program the updated filter list */
3075 	ret = i40e_sync_vsi_filters(vsi);
3076 	if (ret)
3077 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3078 			vf->vf_id, ret);
3079 
3080 error_param:
3081 	/* send the response to the VF */
3082 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3083 				      ret, NULL, 0);
3084 }
3085 
3086 /**
3087  * i40e_vc_del_mac_addr_msg
3088  * @vf: pointer to the VF info
3089  * @msg: pointer to the msg buffer
3090  *
3091  * remove guest mac address filter
3092  **/
3093 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
3094 {
3095 	struct virtchnl_ether_addr_list *al =
3096 	    (struct virtchnl_ether_addr_list *)msg;
3097 	bool was_unimac_deleted = false;
3098 	struct i40e_pf *pf = vf->pf;
3099 	struct i40e_vsi *vsi = NULL;
3100 	int ret = 0;
3101 	int i;
3102 
3103 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3104 	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3105 		ret = -EINVAL;
3106 		goto error_param;
3107 	}
3108 
3109 	for (i = 0; i < al->num_elements; i++) {
3110 		if (is_broadcast_ether_addr(al->list[i].addr) ||
3111 		    is_zero_ether_addr(al->list[i].addr)) {
3112 			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
3113 				al->list[i].addr, vf->vf_id);
3114 			ret = -EINVAL;
3115 			goto error_param;
3116 		}
3117 		if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
3118 			was_unimac_deleted = true;
3119 	}
3120 	vsi = pf->vsi[vf->lan_vsi_idx];
3121 
3122 	spin_lock_bh(&vsi->mac_filter_hash_lock);
3123 	/* delete addresses from the list */
3124 	for (i = 0; i < al->num_elements; i++)
3125 		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
3126 			ret = -EINVAL;
3127 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
3128 			goto error_param;
3129 		}
3130 
3131 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
3132 
3133 	if (was_unimac_deleted)
3134 		eth_zero_addr(vf->default_lan_addr.addr);
3135 
3136 	/* program the updated filter list */
3137 	ret = i40e_sync_vsi_filters(vsi);
3138 	if (ret)
3139 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
3140 			vf->vf_id, ret);
3141 
3142 	if (vf->trusted && was_unimac_deleted) {
3143 		struct i40e_mac_filter *f;
3144 		struct hlist_node *h;
3145 		u8 *macaddr = NULL;
3146 		int bkt;
3147 
3148 		/* set last unicast mac address as default */
3149 		spin_lock_bh(&vsi->mac_filter_hash_lock);
3150 		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3151 			if (is_valid_ether_addr(f->macaddr))
3152 				macaddr = f->macaddr;
3153 		}
3154 		if (macaddr)
3155 			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3156 		spin_unlock_bh(&vsi->mac_filter_hash_lock);
3157 	}
3158 error_param:
3159 	/* send the response to the VF */
3160 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3161 }
3162 
3163 /**
3164  * i40e_vc_add_vlan_msg
3165  * @vf: pointer to the VF info
3166  * @msg: pointer to the msg buffer
3167  *
3168  * program guest vlan id
3169  **/
3170 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3171 {
3172 	struct virtchnl_vlan_filter_list *vfl =
3173 	    (struct virtchnl_vlan_filter_list *)msg;
3174 	struct i40e_pf *pf = vf->pf;
3175 	struct i40e_vsi *vsi = NULL;
3176 	int aq_ret = 0;
3177 	int i;
3178 
3179 	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3180 	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3181 		dev_err(&pf->pdev->dev,
3182 			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
3183 		goto error_param;
3184 	}
3185 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3186 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3187 		aq_ret = -EINVAL;
3188 		goto error_param;
3189 	}
3190 
3191 	for (i = 0; i < vfl->num_elements; i++) {
3192 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3193 			aq_ret = -EINVAL;
3194 			dev_err(&pf->pdev->dev,
3195 				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3196 			goto error_param;
3197 		}
3198 	}
3199 	vsi = pf->vsi[vf->lan_vsi_idx];
3200 	if (vsi->info.pvid) {
3201 		aq_ret = -EINVAL;
3202 		goto error_param;
3203 	}
3204 
3205 	i40e_vlan_stripping_enable(vsi);
3206 	for (i = 0; i < vfl->num_elements; i++) {
3207 		/* add new VLAN filter */
3208 		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
3209 		if (!ret)
3210 			vf->num_vlan++;
3211 
3212 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3213 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3214 							   true,
3215 							   vfl->vlan_id[i],
3216 							   NULL);
3217 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3218 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3219 							   true,
3220 							   vfl->vlan_id[i],
3221 							   NULL);
3222 
3223 		if (ret)
3224 			dev_err(&pf->pdev->dev,
3225 				"Unable to add VLAN filter %d for VF %d, error %d\n",
3226 				vfl->vlan_id[i], vf->vf_id, ret);
3227 	}
3228 
3229 error_param:
3230 	/* send the response to the VF */
3231 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3232 }
3233 
3234 /**
3235  * i40e_vc_remove_vlan_msg
3236  * @vf: pointer to the VF info
3237  * @msg: pointer to the msg buffer
3238  *
3239  * remove programmed guest vlan id
3240  **/
3241 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3242 {
3243 	struct virtchnl_vlan_filter_list *vfl =
3244 	    (struct virtchnl_vlan_filter_list *)msg;
3245 	struct i40e_pf *pf = vf->pf;
3246 	struct i40e_vsi *vsi = NULL;
3247 	int aq_ret = 0;
3248 	int i;
3249 
3250 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3251 	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3252 		aq_ret = -EINVAL;
3253 		goto error_param;
3254 	}
3255 
3256 	for (i = 0; i < vfl->num_elements; i++) {
3257 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3258 			aq_ret = -EINVAL;
3259 			goto error_param;
3260 		}
3261 	}
3262 
3263 	vsi = pf->vsi[vf->lan_vsi_idx];
3264 	if (vsi->info.pvid) {
3265 		if (vfl->num_elements > 1 || vfl->vlan_id[0])
3266 			aq_ret = -EINVAL;
3267 		goto error_param;
3268 	}
3269 
3270 	for (i = 0; i < vfl->num_elements; i++) {
3271 		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3272 		vf->num_vlan--;
3273 
3274 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3275 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3276 							   false,
3277 							   vfl->vlan_id[i],
3278 							   NULL);
3279 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3280 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3281 							   false,
3282 							   vfl->vlan_id[i],
3283 							   NULL);
3284 	}
3285 
3286 error_param:
3287 	/* send the response to the VF */
3288 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3289 }
3290 
3291 /**
3292  * i40e_vc_rdma_msg
3293  * @vf: pointer to the VF info
3294  * @msg: pointer to the msg buffer
3295  * @msglen: msg length
3296  *
3297  * called from the VF for the RDMA (iWARP) msgs
3298  **/
3299 static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3300 {
3301 	struct i40e_pf *pf = vf->pf;
3302 	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3303 	int aq_ret = 0;
3304 
3305 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3306 	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3307 		aq_ret = -EINVAL;
3308 		goto error_param;
3309 	}
3310 
3311 	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3312 				     msg, msglen);
3313 
3314 error_param:
3315 	/* send the response to the VF */
3316 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
3317 				       aq_ret);
3318 }
3319 
3320 /**
3321  * i40e_vc_rdma_qvmap_msg
3322  * @vf: pointer to the VF info
3323  * @msg: pointer to the msg buffer
3324  * @config: config qvmap or release it
3325  *
3326  * called from the VF for the RDMA (iWARP) msgs
3327  **/
3328 static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3329 {
3330 	struct virtchnl_rdma_qvlist_info *qvlist_info =
3331 				(struct virtchnl_rdma_qvlist_info *)msg;
3332 	int aq_ret = 0;
3333 
3334 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3335 	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
3336 		aq_ret = -EINVAL;
3337 		goto error_param;
3338 	}
3339 
3340 	if (config) {
3341 		if (i40e_config_rdma_qvlist(vf, qvlist_info))
3342 			aq_ret = -EINVAL;
3343 	} else {
3344 		i40e_release_rdma_qvlist(vf);
3345 	}
3346 
3347 error_param:
3348 	/* send the response to the VF */
3349 	return i40e_vc_send_resp_to_vf(vf,
3350 			       config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
3351 			       VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
3352 			       aq_ret);
3353 }
3354 
3355 /**
3356  * i40e_vc_config_rss_key
3357  * @vf: pointer to the VF info
3358  * @msg: pointer to the msg buffer
3359  *
3360  * Configure the VF's RSS key
3361  **/
3362 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3363 {
3364 	struct virtchnl_rss_key *vrk =
3365 		(struct virtchnl_rss_key *)msg;
3366 	struct i40e_pf *pf = vf->pf;
3367 	struct i40e_vsi *vsi = NULL;
3368 	int aq_ret = 0;
3369 
3370 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3371 	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3372 	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3373 		aq_ret = -EINVAL;
3374 		goto err;
3375 	}
3376 
3377 	vsi = pf->vsi[vf->lan_vsi_idx];
3378 	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3379 err:
3380 	/* send the response to the VF */
3381 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3382 				       aq_ret);
3383 }
3384 
3385 /**
3386  * i40e_vc_config_rss_lut
3387  * @vf: pointer to the VF info
3388  * @msg: pointer to the msg buffer
3389  *
3390  * Configure the VF's RSS LUT
3391  **/
3392 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3393 {
3394 	struct virtchnl_rss_lut *vrl =
3395 		(struct virtchnl_rss_lut *)msg;
3396 	struct i40e_pf *pf = vf->pf;
3397 	struct i40e_vsi *vsi = NULL;
3398 	int aq_ret = 0;
3399 	u16 i;
3400 
3401 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3402 	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3403 	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3404 		aq_ret = -EINVAL;
3405 		goto err;
3406 	}
3407 
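	/* Every LUT entry is a VF-relative queue index, so e.g. a VF with
	 * 4 queue pairs may only use values 0-3.
	 */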
3408 	for (i = 0; i < vrl->lut_entries; i++)
3409 		if (vrl->lut[i] >= vf->num_queue_pairs) {
3410 			aq_ret = -EINVAL;
3411 			goto err;
3412 		}
3413 
3414 	vsi = pf->vsi[vf->lan_vsi_idx];
3415 	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3416 	/* send the response to the VF */
3417 err:
3418 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3419 				       aq_ret);
3420 }
3421 
3422 /**
3423  * i40e_vc_get_rss_hena
3424  * @vf: pointer to the VF info
3425  * @msg: pointer to the msg buffer
3426  *
3427  * Return the RSS HENA bits allowed by the hardware
3428  **/
3429 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3430 {
3431 	struct virtchnl_rss_hena *vrh = NULL;
3432 	struct i40e_pf *pf = vf->pf;
3433 	int aq_ret = 0;
3434 	int len = 0;
3435 
3436 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3437 		aq_ret = -EINVAL;
3438 		goto err;
3439 	}
3440 	len = sizeof(struct virtchnl_rss_hena);
3441 
3442 	vrh = kzalloc(len, GFP_KERNEL);
3443 	if (!vrh) {
3444 		aq_ret = -ENOMEM;
3445 		len = 0;
3446 		goto err;
3447 	}
3448 	vrh->hena = i40e_pf_get_default_rss_hena(pf);
3449 err:
3450 	/* send the response back to the VF */
3451 	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3452 					aq_ret, (u8 *)vrh, len);
3453 	kfree(vrh);
3454 	return aq_ret;
3455 }
3456 
3457 /**
3458  * i40e_vc_set_rss_hena
3459  * @vf: pointer to the VF info
3460  * @msg: pointer to the msg buffer
3461  *
3462  * Set the RSS HENA bits for the VF
3463  **/
3464 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3465 {
3466 	struct virtchnl_rss_hena *vrh =
3467 		(struct virtchnl_rss_hena *)msg;
3468 	struct i40e_pf *pf = vf->pf;
3469 	struct i40e_hw *hw = &pf->hw;
3470 	int aq_ret = 0;
3471 
3472 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3473 		aq_ret = -EINVAL;
3474 		goto err;
3475 	}
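	/* The 64-bit HENA bitmap is split across two 32-bit registers:
	 * HENA1(0, vf_id) takes the low word and HENA1(1, vf_id) the high.
	 */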
3476 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3477 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3478 			  (u32)(vrh->hena >> 32));
3479 
3480 	/* send the response to the VF */
3481 err:
3482 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3483 }
3484 
3485 /**
3486  * i40e_vc_enable_vlan_stripping
3487  * @vf: pointer to the VF info
3488  * @msg: pointer to the msg buffer
3489  *
3490  * Enable vlan header stripping for the VF
3491  **/
3492 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3493 {
3494 	struct i40e_vsi *vsi;
3495 	int aq_ret = 0;
3496 
3497 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3498 		aq_ret = -EINVAL;
3499 		goto err;
3500 	}
3501 
3502 	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3503 	i40e_vlan_stripping_enable(vsi);
3504 
3505 	/* send the response to the VF */
3506 err:
3507 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3508 				       aq_ret);
3509 }
3510 
3511 /**
3512  * i40e_vc_disable_vlan_stripping
3513  * @vf: pointer to the VF info
3514  * @msg: pointer to the msg buffer
3515  *
3516  * Disable vlan header stripping for the VF
3517  **/
3518 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3519 {
3520 	struct i40e_vsi *vsi;
3521 	int aq_ret = 0;
3522 
3523 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3524 		aq_ret = -EINVAL;
3525 		goto err;
3526 	}
3527 
3528 	vsi = vf->pf->vsi[vf->lan_vsi_idx];
3529 	i40e_vlan_stripping_disable(vsi);
3530 
3531 	/* send the response to the VF */
3532 err:
3533 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3534 				       aq_ret);
3535 }
3536 
3537 /**
3538  * i40e_validate_cloud_filter
3539  * @vf: pointer to VF structure
3540  * @tc_filter: pointer to filter requested
3541  *
3542  * This function validates cloud filter programmed as TC filter for ADq
3543  **/
3544 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3545 				      struct virtchnl_filter *tc_filter)
3546 {
3547 	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3548 	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3549 	struct i40e_pf *pf = vf->pf;
3550 	struct i40e_vsi *vsi = NULL;
3551 	struct i40e_mac_filter *f;
3552 	struct hlist_node *h;
3553 	bool found = false;
3554 	int bkt;
3555 
3556 	if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
3557 		dev_info(&pf->pdev->dev,
3558 			 "VF %d: ADQ doesn't support this action (%d)\n",
3559 			 vf->vf_id, tc_filter->action);
3560 		goto err;
3561 	}
3562 
3563 	/* action_meta is the TC number to which the filter is applied */
3564 	if (!tc_filter->action_meta ||
3565 	    tc_filter->action_meta > vf->num_tc) {
3566 		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3567 			 vf->vf_id, tc_filter->action_meta);
3568 		goto err;
3569 	}
3570 
3571 	/* Check whether the filter is programmed for advanced or basic mode.
3572 	 * There are two ADq modes (for VFs only):
3573 	 * 1. Basic mode: intended to allow as many filter options as possible
3574 	 *		  to be added to a VF in non-trusted mode. The main goal
3575 	 *		  is to add filters to the VF's own MAC and VLAN id.
3576 	 * 2. Advanced mode: allows filters to be applied to traffic other than
3577 	 *		  the VF's own MAC or VLAN. This mode requires the VF
3578 	 *		  to be trusted.
3579 	 */
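	/* For example, an untrusted VF may steer TCP flows addressed to its
	 * own MAC (and VLAN) to one of its TCs; matching on anything else,
	 * such as an arbitrary destination IP, requires trust.
	 */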
3580 	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3581 		vsi = pf->vsi[vf->lan_vsi_idx];
3582 		f = i40e_find_mac(vsi, data.dst_mac);
3583 
3584 		if (!f) {
3585 			dev_info(&pf->pdev->dev,
3586 				 "Destination MAC %pM doesn't belong to VF %d\n",
3587 				 data.dst_mac, vf->vf_id);
3588 			goto err;
3589 		}
3590 
3591 		if (mask.vlan_id) {
3592 			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3593 					   hlist) {
3594 				if (f->vlan == ntohs(data.vlan_id)) {
3595 					found = true;
3596 					break;
3597 				}
3598 			}
3599 			if (!found) {
3600 				dev_info(&pf->pdev->dev,
3601 					 "VF %d doesn't have any VLAN id %u\n",
3602 					 vf->vf_id, ntohs(data.vlan_id));
3603 				goto err;
3604 			}
3605 		}
3606 	} else {
3607 		/* Check if VF is trusted */
3608 		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3609 			dev_err(&pf->pdev->dev,
3610 				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3611 				vf->vf_id);
3612 			return -EIO;
3613 		}
3614 	}
3615 
3616 	if (mask.dst_mac[0] & data.dst_mac[0]) {
3617 		if (is_broadcast_ether_addr(data.dst_mac) ||
3618 		    is_zero_ether_addr(data.dst_mac)) {
3619 			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3620 				 vf->vf_id, data.dst_mac);
3621 			goto err;
3622 		}
3623 	}
3624 
3625 	if (mask.src_mac[0] & data.src_mac[0]) {
3626 		if (is_broadcast_ether_addr(data.src_mac) ||
3627 		    is_zero_ether_addr(data.src_mac)) {
3628 			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3629 				 vf->vf_id, data.src_mac);
3630 			goto err;
3631 		}
3632 	}
3633 
3634 	if (mask.dst_port & data.dst_port) {
3635 		if (!data.dst_port) {
3636 			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3637 				 vf->vf_id);
3638 			goto err;
3639 		}
3640 	}
3641 
3642 	if (mask.src_port & data.src_port) {
3643 		if (!data.src_port) {
3644 			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3645 				 vf->vf_id);
3646 			goto err;
3647 		}
3648 	}
3649 
3650 	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3651 	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3652 		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3653 			 vf->vf_id);
3654 		goto err;
3655 	}
3656 
3657 	if (mask.vlan_id) {
3658 		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3659 			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3660 				 vf->vf_id);
3661 			goto err;
3662 		}
3663 	}
3664 
3665 	return 0;
3666 err:
3667 	return -EIO;
3668 }
3669 
3670 /**
3671  * i40e_find_vsi_from_seid - search for the VSI with the given SEID
3672  * @vf: pointer to the VF info
3673  * @seid: SEID of the VSI being searched for
3674  **/
3675 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3676 {
3677 	struct i40e_pf *pf = vf->pf;
3678 	struct i40e_vsi *vsi = NULL;
3679 	int i;
3680 
3681 	for (i = 0; i < vf->num_tc; i++) {
3682 		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3683 		if (vsi && vsi->seid == seid)
3684 			return vsi;
3685 	}
3686 	return NULL;
3687 }
3688 
3689 /**
3690  * i40e_del_all_cloud_filters
3691  * @vf: pointer to the VF info
3692  *
3693  * This function deletes all cloud filters for the given VF
3694  **/
3695 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3696 {
3697 	struct i40e_cloud_filter *cfilter = NULL;
3698 	struct i40e_pf *pf = vf->pf;
3699 	struct i40e_vsi *vsi = NULL;
3700 	struct hlist_node *node;
3701 	int ret;
3702 
3703 	hlist_for_each_entry_safe(cfilter, node,
3704 				  &vf->cloud_filter_list, cloud_node) {
3705 		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3706 
3707 		if (!vsi) {
3708 			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3709 				vf->vf_id, cfilter->seid);
3710 			continue;
3711 		}
3712 
3713 		if (cfilter->dst_port)
3714 			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3715 								false);
3716 		else
3717 			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3718 		if (ret)
3719 			dev_err(&pf->pdev->dev,
3720 				"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3721 				vf->vf_id, ERR_PTR(ret),
3722 				i40e_aq_str(&pf->hw,
3723 					    pf->hw.aq.asq_last_status));
3724 
3725 		hlist_del(&cfilter->cloud_node);
3726 		kfree(cfilter);
3727 		vf->num_cloud_filters--;
3728 	}
3729 }
3730 
3731 /**
3732  * i40e_vc_del_cloud_filter
3733  * @vf: pointer to the VF info
3734  * @msg: pointer to the msg buffer
3735  *
3736  * This function deletes a cloud filter programmed as a TC filter for ADq
3737  **/
3738 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3739 {
3740 	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3741 	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3742 	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3743 	struct i40e_cloud_filter cfilter, *cf = NULL;
3744 	struct i40e_pf *pf = vf->pf;
3745 	struct i40e_vsi *vsi = NULL;
3746 	struct hlist_node *node;
3747 	int aq_ret = 0;
3748 	int i, ret;
3749 
3750 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3751 		aq_ret = -EINVAL;
3752 		goto err;
3753 	}
3754 
3755 	if (!vf->adq_enabled) {
3756 		dev_info(&pf->pdev->dev,
3757 			 "VF %d: ADq not enabled, can't delete cloud filter\n",
3758 			 vf->vf_id);
3759 		aq_ret = -EINVAL;
3760 		goto err;
3761 	}
3762 
3763 	if (i40e_validate_cloud_filter(vf, vcf)) {
3764 		dev_info(&pf->pdev->dev,
3765 			 "VF %d: Invalid input, can't delete cloud filter\n",
3766 			 vf->vf_id);
3767 		aq_ret = -EINVAL;
3768 		goto err;
3769 	}
3770 
3771 	memset(&cfilter, 0, sizeof(cfilter));
3772 	/* parse destination mac address */
3773 	for (i = 0; i < ETH_ALEN; i++)
3774 		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3775 
3776 	/* parse source mac address */
3777 	for (i = 0; i < ETH_ALEN; i++)
3778 		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3779 
3780 	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3781 	cfilter.dst_port = mask.dst_port & tcf.dst_port;
3782 	cfilter.src_port = mask.src_port & tcf.src_port;
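	/* Illustrative example: mask.dst_port = 0xffff with
	 * tcf.dst_port = htons(80) yields cfilter.dst_port = htons(80),
	 * while a field whose mask is zero is dropped from the lookup key.
	 */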
3783 
3784 	switch (vcf->flow_type) {
3785 	case VIRTCHNL_TCP_V4_FLOW:
3786 		cfilter.n_proto = ETH_P_IP;
3787 		if (mask.dst_ip[0] & tcf.dst_ip[0])
3788 			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3789 			       sizeof(cfilter.ip.v4.dst_ip));
3790 		else if (mask.src_ip[0] & tcf.src_ip[0])
3791 			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3792 			       sizeof(cfilter.ip.v4.src_ip));
3793 		break;
3794 	case VIRTCHNL_TCP_V6_FLOW:
3795 		cfilter.n_proto = ETH_P_IPV6;
3796 		if (mask.dst_ip[3] & tcf.dst_ip[3])
3797 			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3798 			       sizeof(cfilter.ip.v6.dst_ip6));
3799 		if (mask.src_ip[3] & tcf.src_ip[3])
3800 			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3801 			       sizeof(cfilter.ip.v6.src_ip6));
3802 		break;
3803 	default:
3804 		/* TC filter can be configured based on different combinations
3805 		 * and in this case IP is not a part of filter config
3806 		 */
3807 		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3808 			 vf->vf_id);
3809 	}
3810 
3811 	/* get the VSI to which the TC belongs */
3812 	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3813 	cfilter.seid = vsi->seid;
3814 	cfilter.flags = vcf->field_flags;
3815 
3816 	/* Deleting TC filter */
3817 	if (tcf.dst_port)
3818 		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3819 	else
3820 		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3821 	if (ret) {
3822 		dev_err(&pf->pdev->dev,
3823 			"VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3824 			vf->vf_id, ERR_PTR(ret),
3825 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3826 		goto err;
3827 	}
3828 
3829 	hlist_for_each_entry_safe(cf, node,
3830 				  &vf->cloud_filter_list, cloud_node) {
3831 		if (cf->seid != cfilter.seid)
3832 			continue;
3833 		if (mask.dst_port)
3834 			if (cfilter.dst_port != cf->dst_port)
3835 				continue;
3836 		if (mask.dst_mac[0])
3837 			if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3838 				continue;
3839 		/* for ipv4 data to be valid, only first byte of mask is set */
3840 		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3841 			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3842 				   sizeof(cfilter.ip.v4.dst_ip)))
3843 				continue;
3844 		/* for ipv6, mask is set for all sixteen bytes (4 words) */
3845 		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3846 			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3847 				   sizeof(cfilter.ip.v6.dst_ip6)))
3848 				continue;
3849 		if (mask.vlan_id)
3850 			if (cfilter.vlan_id != cf->vlan_id)
3851 				continue;
3852 
3853 		hlist_del(&cf->cloud_node);
3854 		kfree(cf);
3855 		vf->num_cloud_filters--;
3856 	}
3857 
3858 err:
3859 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3860 				       aq_ret);
3861 }
3862 
3863 /**
3864  * i40e_vc_add_cloud_filter
3865  * @vf: pointer to the VF info
3866  * @msg: pointer to the msg buffer
3867  *
3868  * This function adds a cloud filter programmed as a TC filter for ADq
3869  **/
3870 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3871 {
3872 	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3873 	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3874 	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3875 	struct i40e_cloud_filter *cfilter = NULL;
3876 	struct i40e_pf *pf = vf->pf;
3877 	struct i40e_vsi *vsi = NULL;
3878 	int aq_ret = 0;
3879 	int i;
3880 
3881 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3882 		aq_ret = -EINVAL;
3883 		goto err_out;
3884 	}
3885 
3886 	if (!vf->adq_enabled) {
3887 		dev_info(&pf->pdev->dev,
3888 			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3889 			 vf->vf_id);
3890 		aq_ret = -EINVAL;
3891 		goto err_out;
3892 	}
3893 
3894 	if (i40e_validate_cloud_filter(vf, vcf)) {
3895 		dev_info(&pf->pdev->dev,
3896 			 "VF %d: Invalid input(s), can't apply cloud filter\n",
3897 			 vf->vf_id);
3898 		aq_ret = -EINVAL;
3899 		goto err_out;
3900 	}
3901 
3902 	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3903 	if (!cfilter) {
3904 		aq_ret = -ENOMEM;
3905 		goto err_out;
3906 	}
3907 
3908 	/* parse destination mac address */
3909 	for (i = 0; i < ETH_ALEN; i++)
3910 		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3911 
3912 	/* parse source mac address */
3913 	for (i = 0; i < ETH_ALEN; i++)
3914 		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3915 
3916 	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3917 	cfilter->dst_port = mask.dst_port & tcf.dst_port;
3918 	cfilter->src_port = mask.src_port & tcf.src_port;
3919 
3920 	switch (vcf->flow_type) {
3921 	case VIRTCHNL_TCP_V4_FLOW:
3922 		cfilter->n_proto = ETH_P_IP;
3923 		if (mask.dst_ip[0] & tcf.dst_ip[0])
3924 			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3925 			       sizeof(cfilter->ip.v4.dst_ip));
3926 		else if (mask.src_ip[0] & tcf.src_ip[0])
3927 			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3928 			       sizeof(cfilter->ip.v4.src_ip));
3929 		break;
3930 	case VIRTCHNL_TCP_V6_FLOW:
3931 		cfilter->n_proto = ETH_P_IPV6;
3932 		if (mask.dst_ip[3] & tcf.dst_ip[3])
3933 			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3934 			       sizeof(cfilter->ip.v6.dst_ip6));
3935 		if (mask.src_ip[3] & tcf.src_ip[3])
3936 			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3937 			       sizeof(cfilter->ip.v6.src_ip6));
3938 		break;
3939 	default:
3940 		/* TC filter can be configured based on different combinations
3941 		 * and in this case IP is not a part of filter config
3942 		 */
3943 		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3944 			 vf->vf_id);
3945 	}
3946 
3947 	/* get the VSI to which the TC belongs */
3948 	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3949 	cfilter->seid = vsi->seid;
3950 	cfilter->flags = vcf->field_flags;
3951 
3952 	/* Adding cloud filter programmed as TC filter */
3953 	if (tcf.dst_port)
3954 		aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3955 	else
3956 		aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3957 	if (aq_ret) {
3958 		dev_err(&pf->pdev->dev,
3959 			"VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
3960 			vf->vf_id, ERR_PTR(aq_ret),
3961 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3962 		goto err_free;
3963 	}
3964 
3965 	INIT_HLIST_NODE(&cfilter->cloud_node);
3966 	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3967 	/* the list now owns the filter; clear the pointer so err_free won't free it */
3968 	cfilter = NULL;
3969 	vf->num_cloud_filters++;
3970 err_free:
3971 	kfree(cfilter);
3972 err_out:
3973 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3974 				       aq_ret);
3975 }
3976 
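/* A minimal sketch (illustrative only, not taken from a VF driver) of the
 * virtchnl_filter message the two handlers above consume, steering TCP
 * dport 443 to TC 1:
 *
 *	struct virtchnl_filter f = {
 *		.flow_type   = VIRTCHNL_TCP_V4_FLOW,
 *		.action      = VIRTCHNL_ACTION_TC_REDIRECT,
 *		.action_meta = 1, // target TC
 *	};
 *	f.mask.tcp_spec.dst_port = cpu_to_be16(0xffff);
 *	f.data.tcp_spec.dst_port = cpu_to_be16(443);
 */
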
3977 /**
3978  * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3979  * @vf: pointer to the VF info
3980  * @msg: pointer to the msg buffer
3981  **/
3982 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3983 {
3984 	struct virtchnl_tc_info *tci =
3985 		(struct virtchnl_tc_info *)msg;
3986 	struct i40e_pf *pf = vf->pf;
3987 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
3988 	int i, adq_request_qps = 0;
3989 	int aq_ret = 0;
3990 	u64 speed = 0;
3991 
3992 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3993 		aq_ret = -EINVAL;
3994 		goto err;
3995 	}
3996 
3997 	/* ADq cannot be applied if spoof check is ON */
3998 	if (vf->spoofchk) {
3999 		dev_err(&pf->pdev->dev,
4000 			"Spoof check is ON, turn it OFF to enable ADq\n");
4001 		aq_ret = -EINVAL;
4002 		goto err;
4003 	}
4004 
4005 	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
4006 		dev_err(&pf->pdev->dev,
4007 			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
4008 			vf->vf_id);
4009 		aq_ret = -EINVAL;
4010 		goto err;
4011 	}
4012 
4013 	/* max number of traffic classes for VF currently capped at 4 */
4014 	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
4015 		dev_err(&pf->pdev->dev,
4016 			"VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
4017 			vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
4018 		aq_ret = -EINVAL;
4019 		goto err;
4020 	}
4021 
4022 	/* validate queues for each TC */
4023 	for (i = 0; i < tci->num_tc; i++)
4024 		if (!tci->list[i].count ||
4025 		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
4026 			dev_err(&pf->pdev->dev,
4027 				"VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
4028 				vf->vf_id, i, tci->list[i].count,
4029 				I40E_DEFAULT_QUEUES_PER_VF);
4030 			aq_ret = -EINVAL;
4031 			goto err;
4032 		}
4033 
4034 	/* ADq needs the max VF queues; the VF already has the default number */
4035 	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
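	/* e.g. with I40E_MAX_VF_QUEUES of 16 and a 4-queue default this
	 * asks for 12 additional queue pairs (illustrative arithmetic;
	 * the real values come from the driver headers)
	 */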
4036 
4037 	if (pf->queues_left < adq_request_qps) {
4038 		dev_err(&pf->pdev->dev,
4039 			"No queues left to allocate to VF %d\n",
4040 			vf->vf_id);
4041 		aq_ret = -EINVAL;
4042 		goto err;
4043 	} else {
4044 		/* allocate the max VF queues when enabling ADq so that an
4045 		 * ADq-enabled VF is always guaranteed its queues back when
4046 		 * it goes through a reset.
4047 		 */
4048 		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
4049 	}
4050 
4051 	/* get link speed in Mbps to validate rate limit */
4052 	speed = i40e_vc_link_speed2mbps(ls->link_speed);
4053 	if (speed == SPEED_UNKNOWN) {
4054 		dev_err(&pf->pdev->dev,
4055 			"Cannot detect link speed\n");
4056 		aq_ret = -EINVAL;
4057 		goto err;
4058 	}
4059 
4060 	/* parse data from the queue channel info */
4061 	vf->num_tc = tci->num_tc;
4062 	for (i = 0; i < vf->num_tc; i++) {
4063 		if (tci->list[i].max_tx_rate) {
4064 			if (tci->list[i].max_tx_rate > speed) {
4065 				dev_err(&pf->pdev->dev,
4066 					"Invalid max tx rate %llu specified for VF %d.\n",
4067 					tci->list[i].max_tx_rate,
4068 					vf->vf_id);
4069 				aq_ret = -EINVAL;
4070 				goto err;
4071 			} else {
4072 				vf->ch[i].max_tx_rate =
4073 					tci->list[i].max_tx_rate;
4074 			}
4075 		}
4076 		vf->ch[i].num_qps = tci->list[i].count;
4077 	}
4078 
4079 	/* set this flag only after making sure all inputs are sane */
4080 	vf->adq_enabled = true;
4081 
4082 	/* reset the VF in order to allocate resources */
4083 	i40e_vc_reset_vf(vf, true);
4084 
4085 	return 0;
4086 
4087 	/* send the response to the VF */
4088 err:
4089 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
4090 				       aq_ret);
4091 }
4092 
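/* Illustrative request for the handler above (field values hypothetical):
 * a VF asking for two TCs with four queues each and a 500 Mbps cap on the
 * second TC would send a virtchnl_tc_info with num_tc = 2,
 * list[0].count = 4, list[1].count = 4 and list[1].max_tx_rate = 500.
 */
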
4093 /**
4094  * i40e_vc_del_qch_msg
4095  * @vf: pointer to the VF info
4096  * @msg: pointer to the msg buffer
4097  **/
4098 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
4099 {
4100 	struct i40e_pf *pf = vf->pf;
4101 	int aq_ret = 0;
4102 
4103 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4104 		aq_ret = -EINVAL;
4105 		goto err;
4106 	}
4107 
4108 	if (vf->adq_enabled) {
4109 		i40e_del_all_cloud_filters(vf);
4110 		i40e_del_qch(vf);
4111 		vf->adq_enabled = false;
4112 		vf->num_tc = 0;
4113 		dev_info(&pf->pdev->dev,
4114 			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
4115 			 vf->vf_id);
4116 	} else {
4117 		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4118 			 vf->vf_id);
4119 		aq_ret = -EINVAL;
4120 	}
4121 
4122 	/* reset the VF in order to allocate resources */
4123 	i40e_vc_reset_vf(vf, true);
4124 
4125 	return 0;
4126 
4127 err:
4128 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4129 				       aq_ret);
4130 }
4131 
4132 /**
4133  * i40e_vc_process_vf_msg
4134  * @pf: pointer to the PF structure
4135  * @vf_id: source VF id
4136  * @v_opcode: operation code
4137  * @v_retval: unused return value code
4138  * @msg: pointer to the msg buffer
4139  * @msglen: msg length
4140  *
4141  * called from the common AEQ/ARQ handler to
4142  * process a request from a VF
4143  **/
4144 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4145 			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
4146 {
4147 	struct i40e_hw *hw = &pf->hw;
4148 	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4149 	struct i40e_vf *vf;
4150 	int ret;
4151 
4152 	pf->vf_aq_requests++;
4153 	if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4154 		return -EINVAL;
4155 	vf = &pf->vf[local_vf_id];
4156 
4157 	/* Check if VF is disabled. */
4158 	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4159 		return -EINVAL;
4160 
4161 	/* perform basic checks on the msg */
4162 	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4163 
4164 	if (ret) {
4165 		i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
4166 		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4167 			local_vf_id, v_opcode, msglen);
4168 		return ret;
4169 	}
4170 
4171 	switch (v_opcode) {
4172 	case VIRTCHNL_OP_VERSION:
4173 		ret = i40e_vc_get_version_msg(vf, msg);
4174 		break;
4175 	case VIRTCHNL_OP_GET_VF_RESOURCES:
4176 		ret = i40e_vc_get_vf_resources_msg(vf, msg);
4177 		i40e_vc_notify_vf_link_state(vf);
4178 		break;
4179 	case VIRTCHNL_OP_RESET_VF:
4180 		i40e_vc_reset_vf(vf, false);
4181 		ret = 0;
4182 		break;
4183 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4184 		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4185 		break;
4186 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4187 		ret = i40e_vc_config_queues_msg(vf, msg);
4188 		break;
4189 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4190 		ret = i40e_vc_config_irq_map_msg(vf, msg);
4191 		break;
4192 	case VIRTCHNL_OP_ENABLE_QUEUES:
4193 		ret = i40e_vc_enable_queues_msg(vf, msg);
4194 		i40e_vc_notify_vf_link_state(vf);
4195 		break;
4196 	case VIRTCHNL_OP_DISABLE_QUEUES:
4197 		ret = i40e_vc_disable_queues_msg(vf, msg);
4198 		break;
4199 	case VIRTCHNL_OP_ADD_ETH_ADDR:
4200 		ret = i40e_vc_add_mac_addr_msg(vf, msg);
4201 		break;
4202 	case VIRTCHNL_OP_DEL_ETH_ADDR:
4203 		ret = i40e_vc_del_mac_addr_msg(vf, msg);
4204 		break;
4205 	case VIRTCHNL_OP_ADD_VLAN:
4206 		ret = i40e_vc_add_vlan_msg(vf, msg);
4207 		break;
4208 	case VIRTCHNL_OP_DEL_VLAN:
4209 		ret = i40e_vc_remove_vlan_msg(vf, msg);
4210 		break;
4211 	case VIRTCHNL_OP_GET_STATS:
4212 		ret = i40e_vc_get_stats_msg(vf, msg);
4213 		break;
4214 	case VIRTCHNL_OP_RDMA:
4215 		ret = i40e_vc_rdma_msg(vf, msg, msglen);
4216 		break;
4217 	case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
4218 		ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
4219 		break;
4220 	case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
4221 		ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
4222 		break;
4223 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
4224 		ret = i40e_vc_config_rss_key(vf, msg);
4225 		break;
4226 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
4227 		ret = i40e_vc_config_rss_lut(vf, msg);
4228 		break;
4229 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4230 		ret = i40e_vc_get_rss_hena(vf, msg);
4231 		break;
4232 	case VIRTCHNL_OP_SET_RSS_HENA:
4233 		ret = i40e_vc_set_rss_hena(vf, msg);
4234 		break;
4235 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4236 		ret = i40e_vc_enable_vlan_stripping(vf, msg);
4237 		break;
4238 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4239 		ret = i40e_vc_disable_vlan_stripping(vf, msg);
4240 		break;
4241 	case VIRTCHNL_OP_REQUEST_QUEUES:
4242 		ret = i40e_vc_request_queues_msg(vf, msg);
4243 		break;
4244 	case VIRTCHNL_OP_ENABLE_CHANNELS:
4245 		ret = i40e_vc_add_qch_msg(vf, msg);
4246 		break;
4247 	case VIRTCHNL_OP_DISABLE_CHANNELS:
4248 		ret = i40e_vc_del_qch_msg(vf, msg);
4249 		break;
4250 	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4251 		ret = i40e_vc_add_cloud_filter(vf, msg);
4252 		break;
4253 	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4254 		ret = i40e_vc_del_cloud_filter(vf, msg);
4255 		break;
4256 	case VIRTCHNL_OP_UNKNOWN:
4257 	default:
4258 		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4259 			v_opcode, local_vf_id);
4260 		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4261 					      -EOPNOTSUPP);
4262 		break;
4263 	}
4264 
4265 	return ret;
4266 }
4267 
4268 /**
4269  * i40e_vc_process_vflr_event
4270  * @pf: pointer to the PF structure
4271  *
4272  * called from the VFLR IRQ handler to
4273  * free up VF resources and state variables
4274  **/
4275 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4276 {
4277 	struct i40e_hw *hw = &pf->hw;
4278 	u32 reg, reg_idx, bit_idx;
4279 	struct i40e_vf *vf;
4280 	int vf_id;
4281 
4282 	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4283 		return 0;
4284 
4285 	/* Re-enable the VFLR interrupt cause here, before looking for which
4286 	 * VF got reset. Otherwise, if another VF gets a reset while the
4287 	 * first one is being processed, that interrupt will be lost, and
4288 	 * that VF will be stuck in reset forever.
4289 	 */
4290 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4291 	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4292 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4293 	i40e_flush(hw);
4294 
4295 	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4296 	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4297 		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4298 		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
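		/* e.g. (illustrative) vf_base_id 64 and vf_id 40 address
		 * global VF 104, i.e. reg_idx 3, bit_idx 8
		 */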
4299 		/* read GLGEN_VFLRSTAT register to find out the FLR'd VFs */
4300 		vf = &pf->vf[vf_id];
4301 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4302 		if (reg & BIT(bit_idx))
4303 			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4304 			i40e_reset_vf(vf, true);
4305 	}
4306 
4307 	return 0;
4308 }
4309 
4310 /**
4311  * i40e_validate_vf
4312  * @pf: the physical function
4313  * @vf_id: VF identifier
4314  *
4315  * Check that the VF is enabled and the VSI exists.
4316  *
4317  * Returns 0 on success, negative on failure
4318  **/
4319 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4320 {
4321 	struct i40e_vsi *vsi;
4322 	struct i40e_vf *vf;
4323 	int ret = 0;
4324 
4325 	if (vf_id >= pf->num_alloc_vfs) {
4326 		dev_err(&pf->pdev->dev,
4327 			"Invalid VF Identifier %d\n", vf_id);
4328 		ret = -EINVAL;
4329 		goto err_out;
4330 	}
4331 	vf = &pf->vf[vf_id];
4332 	vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4333 	if (!vsi)
4334 		ret = -EINVAL;
4335 err_out:
4336 	return ret;
4337 }
4338 
4339 /**
4340  * i40e_check_vf_init_timeout
4341  * @vf: the virtual function
4342  *
4343  * Check that the VF's initialization completed successfully and, if it
4344  * has not, wait up to 300 ms for it to finish.
4345  *
4346  * Returns true when VF is initialized, false on timeout
4347  **/
4348 static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
4349 {
4350 	int i;
4351 
4352 	/* When the VF is resetting, wait until it is done.
4353 	 * It can take up to 200 milliseconds, but wait for
4354 	 * up to 300 milliseconds to be safe.
4355 	 */
4356 	for (i = 0; i < 15; i++) {
4357 		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4358 			return true;
4359 		msleep(20);
4360 	}
4361 
4362 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4363 		dev_err(&vf->pf->pdev->dev,
4364 			"VF %d still in reset. Try again.\n", vf->vf_id);
4365 		return false;
4366 	}
4367 
4368 	return true;
4369 }
4370 
4371 /**
4372  * i40e_ndo_set_vf_mac
4373  * @netdev: network interface device structure
4374  * @vf_id: VF identifier
4375  * @mac: mac address
4376  *
4377  * program VF mac address
4378  **/
4379 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4380 {
4381 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4382 	struct i40e_vsi *vsi = np->vsi;
4383 	struct i40e_pf *pf = vsi->back;
4384 	struct i40e_mac_filter *f;
4385 	struct i40e_vf *vf;
4386 	int ret = 0;
4387 	struct hlist_node *h;
4388 	int bkt;
4389 
4390 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4391 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4392 		return -EAGAIN;
4393 	}
4394 
4395 	/* validate the request */
4396 	ret = i40e_validate_vf(pf, vf_id);
4397 	if (ret)
4398 		goto error_param;
4399 
4400 	vf = &pf->vf[vf_id];
4401 	if (!i40e_check_vf_init_timeout(vf)) {
4402 		ret = -EAGAIN;
4403 		goto error_param;
4404 	}
4405 	vsi = pf->vsi[vf->lan_vsi_idx];
4406 
4407 	if (is_multicast_ether_addr(mac)) {
4408 		dev_err(&pf->pdev->dev,
4409 			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4410 		ret = -EINVAL;
4411 		goto error_param;
4412 	}
4413 
4414 	/* Lock once because below invoked function add/del_filter requires
4415 	 * mac_filter_hash_lock to be held
4416 	 */
4417 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4418 
4419 	/* delete the temporary mac address */
4420 	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4421 		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4422 
4423 	/* Delete all the filters for this VSI - we're going to kill it
4424 	 * anyway.
4425 	 */
4426 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4427 		__i40e_del_filter(vsi, f);
4428 
4429 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4430 
4431 	/* program mac filter */
4432 	if (i40e_sync_vsi_filters(vsi)) {
4433 		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4434 		ret = -EIO;
4435 		goto error_param;
4436 	}
4437 	ether_addr_copy(vf->default_lan_addr.addr, mac);
4438 
4439 	if (is_zero_ether_addr(mac)) {
4440 		vf->pf_set_mac = false;
4441 		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4442 	} else {
4443 		vf->pf_set_mac = true;
4444 		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4445 			 mac, vf_id);
4446 	}
4447 
4448 	/* Force the VF interface down so it has to come back up with the
4449 	 * new MAC address
4450 	 */
4451 	i40e_vc_reset_vf(vf, true);
4452 	dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4453 
4454 error_param:
4455 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4456 	return ret;
4457 }
4458 
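/* i40e_ndo_set_vf_mac() is reached through the rtnetlink IFLA_VF_MAC path;
 * for example (hypothetical interface name):
 *
 *	ip link set dev enp6s0f0 vf 0 mac 02:00:00:00:00:01
 */
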
4459 /**
4460  * i40e_ndo_set_vf_port_vlan
4461  * @netdev: network interface device structure
4462  * @vf_id: VF identifier
4463  * @vlan_id: VLAN ID
4464  * @qos: priority setting
4465  * @vlan_proto: vlan protocol
4466  *
4467  * program VF vlan id and/or qos
4468  **/
4469 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4470 			      u16 vlan_id, u8 qos, __be16 vlan_proto)
4471 {
4472 	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4473 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4474 	bool allmulti = false, alluni = false;
4475 	struct i40e_pf *pf = np->vsi->back;
4476 	struct i40e_vsi *vsi;
4477 	struct i40e_vf *vf;
4478 	int ret = 0;
4479 
4480 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4481 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4482 		return -EAGAIN;
4483 	}
4484 
4485 	/* validate the request */
4486 	ret = i40e_validate_vf(pf, vf_id);
4487 	if (ret)
4488 		goto error_pvid;
4489 
4490 	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4491 		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4492 		ret = -EINVAL;
4493 		goto error_pvid;
4494 	}
4495 
4496 	if (vlan_proto != htons(ETH_P_8021Q)) {
4497 		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4498 		ret = -EPROTONOSUPPORT;
4499 		goto error_pvid;
4500 	}
4501 
4502 	vf = &pf->vf[vf_id];
4503 	if (!i40e_check_vf_init_timeout(vf)) {
4504 		ret = -EAGAIN;
4505 		goto error_pvid;
4506 	}
4507 	vsi = pf->vsi[vf->lan_vsi_idx];
4508 
4509 	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4510 		/* duplicate request, so just return success */
4511 		goto error_pvid;
4512 
4513 	i40e_vlan_stripping_enable(vsi);
4514 
4515 	/* Locked once because multiple functions below iterate list */
4516 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4517 
4518 	/* Check for condition where there was already a port VLAN ID
4519 	 * filter set and now it is being deleted by setting it to zero.
4520 	 * Additionally check for the condition where there was a port
4521 	 * VLAN but now there is a new and different port VLAN being set.
4522 	 * Before deleting all the old VLAN filters we must add new ones
4523 	 * with -1 (I40E_VLAN_ANY); otherwise we're left with all our
4524 	 * MAC addresses deleted.
4525 	 */
4526 	if ((!(vlan_id || qos) ||
4527 	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4528 	    vsi->info.pvid) {
4529 		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4530 		if (ret) {
4531 			dev_info(&vsi->back->pdev->dev,
4532 				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4533 				 vsi->back->hw.aq.asq_last_status);
4534 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4535 			goto error_pvid;
4536 		}
4537 	}
4538 
4539 	if (vsi->info.pvid) {
4540 		/* remove all filters on the old VLAN */
4541 		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4542 					   VLAN_VID_MASK));
4543 	}
4544 
4545 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4546 
4547 	/* disable promisc modes in case they were enabled */
4548 	ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4549 					      allmulti, alluni);
4550 	if (ret) {
4551 		dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4552 		goto error_pvid;
4553 	}
4554 
4555 	if (vlan_id || qos)
4556 		ret = i40e_vsi_add_pvid(vsi, vlanprio);
4557 	else
4558 		i40e_vsi_remove_pvid(vsi);
4559 	spin_lock_bh(&vsi->mac_filter_hash_lock);
4560 
4561 	if (vlan_id) {
4562 		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4563 			 vlan_id, qos, vf_id);
4564 
4565 		/* add new VLAN filter for each MAC */
4566 		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4567 		if (ret) {
4568 			dev_info(&vsi->back->pdev->dev,
4569 				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4570 				 vsi->back->hw.aq.asq_last_status);
4571 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
4572 			goto error_pvid;
4573 		}
4574 
4575 		/* remove the previously added non-VLAN MAC filters */
4576 		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4577 	}
4578 
4579 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
4580 
4581 	if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4582 		alluni = true;
4583 
4584 	if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4585 		allmulti = true;
4586 
4587 	/* Schedule the worker thread to take care of applying changes */
4588 	i40e_service_event_schedule(vsi->back);
4589 
4590 	if (ret) {
4591 		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4592 		goto error_pvid;
4593 	}
4594 
4595 	/* The Port VLAN needs to be saved across resets the same as the
4596 	 * default LAN MAC address.
4597 	 */
4598 	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4599 
4600 	i40e_vc_reset_vf(vf, true);
4601 	/* During reset the VF got a new VSI, so refresh a pointer. */
4602 	vsi = pf->vsi[vf->lan_vsi_idx];
4603 
4604 	ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4605 	if (ret) {
4606 		dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4607 		goto error_pvid;
4608 	}
4609 
4610 	ret = 0;
4611 
4612 error_pvid:
4613 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4614 	return ret;
4615 }
4616 
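/* Example (hypothetical interface name) for the handler above; "vlan 0"
 * removes the port VLAN again:
 *
 *	ip link set dev enp6s0f0 vf 0 vlan 100 qos 3
 *	ip link set dev enp6s0f0 vf 0 vlan 0
 */
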
4617 /**
4618  * i40e_ndo_set_vf_bw
4619  * @netdev: network interface device structure
4620  * @vf_id: VF identifier
4621  * @min_tx_rate: Minimum Tx rate
4622  * @max_tx_rate: Maximum Tx rate
4623  *
4624  * configure VF Tx rate
4625  **/
4626 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4627 		       int max_tx_rate)
4628 {
4629 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4630 	struct i40e_pf *pf = np->vsi->back;
4631 	struct i40e_vsi *vsi;
4632 	struct i40e_vf *vf;
4633 	int ret = 0;
4634 
4635 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4636 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4637 		return -EAGAIN;
4638 	}
4639 
4640 	/* validate the request */
4641 	ret = i40e_validate_vf(pf, vf_id);
4642 	if (ret)
4643 		goto error;
4644 
4645 	if (min_tx_rate) {
4646 		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4647 			min_tx_rate, vf_id);
4648 		ret = -EINVAL;
4649 		goto error;
4650 	}
4651 
4652 	vf = &pf->vf[vf_id];
4653 	if (!i40e_check_vf_init_timeout(vf)) {
4654 		ret = -EAGAIN;
4655 		goto error;
4656 	}
4657 	vsi = pf->vsi[vf->lan_vsi_idx];
4658 
4659 	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4660 	if (ret)
4661 		goto error;
4662 
4663 	vf->tx_rate = max_tx_rate;
4664 error:
4665 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4666 	return ret;
4667 }
4668 
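/* Example (hypothetical interface name); max_tx_rate is in Mbps and a
 * value of 0 removes the limit:
 *
 *	ip link set dev enp6s0f0 vf 0 max_tx_rate 1000
 */
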
4669 /**
4670  * i40e_ndo_get_vf_config
4671  * @netdev: network interface device structure
4672  * @vf_id: VF identifier
4673  * @ivi: VF configuration structure
4674  *
4675  * return VF configuration
4676  **/
4677 int i40e_ndo_get_vf_config(struct net_device *netdev,
4678 			   int vf_id, struct ifla_vf_info *ivi)
4679 {
4680 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4681 	struct i40e_vsi *vsi = np->vsi;
4682 	struct i40e_pf *pf = vsi->back;
4683 	struct i40e_vf *vf;
4684 	int ret = 0;
4685 
4686 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4687 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4688 		return -EAGAIN;
4689 	}
4690 
4691 	/* validate the request */
4692 	ret = i40e_validate_vf(pf, vf_id);
4693 	if (ret)
4694 		goto error_param;
4695 
4696 	vf = &pf->vf[vf_id];
4697 	/* the first VSI is always the LAN VSI */
4698 	vsi = pf->vsi[vf->lan_vsi_idx];
4699 	if (!vsi) {
4700 		ret = -ENOENT;
4701 		goto error_param;
4702 	}
4703 
4704 	ivi->vf = vf_id;
4705 
4706 	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4707 
4708 	ivi->max_tx_rate = vf->tx_rate;
4709 	ivi->min_tx_rate = 0;
4710 	ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
4711 	ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
4712 	if (!vf->link_forced)
4713 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4714 	else if (vf->link_up)
4715 		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4716 	else
4717 		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4718 	ivi->spoofchk = vf->spoofchk;
4719 	ivi->trusted = vf->trusted;
4720 	ret = 0;
4721 
4722 error_param:
4723 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4724 	return ret;
4725 }
4726 
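/* The fields filled in above are what, e.g., "ip link show dev enp6s0f0"
 * (hypothetical interface name) prints on each VF line.
 */
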
4727 /**
4728  * i40e_ndo_set_vf_link_state
4729  * @netdev: network interface device structure
4730  * @vf_id: VF identifier
4731  * @link: required link state
4732  *
4733  * Set the link state of a specified VF, regardless of physical link state
4734  **/
4735 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4736 {
4737 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4738 	struct i40e_pf *pf = np->vsi->back;
4739 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
4740 	struct virtchnl_pf_event pfe;
4741 	struct i40e_hw *hw = &pf->hw;
4742 	struct i40e_vsi *vsi;
4743 	unsigned long q_map;
4744 	struct i40e_vf *vf;
4745 	int abs_vf_id;
4746 	int ret = 0;
4747 	int tmp;
4748 
4749 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4750 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4751 		return -EAGAIN;
4752 	}
4753 
4754 	/* validate the request */
4755 	if (vf_id >= pf->num_alloc_vfs) {
4756 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4757 		ret = -EINVAL;
4758 		goto error_out;
4759 	}
4760 
4761 	vf = &pf->vf[vf_id];
4762 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4763 
4764 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4765 	pfe.severity = PF_EVENT_SEVERITY_INFO;
4766 
4767 	switch (link) {
4768 	case IFLA_VF_LINK_STATE_AUTO:
4769 		vf->link_forced = false;
4770 		vf->is_disabled_from_host = false;
4771 		/* reset needed to reinit VF resources */
4772 		i40e_vc_reset_vf(vf, true);
4773 		i40e_set_vf_link_state(vf, &pfe, ls);
4774 		break;
4775 	case IFLA_VF_LINK_STATE_ENABLE:
4776 		vf->link_forced = true;
4777 		vf->link_up = true;
4778 		vf->is_disabled_from_host = false;
4779 		/* reset needed to reinit VF resources */
4780 		i40e_vc_reset_vf(vf, true);
4781 		i40e_set_vf_link_state(vf, &pfe, ls);
4782 		break;
4783 	case IFLA_VF_LINK_STATE_DISABLE:
4784 		vf->link_forced = true;
4785 		vf->link_up = false;
4786 		i40e_set_vf_link_state(vf, &pfe, ls);
4787 
4788 		vsi = pf->vsi[vf->lan_vsi_idx];
4789 		q_map = BIT(vsi->num_queue_pairs) - 1;
4790 
4791 		vf->is_disabled_from_host = true;
4792 
4793 		/* Try to stop both the Tx and Rx rings even if one of the
4794 		 * calls fails, so the rings are stopped even on error.
4795 		 * If either call fails, the first error that
4796 		 * occurred is returned.
4797 		 */
4798 		tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
4799 		ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
4800 
4801 		ret = tmp ? tmp : ret;
4802 		break;
4803 	default:
4804 		ret = -EINVAL;
4805 		goto error_out;
4806 	}
4807 	/* Notify the VF of its new link state */
4808 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4809 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
4810 
4811 error_out:
4812 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4813 	return ret;
4814 }
4815 
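/* Example (hypothetical interface name):
 *
 *	ip link set dev enp6s0f0 vf 0 state disable
 *	ip link set dev enp6s0f0 vf 0 state auto
 */
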
4816 /**
4817  * i40e_ndo_set_vf_spoofchk
4818  * @netdev: network interface device structure
4819  * @vf_id: VF identifier
4820  * @enable: flag to enable or disable feature
4821  *
4822  * Enable or disable VF spoof checking
4823  **/
4824 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4825 {
4826 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4827 	struct i40e_vsi *vsi = np->vsi;
4828 	struct i40e_pf *pf = vsi->back;
4829 	struct i40e_vsi_context ctxt;
4830 	struct i40e_hw *hw = &pf->hw;
4831 	struct i40e_vf *vf;
4832 	int ret = 0;
4833 
4834 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4835 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4836 		return -EAGAIN;
4837 	}
4838 
4839 	/* validate the request */
4840 	if (vf_id >= pf->num_alloc_vfs) {
4841 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4842 		ret = -EINVAL;
4843 		goto out;
4844 	}
4845 
4846 	vf = &pf->vf[vf_id];
4847 	if (!i40e_check_vf_init_timeout(vf)) {
4848 		ret = -EAGAIN;
4849 		goto out;
4850 	}
4851 
4852 	if (enable == vf->spoofchk)
4853 		goto out;
4854 
4855 	vf->spoofchk = enable;
4856 	memset(&ctxt, 0, sizeof(ctxt));
4857 	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4858 	ctxt.pf_num = pf->hw.pf_id;
4859 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4860 	if (enable)
4861 		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4862 					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4863 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4864 	if (ret) {
4865 		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4866 			ret);
4867 		ret = -EIO;
4868 	}
4869 out:
4870 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4871 	return ret;
4872 }
4873 
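/* Example (hypothetical interface name):
 *
 *	ip link set dev enp6s0f0 vf 0 spoofchk off
 */
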
4874 /**
4875  * i40e_ndo_set_vf_trust
4876  * @netdev: network interface device structure of the pf
4877  * @vf_id: VF identifier
4878  * @setting: trust setting
4879  *
4880  * Enable or disable VF trust setting
4881  **/
4882 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4883 {
4884 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4885 	struct i40e_pf *pf = np->vsi->back;
4886 	struct i40e_vf *vf;
4887 	int ret = 0;
4888 
4889 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4890 		dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4891 		return -EAGAIN;
4892 	}
4893 
4894 	/* validate the request */
4895 	if (vf_id >= pf->num_alloc_vfs) {
4896 		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4897 		ret = -EINVAL;
4898 		goto out;
4899 	}
4900 
4901 	if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
4902 		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4903 		ret = -EINVAL;
4904 		goto out;
4905 	}
4906 
4907 	vf = &pf->vf[vf_id];
4908 
4909 	if (setting == vf->trusted)
4910 		goto out;
4911 
4912 	vf->trusted = setting;
4913 
4914 	/* request PF to sync mac/vlan filters for the VF */
4915 	set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
4916 	pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
4917 
4918 	i40e_vc_reset_vf(vf, true);
4919 	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4920 		 vf_id, setting ? "" : "un");
4921 
4922 	if (vf->adq_enabled) {
4923 		if (!vf->trusted) {
4924 			dev_info(&pf->pdev->dev,
4925 				 "VF %u no longer Trusted, deleting all cloud filters\n",
4926 				 vf_id);
4927 			i40e_del_all_cloud_filters(vf);
4928 		}
4929 	}
4930 
4931 out:
4932 	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4933 	return ret;
4934 }
4935 
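/* Example (hypothetical interface name):
 *
 *	ip link set dev enp6s0f0 vf 0 trust on
 */
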
4936 /**
4937  * i40e_get_vf_stats - populate some stats for the VF
4938  * @netdev: the netdev of the PF
4939  * @vf_id: the host OS identifier (0-127)
4940  * @vf_stats: pointer to the OS memory to be initialized
4941  */
4942 int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4943 		      struct ifla_vf_stats *vf_stats)
4944 {
4945 	struct i40e_netdev_priv *np = netdev_priv(netdev);
4946 	struct i40e_pf *pf = np->vsi->back;
4947 	struct i40e_eth_stats *stats;
4948 	struct i40e_vsi *vsi;
4949 	struct i40e_vf *vf;
4950 
4951 	/* validate the request */
4952 	if (i40e_validate_vf(pf, vf_id))
4953 		return -EINVAL;
4954 
4955 	vf = &pf->vf[vf_id];
4956 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4957 		dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4958 		return -EBUSY;
4959 	}
4960 
4961 	vsi = pf->vsi[vf->lan_vsi_idx];
4962 	if (!vsi)
4963 		return -EINVAL;
4964 
4965 	i40e_update_eth_stats(vsi);
4966 	stats = &vsi->eth_stats;
4967 
4968 	memset(vf_stats, 0, sizeof(*vf_stats));
4969 
4970 	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4971 		stats->rx_multicast;
4972 	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4973 		stats->tx_multicast;
4974 	vf_stats->rx_bytes   = stats->rx_bytes;
4975 	vf_stats->tx_bytes   = stats->tx_bytes;
4976 	vf_stats->broadcast  = stats->rx_broadcast;
4977 	vf_stats->multicast  = stats->rx_multicast;
4978 	vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
4979 	vf_stats->tx_dropped = stats->tx_discards;
4980 
4981 	return 0;
4982 }
4983
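/* The counters above back the per-VF statistics shown by, e.g.,
 * "ip -s link show dev enp6s0f0" (hypothetical interface name).
 */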