xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/eq.c (revision 975ef7ff81bb000af6e6c8e63e81f89f3468dcf7)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/interrupt.h>
34 #include <linux/module.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/cmd.h>
37 #ifdef CONFIG_RFS_ACCEL
38 #include <linux/cpu_rmap.h>
39 #endif
40 #include "mlx5_core.h"
41 #include "fpga/core.h"
42 #include "eswitch.h"
43 
44 enum {
45 	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
46 	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
47 };
48 
49 enum {
50 	MLX5_EQ_STATE_ARMED		= 0x9,
51 	MLX5_EQ_STATE_FIRED		= 0xa,
52 	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
53 };
54 
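/* EQ and page-fault pool sizing.  Every EQ is created with
 * MLX5_NUM_SPARE_EQE extra entries so the consumer index only needs to be
 * written back to hardware once per MLX5_NUM_SPARE_EQE processed events
 * (see mlx5_eq_int() and eq_pf_process()).
 */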
55 enum {
56 	MLX5_NUM_SPARE_EQE	= 0x80,
57 	MLX5_NUM_ASYNC_EQE	= 0x1000,
58 	MLX5_NUM_CMD_EQE	= 32,
59 	MLX5_NUM_PF_DRAIN	= 64,
60 };
61 
62 enum {
63 	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
64 };
65 
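/* Asynchronous events the async EQ always subscribes to.  Optional events
 * (vport change, general, port module, PPS, FPGA, DCT drained, temperature
 * warning) are added to this mask in mlx5_start_eqs() according to the
 * device capabilities.
 */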
66 #define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
67 			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
68 			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
69 			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
70 			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
71 			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
72 			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
73 			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
74 			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
75 			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
76 			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
77 			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
78 
79 struct map_eq_in {
80 	u64	mask;
81 	u32	reserved;
82 	u32	unmap_eqn;
83 };
84 
85 struct cre_des_eq {
86 	u8	reserved[15];
87 	u8	eqn;
88 };
89 
90 static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
91 {
92 	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
93 	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};
94 
95 	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
96 	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
97 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
98 }
99 
100 static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
101 {
102 	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
103 }
104 
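/* Return the next EQE owned by software, or NULL if the queue is empty.
 * Hardware flips the owner bit on each pass through the EQ, so an entry
 * belongs to software when its owner bit matches the parity of the
 * current pass (cons_index & nent).
 */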
105 static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
106 {
107 	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
108 
109 	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
110 }
111 
112 static const char *eqe_type_str(u8 type)
113 {
114 	switch (type) {
115 	case MLX5_EVENT_TYPE_COMP:
116 		return "MLX5_EVENT_TYPE_COMP";
117 	case MLX5_EVENT_TYPE_PATH_MIG:
118 		return "MLX5_EVENT_TYPE_PATH_MIG";
119 	case MLX5_EVENT_TYPE_COMM_EST:
120 		return "MLX5_EVENT_TYPE_COMM_EST";
121 	case MLX5_EVENT_TYPE_SQ_DRAINED:
122 		return "MLX5_EVENT_TYPE_SQ_DRAINED";
123 	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
124 		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
125 	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
126 		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
127 	case MLX5_EVENT_TYPE_CQ_ERROR:
128 		return "MLX5_EVENT_TYPE_CQ_ERROR";
129 	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
130 		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
131 	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
132 		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
133 	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
134 		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
135 	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
136 		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
137 	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
138 		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
139 	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
140 		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
141 	case MLX5_EVENT_TYPE_PORT_CHANGE:
142 		return "MLX5_EVENT_TYPE_PORT_CHANGE";
143 	case MLX5_EVENT_TYPE_GPIO_EVENT:
144 		return "MLX5_EVENT_TYPE_GPIO_EVENT";
145 	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
146 		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
147 	case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
148 		return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
149 	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
150 		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
151 	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
152 		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
153 	case MLX5_EVENT_TYPE_STALL_EVENT:
154 		return "MLX5_EVENT_TYPE_STALL_EVENT";
155 	case MLX5_EVENT_TYPE_CMD:
156 		return "MLX5_EVENT_TYPE_CMD";
157 	case MLX5_EVENT_TYPE_PAGE_REQUEST:
158 		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
159 	case MLX5_EVENT_TYPE_PAGE_FAULT:
160 		return "MLX5_EVENT_TYPE_PAGE_FAULT";
161 	case MLX5_EVENT_TYPE_PPS_EVENT:
162 		return "MLX5_EVENT_TYPE_PPS_EVENT";
163 	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
164 		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
165 	case MLX5_EVENT_TYPE_FPGA_ERROR:
166 		return "MLX5_EVENT_TYPE_FPGA_ERROR";
167 	case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
168 		return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
169 	case MLX5_EVENT_TYPE_GENERAL_EVENT:
170 		return "MLX5_EVENT_TYPE_GENERAL_EVENT";
171 	default:
172 		return "Unrecognized event";
173 	}
174 }
175 
176 static enum mlx5_dev_event port_subtype_event(u8 subtype)
177 {
178 	switch (subtype) {
179 	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
180 		return MLX5_DEV_EVENT_PORT_DOWN;
181 	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
182 		return MLX5_DEV_EVENT_PORT_UP;
183 	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
184 		return MLX5_DEV_EVENT_PORT_INITIALIZED;
185 	case MLX5_PORT_CHANGE_SUBTYPE_LID:
186 		return MLX5_DEV_EVENT_LID_CHANGE;
187 	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
188 		return MLX5_DEV_EVENT_PKEY_CHANGE;
189 	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
190 		return MLX5_DEV_EVENT_GUID_CHANGE;
191 	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
192 		return MLX5_DEV_EVENT_CLIENT_REREG;
193 	}
194 	return -1;
195 }
196 
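/* Write the consumer index to the EQ doorbell.  With @arm set the arm
 * doorbell is used, which also asks hardware to raise another interrupt
 * when new events arrive; otherwise only the consumer index is updated.
 */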
197 static void eq_update_ci(struct mlx5_eq *eq, int arm)
198 {
199 	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
200 	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
201 
202 	__raw_writel((__force u32)cpu_to_be32(val), addr);
203 	/* We still want ordering, just not swabbing, so add a barrier */
204 	mb();
205 }
206 
207 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
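/* Work item that hands one queued page-fault descriptor to the device's
 * page-fault handler and returns it to the mempool.  Runs on the EQ's
 * ordered page-fault workqueue.
 */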
208 static void eqe_pf_action(struct work_struct *work)
209 {
210 	struct mlx5_pagefault *pfault = container_of(work,
211 						     struct mlx5_pagefault,
212 						     work);
213 	struct mlx5_eq *eq = pfault->eq;
214 
215 	mlx5_core_page_fault(eq->dev, pfault);
216 	mempool_free(pfault, eq->pf_ctx.pool);
217 }
218 
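/* Drain the page-fault EQ: copy each EQE into a mlx5_pagefault descriptor
 * taken from the mempool and queue it for eqe_pf_action().  If the mempool
 * is exhausted, stop and let eq_pf_action() refill it and resume.
 */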
219 static void eq_pf_process(struct mlx5_eq *eq)
220 {
221 	struct mlx5_core_dev *dev = eq->dev;
222 	struct mlx5_eqe_page_fault *pf_eqe;
223 	struct mlx5_pagefault *pfault;
224 	struct mlx5_eqe *eqe;
225 	int set_ci = 0;
226 
227 	while ((eqe = next_eqe_sw(eq))) {
228 		pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
229 		if (!pfault) {
230 			schedule_work(&eq->pf_ctx.work);
231 			break;
232 		}
233 
234 		dma_rmb();
235 		pf_eqe = &eqe->data.page_fault;
236 		pfault->event_subtype = eqe->sub_type;
237 		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
238 
239 		mlx5_core_dbg(dev,
240 			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
241 			      eqe->sub_type, pfault->bytes_committed);
242 
243 		switch (eqe->sub_type) {
244 		case MLX5_PFAULT_SUBTYPE_RDMA:
245 			/* RDMA based event */
246 			pfault->type =
247 				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
248 			pfault->token =
249 				be32_to_cpu(pf_eqe->rdma.pftype_token) &
250 				MLX5_24BIT_MASK;
251 			pfault->rdma.r_key =
252 				be32_to_cpu(pf_eqe->rdma.r_key);
253 			pfault->rdma.packet_size =
254 				be16_to_cpu(pf_eqe->rdma.packet_length);
255 			pfault->rdma.rdma_op_len =
256 				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
257 			pfault->rdma.rdma_va =
258 				be64_to_cpu(pf_eqe->rdma.rdma_va);
259 			mlx5_core_dbg(dev,
260 				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
261 				      pfault->type, pfault->token,
262 				      pfault->rdma.r_key);
263 			mlx5_core_dbg(dev,
264 				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
265 				      pfault->rdma.rdma_op_len,
266 				      pfault->rdma.rdma_va);
267 			break;
268 
269 		case MLX5_PFAULT_SUBTYPE_WQE:
270 			/* WQE based event */
271 			pfault->type =
272 				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
273 			pfault->token =
274 				be32_to_cpu(pf_eqe->wqe.token);
275 			pfault->wqe.wq_num =
276 				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
277 				MLX5_24BIT_MASK;
278 			pfault->wqe.wqe_index =
279 				be16_to_cpu(pf_eqe->wqe.wqe_index);
280 			pfault->wqe.packet_size =
281 				be16_to_cpu(pf_eqe->wqe.packet_length);
282 			mlx5_core_dbg(dev,
283 				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
284 				      pfault->type, pfault->token,
285 				      pfault->wqe.wq_num,
286 				      pfault->wqe.wqe_index);
287 			break;
288 
289 		default:
290 			mlx5_core_warn(dev,
291 				       "Unsupported page fault event sub-type: 0x%02hhx\n",
292 				       eqe->sub_type);
293 			/* Unsupported page faults should still be
294 			 * resolved by the page fault handler
295 			 */
296 		}
297 
298 		pfault->eq = eq;
299 		INIT_WORK(&pfault->work, eqe_pf_action);
300 		queue_work(eq->pf_ctx.wq, &pfault->work);
301 
302 		++eq->cons_index;
303 		++set_ci;
304 
305 		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
306 			eq_update_ci(eq, 0);
307 			set_ci = 0;
308 		}
309 	}
310 
311 	eq_update_ci(eq, 1);
312 }
313 
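/* Interrupt handler for the page-fault EQ.  Events are processed directly
 * in hard-IRQ context when the lock is free; otherwise processing is
 * deferred to the eq_pf_action() work item.
 */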
314 static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
315 {
316 	struct mlx5_eq *eq = eq_ptr;
317 	unsigned long flags;
318 
319 	if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
320 		eq_pf_process(eq);
321 		spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
322 	} else {
323 		schedule_work(&eq->pf_ctx.work);
324 	}
325 
326 	return IRQ_HANDLED;
327 }
328 
329 /* mempool_refill() was proposed but unfortunately wasn't accepted
330  * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
331  * Cheap workaround.
332  */
333 static void mempool_refill(mempool_t *pool)
334 {
335 	while (pool->curr_nr < pool->min_nr)
336 		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
337 }
338 
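/* Deferred page-fault processing: top up the descriptor mempool and then
 * drain any pending EQEs under the page-fault lock.
 */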
339 static void eq_pf_action(struct work_struct *work)
340 {
341 	struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);
342 
343 	mempool_refill(eq->pf_ctx.pool);
344 
345 	spin_lock_irq(&eq->pf_ctx.lock);
346 	eq_pf_process(eq);
347 	spin_unlock_irq(&eq->pf_ctx.lock);
348 }
349 
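/* Set up the per-EQ page-fault context: an ordered workqueue for handling
 * the faults and a mempool of MLX5_NUM_PF_DRAIN preallocated descriptors
 * so bursts of faults can still be queued when atomic allocation fails.
 */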
350 static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
351 {
352 	spin_lock_init(&pf_ctx->lock);
353 	INIT_WORK(&pf_ctx->work, eq_pf_action);
354 
355 	pf_ctx->wq = alloc_ordered_workqueue(name,
356 					     WQ_MEM_RECLAIM);
357 	if (!pf_ctx->wq)
358 		return -ENOMEM;
359 
360 	pf_ctx->pool = mempool_create_kmalloc_pool
361 		(MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
362 	if (!pf_ctx->pool)
363 		goto err_wq;
364 
365 	return 0;
366 err_wq:
367 	destroy_workqueue(pf_ctx->wq);
368 	return -ENOMEM;
369 }
370 
371 int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
372 				u32 wq_num, u8 type, int error)
373 {
374 	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
375 	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};
376 
377 	MLX5_SET(page_fault_resume_in, in, opcode,
378 		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
379 	MLX5_SET(page_fault_resume_in, in, error, !!error);
380 	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
381 	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
382 	MLX5_SET(page_fault_resume_in, in, token, token);
383 
384 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
385 }
386 EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
387 #endif
388 
389 static void general_event_handler(struct mlx5_core_dev *dev,
390 				  struct mlx5_eqe *eqe)
391 {
392 	switch (eqe->sub_type) {
393 	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
394 		if (dev->event)
395 			dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
396 		break;
397 	default:
398 		mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
399 			      eqe->sub_type);
400 	}
401 }
402 
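/* Log which sensors reported a high-temperature warning; the EQE carries
 * a sensor bitmap split across two 64-bit words.
 */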
403 static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
404 				    struct mlx5_eqe *eqe)
405 {
406 	u64 value_lsb;
407 	u64 value_msb;
408 
409 	value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
410 	value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
411 
412 	mlx5_core_warn(dev,
413 		       "High temperature on sensors with bit set %llx %llx\n",
414 		       value_msb, value_lsb);
415 }
416 
417 /* caller must eventually call mlx5_cq_put on the returned cq */
418 static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
419 {
420 	struct mlx5_cq_table *table = &eq->cq_table;
421 	struct mlx5_core_cq *cq = NULL;
422 
423 	spin_lock(&table->lock);
424 	cq = radix_tree_lookup(&table->tree, cqn);
425 	if (likely(cq))
426 		mlx5_cq_hold(cq);
427 	spin_unlock(&table->lock);
428 
429 	return cq;
430 }
431 
432 static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
433 {
434 	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
435 
436 	if (unlikely(!cq)) {
437 		mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
438 		return;
439 	}
440 
441 	++cq->arm_sn;
442 
443 	cq->comp(cq);
444 
445 	mlx5_cq_put(cq);
446 }
447 
448 static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
449 {
450 	struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
451 
452 	if (unlikely(!cq)) {
453 		mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
454 		return;
455 	}
456 
457 	cq->event(cq, event_type);
458 
459 	mlx5_cq_put(cq);
460 }
461 
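/* Generic EQ interrupt handler: walk all software-owned EQEs, dispatch
 * each one by event type (completions, QP/SRQ/DCT resource events, command
 * completions, port and module events, ...), update the consumer index
 * periodically, re-arm the EQ and, if any completion was seen, schedule
 * the completion tasklet.
 */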
462 static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
463 {
464 	struct mlx5_eq *eq = eq_ptr;
465 	struct mlx5_core_dev *dev = eq->dev;
466 	struct mlx5_eqe *eqe;
467 	int set_ci = 0;
468 	u32 cqn = -1;
469 	u32 rsn;
470 	u8 port;
471 
472 	while ((eqe = next_eqe_sw(eq))) {
473 		/*
474 		 * Make sure we read EQ entry contents after we've
475 		 * checked the ownership bit.
476 		 */
477 		dma_rmb();
478 
479 		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
480 			      eq->eqn, eqe_type_str(eqe->type));
481 		switch (eqe->type) {
482 		case MLX5_EVENT_TYPE_COMP:
483 			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
484 			mlx5_eq_cq_completion(eq, cqn);
485 			break;
486 		case MLX5_EVENT_TYPE_DCT_DRAINED:
487 			rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
488 			rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
489 			mlx5_rsc_event(dev, rsn, eqe->type);
490 			break;
491 		case MLX5_EVENT_TYPE_PATH_MIG:
492 		case MLX5_EVENT_TYPE_COMM_EST:
493 		case MLX5_EVENT_TYPE_SQ_DRAINED:
494 		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
495 		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
496 		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
497 		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
498 		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
499 			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
500 			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
501 			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
502 				      eqe_type_str(eqe->type), eqe->type, rsn);
503 			mlx5_rsc_event(dev, rsn, eqe->type);
504 			break;
505 
506 		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
507 		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
508 			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
509 			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
510 				      eqe_type_str(eqe->type), eqe->type, rsn);
511 			mlx5_srq_event(dev, rsn, eqe->type);
512 			break;
513 
514 		case MLX5_EVENT_TYPE_CMD:
515 			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
516 			break;
517 
518 		case MLX5_EVENT_TYPE_PORT_CHANGE:
519 			port = (eqe->data.port.port >> 4) & 0xf;
520 			switch (eqe->sub_type) {
521 			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
522 			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
523 			case MLX5_PORT_CHANGE_SUBTYPE_LID:
524 			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
525 			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
526 			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
527 			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
528 				if (dev->event)
529 					dev->event(dev, port_subtype_event(eqe->sub_type),
530 						   (unsigned long)port);
531 				break;
532 			default:
533 				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
534 					       port, eqe->sub_type);
535 			}
536 			break;
537 		case MLX5_EVENT_TYPE_CQ_ERROR:
538 			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
539 			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
540 				       cqn, eqe->data.cq_err.syndrome);
541 			mlx5_eq_cq_event(eq, cqn, eqe->type);
542 			break;
543 
544 		case MLX5_EVENT_TYPE_PAGE_REQUEST:
545 			{
546 				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
547 				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
548 
549 				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
550 					      func_id, npages);
551 				mlx5_core_req_pages_handler(dev, func_id, npages);
552 			}
553 			break;
554 
555 		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
556 			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
557 			break;
558 
559 		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
560 			mlx5_port_module_event(dev, eqe);
561 			break;
562 
563 		case MLX5_EVENT_TYPE_PPS_EVENT:
564 			mlx5_pps_event(dev, eqe);
565 			break;
566 
567 		case MLX5_EVENT_TYPE_FPGA_ERROR:
568 		case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
569 			mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
570 			break;
571 
572 		case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
573 			mlx5_temp_warning_event(dev, eqe);
574 			break;
575 
576 		case MLX5_EVENT_TYPE_GENERAL_EVENT:
577 			general_event_handler(dev, eqe);
578 			break;
579 		default:
580 			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
581 				       eqe->type, eq->eqn);
582 			break;
583 		}
584 
585 		++eq->cons_index;
586 		++set_ci;
587 
588 		/* The HCA will think the queue has overflowed if we
589 		 * don't tell it we've been processing events.  We
590 		 * create our EQs with MLX5_NUM_SPARE_EQE extra
591 		 * entries, so we must update our consumer index at
592 		 * least that often.
593 		 */
594 		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
595 			eq_update_ci(eq, 0);
596 			set_ci = 0;
597 		}
598 	}
599 
600 	eq_update_ci(eq, 1);
601 
602 	if (cqn != -1)
603 		tasklet_schedule(&eq->tasklet_ctx.task);
604 
605 	return IRQ_HANDLED;
606 }
607 
608 /* Some architectures don't latch interrupts when they are disabled, so using
609  * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
610  * avoid losing them.  It is not recommended to use it except as a last
611  * resort.
612  */
613 u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
614 {
615 	u32 count_eqe;
616 
617 	disable_irq(eq->irqn);
618 	count_eqe = eq->cons_index;
619 	mlx5_eq_int(eq->irqn, eq);
620 	count_eqe = eq->cons_index - count_eqe;
621 	enable_irq(eq->irqn);
622 
623 	return count_eqe;
624 }
625 
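/* Set the owner bit of every EQE to its initial (hardware-owned) value so
 * next_eqe_sw() ignores the entries until hardware has written them.
 */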
626 static void init_eq_buf(struct mlx5_eq *eq)
627 {
628 	struct mlx5_eqe *eqe;
629 	int i;
630 
631 	for (i = 0; i < eq->nent; i++) {
632 		eqe = get_eqe(eq, i);
633 		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
634 	}
635 }
636 
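/* Create an EQ and attach it to interrupt vector @vecidx: allocate and
 * initialize the EQE buffer, issue CREATE_EQ to firmware, request the IRQ,
 * register the EQ with debugfs, set up the per-type context (page-fault
 * workqueue or completion tasklet) and leave the EQ armed.
 */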
637 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
638 		       int nent, u64 mask, const char *name,
639 		       enum mlx5_eq_type type)
640 {
641 	struct mlx5_cq_table *cq_table = &eq->cq_table;
642 	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
643 	struct mlx5_priv *priv = &dev->priv;
644 	irq_handler_t handler;
645 	__be64 *pas;
646 	void *eqc;
647 	int inlen;
648 	u32 *in;
649 	int err;
650 
651 	/* Init CQ table */
652 	memset(cq_table, 0, sizeof(*cq_table));
653 	spin_lock_init(&cq_table->lock);
654 	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
655 
656 	eq->type = type;
657 	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
658 	eq->cons_index = 0;
659 	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
660 	if (err)
661 		return err;
662 
663 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
664 	if (type == MLX5_EQ_TYPE_PF)
665 		handler = mlx5_eq_pf_int;
666 	else
667 #endif
668 		handler = mlx5_eq_int;
669 
670 	init_eq_buf(eq);
671 
672 	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
673 		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
674 
675 	in = kvzalloc(inlen, GFP_KERNEL);
676 	if (!in) {
677 		err = -ENOMEM;
678 		goto err_buf;
679 	}
680 
681 	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
682 	mlx5_fill_page_array(&eq->buf, pas);
683 
684 	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
685 	MLX5_SET64(create_eq_in, in, event_bitmask, mask);
686 
687 	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
688 	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
689 	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
690 	MLX5_SET(eqc, eqc, intr, vecidx);
691 	MLX5_SET(eqc, eqc, log_page_size,
692 		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
693 
694 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
695 	if (err)
696 		goto err_in;
697 
698 	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
699 		 name, pci_name(dev->pdev));
700 
701 	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
702 	eq->irqn = pci_irq_vector(dev->pdev, vecidx);
703 	eq->dev = dev;
704 	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
705 	err = request_irq(eq->irqn, handler, 0,
706 			  priv->irq_info[vecidx].name, eq);
707 	if (err)
708 		goto err_eq;
709 
710 	err = mlx5_debug_eq_add(dev, eq);
711 	if (err)
712 		goto err_irq;
713 
714 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
715 	if (type == MLX5_EQ_TYPE_PF) {
716 		err = init_pf_ctx(&eq->pf_ctx, name);
717 		if (err)
718 			goto err_irq;
719 	} else
720 #endif
721 	{
722 		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
723 		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
724 		spin_lock_init(&eq->tasklet_ctx.lock);
725 		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
726 			     (unsigned long)&eq->tasklet_ctx);
727 	}
728 
729 	/* EQs are created in ARMED state
730 	 */
731 	eq_update_ci(eq, 1);
732 
733 	kvfree(in);
734 	return 0;
735 
736 err_irq:
737 	free_irq(eq->irqn, eq);
738 
739 err_eq:
740 	mlx5_cmd_destroy_eq(dev, eq->eqn);
741 
742 err_in:
743 	kvfree(in);
744 
745 err_buf:
746 	mlx5_buf_free(dev, &eq->buf);
747 	return err;
748 }
749 
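/* Tear down an EQ created by mlx5_create_map_eq(): remove its debugfs
 * entry, free the interrupt, destroy the EQ object in firmware, release
 * the per-type resources and free the EQE buffer.
 */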
750 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
751 {
752 	int err;
753 
754 	mlx5_debug_eq_remove(dev, eq);
755 	free_irq(eq->irqn, eq);
756 	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
757 	if (err)
758 		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
759 			       eq->eqn);
760 	synchronize_irq(eq->irqn);
761 
762 	if (eq->type == MLX5_EQ_TYPE_COMP) {
763 		tasklet_disable(&eq->tasklet_ctx.task);
764 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
765 	} else if (eq->type == MLX5_EQ_TYPE_PF) {
766 		cancel_work_sync(&eq->pf_ctx.work);
767 		destroy_workqueue(eq->pf_ctx.wq);
768 		mempool_destroy(eq->pf_ctx.pool);
769 #endif
770 	}
771 	mlx5_buf_free(dev, &eq->buf);
772 
773 	return err;
774 }
775 
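/* Register a CQ in the EQ's cqn -> cq radix tree so that completion and
 * CQ error events can be routed to it (see mlx5_eq_cq_get()).
 */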
776 int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
777 {
778 	struct mlx5_cq_table *table = &eq->cq_table;
779 	int err;
780 
781 	spin_lock_irq(&table->lock);
782 	err = radix_tree_insert(&table->tree, cq->cqn, cq);
783 	spin_unlock_irq(&table->lock);
784 
785 	return err;
786 }
787 
788 int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
789 {
790 	struct mlx5_cq_table *table = &eq->cq_table;
791 	struct mlx5_core_cq *tmp;
792 
793 	spin_lock_irq(&table->lock);
794 	tmp = radix_tree_delete(&table->tree, cq->cqn);
795 	spin_unlock_irq(&table->lock);
796 
797 	if (!tmp) {
798 		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", cq->cqn, eq->eqn);
799 		return -ENOENT;
800 	}
801 
802 	if (tmp != cq) {
803 		mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", cq->cqn, eq->eqn);
804 		return -EINVAL;
805 	}
806 
807 	return 0;
808 }
809 
810 int mlx5_eq_init(struct mlx5_core_dev *dev)
811 {
812 	int err;
813 
814 	spin_lock_init(&dev->priv.eq_table.lock);
815 
816 	err = mlx5_eq_debugfs_init(dev);
817 
818 	return err;
819 }
820 
821 void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
822 {
823 	mlx5_eq_debugfs_cleanup(dev);
824 }
825 
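/* Create the command, async and pages EQs (plus the page-fault EQ when
 * on-demand paging is supported), build the async event mask from the
 * device capabilities and switch the command interface to event mode.
 */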
826 int mlx5_start_eqs(struct mlx5_core_dev *dev)
827 {
828 	struct mlx5_eq_table *table = &dev->priv.eq_table;
829 	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
830 	int err;
831 
832 	if (MLX5_VPORT_MANAGER(dev))
833 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
834 
835 	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
836 	    MLX5_CAP_GEN(dev, general_notification_event))
837 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
838 
839 	if (MLX5_CAP_GEN(dev, port_module_event))
840 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
841 	else
842 		mlx5_core_dbg(dev, "port_module_event is not set\n");
843 
844 	if (MLX5_PPS_CAP(dev))
845 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
846 
847 	if (MLX5_CAP_GEN(dev, fpga))
848 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
849 				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
850 	if (MLX5_CAP_GEN_MAX(dev, dct))
851 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);
852 
853 	if (MLX5_CAP_GEN(dev, temp_warn_event))
854 		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);
855 
856 	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
857 				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
858 				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
859 	if (err) {
860 		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
861 		return err;
862 	}
863 
864 	mlx5_cmd_use_events(dev);
865 
866 	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
867 				 MLX5_NUM_ASYNC_EQE, async_event_mask,
868 				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
869 	if (err) {
870 		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
871 		goto err1;
872 	}
873 
874 	err = mlx5_create_map_eq(dev, &table->pages_eq,
875 				 MLX5_EQ_VEC_PAGES,
876 				 /* TODO: sriov max_vf + */ 1,
877 				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
878 				 MLX5_EQ_TYPE_ASYNC);
879 	if (err) {
880 		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
881 		goto err2;
882 	}
883 
884 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
885 	if (MLX5_CAP_GEN(dev, pg)) {
886 		err = mlx5_create_map_eq(dev, &table->pfault_eq,
887 					 MLX5_EQ_VEC_PFAULT,
888 					 MLX5_NUM_ASYNC_EQE,
889 					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
890 					 "mlx5_page_fault_eq",
891 					 MLX5_EQ_TYPE_PF);
892 		if (err) {
893 			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
894 				       err);
895 			goto err3;
896 		}
897 	}
898 
899 	return err;
900 err3:
901 	mlx5_destroy_unmap_eq(dev, &table->pages_eq);
902 #else
903 	return err;
904 #endif
905 
906 err2:
907 	mlx5_destroy_unmap_eq(dev, &table->async_eq);
908 
909 err1:
910 	mlx5_cmd_use_polling(dev);
911 	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
912 	return err;
913 }
914 
915 void mlx5_stop_eqs(struct mlx5_core_dev *dev)
916 {
917 	struct mlx5_eq_table *table = &dev->priv.eq_table;
918 	int err;
919 
920 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
921 	if (MLX5_CAP_GEN(dev, pg)) {
922 		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
923 		if (err)
924 			mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
925 				      err);
926 	}
927 #endif
928 
929 	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
930 	if (err)
931 		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
932 			      err);
933 
934 	err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
935 	if (err)
936 		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
937 			      err);
938 	mlx5_cmd_use_polling(dev);
939 
940 	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
941 	if (err)
942 		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
943 			      err);
944 }
945 
946 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
947 		       u32 *out, int outlen)
948 {
949 	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
950 
951 	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
952 	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
953 	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
954 }
955 
956 /* This function should only be called after mlx5_cmd_force_teardown_hca */
957 void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
958 {
959 	struct mlx5_eq_table *table = &dev->priv.eq_table;
960 	struct mlx5_eq *eq;
961 
962 #ifdef CONFIG_RFS_ACCEL
963 	if (dev->rmap) {
964 		free_irq_cpu_rmap(dev->rmap);
965 		dev->rmap = NULL;
966 	}
967 #endif
968 	list_for_each_entry(eq, &table->comp_eqs_list, list)
969 		free_irq(eq->irqn, eq);
970 
971 	free_irq(table->pages_eq.irqn, &table->pages_eq);
972 	free_irq(table->async_eq.irqn, &table->async_eq);
973 	free_irq(table->cmd_eq.irqn, &table->cmd_eq);
974 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
975 	if (MLX5_CAP_GEN(dev, pg))
976 		free_irq(table->pfault_eq.irqn, &table->pfault_eq);
977 #endif
978 	pci_free_irq_vectors(dev->pdev);
979 }
980