// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

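/*
 * Create a completion queue for a user-space verbs consumer. The CQE
 * buffer lives in user memory: its address arrives in the
 * mana_ib_create_cq command carried in udata, gets pinned with
 * ib_umem_get(), and is then registered with the device as a GDMA DMA
 * region. The hardware CQ itself is only instantiated later, at QP
 * creation time.
 */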
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_create_cq ucmd = {};
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev->gdma_dev->gdma_context;

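	/*
	 * Sanity-check the user command size and the requested completion
	 * vector before touching anything else.
	 */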
	if (udata->inlen < sizeof(ucmd))
		return -EINVAL;

	if (attr->comp_vector > gc->max_num_queues)
		return -EINVAL;

	cq->comp_vector = attr->comp_vector;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to copy from udata for create cq, %d\n", err);
		return err;
	}

	if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
		return -EINVAL;
	}

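	/*
	 * Pin the user-space buffer that will hold the CQEs; the device needs
	 * write access to it, hence IB_ACCESS_LOCAL_WRITE.
	 */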
	cq->cqe = attr->cqe;
	cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
			       IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->umem)) {
		err = PTR_ERR(cq->umem);
		ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
			  err);
		return err;
	}

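	/*
	 * Register the pinned pages with the device. The resulting
	 * gdma_region handle is consumed later, when the CQ is created on
	 * the hardware as part of QP creation.
	 */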
	err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to create dma region for create cq, %d\n",
			  err);
		goto err_release_umem;
	}

	ibdev_dbg(ibdev,
		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
		  err, cq->gdma_region);

	/*
	 * The CQ ID is not known at this time. The ID is generated at create_qp
	 */
	cq->id = INVALID_QUEUE_ID;

	return 0;

err_release_umem:
	ib_umem_release(cq->umem);
	return err;
}

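/*
 * Tear down a completion queue: destroy its DMA region, drop the entry
 * that QP creation may have installed in the GDMA CQ table, and release
 * the pinned user memory.
 */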
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev->gdma_dev->gdma_context;

	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to destroy dma region, %d\n", err);
		return err;
	}

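	/*
	 * A valid CQ ID means the CQ was bound to a QP; free the entry that
	 * QP creation installed in gc->cq_table and clear the slot.
	 */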
	if (cq->id != INVALID_QUEUE_ID) {
		kfree(gc->cq_table[cq->id]);
		gc->cq_table[cq->id] = NULL;
	}

	ib_umem_release(cq->umem);

	return 0;
}

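/*
 * Completion callback invoked from the GDMA completion path; forward the
 * event to whatever completion handler the consumer has armed on the
 * ib_cq.
 */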
void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
	struct mana_ib_cq *cq = ctx;

	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}