xref: /linux/drivers/crypto/intel/qat/qat_common/adf_admin.c (revision 42874e4eb35bdfc54f8514685e50434098ba4f6c)
1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/types.h>
4 #include <linux/mutex.h>
5 #include <linux/slab.h>
6 #include <linux/iopoll.h>
7 #include <linux/pci.h>
8 #include <linux/dma-mapping.h>
9 #include "adf_accel_devices.h"
10 #include "adf_admin.h"
11 #include "adf_common_drv.h"
12 #include "adf_cfg.h"
13 #include "adf_heartbeat.h"
14 #include "icp_qat_fw_init_admin.h"
15 
16 #define ADF_ADMIN_MAILBOX_STRIDE 0x1000
17 #define ADF_ADMINMSG_LEN 32
18 #define ADF_CONST_TABLE_SIZE 1024
19 #define ADF_ADMIN_POLL_DELAY_US 20
20 #define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
21 #define ADF_ONE_AE 1
22 
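/*
 * Constants table shared with the firmware. It is copied into a
 * DMA-coherent buffer in adf_init_admin_comms() and its address is passed
 * to the firmware with the ICP_QAT_FW_CONSTANTS_CFG admin command in
 * adf_set_fw_constants().
 */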
23 static const u8 const_tab[1024] __aligned(1024) = {
24 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
25 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
26 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
27 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
28 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
29 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
30 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
31 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
32 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
33 0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
34 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
35 0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
36 0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
37 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
38 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
39 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
40 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
41 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
42 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
43 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
44 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
45 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
46 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
47 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
48 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
49 0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
50 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
51 0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
52 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
53 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
54 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
55 0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
56 0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
57 0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
58 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
59 0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
60 0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
61 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
62 0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
63 0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
64 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
65 0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
66 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
67 0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
68 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
69 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
70 0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
71 0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
72 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
73 0x7e, 0x21, 0x79, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
74 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
75 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00,
76 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x18,
77 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
78 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x01, 0x00,
79 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
80 0x15, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x02, 0x00, 0x00, 0x00,
81 0x00, 0x00, 0x00, 0x14, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x02,
82 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25,
85 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00,
86 0x00, 0x00, 0x12, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x00, 0x00,
87 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
88 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x01, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x01,
90 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x2b, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
92 0x00, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
93 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00,
94 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,
95 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
99 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
100 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
101 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
102 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
103 
104 struct adf_admin_comms {
105 	dma_addr_t phy_addr;
106 	dma_addr_t const_tbl_addr;
107 	void *virt_addr;
108 	void *virt_tbl_addr;
109 	void __iomem *mailbox_addr;
110 	struct mutex lock;	/* serializes access to the admin message area and mailbox */
111 };
112 
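/*
 * Mailbox handshake with an admin AE, as implemented below: the request is
 * copied into the per-AE slot of the shared message area, the per-AE
 * mailbox CSR is set to 1 to signal the firmware, and the CSR is then
 * polled until the firmware clears it back to 0, at which point the
 * response is read from the slot immediately after the request.
 */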
113 static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
114 				  void *in, void *out)
115 {
116 	int ret;
117 	u32 status;
118 	struct adf_admin_comms *admin = accel_dev->admin;
119 	int offset = ae * ADF_ADMINMSG_LEN * 2;
120 	void __iomem *mailbox = admin->mailbox_addr;
121 	int mb_offset = ae * ADF_ADMIN_MAILBOX_STRIDE;
122 	struct icp_qat_fw_init_admin_req *request = in;
123 
124 	mutex_lock(&admin->lock);
125 
126 	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
127 		mutex_unlock(&admin->lock);
128 		return -EAGAIN;
129 	}
130 
131 	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
132 	ADF_CSR_WR(mailbox, mb_offset, 1);
133 
134 	ret = read_poll_timeout(ADF_CSR_RD, status, status == 0,
135 				ADF_ADMIN_POLL_DELAY_US,
136 				ADF_ADMIN_POLL_TIMEOUT_US, true,
137 				mailbox, mb_offset);
138 	if (ret < 0) {
139 		/* Response timeout */
140 		dev_err(&GET_DEV(accel_dev),
141 			"Failed to send admin msg %d to accelerator %d\n",
142 			request->cmd_id, ae);
143 	} else {
144 		/* A response was received from the admin firmware; copy the
145 		 * response data into the caller's "out" buffer.
146 		 */
147 		memcpy(out, admin->virt_addr + offset +
148 		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
149 	}
150 
151 	mutex_unlock(&admin->lock);
152 	return ret;
153 }
154 
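/*
 * Send the same admin request to every AE set in ae_mask, stopping at the
 * first transport error or non-zero status in the response; either failure
 * is reported to the caller as -EFAULT.
 */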
155 static int adf_send_admin(struct adf_accel_dev *accel_dev,
156 			  struct icp_qat_fw_init_admin_req *req,
157 			  struct icp_qat_fw_init_admin_resp *resp,
158 			  const unsigned long ae_mask)
159 {
160 	u32 ae;
161 
162 	for_each_set_bit(ae, &ae_mask, ICP_QAT_HW_AE_DELIMITER)
163 		if (adf_put_admin_msg_sync(accel_dev, ae, req, resp) ||
164 		    resp->status)
165 			return -EFAULT;
166 
167 	return 0;
168 }
169 
170 static int adf_init_ae(struct adf_accel_dev *accel_dev)
171 {
172 	struct icp_qat_fw_init_admin_req req;
173 	struct icp_qat_fw_init_admin_resp resp;
174 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
175 	u32 ae_mask = hw_device->ae_mask;
176 
177 	memset(&req, 0, sizeof(req));
178 	memset(&resp, 0, sizeof(resp));
179 	req.cmd_id = ICP_QAT_FW_INIT_AE;
180 
181 	return adf_send_admin(accel_dev, &req, &resp, ae_mask);
182 }
183 
184 static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
185 {
186 	struct icp_qat_fw_init_admin_req req;
187 	struct icp_qat_fw_init_admin_resp resp;
188 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
189 	u32 ae_mask = hw_device->admin_ae_mask ?: hw_device->ae_mask;
190 
191 	memset(&req, 0, sizeof(req));
192 	memset(&resp, 0, sizeof(resp));
193 	req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
194 
195 	req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
196 	req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
197 
198 	return adf_send_admin(accel_dev, &req, &resp, ae_mask);
199 }
200 
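/*
 * Read the firmware's internal timer value via a single admin AE. The
 * heartbeat code (adf_heartbeat.h is included above) is one likely
 * consumer, though nothing in this file depends on that.
 */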
201 int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp)
202 {
203 	struct icp_qat_fw_init_admin_req req = { };
204 	struct icp_qat_fw_init_admin_resp resp;
205 	unsigned int ae_mask = ADF_ONE_AE;
206 	int ret;
207 
208 	req.cmd_id = ICP_QAT_FW_TIMER_GET;
209 	ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
210 	if (ret)
211 		return ret;
212 
213 	*timestamp = resp.timestamp;
214 	return 0;
215 }
216 
217 static int adf_set_chaining(struct adf_accel_dev *accel_dev)
218 {
219 	u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
220 	struct icp_qat_fw_init_admin_resp resp = { };
221 	struct icp_qat_fw_init_admin_req req = { };
222 
223 	req.cmd_id = ICP_QAT_FW_DC_CHAIN_INIT;
224 
225 	return adf_send_admin(accel_dev, &req, &resp, ae_mask);
226 }
227 
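/*
 * Query the extended compression features from each non-admin (service) AE
 * and OR the per-AE results into a single capability mask.
 */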
228 static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
229 				   u32 *capabilities)
230 {
231 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
232 	struct icp_qat_fw_init_admin_resp resp;
233 	struct icp_qat_fw_init_admin_req req;
234 	unsigned long ae_mask;
235 	unsigned long ae;
236 	int ret;
237 
238 	/* Target only service accelerator engines */
239 	ae_mask = hw_device->ae_mask & ~hw_device->admin_ae_mask;
240 
241 	memset(&req, 0, sizeof(req));
242 	memset(&resp, 0, sizeof(resp));
243 	req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;
244 
245 	*capabilities = 0;
246 	for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
247 		ret = adf_send_admin(accel_dev, &req, &resp, 1ULL << ae);
248 		if (ret)
249 			return ret;
250 
251 		*capabilities |= resp.extended_features;
252 	}
253 
254 	return 0;
255 }
256 
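/*
 * Fetch the per-AE request/response counters maintained by the firmware.
 * The values come straight from the ICP_QAT_FW_COUNTERS_GET response and
 * are likely consumed by the fw_counters debugfs code.
 */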
257 int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps)
258 {
259 	struct icp_qat_fw_init_admin_resp resp = { };
260 	struct icp_qat_fw_init_admin_req req = { };
261 	int ret;
262 
263 	req.cmd_id = ICP_QAT_FW_COUNTERS_GET;
264 
265 	ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp);
266 	if (ret || resp.status)
267 		return -EFAULT;
268 
269 	*reqs = resp.req_rec_count;
270 	*resps = resp.resp_sent_count;
271 
272 	return 0;
273 }
274 
275 int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt)
276 {
277 	u32 ae_mask = accel_dev->hw_device->ae_mask;
278 	struct icp_qat_fw_init_admin_req req = { };
279 	struct icp_qat_fw_init_admin_resp resp = { };
280 
281 	req.cmd_id = ICP_QAT_FW_SYNC;
282 	req.int_timer_ticks = cnt;
283 
284 	return adf_send_admin(accel_dev, &req, &resp, ae_mask);
285 }
286 
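/*
 * Program the firmware heartbeat: init_cfg_ptr points the firmware at the
 * heartbeat DMA buffer (accel_dev->heartbeat->dma, allocated by the
 * heartbeat code), and heartbeat_ticks sets the update period in firmware
 * timer ticks.
 */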
287 int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks)
288 {
289 	u32 ae_mask = accel_dev->hw_device->ae_mask;
290 	struct icp_qat_fw_init_admin_req req = { };
291 	struct icp_qat_fw_init_admin_resp resp;
292 
293 	req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
294 	req.init_cfg_ptr = accel_dev->heartbeat->dma.phy_addr;
295 	req.heartbeat_ticks = ticks;
296 
297 	return adf_send_admin(accel_dev, &req, &resp, ae_mask);
298 }
299 
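/*
 * "dcc" in the ADF_SERVICES_ENABLED configuration key selects the data
 * compression chaining service, which is why adf_send_admin_init() only
 * issues ICP_QAT_FW_DC_CHAIN_INIT when this returns true.
 */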
300 static bool is_dcc_enabled(struct adf_accel_dev *accel_dev)
301 {
302 	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
303 	int ret;
304 
305 	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
306 				      ADF_SERVICES_ENABLED, services);
307 	if (ret)
308 		return false;
309 
310 	return !strcmp(services, "dcc");
311 }
312 
313 static int adf_get_fw_capabilities(struct adf_accel_dev *accel_dev, u16 *caps)
314 {
315 	u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
316 	struct icp_qat_fw_init_admin_resp resp = { };
317 	struct icp_qat_fw_init_admin_req req = { };
318 	int ret;
319 
320 	if (!ae_mask)
321 		return 0;
322 
323 	req.cmd_id = ICP_QAT_FW_CAPABILITIES_GET;
324 	ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
325 	if (ret)
326 		return ret;
327 
328 	*caps = resp.fw_capabilities;
329 
330 	return 0;
331 }
332 
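/*
 * Initialize firmware rate limiting on the admin AEs and hand the slice
 * counts reported in the response back to the caller.
 */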
333 int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev,
334 			   struct icp_qat_fw_init_admin_slice_cnt *slices)
335 {
336 	u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
337 	struct icp_qat_fw_init_admin_resp resp = { };
338 	struct icp_qat_fw_init_admin_req req = { };
339 	int ret;
340 
341 	req.cmd_id = ICP_QAT_FW_RL_INIT;
342 
343 	ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
344 	if (ret)
345 		return ret;
346 
347 	memcpy(slices, &resp.slices, sizeof(*slices));
348 
349 	return 0;
350 }
351 
352 int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev,
353 				 struct icp_qat_fw_init_admin_req *req)
354 {
355 	u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
356 	struct icp_qat_fw_init_admin_resp resp = { };
357 
358 	/*
359 	 * The req struct is filled in by the rate limiting (rl) implementation.
360 	 * Expected commands: ICP_QAT_FW_RL_ADD to add a new SLA and
361 	 * ICP_QAT_FW_RL_UPDATE to update an existing SLA.
362 	 */
363 	return adf_send_admin(accel_dev, req, &resp, ae_mask);
364 }
365 
366 int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id,
367 			     u8 node_type)
368 {
369 	u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
370 	struct icp_qat_fw_init_admin_resp resp = { };
371 	struct icp_qat_fw_init_admin_req req = { };
372 
373 	req.cmd_id = ICP_QAT_FW_RL_REMOVE;
374 	req.node_id = node_id;
375 	req.node_type = node_type;
376 
377 	return adf_send_admin(accel_dev, &req, &resp, ae_mask);
378 }
379 
380 /**
381  * adf_send_admin_init() - Function sends init message to FW
382  * @accel_dev: Pointer to acceleration device.
383  *
384  * Function sends the admin init sequence (constants, capabilities, AE init) to the FW.
385  *
386  * Return: 0 on success, error code otherwise.
387  */
388 int adf_send_admin_init(struct adf_accel_dev *accel_dev)
389 {
390 	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
391 	u32 dc_capabilities = 0;
392 	int ret;
393 
394 	ret = adf_set_fw_constants(accel_dev);
395 	if (ret)
396 		return ret;
397 
398 	if (is_dcc_enabled(accel_dev)) {
399 		ret = adf_set_chaining(accel_dev);
400 		if (ret)
401 			return ret;
402 	}
403 
404 	ret = adf_get_dc_capabilities(accel_dev, &dc_capabilities);
405 	if (ret) {
406 		dev_err(&GET_DEV(accel_dev), "Cannot get dc capabilities\n");
407 		return ret;
408 	}
409 	accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
410 
411 	adf_get_fw_capabilities(accel_dev, &hw_data->fw_capabilities);
412 
413 	return adf_init_ae(accel_dev);
414 }
415 EXPORT_SYMBOL_GPL(adf_send_admin_init);
416 
417 /**
418  * adf_init_admin_pm() - Function sends PM init message to FW
419  * @accel_dev: Pointer to acceleration device.
420  * @idle_delay: QAT HW idle time before power gating is initiated.
421  *		000 - 64us
422  *		001 - 128us
423  *		010 - 256us
424  *		011 - 512us
425  *		100 - 1ms
426  *		101 - 2ms
427  *		110 - 4ms
428  *		111 - 8ms
429  *
430  * Function sends the admin message that configures the power management
431  * (PM) idle filter in the FW.
432  *
433  * Return: 0 on success, error code otherwise.
434  */
435 int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
436 {
437 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
438 	struct icp_qat_fw_init_admin_resp resp = {0};
439 	struct icp_qat_fw_init_admin_req req = {0};
440 	u32 ae_mask = hw_data->admin_ae_mask;
441 
442 	if (!accel_dev->admin) {
443 		dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
444 		return -EFAULT;
445 	}
446 
447 	req.cmd_id = ICP_QAT_FW_PM_STATE_CONFIG;
448 	req.idle_filter = idle_delay;
449 
450 	return adf_send_admin(accel_dev, &req, &resp, ae_mask);
451 }
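
/*
 * Illustrative use only (hypothetical call site): per the idle_delay
 * encoding documented above, a caller could request a 512us idle window
 * before power gating with:
 *
 *	ret = adf_init_admin_pm(accel_dev, 0x3);
 *	if (ret)
 *		dev_err(&GET_DEV(accel_dev), "Failed to configure PM\n");
 */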
452 
453 int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr,
454 		    size_t buff_size)
455 {
456 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
457 	struct icp_qat_fw_init_admin_req req = { };
458 	struct icp_qat_fw_init_admin_resp resp;
459 	u32 ae_mask = hw_data->admin_ae_mask;
460 	int ret;
461 
462 	/* Query pm info via init/admin cmd */
463 	if (!accel_dev->admin) {
464 		dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
465 		return -EFAULT;
466 	}
467 
468 	req.cmd_id = ICP_QAT_FW_PM_INFO;
469 	req.init_cfg_sz = buff_size;
470 	req.init_cfg_ptr = p_state_addr;
471 
472 	ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
473 	if (ret)
474 		dev_err(&GET_DEV(accel_dev),
475 			"Failed to query power-management info\n");
476 
477 	return ret;
478 }
479 
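/*
 * Retrieve the compress-and-verify (CnV) error statistics for one AE. A
 * non-zero status in the response is treated as the ICP_QAT_FW_CNV_STATS_GET
 * command not being supported by the firmware, hence -EPROTONOSUPPORT.
 */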
480 int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt,
481 		      u16 *latest_err)
482 {
483 	struct icp_qat_fw_init_admin_req req = { };
484 	struct icp_qat_fw_init_admin_resp resp;
485 	int ret;
486 
487 	req.cmd_id = ICP_QAT_FW_CNV_STATS_GET;
488 
489 	ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp);
490 	if (ret)
491 		return ret;
492 	if (resp.status)
493 		return -EPROTONOSUPPORT;
494 
495 	*err_cnt = resp.error_count;
496 	*latest_err = resp.latest_error;
497 
498 	return ret;
499 }
500 
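/*
 * Set up the admin channel: allocate DMA-coherent memory for the message
 * area and the constants table, program the message area address into the
 * admin message CSRs reported by the hardware-specific get_admin_info()
 * callback, and record the mailbox base for later polling.
 */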
501 int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
502 {
503 	struct adf_admin_comms *admin;
504 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
505 	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
506 	struct admin_info admin_csrs_info;
507 	u32 mailbox_offset, adminmsg_u, adminmsg_l;
508 	void __iomem *mailbox;
509 	u64 reg_val;
510 
511 	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
512 			     dev_to_node(&GET_DEV(accel_dev)));
513 	if (!admin)
514 		return -ENOMEM;
515 	admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
516 					      &admin->phy_addr, GFP_KERNEL);
517 	if (!admin->virt_addr) {
518 		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
519 		kfree(admin);
520 		return -ENOMEM;
521 	}
522 
523 	admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
524 						  PAGE_SIZE,
525 						  &admin->const_tbl_addr,
526 						  GFP_KERNEL);
527 	if (!admin->virt_tbl_addr) {
528 		dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
529 		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
530 				  admin->virt_addr, admin->phy_addr);
531 		kfree(admin);
532 		return -ENOMEM;
533 	}
534 
535 	memcpy(admin->virt_tbl_addr, const_tab, sizeof(const_tab));
536 	hw_data->get_admin_info(&admin_csrs_info);
537 
538 	mailbox_offset = admin_csrs_info.mailbox_offset;
539 	mailbox = pmisc_addr + mailbox_offset;
540 	adminmsg_u = admin_csrs_info.admin_msg_ur;
541 	adminmsg_l = admin_csrs_info.admin_msg_lr;
542 
543 	reg_val = (u64)admin->phy_addr;
544 	ADF_CSR_WR(pmisc_addr, adminmsg_u, upper_32_bits(reg_val));
545 	ADF_CSR_WR(pmisc_addr, adminmsg_l, lower_32_bits(reg_val));
546 
547 	mutex_init(&admin->lock);
548 	admin->mailbox_addr = mailbox;
549 	accel_dev->admin = admin;
550 	return 0;
551 }
552 EXPORT_SYMBOL_GPL(adf_init_admin_comms);
553 
554 void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
555 {
556 	struct adf_admin_comms *admin = accel_dev->admin;
557 
558 	if (!admin)
559 		return;
560 
561 	if (admin->virt_addr)
562 		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
563 				  admin->virt_addr, admin->phy_addr);
564 	if (admin->virt_tbl_addr)
565 		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
566 				  admin->virt_tbl_addr, admin->const_tbl_addr);
567 
568 	mutex_destroy(&admin->lock);
569 	kfree(admin);
570 	accel_dev->admin = NULL;
571 }
572 EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
573