xref: /linux/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c (revision e2be04c7f9958dde770eeb8b30e829ca969b37bb)
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/semaphore.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/err.h>

#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#include "hinic_hw_qp_ctxt.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_io.h"

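/* each queue owns one 32-bit entry in the CI (consumer index) table */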
#define CI_Q_ADDR_SIZE                  sizeof(u32)

#define CI_ADDR(base_addr, q_id)        ((base_addr) + \
					 (q_id) * CI_Q_ADDR_SIZE)

#define CI_TABLE_SIZE(num_qps)          ((num_qps) * CI_Q_ADDR_SIZE)

#define DB_IDX(db, db_base)             \
	(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)

enum io_cmd {
	IO_CMD_MODIFY_QUEUE_CTXT = 0,
};

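/**
 * init_db_area_idx - Initialize the free doorbell area index pool
 * @free_db_area: doorbell area free list to initialize
 **/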
static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
{
	int i;

	for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
		free_db_area->db_idx[i] = i;

	free_db_area->alloc_pos = 0;
	free_db_area->return_pos = HINIC_DB_MAX_AREAS;

	free_db_area->num_free = HINIC_DB_MAX_AREAS;

	sema_init(&free_db_area->idx_lock, 1);
}

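/**
 * get_db_area - Reserve a free doorbell area
 * @func_to_io: func to io channel that holds the IO components
 *
 * Return doorbell area base address - Success, ERR_PTR - Failure
 **/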
static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
{
	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
	int pos, idx;

	down(&free_db_area->idx_lock);

	free_db_area->num_free--;

	if (free_db_area->num_free < 0) {
		free_db_area->num_free++;
		up(&free_db_area->idx_lock);
		return ERR_PTR(-ENOMEM);
	}

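	/* positions wrap within the index ring - the mask assumes
	 * HINIC_DB_MAX_AREAS is a power of two
	 */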
	pos = free_db_area->alloc_pos++;
	pos &= HINIC_DB_MAX_AREAS - 1;

	idx = free_db_area->db_idx[pos];

	free_db_area->db_idx[pos] = -1;

	up(&free_db_area->idx_lock);

	return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
}

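/**
 * return_db_area - Return a doorbell area to the free list
 * @func_to_io: func to io channel that holds the IO components
 * @db_base: doorbell area to return
 **/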
static void return_db_area(struct hinic_func_to_io *func_to_io,
			   void __iomem *db_base)
{
	struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
	int pos, idx = DB_IDX(db_base, func_to_io->db_base);

	down(&free_db_area->idx_lock);

	pos = free_db_area->return_pos++;
	pos &= HINIC_DB_MAX_AREAS - 1;

	free_db_area->db_idx[pos] = idx;

	free_db_area->num_free++;

	up(&free_db_area->idx_lock);
}

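/**
 * write_sq_ctxts - write the SQ contexts to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_sqs: number of SQs to write
 *
 * Return 0 - Success, negative - Failure
 **/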
static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_sqs)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_sq_ctxt_block *sq_ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	struct hinic_sq_ctxt *sq_ctxt;
	struct hinic_qp *qp;
	u64 out_param;
	int err, i;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	sq_ctxt_block = cmdq_buf.buf;
	sq_ctxt = sq_ctxt_block->sq_ctxt;

	hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
				num_sqs, func_to_io->max_qps);
	for (i = 0; i < num_sqs; i++) {
		qp = &func_to_io->qps[i];

		hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
				      base_qpn + qp->q_id);
	}

	cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
				     &out_param);
	if ((err) || (out_param != 0)) {
		dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	return err;
}

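/**
 * write_rq_ctxts - write the RQ contexts to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_rqs: number of RQs to write
 *
 * Return 0 - Success, negative - Failure
 **/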
static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_rqs)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct hinic_rq_ctxt_block *rq_ctxt_block;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_cmdq_buf cmdq_buf;
	struct hinic_rq_ctxt *rq_ctxt;
	struct hinic_qp *qp;
	u64 out_param;
	int err, i;

	err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	rq_ctxt_block = cmdq_buf.buf;
	rq_ctxt = rq_ctxt_block->rq_ctxt;

	hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
				num_rqs, func_to_io->max_qps);
	for (i = 0; i < num_rqs; i++) {
		qp = &func_to_io->qps[i];

		hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
				      base_qpn + qp->q_id);
	}

	cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);

	err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
				     IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
				     &out_param);
	if ((err) || (out_param != 0)) {
		dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
		err = -EFAULT;
	}

	hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
	return err;
}

/**
 * write_qp_ctxts - write the qp ctxt to HW
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: first qp number
 * @num_qps: number of qps to write
 *
 * Return 0 - Success, negative - Failure
 **/
static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
			  u16 num_qps)
{
	return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
		write_rq_ctxts(func_to_io, base_qpn, num_qps));
}

/**
 * init_qp - Initialize a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to initialize
 * @q_id: the id of the qp
 * @sq_msix_entry: msix entry for sq
 * @rq_msix_entry: msix entry for rq
 *
 * Return 0 - Success, negative - Failure
 **/
static int init_qp(struct hinic_func_to_io *func_to_io,
		   struct hinic_qp *qp, int q_id,
		   struct msix_entry *sq_msix_entry,
		   struct msix_entry *rq_msix_entry)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	void __iomem *db_base;
	int err;

	qp->q_id = q_id;

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
				HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
				HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
		return err;
	}

	err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
				HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
				HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
		goto err_rq_alloc;
	}

	db_base = get_db_area(func_to_io);
	if (IS_ERR(db_base)) {
		dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
		err = PTR_ERR(db_base);
		goto err_get_db;
	}

	func_to_io->sq_db[q_id] = db_base;

	err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
			    sq_msix_entry,
			    CI_ADDR(func_to_io->ci_addr_base, q_id),
			    CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
	if (err) {
		dev_err(&pdev->dev, "Failed to init SQ\n");
		goto err_sq_init;
	}

	err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
			    rq_msix_entry);
	if (err) {
		dev_err(&pdev->dev, "Failed to init RQ\n");
		goto err_rq_init;
	}

	return 0;

err_rq_init:
	hinic_clean_sq(&qp->sq);

err_sq_init:
	return_db_area(func_to_io, db_base);

err_get_db:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);

err_rq_alloc:
	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
	return err;
}

/**
 * destroy_qp - Clean the resources of a Queue Pair
 * @func_to_io: func to io channel that holds the IO components
 * @qp: pointer to the qp to clean
 **/
static void destroy_qp(struct hinic_func_to_io *func_to_io,
		       struct hinic_qp *qp)
{
	int q_id = qp->q_id;

	hinic_clean_rq(&qp->rq);
	hinic_clean_sq(&qp->sq);

	return_db_area(func_to_io, func_to_io->sq_db[q_id]);

	hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
	hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
}

/**
 * hinic_io_create_qps - Create Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @base_qpn: base qp number
 * @num_qps: number of queue pairs to create
 * @sq_msix_entries: msix entries for sq
 * @rq_msix_entries: msix entries for rq
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
			u16 base_qpn, int num_qps,
			struct msix_entry *sq_msix_entries,
			struct msix_entry *rq_msix_entries)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	size_t qps_size, wq_size, db_size;
	void *ci_addr_base;
	int i, j, err;

	qps_size = num_qps * sizeof(*func_to_io->qps);
	func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
	if (!func_to_io->qps)
		return -ENOMEM;

	wq_size = num_qps * sizeof(*func_to_io->sq_wq);
	func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
	if (!func_to_io->sq_wq) {
		err = -ENOMEM;
		goto err_sq_wq;
	}

	wq_size = num_qps * sizeof(*func_to_io->rq_wq);
	func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
	if (!func_to_io->rq_wq) {
		err = -ENOMEM;
		goto err_rq_wq;
	}

	db_size = num_qps * sizeof(*func_to_io->sq_db);
	func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
	if (!func_to_io->sq_db) {
		err = -ENOMEM;
		goto err_sq_db;
	}

	ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
					   &func_to_io->ci_dma_base,
					   GFP_KERNEL);
	if (!ci_addr_base) {
		dev_err(&pdev->dev, "Failed to allocate CI area\n");
		err = -ENOMEM;
		goto err_ci_base;
	}

	func_to_io->ci_addr_base = ci_addr_base;

	for (i = 0; i < num_qps; i++) {
		err = init_qp(func_to_io, &func_to_io->qps[i], i,
			      &sq_msix_entries[i], &rq_msix_entries[i]);
		if (err) {
			dev_err(&pdev->dev, "Failed to create QP %d\n", i);
			goto err_init_qp;
		}
	}

	err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
	if (err) {
		dev_err(&pdev->dev, "Failed to init QP ctxts\n");
		goto err_write_qp_ctxts;
	}

	return 0;

err_write_qp_ctxts:
err_init_qp:
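	/* roll back only the QPs that were successfully initialized */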
	for (j = 0; j < i; j++)
		destroy_qp(func_to_io, &func_to_io->qps[j]);

	dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
			  func_to_io->ci_addr_base, func_to_io->ci_dma_base);

err_ci_base:
	devm_kfree(&pdev->dev, func_to_io->sq_db);

err_sq_db:
	devm_kfree(&pdev->dev, func_to_io->rq_wq);

err_rq_wq:
	devm_kfree(&pdev->dev, func_to_io->sq_wq);

err_sq_wq:
	devm_kfree(&pdev->dev, func_to_io->qps);
	return err;
}

/**
 * hinic_io_destroy_qps - Destroy the IO Queue Pairs
 * @func_to_io: func to io channel that holds the IO components
 * @num_qps: number of queue pairs to destroy
 **/
void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
{
	struct hinic_hwif *hwif = func_to_io->hwif;
	struct pci_dev *pdev = hwif->pdev;
	size_t ci_table_size;
	int i;

	ci_table_size = CI_TABLE_SIZE(num_qps);

	for (i = 0; i < num_qps; i++)
		destroy_qp(func_to_io, &func_to_io->qps[i]);

	dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
			  func_to_io->ci_dma_base);

	devm_kfree(&pdev->dev, func_to_io->sq_db);

	devm_kfree(&pdev->dev, func_to_io->rq_wq);
	devm_kfree(&pdev->dev, func_to_io->sq_wq);

	devm_kfree(&pdev->dev, func_to_io->qps);
}

/**
 * hinic_io_init - Initialize the IO components
 * @func_to_io: func to io channel that holds the IO components
 * @hwif: HW interface for accessing IO
 * @max_qps: maximum QPs in HW
 * @num_ceqs: number of completion event queues
 * @ceq_msix_entries: msix entries for ceqs
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_io_init(struct hinic_func_to_io *func_to_io,
		  struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
		  struct msix_entry *ceq_msix_entries)
{
	struct pci_dev *pdev = hwif->pdev;
	enum hinic_cmdq_type cmdq, type;
	void __iomem *db_area;
	int err;

	func_to_io->hwif = hwif;
	func_to_io->qps = NULL;
	func_to_io->max_qps = max_qps;

	err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
			      HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
			      ceq_msix_entries);
	if (err) {
		dev_err(&pdev->dev, "Failed to init CEQs\n");
		return err;
	}

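	/* each QP needs two work queues: one SQ and one RQ */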
	err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
		goto err_wqs_alloc;
	}

	func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
	if (!func_to_io->db_base) {
		dev_err(&pdev->dev, "Failed to remap IO DB area\n");
		err = -ENOMEM;
		goto err_db_ioremap;
	}

	init_db_area_idx(&func_to_io->free_db_area);

	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
		db_area = get_db_area(func_to_io);
		if (IS_ERR(db_area)) {
			dev_err(&pdev->dev, "Failed to get cmdq db area\n");
			err = PTR_ERR(db_area);
			goto err_db_area;
		}

		func_to_io->cmdq_db_area[cmdq] = db_area;
	}

	err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
			       func_to_io->cmdq_db_area);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
		goto err_init_cmdqs;
	}

	return 0;

err_init_cmdqs:
err_db_area:
	for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
		return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);

	iounmap(func_to_io->db_base);

err_db_ioremap:
	hinic_wqs_free(&func_to_io->wqs);

err_wqs_alloc:
	hinic_ceqs_free(&func_to_io->ceqs);
	return err;
}

/**
 * hinic_io_free - Free the IO components
 * @func_to_io: func to io channel that holds the IO components
 **/
void hinic_io_free(struct hinic_func_to_io *func_to_io)
{
	enum hinic_cmdq_type cmdq;

	hinic_free_cmdqs(&func_to_io->cmdqs);

	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
		return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);

	iounmap(func_to_io->db_base);
	hinic_wqs_free(&func_to_io->wqs);
	hinic_ceqs_free(&func_to_io->ceqs);
}