// SPDX-License-Identifier: GPL-2.0
//
// Renesas R-Car Audio DMAC support
//
// Copyright (C) 2015 Renesas Electronics Corp.
// Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>

#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE		(1 << 0)


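/*
 * DMAEngine transfer state: the requested channel, the cookie returned by
 * dmaengine_submit(), and the buffer size in bytes (used by .pointer).
 */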
struct rsnd_dmaen {
	struct dma_chan		*chan;
	dma_cookie_t		cookie;
	unsigned int		dma_len;
};

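/*
 * Audio DMAC peri peri transfer state: the channel index used to locate the
 * per-channel register block, and the cached PDMACHCR value.
 */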
struct rsnd_dmapp {
	int			dmapp_id;
	u32			chcr;
};

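/*
 * One DMA instance sits between mod_from and mod_to on the stream path.
 * The union holds the backend-specific state: DMAEngine (en) when one end
 * is memory, Audio DMAC peri peri (pp) for module-to-module transfers.
 */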
struct rsnd_dma {
	struct rsnd_mod		mod;
	struct rsnd_mod		*mod_from;
	struct rsnd_mod		*mod_to;
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem *base;
	int dmaen_num;
	int dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod) container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)

/* for DEBUG */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

static struct rsnd_mod mem = {
};

/*
 *		Audio DMAC
 */
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	if (rsnd_io_is_working(io))
		rsnd_dai_period_elapsed(io);
}

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

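/*
 * Exactly one of mod_from/mod_to is a real module here (the other end is
 * memory, represented by NULL), so request the DMAEngine channel from
 * whichever module is present.
 */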
static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						   struct rsnd_mod *mod_from,
						   struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan)
		dmaengine_terminate_all(dmaen->chan);

	return 0;
}

static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
				   struct rsnd_dai_stream *io,
				   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * DMAEngine release uses mutex lock.
	 * Thus, it shouldn't be called under spinlock.
	 * Let's call it under nolock_stop
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	if (dmaen->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	/*
	 * DMAEngine request uses mutex lock.
	 * Thus, it shouldn't be called under spinlock.
	 * Let's call it under nolock_start
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return -EIO;
	}

	return 0;
}

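/*
 * Configure the slave addresses and start a cyclic transfer over the whole
 * PCM buffer, split into period-sized chunks; rsnd_dmaen_complete() runs
 * after each period.
 */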
static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	int is_play = rsnd_io_is_play(io);
	int ret;

	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr	= dma->src_addr;
	cfg.dst_addr	= dma->dst_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "%s[%d] %pad -> %pad\n",
		rsnd_mod_name(mod), rsnd_mod_id(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 substream->runtime->dma_addr,
					 snd_pcm_lib_buffer_bytes(substream),
					 snd_pcm_lib_period_bytes(substream),
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback		= rsnd_dmaen_complete;
	desc->callback_param	= rsnd_mod_get(dma);

	dmaen->dma_len		= snd_pcm_lib_buffer_bytes(substream);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}

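/*
 * Request the DMAEngine channel named "name" from the of_node child that
 * matches this module's id.
 */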
struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
					  struct rsnd_mod *mod, char *name)
{
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		if (i == rsnd_mod_id(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, name);
		i++;
	}

	/*
	 * of_node comes from rsnd_xxx_of_node(), which takes a reference,
	 * so it must be dropped with of_node_put() here.
	 */
	of_node_put(of_node);

	return chan;
}

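/*
 * Probe for a DMAEngine channel at attach time only to confirm that DMA is
 * possible and to remember the DMAC device (for IPMMU), then release it.
 * The channel actually used is requested again in rsnd_dmaen_nolock_start().
 */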
static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			   struct rsnd_dma *dma,
			   struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/*
		 * DMA is not available, fall back to PIO mode.
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	/*
	 * use it for IPMMU if needed
	 * see
	 *	rsnd_preallocate_pages()
	 */
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

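/*
 * Derive the current buffer position from the DMAEngine residue:
 * pos = dma_len - residue, converted to frames for ALSA.
 */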
static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name	= "audmac",
	.nolock_start = rsnd_dmaen_nolock_start,
	.nolock_stop  = rsnd_dmaen_nolock_stop,
	.start	= rsnd_dmaen_start,
	.stop	= rsnd_dmaen_stop,
	.pointer = rsnd_dmaen_pointer,
};

/*
 *		Audio DMAC peri peri
 */
static const u8 gen2_id_table_ssiu[] = {
	0x00, /* SSI00 */
	0x04, /* SSI10 */
	0x08, /* SSI20 */
	0x0c, /* SSI3  */
	0x0d, /* SSI4  */
	0x0e, /* SSI5  */
	0x0f, /* SSI6  */
	0x10, /* SSI7  */
	0x11, /* SSI8  */
	0x12, /* SSI90 */
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};

static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = rsnd_mod_id(mod);
	int size = 0;

	if (mod == ssi) {
		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s[%d])\n",
			rsnd_mod_name(mod), rsnd_mod_id(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}

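/*
 * Pack the peri peri resource IDs into the PDMACHCR value: mod_from in
 * bits [31:24], mod_to in bits [23:16].
 */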
static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
		(rsnd_dmapp_get_id(io, mod_to) << 16);
}

#define rsnd_dmapp_addr(dmac, dma, reg) \
	(dmac->base + 0x20 + reg + \
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}

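/*
 * Clear the DE bit and poll PDMACHCR for up to ~1ms (1024 x 1us) until the
 * channel reports that it has stopped; -EIO on timeout.
 */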
static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0,  PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

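/*
 * Program the source/destination addresses, then kick the transfer by
 * writing CHCR (DE was already set in it at attach time).
 */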
static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr,	PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr,	PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr,	PDMACHCR);

	return 0;
}

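/*
 * Reserve the next Audio DMAC peri peri channel for this stream and
 * precompute its CHCR value.
 */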
static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name	= "audmac-pp",
	.start	= rsnd_dmapp_start,
	.stop	= rsnd_dmapp_stop,
	.quit	= rsnd_dmapp_stop,
};

/*
 *		Common DMAC Interface
 */

/*
 *	DMA read/write register offset
 *
 *	RSND_xxx_I_N	for Audio DMAC input
 *	RSND_xxx_O_N	for Audio DMAC output
 *	RSND_xxx_I_P	for Audio DMAC peri peri input
 *	RSND_xxx_O_P	for Audio DMAC peri peri output
 *
 *	ex) R-Car H2 case
 *	      mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC PP out
 *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	CMD : 0xec500000 /            / 0xec008000 /            / 0xec308000
 */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))
#define RDMA_SSIU_O_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))

#define RDMA_SSIU_I_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))
#define RDMA_SSIU_O_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))

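/*
 * The table below is indexed as [is_ssi][is_play][use_src + use_cmd]:
 * is_ssi is 0 when mod is not the SSI (i.e. the SRC/DVC side), 1 for SSI,
 * and bumped to 2 (SSIU) when the bus interface is in use.
 */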
static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSI_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSIU_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } } },
	};

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

	/*
	 * gen1 uses default DMA addr
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	if (!mod)
		return 0;

	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
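/*
 * Build the module chain for this stream (mem/SSI at the ends, then
 * SRC/CTU/MIX/DVC in order) and pick the two modules that this DMA
 * instance transfers between.
 */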
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*->		Audio DMAC
	 * -o->		Audio DMAC peri peri
	 */
	mod_start	= (is_play) ? NULL : ssi;
	mod_end		= (is_play) ? ssi  : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 *		| SSI | SRC |
	 * -------------+-----+-----+
	 *  is_play	|  o  |  *  |
	 * !is_play	|  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from	= mod[idx - 1];
		*mod_to		= mod[idx];
	} else {
		*mod_from	= mod[0];
		*mod_to		= mod[1];
	}

	dev_dbg(dev, "module connection (this is %s[%d])\n",
		rsnd_mod_name(this), rsnd_mod_id(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, "  %s[%d]%s\n",
			rsnd_mod_name(mod[i] ? mod[i] : &mem),
			rsnd_mod_id  (mod[i] ? mod[i] : &mem),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to)   ? " to" : "");
	}
}

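/*
 * Pick the DMA backend: Audio DMAC peri peri when both ends of the path
 * are modules, DMAEngine when one end is memory. Gen1 always uses
 * DMAEngine.
 */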
static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
			  struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma *dma;
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * DMA is not available, fall back to PIO mode.
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* for Gen2 or later */
	if (mod_from && mod_to) {
		ops	= &rsnd_dmapp_ops;
		attach	= rsnd_dmapp_attach;
		dma_id	= dmac->dmapp_num;
		type	= RSND_MOD_AUDMAPP;
	} else {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	/* for Gen1, overwrite */
	if (rsnd_is_gen1(priv)) {
		ops	= &rsnd_dmaen_ops;
		attach	= rsnd_dmaen_attach;
		dma_id	= dmac->dmaen_num;
		type	= RSND_MOD_AUDMA;
	}

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	*dma_mod = rsnd_mod_get(dma);

	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
			    rsnd_mod_get_status, type, dma_id);
	if (ret < 0)
		return ret;

	dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
		rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
		rsnd_mod_name(mod_from ? mod_from : &mem),
		rsnd_mod_id  (mod_from ? mod_from : &mem),
		rsnd_mod_name(mod_to   ? mod_to   : &mem),
		rsnd_mod_id  (mod_to   ? mod_to   : &mem));

	ret = attach(io, dma, mod_from, mod_to);
	if (ret < 0)
		return ret;

	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
	dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
	dma->mod_from = mod_from;
	dma->mod_to   = mod_to;

	return 0;
}

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	if (!(*dma_mod)) {
		int ret = rsnd_dma_alloc(io, mod, dma_mod);

		if (ret < 0)
			return ret;
	}

	return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
}

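/*
 * Map the Audio DMAC peri peri ("audmapp") registers for Gen2 or later.
 * If the resource or allocation is missing, keep going: the stream will
 * fall back to PIO mode.
 */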
int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2 or later
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	priv->dma = dmac;

	/* dummy mem mod for debug */
	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, NULL, 0, 0);
}
799