// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI QSPI driver
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
 * Author: Sourav Poddar <sourav.poddar@ti.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/sizes.h>

#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

struct ti_qspi_regs {
	u32 clkctrl;
};

struct ti_qspi {
	struct completion	transfer_complete;

	/* list synchronization */
	struct mutex            list_lock;

	struct spi_master	*master;
	void __iomem            *base;
	void __iomem            *mmap_base;
	size_t			mmap_size;
	struct regmap		*ctrl_base;
	unsigned int		ctrl_reg;
	struct clk		*fclk;
	struct device           *dev;

	struct ti_qspi_regs     ctx_reg;

	dma_addr_t		mmap_phys_base;
	dma_addr_t		rx_bb_dma_addr;
	void			*rx_bb_addr;
	struct dma_chan		*rx_chan;

	u32 cmd;
	u32 dc;

	bool mmap_enabled;
	int current_cs;
};

#define QSPI_PID			(0x0)
#define QSPI_SYSCONFIG			(0x10)
#define QSPI_SPI_CLOCK_CNTRL_REG	(0x40)
#define QSPI_SPI_DC_REG			(0x44)
#define QSPI_SPI_CMD_REG		(0x48)
#define QSPI_SPI_STATUS_REG		(0x4c)
#define QSPI_SPI_DATA_REG		(0x50)
#define QSPI_SPI_SETUP_REG(n)		((0x54 + 4 * n))
#define QSPI_SPI_SWITCH_REG		(0x64)
#define QSPI_SPI_DATA_REG_1		(0x68)
#define QSPI_SPI_DATA_REG_2		(0x6c)
#define QSPI_SPI_DATA_REG_3		(0x70)

#define QSPI_COMPLETION_TIMEOUT		msecs_to_jiffies(2000)

/* Clock Control */
#define QSPI_CLK_EN			(1 << 31)
#define QSPI_CLK_DIV_MAX		0xffff

/* Command */
#define QSPI_EN_CS(n)			(n << 28)
#define QSPI_WLEN(n)			((n - 1) << 19)
#define QSPI_3_PIN			(1 << 18)
#define QSPI_RD_SNGL			(1 << 16)
#define QSPI_WR_SNGL			(2 << 16)
#define QSPI_RD_DUAL			(3 << 16)
#define QSPI_RD_QUAD			(7 << 16)
#define QSPI_INVAL			(4 << 16)
#define QSPI_FLEN(n)			((n - 1) << 0)
#define QSPI_WLEN_MAX_BITS		128
#define QSPI_WLEN_MAX_BYTES		16
#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)

/* STATUS REGISTER */
#define BUSY				0x01
#define WC				0x02

/* Device Control */
#define QSPI_DD(m, n)			(m << (3 + n * 8))
#define QSPI_CKPHA(n)			(1 << (2 + n * 8))
#define QSPI_CSPOL(n)			(1 << (1 + n * 8))
#define QSPI_CKPOL(n)			(1 << (n * 8))

#define	QSPI_FRAME			4096

#define QSPI_AUTOSUSPEND_TIMEOUT         2000

#define MEM_CS_EN(n)			((n + 1) << 8)
#define MEM_CS_MASK			(7 << 8)

#define MM_SWITCH			0x1

#define QSPI_SETUP_RD_NORMAL		(0x0 << 12)
#define QSPI_SETUP_RD_DUAL		(0x1 << 12)
#define QSPI_SETUP_RD_QUAD		(0x3 << 12)
#define QSPI_SETUP_ADDR_SHIFT		8
#define QSPI_SETUP_DUMMY_SHIFT		10

#define QSPI_DMA_BUFFER_SIZE            SZ_64K

static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
		unsigned long reg)
{
	return readl(qspi->base + reg);
}

static inline void ti_qspi_write(struct ti_qspi *qspi,
		unsigned long val, unsigned long reg)
{
	writel(val, qspi->base + reg);
}

static int ti_qspi_setup(struct spi_device *spi)
{
	struct ti_qspi	*qspi = spi_master_get_devdata(spi->master);
	int ret;

	if (spi->master->busy) {
		dev_dbg(qspi->dev, "master busy doing other transfers\n");
		return -EBUSY;
	}

	if (!qspi->master->max_speed_hz) {
		dev_err(qspi->dev, "spi max frequency not defined\n");
		return -EINVAL;
	}

	spi->max_speed_hz = min(spi->max_speed_hz, qspi->master->max_speed_hz);

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0) {
		dev_err(qspi->dev, "pm_runtime_resume_and_get() failed\n");
		return ret;
	}

	pm_runtime_mark_last_busy(qspi->dev);
	ret = pm_runtime_put_autosuspend(qspi->dev);
	if (ret < 0) {
		dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
		return ret;
	}

	return 0;
}

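/*
 * Program the QSPI functional clock divider so SCLK does not exceed the
 * requested rate; the value is cached in ctx_reg to avoid redundant writes.
 */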
static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz)
{
	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
	int clk_div;
	u32 clk_ctrl_reg, clk_rate, clk_ctrl_new;

	clk_rate = clk_get_rate(qspi->fclk);
	clk_div = DIV_ROUND_UP(clk_rate, speed_hz) - 1;
	clk_div = clamp(clk_div, 0, QSPI_CLK_DIV_MAX);
	dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", speed_hz, clk_div);

	pm_runtime_resume_and_get(qspi->dev);

	clk_ctrl_new = QSPI_CLK_EN | clk_div;
	if (ctx_reg->clkctrl != clk_ctrl_new) {
		clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);

		clk_ctrl_reg &= ~QSPI_CLK_EN;

		/* disable SCLK */
		ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);

		/* enable SCLK */
		ti_qspi_write(qspi, clk_ctrl_new, QSPI_SPI_CLOCK_CNTRL_REG);
		ctx_reg->clkctrl = clk_ctrl_new;
	}

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);
}

static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
{
	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;

	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
}

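/*
 * Wait for the controller BUSY bit to clear, bounded by
 * QSPI_COMPLETION_TIMEOUT; returns non-zero if the controller is still busy.
 */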
static inline u32 qspi_is_busy(struct ti_qspi *qspi)
{
	u32 stat;
	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;

	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	while ((stat & BUSY) && time_after(timeout, jiffies)) {
		cpu_relax();
		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	}

	WARN(stat & BUSY, "qspi busy\n");
	return stat & BUSY;
}

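/*
 * Poll the status register for the word-complete (WC) bit; returns
 * -ETIMEDOUT if it does not assert within QSPI_COMPLETION_TIMEOUT.
 */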
static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
{
	u32 stat;
	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;

	do {
		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
		if (stat & WC)
			return 0;
		cpu_relax();
	} while (time_after(timeout, jiffies));

	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	if (stat & WC)
		return 0;
	return -ETIMEDOUT;
}

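/*
 * PIO write path: push TX data through the data registers, packing up to
 * 128 bits (16 bytes) per command when the transfer uses 8-bit words.
 */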
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			  int count)
{
	int wlen, xfer_len;
	unsigned int cmd;
	const u8 *txbuf;
	u32 data;

	txbuf = t->tx_buf;
	cmd = qspi->cmd | QSPI_WR_SNGL;
	wlen = t->bits_per_word >> 3;	/* in bytes */
	xfer_len = wlen;

	while (count) {
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
					cmd, qspi->dc, *txbuf);
			if (count >= QSPI_WLEN_MAX_BYTES) {
				u32 *txp = (u32 *)txbuf;

				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_3);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_2);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_1);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG);
				xfer_len = QSPI_WLEN_MAX_BYTES;
				cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
			} else {
				writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
				cmd = qspi->cmd | QSPI_WR_SNGL;
				xfer_len = wlen;
				cmd |= QSPI_WLEN(wlen);
			}
			break;
		case 2:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
					cmd, qspi->dc, *(u16 *)txbuf);
			writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
					cmd, qspi->dc, *(u32 *)txbuf);
			writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		}

		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "write timed out\n");
			return -ETIMEDOUT;
		}
		txbuf += xfer_len;
		count -= xfer_len;
	}

	return 0;
}

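/*
 * PIO read path: issue single/dual/quad read commands and drain the data
 * registers, reading up to 128 bits per command for 8-bit words.
 */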
static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			 int count)
{
	int wlen;
	unsigned int cmd;
	u32 rx;
	u8 rxlen, rx_wlen;
	u8 *rxbuf;

	rxbuf = t->rx_buf;
	cmd = qspi->cmd;
	switch (t->rx_nbits) {
	case SPI_NBITS_DUAL:
		cmd |= QSPI_RD_DUAL;
		break;
	case SPI_NBITS_QUAD:
		cmd |= QSPI_RD_QUAD;
		break;
	default:
		cmd |= QSPI_RD_SNGL;
		break;
	}
	wlen = t->bits_per_word >> 3;	/* in bytes */
	rx_wlen = wlen;

	while (count) {
		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			/*
			 * Optimize 8-bit word transfers, as used by
			 * SPI flash devices.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES) {
				rxlen = QSPI_WLEN_MAX_BYTES;
			} else {
				rxlen = min(count, 4);
			}
			rx_wlen = rxlen << 3;
			cmd &= ~QSPI_WLEN_MASK;
			cmd |= QSPI_WLEN(rx_wlen);
			break;
		default:
			rxlen = wlen;
			break;
		}

		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "read timed out\n");
			return -ETIMEDOUT;
		}

		switch (wlen) {
		case 1:
			/*
			 * Optimize 8-bit word transfers, as used by
			 * SPI flash devices.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES) {
				u32 *rxp = (u32 *) rxbuf;
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				*rxp++ = be32_to_cpu(rx);
			} else {
				u8 *rxp = rxbuf;
				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				if (rx_wlen >= 8)
					*rxp++ = rx >> (rx_wlen - 8);
				if (rx_wlen >= 16)
					*rxp++ = rx >> (rx_wlen - 16);
				if (rx_wlen >= 24)
					*rxp++ = rx >> (rx_wlen - 24);
				if (rx_wlen >= 32)
					*rxp++ = rx;
			}
			break;
		case 2:
			*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
			break;
		}
		rxbuf += rxlen;
		count -= rxlen;
	}

	return 0;
}

static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			     int count)
{
	int ret;

	if (t->tx_buf) {
		ret = qspi_write_msg(qspi, t, count);
		if (ret) {
			dev_dbg(qspi->dev, "Error while writing\n");
			return ret;
		}
	}

	if (t->rx_buf) {
		ret = qspi_read_msg(qspi, t, count);
		if (ret) {
			dev_dbg(qspi->dev, "Error while reading\n");
			return ret;
		}
	}

	return 0;
}

static void ti_qspi_dma_callback(void *param)
{
	struct ti_qspi *qspi = param;

	complete(&qspi->transfer_complete);
}

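/*
 * Run one dmaengine memcpy transfer (callers use it to pull data out of the
 * memory-mapped flash window) and wait for its completion callback.
 */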
static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
			    dma_addr_t dma_src, size_t len)
{
	struct dma_chan *chan = qspi->rx_chan;
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_async_tx_descriptor *tx;
	int ret;
	unsigned long time_left;

	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
		return -EIO;
	}

	tx->callback = ti_qspi_dma_callback;
	tx->callback_param = qspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&qspi->transfer_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	time_left = wait_for_completion_timeout(&qspi->transfer_complete,
					  msecs_to_jiffies(len));
	if (time_left == 0) {
		dmaengine_terminate_sync(chan);
		dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
				     void *to, size_t readsize)
{
	dma_addr_t dma_src = qspi->mmap_phys_base + offs;
	int ret = 0;

	/*
	 * Use a bounce buffer since filesystems such as jffs2 and ubifs
	 * may pass buffers that do not belong to the kernel lowmem region.
	 */
	while (readsize != 0) {
		size_t xfer_len = min_t(size_t, QSPI_DMA_BUFFER_SIZE,
					readsize);

		ret = ti_qspi_dma_xfer(qspi, qspi->rx_bb_dma_addr,
				       dma_src, xfer_len);
		if (ret != 0)
			return ret;
		memcpy(to, qspi->rx_bb_addr, xfer_len);
		readsize -= xfer_len;
		dma_src += xfer_len;
		to += xfer_len;
	}

	return ret;
}

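/*
 * DMA the memory-mapped flash window directly into each segment of an
 * already-mapped scatterlist.
 */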
static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
			       loff_t from)
{
	struct scatterlist *sg;
	dma_addr_t dma_src = qspi->mmap_phys_base + from;
	dma_addr_t dma_dst;
	int i, len, ret;

	for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
		dma_dst = sg_dma_address(sg);
		len = sg_dma_len(sg);
		ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
		if (ret)
			return ret;
		dma_src += len;
	}

	return 0;
}

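/*
 * Switch the controller into memory-mapped mode and, when a syscon handle is
 * present, route the selected chip select to the mmap window.
 */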
static void ti_qspi_enable_memory_map(struct spi_device *spi)
{
	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);

	ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base) {
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK,
				   MEM_CS_EN(spi_get_chipselect(spi, 0)));
	}
	qspi->mmap_enabled = true;
	qspi->current_cs = spi_get_chipselect(spi, 0);
}

static void ti_qspi_disable_memory_map(struct spi_device *spi)
{
	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);

	ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base)
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK, 0);
	qspi->mmap_enabled = false;
	qspi->current_cs = -1;
}

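/*
 * Program the per-chip-select SETUP register with the read opcode, data bus
 * width, address width and dummy byte count used for memory-mapped reads.
 */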
static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
				    u8 data_nbits, u8 addr_width,
				    u8 dummy_bytes)
{
	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
	u32 memval = opcode;

	switch (data_nbits) {
	case SPI_NBITS_QUAD:
		memval |= QSPI_SETUP_RD_QUAD;
		break;
	case SPI_NBITS_DUAL:
		memval |= QSPI_SETUP_RD_DUAL;
		break;
	default:
		memval |= QSPI_SETUP_RD_NORMAL;
		break;
	}
	memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
		   dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
	ti_qspi_write(qspi, memval,
		      QSPI_SPI_SETUP_REG(spi_get_chipselect(spi, 0)));
}

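/*
 * spi-mem adjust_op_size hook: clamp read sizes to the mmap window, or to
 * the QSPI frame length when falling back to software-generated transfers.
 */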
static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	size_t max_len;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (op->addr.val < qspi->mmap_size) {
			/* Limit MMIO to the mmapped region */
			if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
				max_len = qspi->mmap_size - op->addr.val;
				op->data.nbytes = min((size_t) op->data.nbytes,
						      max_len);
			}
		} else {
			/*
			 * Use fallback mode (SW generated transfers) above the
			 * mmapped region.
			 * Adjust the size to comply with the QSPI max frame length.
			 */
			max_len = QSPI_FRAME;
			max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
			op->data.nbytes = min((size_t) op->data.nbytes,
					      max_len);
		}
	}

	return 0;
}

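/*
 * spi-mem exec_op hook: serve flash reads from the memory-mapped window,
 * using DMA when an Rx channel is available and memcpy_fromio() otherwise.
 */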
static int ti_qspi_exec_mem_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
	u32 from = 0;
	int ret = 0;

	/* Only optimize read path. */
	if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
	    !op->addr.nbytes || op->addr.nbytes > 4)
		return -ENOTSUPP;

	/* Address exceeds MMIO window size, fall back to regular mode. */
	from = op->addr.val;
	if (from + op->data.nbytes > qspi->mmap_size)
		return -ENOTSUPP;

	mutex_lock(&qspi->list_lock);

	if (!qspi->mmap_enabled || qspi->current_cs != spi_get_chipselect(mem->spi, 0)) {
		ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
		ti_qspi_enable_memory_map(mem->spi);
	}
	ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
				op->addr.nbytes, op->dummy.nbytes);

	if (qspi->rx_chan) {
		struct sg_table sgt;

		if (virt_addr_valid(op->data.buf.in) &&
		    !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
							&sgt)) {
			ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
			spi_controller_dma_unmap_mem_op_data(mem->spi->master,
							     op, &sgt);
		} else {
			ret = ti_qspi_dma_bounce_buffer(qspi, from,
							op->data.buf.in,
							op->data.nbytes);
		}
	} else {
		memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
			      op->data.nbytes);
	}

	mutex_unlock(&qspi->list_lock);

	return ret;
}

static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
	.exec_op = ti_qspi_exec_mem_op,
	.adjust_op_size = ti_qspi_adjust_op_size,
};

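/*
 * transfer_one_message handler for the PIO path: program the device control
 * and command registers, then run each transfer through qspi_transfer_msg().
 */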
static int ti_qspi_start_transfer_one(struct spi_master *master,
		struct spi_message *m)
{
	struct ti_qspi *qspi = spi_master_get_devdata(master);
	struct spi_device *spi = m->spi;
	struct spi_transfer *t;
	int status = 0, ret;
	unsigned int frame_len_words, transfer_len_words;
	int wlen;

	/* setup device control reg */
	qspi->dc = 0;

	if (spi->mode & SPI_CPHA)
		qspi->dc |= QSPI_CKPHA(spi_get_chipselect(spi, 0));
	if (spi->mode & SPI_CPOL)
		qspi->dc |= QSPI_CKPOL(spi_get_chipselect(spi, 0));
	if (spi->mode & SPI_CS_HIGH)
		qspi->dc |= QSPI_CSPOL(spi_get_chipselect(spi, 0));

	frame_len_words = 0;
	list_for_each_entry(t, &m->transfers, transfer_list)
		frame_len_words += t->len / (t->bits_per_word >> 3);
	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);

	/* setup command reg */
	qspi->cmd = 0;
	qspi->cmd |= QSPI_EN_CS(spi_get_chipselect(spi, 0));
	qspi->cmd |= QSPI_FLEN(frame_len_words);

	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);

	mutex_lock(&qspi->list_lock);

	if (qspi->mmap_enabled)
		ti_qspi_disable_memory_map(spi);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
			     QSPI_WLEN(t->bits_per_word));

		wlen = t->bits_per_word >> 3;
		transfer_len_words = min(t->len / wlen, frame_len_words);

		ti_qspi_setup_clk(qspi, t->speed_hz);
		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
		if (ret) {
			dev_dbg(qspi->dev, "transfer message failed\n");
			mutex_unlock(&qspi->list_lock);
			return -EINVAL;
		}

		m->actual_length += transfer_len_words * wlen;
		frame_len_words -= transfer_len_words;
		if (frame_len_words == 0)
			break;
	}

	mutex_unlock(&qspi->list_lock);

	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
	m->status = status;
	spi_finalize_current_message(master);

	return status;
}

static int ti_qspi_runtime_resume(struct device *dev)
{
	struct ti_qspi      *qspi;

	qspi = dev_get_drvdata(dev);
	ti_qspi_restore_ctx(qspi);

	return 0;
}

static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
{
	if (qspi->rx_bb_addr)
		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
				  qspi->rx_bb_addr,
				  qspi->rx_bb_dma_addr);

	if (qspi->rx_chan)
		dma_release_channel(qspi->rx_chan);
}

static const struct of_device_id ti_qspi_match[] = {
	{.compatible = "ti,dra7xxx-qspi" },
	{.compatible = "ti,am4372-qspi" },
	{},
};
MODULE_DEVICE_TABLE(of, ti_qspi_match);

static int ti_qspi_probe(struct platform_device *pdev)
{
	struct  ti_qspi *qspi;
	struct spi_master *master;
	struct resource         *r, *res_mmap;
	struct device_node *np = pdev->dev.of_node;
	u32 max_freq;
	int ret = 0, num_cs, irq;
	dma_cap_mask_t mask;

	master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
	if (!master)
		return -ENOMEM;

	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;

	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->setup = ti_qspi_setup;
	master->auto_runtime_pm = true;
	master->transfer_one_message = ti_qspi_start_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
				     SPI_BPW_MASK(8);
	master->mem_ops = &ti_qspi_mem_ops;

	if (!of_property_read_u32(np, "num-cs", &num_cs))
		master->num_chipselect = num_cs;

	qspi = spi_master_get_devdata(master);
	qspi->master = master;
	qspi->dev = &pdev->dev;
	platform_set_drvdata(pdev, qspi);

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
	if (r == NULL) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (r == NULL) {
			dev_err(&pdev->dev, "missing platform data\n");
			ret = -ENODEV;
			goto free_master;
		}
	}

	res_mmap = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, "qspi_mmap");
	if (res_mmap == NULL) {
		res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res_mmap == NULL) {
			dev_err(&pdev->dev,
				"memory mapped resource not found\n");
		}
	}

	if (res_mmap)
		qspi->mmap_size = resource_size(res_mmap);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto free_master;
	}

	mutex_init(&qspi->list_lock);

	qspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(qspi->base)) {
		ret = PTR_ERR(qspi->base);
		goto free_master;
	}

	if (of_property_read_bool(np, "syscon-chipselects")) {
		qspi->ctrl_base =
		syscon_regmap_lookup_by_phandle(np,
						"syscon-chipselects");
		if (IS_ERR(qspi->ctrl_base)) {
			ret = PTR_ERR(qspi->ctrl_base);
			goto free_master;
		}
		ret = of_property_read_u32_index(np,
						 "syscon-chipselects",
						 1, &qspi->ctrl_reg);
		if (ret) {
			dev_err(&pdev->dev,
				"couldn't get ctrl_mod reg index\n");
			goto free_master;
		}
	}

	qspi->fclk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(qspi->fclk)) {
		ret = PTR_ERR(qspi->fclk);
		dev_err(&pdev->dev, "could not get clk: %d\n", ret);
	}

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(&pdev->dev);

	if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
		master->max_speed_hz = max_freq;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	qspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(qspi->rx_chan)) {
		dev_err(qspi->dev,
			"No Rx DMA available, trying mmap mode\n");
		qspi->rx_chan = NULL;
		ret = 0;
		goto no_dma;
	}
	qspi->rx_bb_addr = dma_alloc_coherent(qspi->dev,
					      QSPI_DMA_BUFFER_SIZE,
					      &qspi->rx_bb_dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!qspi->rx_bb_addr) {
		dev_err(qspi->dev,
			"dma_alloc_coherent failed, using PIO mode\n");
		dma_release_channel(qspi->rx_chan);
		/* Forget the released channel so the PIO fallback is used. */
		qspi->rx_chan = NULL;
		goto no_dma;
	}
	master->dma_rx = qspi->rx_chan;
	init_completion(&qspi->transfer_complete);
	if (res_mmap)
		qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;

no_dma:
	if (!qspi->rx_chan && res_mmap) {
		qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
		if (IS_ERR(qspi->mmap_base)) {
			dev_info(&pdev->dev,
				 "mmap failed with error %ld using PIO mode\n",
				 PTR_ERR(qspi->mmap_base));
			qspi->mmap_base = NULL;
			master->mem_ops = NULL;
		}
	}
	qspi->mmap_enabled = false;
	qspi->current_cs = -1;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (!ret)
		return 0;

	ti_qspi_dma_cleanup(qspi);

	pm_runtime_disable(&pdev->dev);
free_master:
	spi_master_put(master);
	return ret;
}

static int ti_qspi_remove(struct platform_device *pdev)
{
	struct ti_qspi *qspi = platform_get_drvdata(pdev);
	int rc;

	rc = spi_master_suspend(qspi->master);
	if (rc)
		return rc;

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	ti_qspi_dma_cleanup(qspi);

	return 0;
}

static const struct dev_pm_ops ti_qspi_pm_ops = {
	.runtime_resume = ti_qspi_runtime_resume,
};

static struct platform_driver ti_qspi_driver = {
	.probe	= ti_qspi_probe,
	.remove = ti_qspi_remove,
	.driver = {
		.name	= "ti-qspi",
		.pm =   &ti_qspi_pm_ops,
		.of_match_table = ti_qspi_match,
	}
};

module_platform_driver(ti_qspi_driver);

MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QSPI controller driver");
MODULE_ALIAS("platform:ti-qspi");