xref: /linux/drivers/mtd/nand/raw/davinci_nand.c (revision 307797159ac25fe5a2048bf5c6a5718298edca57)
1 /*
2  * davinci_nand.c - NAND Flash Driver for DaVinci family chips
3  *
4  * Copyright © 2006 Texas Instruments.
5  *
6  * Port to 2.6.23 Copyright © 2008 by:
7  *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
8  *   Troy Kisky <troy.kisky@boundarydevices.com>
9  *   Dirk Behme <Dirk.Behme@gmail.com>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24  */
25 
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/platform_device.h>
29 #include <linux/err.h>
30 #include <linux/io.h>
31 #include <linux/mtd/rawnand.h>
32 #include <linux/mtd/partitions.h>
33 #include <linux/slab.h>
34 #include <linux/of_device.h>
35 #include <linux/of.h>
36 
37 #include <linux/platform_data/mtd-davinci.h>
38 #include <linux/platform_data/mtd-davinci-aemif.h>
39 
40 /*
41  * This is a device driver for the NAND flash controller found on the
42  * various DaVinci family chips.  It handles up to four SoC chipselects,
43  * and some flavors of secondary chipselect (e.g. based on A12) as used
44  * with multichip packages.
45  *
46  * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
47  * available on chips like the DM355 and OMAP-L137 and needed with the
48  * more error-prone MLC NAND chips.
49  *
50  * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
51  * outputs in a "wire-AND" configuration, with no per-chip signals.
52  */
53 struct davinci_nand_info {
54 	struct nand_chip	chip;
55 
56 	struct platform_device	*pdev;
57 
58 	bool			is_readmode;
59 
60 	void __iomem		*base;
61 	void __iomem		*vaddr;
62 
63 	void __iomem		*current_cs;
64 
65 	uint32_t		mask_chipsel;
66 	uint32_t		mask_ale;
67 	uint32_t		mask_cle;
68 
69 	uint32_t		core_chipsel;
70 
71 	struct davinci_aemif_timing	*timing;
72 };
73 
74 static DEFINE_SPINLOCK(davinci_nand_lock);
75 static bool ecc4_busy;
76 
77 static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
78 {
79 	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
80 }
81 
82 static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
83 		int offset)
84 {
85 	return __raw_readl(info->base + offset);
86 }
87 
88 static inline void davinci_nand_writel(struct davinci_nand_info *info,
89 		int offset, unsigned long value)
90 {
91 	__raw_writel(value, info->base + offset);
92 }
93 
94 /*----------------------------------------------------------------------*/
95 
96 /*
97  * Access to hardware control lines:  ALE, CLE, secondary chipselect.
98  */
99 
100 static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
101 				   unsigned int ctrl)
102 {
103 	struct davinci_nand_info	*info = to_davinci_nand(mtd);
104 	void __iomem			*addr = info->current_cs;
105 	struct nand_chip		*nand = mtd_to_nand(mtd);
106 
107 	/* Did the control lines change? */
108 	if (ctrl & NAND_CTRL_CHANGE) {
109 		if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
110 			addr += info->mask_cle;
111 		else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
112 			addr += info->mask_ale;
113 
114 		nand->IO_ADDR_W = addr;
115 	}
116 
117 	if (cmd != NAND_CMD_NONE)
118 		iowrite8(cmd, nand->IO_ADDR_W);
119 }
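/*
 * Illustration (not part of the driver logic): with the default
 * nandboot-compatible masks (typically mask_cle = 0x10 and
 * mask_ale = 0x08, matching the BIT(4)/BIT(3) wiring described in the
 * NOTE above nand_davinci_read_buf()), a command byte lands at
 * current_cs + 0x10, an address byte at current_cs + 0x08, and data
 * bytes at current_cs itself; the AEMIF address lines drive CLE/ALE,
 * so no separate control signalling is needed.
 */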
120 
121 static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
122 {
123 	struct davinci_nand_info	*info = to_davinci_nand(mtd);
124 
125 	info->current_cs = info->vaddr;
126 
127 	/* maybe kick in a second chipselect */
128 	if (chip > 0)
129 		info->current_cs += info->mask_chipsel;
130 
131 	info->chip.IO_ADDR_W = info->current_cs;
132 	info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
133 }
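/*
 * Example: a multichip package whose second die is selected via
 * address line A12 (as mentioned in the header comment) would use
 * mask_chipsel = BIT(12), so selecting chip 1 simply moves the data
 * window 4 KiB up inside the same AEMIF chipselect region.
 */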
134 
135 /*----------------------------------------------------------------------*/
136 
137 /*
138  * 1-bit hardware ECC ... context maintained for each core chipselect
139  */
140 
141 static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
142 {
143 	struct davinci_nand_info *info = to_davinci_nand(mtd);
144 
145 	return davinci_nand_readl(info, NANDF1ECC_OFFSET
146 			+ 4 * info->core_chipsel);
147 }
148 
149 static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
150 {
151 	struct davinci_nand_info *info;
152 	uint32_t nandcfr;
153 	unsigned long flags;
154 
155 	info = to_davinci_nand(mtd);
156 
157 	/* Reset ECC hardware */
158 	nand_davinci_readecc_1bit(mtd);
159 
160 	spin_lock_irqsave(&davinci_nand_lock, flags);
161 
162 	/* Restart ECC hardware */
163 	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
164 	nandcfr |= BIT(8 + info->core_chipsel);
165 	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
166 
167 	spin_unlock_irqrestore(&davinci_nand_lock, flags);
168 }
169 
170 /*
171  * Read hardware ECC value and pack into three bytes
172  */
173 static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
174 				      const u_char *dat, u_char *ecc_code)
175 {
176 	unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
177 	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
178 
179 	/* invert so that erased block ecc is correct */
180 	ecc24 = ~ecc24;
181 	ecc_code[0] = (u_char)(ecc24);
182 	ecc_code[1] = (u_char)(ecc24 >> 8);
183 	ecc_code[2] = (u_char)(ecc24 >> 16);
184 
185 	return 0;
186 }
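/*
 * Worked example of the packing above: bits [11:0] of the ECC register
 * become ecc24[11:0] and bits [27:16] become ecc24[23:12].  The
 * inversion makes an erased page check out: for all-0xff data the
 * engine computes 0, and ~0 yields the bytes ff ff ff, which is exactly
 * what the erased OOB area already contains.
 */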
187 
188 static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
189 				     u_char *read_ecc, u_char *calc_ecc)
190 {
191 	struct nand_chip *chip = mtd_to_nand(mtd);
192 	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
193 					  (read_ecc[2] << 16);
194 	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
195 					  (calc_ecc[2] << 16);
196 	uint32_t diff = eccCalc ^ eccNand;
197 
198 	if (diff) {
199 		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
200 			/* Correctable error */
201 			if ((diff >> (12 + 3)) < chip->ecc.size) {
202 				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
203 				return 1;
204 			} else {
205 				return -EBADMSG;
206 			}
207 		} else if (!(diff & (diff - 1))) {
208 			/* Single bit ECC error in the ECC itself,
209 			 * nothing to fix */
210 			return 1;
211 		} else {
212 			/* Uncorrectable error */
213 			return -EBADMSG;
214 		}
215 
216 	}
217 	return 0;
218 }
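/*
 * Worked example of the test above: for a single flipped data bit the
 * upper and lower 12-bit halves of diff are bitwise complements.  E.g.
 * diff == 0x323cdc passes ((0x323 ^ 0xcdc) == 0xfff); the upper half
 * 0x323 == 803 locates the error at byte 803 >> 3 == 100, bit
 * 803 & 7 == 3, which is the bit the dat[] update flips back.
 */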
219 
220 /*----------------------------------------------------------------------*/
221 
222 /*
223  * 4-bit hardware ECC ... context maintained over entire AEMIF
224  *
225  * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
226  * since that forces use of a problematic "infix OOB" layout.
227  * Among other things, it trashes manufacturer bad block markers.
228  * Also, and specific to this hardware, it ECC-protects the "prepad"
229  * in the OOB ... while having ECC protection for parts of OOB would
230  * seem useful, the current MTD stack sometimes wants to update the
231  * OOB without recomputing ECC.
232  */
233 
234 static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
235 {
236 	struct davinci_nand_info *info = to_davinci_nand(mtd);
237 	unsigned long flags;
238 	u32 val;
239 
240 	/* Reset ECC hardware */
241 	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
242 
243 	spin_lock_irqsave(&davinci_nand_lock, flags);
244 
245 	/* Start 4-bit ECC calculation for read/write */
246 	val = davinci_nand_readl(info, NANDFCR_OFFSET);
247 	val &= ~(0x03 << 4);
248 	val |= (info->core_chipsel << 4) | BIT(12);
249 	davinci_nand_writel(info, NANDFCR_OFFSET, val);
250 
251 	info->is_readmode = (mode == NAND_ECC_READ);
252 
253 	spin_unlock_irqrestore(&davinci_nand_lock, flags);
254 }
255 
256 /* Read raw ECC code after writing to NAND. */
257 static void
258 nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
259 {
260 	const u32 mask = 0x03ff03ff;
261 
262 	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
263 	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
264 	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
265 	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
266 }
267 
268 /* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
269 static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
270 		const u_char *dat, u_char *ecc_code)
271 {
272 	struct davinci_nand_info *info = to_davinci_nand(mtd);
273 	u32 raw_ecc[4], *p;
274 	unsigned i;
275 
276 	/* After a read, terminate ECC calculation by a dummy read
277 	 * of some 4-bit ECC register.  ECC covers everything that
278 	 * was read; correct() just uses the hardware state, so
279 	 * ecc_code is not needed.
280 	 */
281 	if (info->is_readmode) {
282 		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
283 		return 0;
284 	}
285 
286 	/* Pack eight raw 10-bit ecc values into ten bytes, making
287 	 * two passes which each convert four values (in upper and
288 	 * lower halves of two 32-bit words) into five bytes.  The
289 	 * ROM boot loader uses this same packing scheme.
290 	 */
291 	nand_davinci_readecc_4bit(info, raw_ecc);
292 	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
293 		*ecc_code++ =   p[0]        & 0xff;
294 		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
295 		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
296 		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
297 		*ecc_code++ =  (p[1] >> 18) & 0xff;
298 	}
299 
300 	return 0;
301 }
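/*
 * Byte layout produced by each pass of the loop above (e0..e3 are four
 * consecutive 10-bit ECC values):
 *
 *	byte 0:  e0[7:0]
 *	byte 1:  e1[5:0] << 2 | e0[9:8]
 *	byte 2:  e2[3:0] << 4 | e1[9:6]
 *	byte 3:  e3[1:0] << 6 | e2[9:4]
 *	byte 4:  e3[9:2]
 *
 * i.e. the eight 10-bit values are concatenated LSB-first into 80 bits;
 * nand_davinci_correct_4bit() below undoes exactly this packing.
 */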
302 
303 /* Correct up to 4 bits in data we just read, using state left in the
304  * hardware plus the ecc_code computed when it was first written.
305  */
306 static int nand_davinci_correct_4bit(struct mtd_info *mtd,
307 		u_char *data, u_char *ecc_code, u_char *null)
308 {
309 	int i;
310 	struct davinci_nand_info *info = to_davinci_nand(mtd);
311 	unsigned short ecc10[8];
312 	unsigned short *ecc16;
313 	u32 syndrome[4];
314 	u32 ecc_state;
315 	unsigned num_errors, corrected;
316 	unsigned long timeo;
317 
318 	/* Unpack ten bytes into eight 10 bit values.  We know we're
319 	 * little-endian, and use type punning for less shifting/masking.
320 	 */
321 	if (WARN_ON(0x01 & (uintptr_t)ecc_code))
322 		return -EINVAL;
323 	ecc16 = (unsigned short *)ecc_code;
324 
325 	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
326 	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
327 	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
328 	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
329 	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
330 	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
331 	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
332 	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;
333 
334 	/* Tell ECC controller about the expected ECC codes. */
335 	for (i = 7; i >= 0; i--)
336 		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
337 
338 	/* Allow time for syndrome calculation ... then read it.
339 	 * A syndrome of all zeroes means no detected errors.
340 	 */
341 	davinci_nand_readl(info, NANDFSR_OFFSET);
342 	nand_davinci_readecc_4bit(info, syndrome);
343 	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
344 		return 0;
345 
346 	/*
347 	 * Clear any previous address calculation by doing a dummy read of an
348 	 * error address register.
349 	 */
350 	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
351 
352 	/* Start address calculation, and wait for it to complete.
353 	 * We _could_ start reading more data while this is working,
354 	 * to speed up the overall page read.
355 	 */
356 	davinci_nand_writel(info, NANDFCR_OFFSET,
357 			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
358 
359 	/*
360 	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
361 	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
362 	 * begin trying to poll for the state, you may fall right out of your
363 	 * loop without any of the correction calculations having taken place.
364 	 * The recommendation from the hardware team is to initially delay as
365 	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
366 	 * correction state.
367 	 */
368 	timeo = jiffies + usecs_to_jiffies(100);
369 	do {
370 		ecc_state = (davinci_nand_readl(info,
371 				NANDFSR_OFFSET) >> 8) & 0x0f;
372 		cpu_relax();
373 	} while ((ecc_state < 4) && time_before(jiffies, timeo));
374 
375 	for (;;) {
376 		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
377 
378 		switch ((fsr >> 8) & 0x0f) {
379 		case 0:		/* no error, should not happen */
380 			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
381 			return 0;
382 		case 1:		/* five or more errors detected */
383 			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
384 			return -EBADMSG;
385 		case 2:		/* error addresses computed */
386 		case 3:
387 			num_errors = 1 + ((fsr >> 16) & 0x03);
388 			goto correct;
389 		default:	/* still working on it */
390 			cpu_relax();
391 			continue;
392 		}
393 	}
394 
395 correct:
396 	/* correct each error */
397 	for (i = 0, corrected = 0; i < num_errors; i++) {
398 		int error_address, error_value;
399 
400 		if (i > 1) {
401 			error_address = davinci_nand_readl(info,
402 						NAND_ERR_ADD2_OFFSET);
403 			error_value = davinci_nand_readl(info,
404 						NAND_ERR_ERRVAL2_OFFSET);
405 		} else {
406 			error_address = davinci_nand_readl(info,
407 						NAND_ERR_ADD1_OFFSET);
408 			error_value = davinci_nand_readl(info,
409 						NAND_ERR_ERRVAL1_OFFSET);
410 		}
411 
412 		if (i & 1) {
413 			error_address >>= 16;
414 			error_value >>= 16;
415 		}
416 		error_address &= 0x3ff;
417 		error_address = (512 + 7) - error_address;
418 
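		/*
		 * Computed offsets of 512 or more fall outside the 512-byte
		 * data area (i.e. in the ECC bytes themselves), so they need
		 * no correction here and are simply skipped.
		 */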
419 		if (error_address < 512) {
420 			data[error_address] ^= error_value;
421 			corrected++;
422 		}
423 	}
424 
425 	return corrected;
426 }
427 
428 /*----------------------------------------------------------------------*/
429 
430 /*
431  * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
432  * how these chips are normally wired.  This translates to both 8 and 16
433  * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
434  *
435  * For now we assume that configuration, or any other one which ignores
436  * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
437  * and have that transparently morphed into multiple NAND operations.
438  */
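/*
 * For example, reading a full 2048-byte page through a word-aligned
 * buffer becomes a single ioread32_rep() of 512 words below; unaligned
 * buffers or odd lengths fall back to 16- or 8-bit accesses.  Note that
 * write_buf() also uses IO_ADDR_R: both IO_ADDR_R and IO_ADDR_W point
 * at the same data window during the data phase, so the distinction
 * does not matter here.
 */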
439 static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
440 {
441 	struct nand_chip *chip = mtd_to_nand(mtd);
442 
443 	if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
444 		ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
445 	else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
446 		ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
447 	else
448 		ioread8_rep(chip->IO_ADDR_R, buf, len);
449 }
450 
451 static void nand_davinci_write_buf(struct mtd_info *mtd,
452 		const uint8_t *buf, int len)
453 {
454 	struct nand_chip *chip = mtd_to_nand(mtd);
455 
456 	if ((0x03 & ((uintptr_t)buf)) == 0 && (0x03 & len) == 0)
457 		iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
458 	else if ((0x01 & ((uintptr_t)buf)) == 0 && (0x01 & len) == 0)
459 		iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
460 	else
461 		iowrite8_rep(chip->IO_ADDR_R, buf, len);
462 }
463 
464 /*
465  * Check hardware register for wait status. Returns 1 if device is ready,
466  * 0 if it is still busy.
467  */
468 static int nand_davinci_dev_ready(struct mtd_info *mtd)
469 {
470 	struct davinci_nand_info *info = to_davinci_nand(mtd);
471 
472 	return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
473 }
474 
475 /*----------------------------------------------------------------------*/
476 
477 /* An ECC layout for using 4-bit ECC with small-page flash, storing
478  * ten ECC bytes plus the manufacturer's bad block marker byte, and
479  * not overlapping the default BBT markers.
480  */
481 static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
482 				      struct mtd_oob_region *oobregion)
483 {
484 	if (section > 2)
485 		return -ERANGE;
486 
487 	if (!section) {
488 		oobregion->offset = 0;
489 		oobregion->length = 5;
490 	} else if (section == 1) {
491 		oobregion->offset = 6;
492 		oobregion->length = 2;
493 	} else {
494 		oobregion->offset = 13;
495 		oobregion->length = 3;
496 	}
497 
498 	return 0;
499 }
500 
501 static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
502 				       struct mtd_oob_region *oobregion)
503 {
504 	if (section > 1)
505 		return -ERANGE;
506 
507 	if (!section) {
508 		oobregion->offset = 8;
509 		oobregion->length = 5;
510 	} else {
511 		oobregion->offset = 16;
512 		oobregion->length = mtd->oobsize - 16;
513 	}
514 
515 	return 0;
516 }
517 
518 static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
519 	.ecc = hwecc4_ooblayout_small_ecc,
520 	.free = hwecc4_ooblayout_small_free,
521 };
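/*
 * Resulting OOB map for a 16-byte spare area on small-page flash, for
 * reference:
 *
 *	bytes  0-4	ECC
 *	byte   5	manufacturer bad block marker (preserved)
 *	bytes  6-7	ECC
 *	bytes  8-12	free (room for the default flash BBT pattern)
 *	bytes 13-15	ECC
 */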
522 
523 #if defined(CONFIG_OF)
524 static const struct of_device_id davinci_nand_of_match[] = {
525 	{.compatible = "ti,davinci-nand", },
526 	{.compatible = "ti,keystone-nand", },
527 	{},
528 };
529 MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
530 
531 static struct davinci_nand_pdata
532 	*nand_davinci_get_pdata(struct platform_device *pdev)
533 {
534 	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
535 		struct davinci_nand_pdata *pdata;
536 		const char *mode;
537 		u32 prop;
538 
539 		pdata =  devm_kzalloc(&pdev->dev,
540 				sizeof(struct davinci_nand_pdata),
541 				GFP_KERNEL);
542 		pdev->dev.platform_data = pdata;
543 		if (!pdata)
544 			return ERR_PTR(-ENOMEM);
545 		if (!of_property_read_u32(pdev->dev.of_node,
546 			"ti,davinci-chipselect", &prop))
547 			pdata->core_chipsel = prop;
548 		else
549 			return ERR_PTR(-EINVAL);
550 
551 		if (!of_property_read_u32(pdev->dev.of_node,
552 			"ti,davinci-mask-ale", &prop))
553 			pdata->mask_ale = prop;
554 		if (!of_property_read_u32(pdev->dev.of_node,
555 			"ti,davinci-mask-cle", &prop))
556 			pdata->mask_cle = prop;
557 		if (!of_property_read_u32(pdev->dev.of_node,
558 			"ti,davinci-mask-chipsel", &prop))
559 			pdata->mask_chipsel = prop;
560 		if (!of_property_read_string(pdev->dev.of_node,
561 			"ti,davinci-ecc-mode", &mode)) {
562 			if (!strncmp("none", mode, 4))
563 				pdata->ecc_mode = NAND_ECC_NONE;
564 			if (!strncmp("soft", mode, 4))
565 				pdata->ecc_mode = NAND_ECC_SOFT;
566 			if (!strncmp("hw", mode, 2))
567 				pdata->ecc_mode = NAND_ECC_HW;
568 		}
569 		if (!of_property_read_u32(pdev->dev.of_node,
570 			"ti,davinci-ecc-bits", &prop))
571 			pdata->ecc_bits = prop;
572 
573 		if (!of_property_read_u32(pdev->dev.of_node,
574 			"ti,davinci-nand-buswidth", &prop) && prop == 16)
575 			pdata->options |= NAND_BUSWIDTH_16;
576 
577 		if (of_property_read_bool(pdev->dev.of_node,
578 			"ti,davinci-nand-use-bbt"))
579 			pdata->bbt_options = NAND_BBT_USE_FLASH;
580 
581 		/*
582 		 * Since kernel v4.8, this driver has been fixed to enable
583 		 * use of 4-bit hardware ECC with subpages and verified on
584 		 * TI's keystone EVMs (K2L, K2HK and K2E).
585 		 * However, in the interest of not breaking systems using
586 		 * existing UBI partitions, sub-page writes are not being
587 		 * (re)enabled. If you want to use subpage writes on Keystone
588 		 * platforms (i.e. do not have any existing UBI partitions),
589 		 * then use "ti,davinci-nand" as the compatible in your
590 		 * device-tree file.
591 		 */
592 		if (of_device_is_compatible(pdev->dev.of_node,
593 					    "ti,keystone-nand")) {
594 			pdata->options |= NAND_NO_SUBPAGE_WRITE;
595 		}
596 	}
597 
598 	return dev_get_platdata(&pdev->dev);
599 }
600 #else
601 static struct davinci_nand_pdata
602 	*nand_davinci_get_pdata(struct platform_device *pdev)
603 {
604 	return dev_get_platdata(&pdev->dev);
605 }
606 #endif
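/*
 * Rough example of a device tree node consumed by the parsing above;
 * the values are illustrative only (see the binding documentation for
 * the authoritative description):
 *
 *	nand {
 *		compatible = "ti,davinci-nand";
 *		ti,davinci-chipselect = <0>;
 *		ti,davinci-mask-ale = <0x08>;
 *		ti,davinci-mask-cle = <0x10>;
 *		ti,davinci-ecc-mode = "hw";
 *		ti,davinci-ecc-bits = <4>;
 *		ti,davinci-nand-use-bbt;
 *	};
 */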
607 
608 static int davinci_nand_attach_chip(struct nand_chip *chip)
609 {
610 	struct mtd_info *mtd = nand_to_mtd(chip);
611 	struct davinci_nand_info *info = to_davinci_nand(mtd);
612 	struct davinci_nand_pdata *pdata = nand_davinci_get_pdata(info->pdev);
613 	int ret = 0;
614 
615 	if (IS_ERR(pdata))
616 		return PTR_ERR(pdata);
617 
618 	switch (info->chip.ecc.mode) {
619 	case NAND_ECC_NONE:
620 		pdata->ecc_bits = 0;
621 		break;
622 	case NAND_ECC_SOFT:
623 		pdata->ecc_bits = 0;
624 		/*
625 		 * This driver expects Hamming based ECC when ecc_mode is set
626 		 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
627 		 * avoid adding an extra ->ecc_algo field to
628 		 * davinci_nand_pdata.
629 		 */
630 		info->chip.ecc.algo = NAND_ECC_HAMMING;
631 		break;
632 	case NAND_ECC_HW:
633 		if (pdata->ecc_bits == 4) {
634 			/*
635 			 * No sanity checks:  CPUs must support this,
636 			 * and the chips may not use NAND_BUSWIDTH_16.
637 			 */
638 
639 			/* No sharing 4-bit hardware between chipselects yet */
640 			spin_lock_irq(&davinci_nand_lock);
641 			if (ecc4_busy)
642 				ret = -EBUSY;
643 			else
644 				ecc4_busy = true;
645 			spin_unlock_irq(&davinci_nand_lock);
646 
647 			if (ret == -EBUSY)
648 				return ret;
649 
650 			info->chip.ecc.calculate = nand_davinci_calculate_4bit;
651 			info->chip.ecc.correct = nand_davinci_correct_4bit;
652 			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
653 			info->chip.ecc.bytes = 10;
654 			info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
655 			info->chip.ecc.algo = NAND_ECC_BCH;
656 		} else {
657 			/* 1bit ecc hamming */
658 			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
659 			info->chip.ecc.correct = nand_davinci_correct_1bit;
660 			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
661 			info->chip.ecc.bytes = 3;
662 			info->chip.ecc.algo = NAND_ECC_HAMMING;
663 		}
664 		info->chip.ecc.size = 512;
665 		info->chip.ecc.strength = pdata->ecc_bits;
666 		break;
667 	default:
668 		return -EINVAL;
669 	}
670 
671 	/*
672 	 * Update ECC layout if needed ... for 1-bit HW ECC, the default
673 	 * is OK, but it allocates 6 bytes when only 3 are needed (for
674 	 * each 512 bytes).  For the 4-bit HW ECC, that default is not
675 	 * usable:  10 bytes are needed, not 6.
676 	 */
677 	if (pdata->ecc_bits == 4) {
678 		int chunks = mtd->writesize / 512;
679 
680 		if (!chunks || mtd->oobsize < 16) {
681 			dev_dbg(&info->pdev->dev, "too small\n");
682 			return -EINVAL;
683 		}
684 
685 		/* For small page chips, preserve the manufacturer's
686 		 * bad block marking data ... and make sure a flash BBT
687 		 * marker fits in the free bytes.
688 		 */
689 		if (chunks == 1) {
690 			mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
691 		} else if (chunks == 4 || chunks == 8) {
692 			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
693 			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
694 		} else {
695 			return -EIO;
696 		}
697 	}
698 
699 	return ret;
700 }
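/*
 * For a typical 2048-byte-page chip using 4-bit ECC, the settings above
 * work out to four 512-byte chunks per page with 10 ECC bytes each
 * (40 of the usual 64 OOB bytes), a correction strength of 4 bits per
 * chunk, and the large-page "OOB first" read path.
 */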
701 
702 static const struct nand_controller_ops davinci_nand_controller_ops = {
703 	.attach_chip = davinci_nand_attach_chip,
704 };
705 
706 static int nand_davinci_probe(struct platform_device *pdev)
707 {
708 	struct davinci_nand_pdata	*pdata;
709 	struct davinci_nand_info	*info;
710 	struct resource			*res1;
711 	struct resource			*res2;
712 	void __iomem			*vaddr;
713 	void __iomem			*base;
714 	int				ret;
715 	uint32_t			val;
716 	struct mtd_info			*mtd;
717 
718 	pdata = nand_davinci_get_pdata(pdev);
719 	if (IS_ERR(pdata))
720 		return PTR_ERR(pdata);
721 
722 	/* insist on board-specific configuration */
723 	if (!pdata)
724 		return -ENODEV;
725 
726 	/* which external chipselect will we be managing? */
727 	if (pdata->core_chipsel < 0 || pdata->core_chipsel > 3)
728 		return -ENODEV;
729 
730 	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
731 	if (!info)
732 		return -ENOMEM;
733 
734 	platform_set_drvdata(pdev, info);
735 
736 	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
737 	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
738 	if (!res1 || !res2) {
739 		dev_err(&pdev->dev, "resource missing\n");
740 		return -EINVAL;
741 	}
742 
743 	vaddr = devm_ioremap_resource(&pdev->dev, res1);
744 	if (IS_ERR(vaddr))
745 		return PTR_ERR(vaddr);
746 
747 	/*
748 	 * This register range is used to set up NAND settings. When the TI AEMIF
749 	 * driver is in use, the same memory address range has already been
750 	 * requested by AEMIF, so we cannot request it twice; just ioremap it.
751 	 * The AEMIF and NAND drivers do not use the same registers in this range.
752 	 */
753 	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
754 	if (!base) {
755 		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
756 		return -EADDRNOTAVAIL;
757 	}
758 
759 	info->pdev		= pdev;
760 	info->base		= base;
761 	info->vaddr		= vaddr;
762 
763 	mtd			= nand_to_mtd(&info->chip);
764 	mtd->dev.parent		= &pdev->dev;
765 	nand_set_flash_node(&info->chip, pdev->dev.of_node);
766 
767 	info->chip.IO_ADDR_R	= vaddr;
768 	info->chip.IO_ADDR_W	= vaddr;
769 	info->chip.chip_delay	= 0;
770 	info->chip.select_chip	= nand_davinci_select_chip;
771 
772 	/* options such as NAND_BBT_USE_FLASH */
773 	info->chip.bbt_options	= pdata->bbt_options;
774 	/* options such as 16-bit widths */
775 	info->chip.options	= pdata->options;
776 	info->chip.bbt_td	= pdata->bbt_td;
777 	info->chip.bbt_md	= pdata->bbt_md;
778 	info->timing		= pdata->timing;
779 
780 	info->current_cs	= info->vaddr;
781 	info->core_chipsel	= pdata->core_chipsel;
782 	info->mask_chipsel	= pdata->mask_chipsel;
783 
784 	/* use nandboot-capable ALE/CLE masks by default */
785 	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
786 	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;
787 
788 	/* Set address of hardware control function */
789 	info->chip.cmd_ctrl	= nand_davinci_hwcontrol;
790 	info->chip.dev_ready	= nand_davinci_dev_ready;
791 
792 	/* Speed up buffer I/O */
793 	info->chip.read_buf     = nand_davinci_read_buf;
794 	info->chip.write_buf    = nand_davinci_write_buf;
795 
796 	/* Use board-specific ECC config */
797 	info->chip.ecc.mode	= pdata->ecc_mode;
798 
799 	spin_lock_irq(&davinci_nand_lock);
800 
801 	/* put CSxNAND into NAND mode */
802 	val = davinci_nand_readl(info, NANDFCR_OFFSET);
803 	val |= BIT(info->core_chipsel);
804 	davinci_nand_writel(info, NANDFCR_OFFSET, val);
805 
806 	spin_unlock_irq(&davinci_nand_lock);
807 
808 	/* Scan to find existence of the device(s) */
809 	info->chip.dummy_controller.ops = &davinci_nand_controller_ops;
810 	ret = nand_scan(mtd, pdata->mask_chipsel ? 2 : 1);
811 	if (ret < 0) {
812 		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
813 		return ret;
814 	}
815 
816 	if (pdata->parts)
817 		ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
818 	else
819 		ret = mtd_device_register(mtd, NULL, 0);
820 	if (ret < 0)
821 		goto err_cleanup_nand;
822 
823 	val = davinci_nand_readl(info, NRCSR_OFFSET);
824 	dev_info(&pdev->dev, "controller rev. %d.%d\n",
825 	       (val >> 8) & 0xff, val & 0xff);
826 
827 	return 0;
828 
829 err_cleanup_nand:
830 	nand_cleanup(&info->chip);
831 
832 	return ret;
833 }
834 
835 static int nand_davinci_remove(struct platform_device *pdev)
836 {
837 	struct davinci_nand_info *info = platform_get_drvdata(pdev);
838 
839 	spin_lock_irq(&davinci_nand_lock);
840 	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
841 		ecc4_busy = false;
842 	spin_unlock_irq(&davinci_nand_lock);
843 
844 	nand_release(nand_to_mtd(&info->chip));
845 
846 	return 0;
847 }
848 
849 static struct platform_driver nand_davinci_driver = {
850 	.probe		= nand_davinci_probe,
851 	.remove		= nand_davinci_remove,
852 	.driver		= {
853 		.name	= "davinci_nand",
854 		.of_match_table = of_match_ptr(davinci_nand_of_match),
855 	},
856 };
857 MODULE_ALIAS("platform:davinci_nand");
858 
859 module_platform_driver(nand_davinci_driver);
860 
861 MODULE_LICENSE("GPL");
862 MODULE_AUTHOR("Texas Instruments");
863 MODULE_DESCRIPTION("Davinci NAND flash driver");
864 
865