1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/mtd/nand/raw/pxa3xx_nand.c
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/bitops.h>
16 #include <linux/bug.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/errno.h>
21 #include <asm/arch/cpu.h>
22 #include <linux/mtd/mtd.h>
23 #include <linux/mtd/rawnand.h>
24 #include <linux/types.h>
26 #include "pxa3xx_nand.h"
28 DECLARE_GLOBAL_DATA_PTR;
30 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
31 #define CHIP_DELAY_TIMEOUT 200
32 #define NAND_STOP_DELAY 40
35 * Define a buffer size for the initial command that detects the flash device:
36 * STATUS, READID and PARAM.
37 * ONFI param page is 256 bytes, and there are three redundant copies
38 * to be read. JEDEC param page is 512 bytes, and there are also three
39 * redundant copies to be read.
40 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
42 #define INIT_BUFFER_SIZE 2048
44 /* registers and bit definitions */
45 #define NDCR (0x00) /* Control register */
46 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
47 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
48 #define NDSR (0x14) /* Status Register */
49 #define NDPCR (0x18) /* Page Count Register */
50 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
51 #define NDBDR1 (0x20) /* Bad Block Register 1 */
52 #define NDECCCTRL (0x28) /* ECC control */
53 #define NDDB (0x40) /* Data Buffer */
54 #define NDCB0 (0x48) /* Command Buffer0 */
55 #define NDCB1 (0x4C) /* Command Buffer1 */
56 #define NDCB2 (0x50) /* Command Buffer2 */
58 #define NDCR_SPARE_EN (0x1 << 31)
59 #define NDCR_ECC_EN (0x1 << 30)
60 #define NDCR_DMA_EN (0x1 << 29)
61 #define NDCR_ND_RUN (0x1 << 28)
62 #define NDCR_DWIDTH_C (0x1 << 27)
63 #define NDCR_DWIDTH_M (0x1 << 26)
64 #define NDCR_PAGE_SZ (0x1 << 24)
65 #define NDCR_NCSX (0x1 << 23)
66 #define NDCR_ND_MODE (0x3 << 21)
67 #define NDCR_NAND_MODE (0x0)
68 #define NDCR_CLR_PG_CNT (0x1 << 20)
69 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
70 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
71 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
73 #define NDCR_RA_START (0x1 << 15)
74 #define NDCR_PG_PER_BLK (0x1 << 14)
75 #define NDCR_ND_ARB_EN (0x1 << 12)
76 #define NDCR_INT_MASK (0xFFF)
78 #define NDSR_MASK (0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
/*
 * Extract the BCH corrected-bit count from an NDSR value.  The argument is
 * parenthesized so that compound expressions such as NDSR_ERR_CNT(a | b)
 * expand correctly ('>>' binds tighter than '|').
 */
#define NDSR_ERR_CNT(sr)	(((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
82 #define NDSR_RDY (0x1 << 12)
83 #define NDSR_FLASH_RDY (0x1 << 11)
84 #define NDSR_CS0_PAGED (0x1 << 10)
85 #define NDSR_CS1_PAGED (0x1 << 9)
86 #define NDSR_CS0_CMDD (0x1 << 8)
87 #define NDSR_CS1_CMDD (0x1 << 7)
88 #define NDSR_CS0_BBD (0x1 << 6)
89 #define NDSR_CS1_BBD (0x1 << 5)
90 #define NDSR_UNCORERR (0x1 << 4)
91 #define NDSR_CORERR (0x1 << 3)
92 #define NDSR_WRDREQ (0x1 << 2)
93 #define NDSR_RDDREQ (0x1 << 1)
94 #define NDSR_WRCMDREQ (0x1)
96 #define NDCB0_LEN_OVRD (0x1 << 28)
97 #define NDCB0_ST_ROW_EN (0x1 << 26)
98 #define NDCB0_AUTO_RS (0x1 << 25)
99 #define NDCB0_CSEL (0x1 << 24)
100 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
101 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
102 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
103 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
104 #define NDCB0_NC (0x1 << 20)
105 #define NDCB0_DBC (0x1 << 19)
106 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
107 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
108 #define NDCB0_CMD2_MASK (0xff << 8)
109 #define NDCB0_CMD1_MASK (0xff)
110 #define NDCB0_ADDR_CYC_SHIFT (16)
112 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
113 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
114 #define EXT_CMD_TYPE_READ 4 /* Read */
115 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
116 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
117 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
118 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
121 * This should be large enough to read 'ONFI' and 'JEDEC'.
122 * Let's use 7 bytes, which is the maximum ID count supported
123 * by the controller (see NDCR_RD_ID_CNT_MASK).
125 #define READ_ID_BYTES 7
127 /* macros for registers read/write */
128 #define nand_writel(info, off, val) \
129 writel((val), (info)->mmio_base + (off))
131 #define nand_readl(info, off) \
132 readl((info)->mmio_base + (off))
134 /* error code and state */
/*
 * Controller generations handled by this driver: NFCv1 as found in PXA
 * SoCs and NFCv2 as found in Armada 370/XP (see pxa3xx_nand_get_variant(),
 * which currently always reports ARMADA370 in this U-Boot port).
 */
157 enum pxa3xx_nand_variant {
158 PXA3XX_NAND_VARIANT_PXA,
159 PXA3XX_NAND_VARIANT_ARMADA370,
/*
 * Per-chip-select state: the MTD nand_chip instance plus geometry values
 * derived from the identified flash part.
 */
162 struct pxa3xx_nand_host {
163 struct nand_chip chip;
166 /* page size of attached chip */
170 /* calculated from pxa3xx_nand_flash data */
/* number of address cycles issued for the column / row phases of a command */
171 unsigned int col_addr_cycles;
172 unsigned int row_addr_cycles;
/*
 * Driver-wide controller state: register base, data/OOB bounce buffers,
 * page chunking geometry (a page is transferred as nfullchunks full chunks
 * plus an optional smaller last chunk) and per-command progress tracking.
 */
175 struct pxa3xx_nand_info {
176 struct nand_hw_control controller;
177 struct pxa3xx_nand_platform_data *pdata;
180 void __iomem *mmio_base;
181 unsigned long mmio_phys;
182 int cmd_complete, dev_ready;
/* window into data_buff served to the MTD read_byte/read_buf callbacks */
184 unsigned int buf_start;
185 unsigned int buf_count;
186 unsigned int buf_size;
187 unsigned int data_buff_pos;
188 unsigned int oob_buff_pos;
190 unsigned char *data_buff;
191 unsigned char *oob_buff;
193 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
197 * This driver supports NFCv1 (as found in PXA SoC)
198 * and NFCv2 (as found in Armada 370/XP SoC).
200 enum pxa3xx_nand_variant variant;
203 int use_ecc; /* use HW ECC ? */
204 int force_raw; /* prevent use_ecc to be set */
205 int ecc_bch; /* using BCH ECC? */
206 int use_spare; /* use spare ? */
209 /* Amount of real data per full chunk */
210 unsigned int chunk_size;
212 /* Amount of spare data per full chunk */
213 unsigned int spare_size;
215 /* Number of full chunks (i.e chunk_size + spare_size) */
216 unsigned int nfullchunks;
219 * Total number of chunks. If equal to nfullchunks, then there
220 * are only full chunks. Otherwise, there is one last chunk of
221 * size (last_chunk_size + last_spare_size)
223 unsigned int ntotalchunks;
225 /* Amount of real data in the last chunk */
226 unsigned int last_chunk_size;
228 /* Amount of spare data in the last chunk */
229 unsigned int last_spare_size;
231 unsigned int ecc_size;
232 unsigned int ecc_err_cnt;
233 unsigned int max_bitflips;
237 * Variables only valid during command
238 * execution. step_chunk_size and step_spare_size is the
239 * amount of real data and spare data in the current
240 * chunk. cur_chunk is the current chunk being
243 unsigned int step_chunk_size;
244 unsigned int step_spare_size;
245 unsigned int cur_chunk;
247 /* cached register value */
252 /* generated NDCBx register values */
259 static struct pxa3xx_nand_timing timing[] = {
261 * tCH Enable signal hold time
262 * tCS Enable signal setup time
263 * tWH ND_nWE high duration
264 * tWP ND_nWE pulse time
265 * tRH ND_nRE high duration
266 * tRP ND_nRE pulse width
267 * tR ND_nWE high to ND_nRE low for read
268 * tWHR ND_nWE high to ND_nRE low for status read
269 * tAR ND_ALE low to ND_nRE low delay
271 /*ch cs wh wp rh rp r whr ar */
272 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
273 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
274 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
275 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
276 { 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
279 static struct pxa3xx_nand_flash builtin_flash_types[] = {
282 * flash_width Width of Flash memory (DWIDTH_M)
283 * dfc_width Width of flash controller(DWIDTH_C)
285 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
287 { 0x46ec, 16, 16, &timing[1] },
288 { 0xdaec, 8, 8, &timing[1] },
289 { 0xd7ec, 8, 8, &timing[1] },
290 { 0xa12c, 8, 8, &timing[2] },
291 { 0xb12c, 16, 16, &timing[2] },
292 { 0xdc2c, 8, 8, &timing[2] },
293 { 0xcc2c, 16, 16, &timing[2] },
294 { 0xba20, 16, 16, &timing[3] },
295 { 0xda98, 8, 8, &timing[4] },
298 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
299 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
300 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
302 static struct nand_bbt_descr bbt_main_descr = {
303 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
304 | NAND_BBT_2BIT | NAND_BBT_VERSION,
308 .maxblocks = 8, /* Last 8 blocks in each chip */
309 .pattern = bbt_pattern
312 static struct nand_bbt_descr bbt_mirror_descr = {
313 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
314 | NAND_BBT_2BIT | NAND_BBT_VERSION,
318 .maxblocks = 8, /* Last 8 blocks in each chip */
319 .pattern = bbt_mirror_pattern
323 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
326 32, 33, 34, 35, 36, 37, 38, 39,
327 40, 41, 42, 43, 44, 45, 46, 47,
328 48, 49, 50, 51, 52, 53, 54, 55,
329 56, 57, 58, 59, 60, 61, 62, 63},
330 .oobfree = { {2, 30} }
333 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
336 32, 33, 34, 35, 36, 37, 38, 39,
337 40, 41, 42, 43, 44, 45, 46, 47,
338 48, 49, 50, 51, 52, 53, 54, 55,
339 56, 57, 58, 59, 60, 61, 62, 63,
340 64, 65, 66, 67, 68, 69, 70, 71,
341 72, 73, 74, 75, 76, 77, 78, 79,
342 80, 81, 82, 83, 84, 85, 86, 87,
343 88, 89, 90, 91, 92, 93, 94, 95},
344 .oobfree = { {1, 4}, {6, 26} }
347 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
350 32, 33, 34, 35, 36, 37, 38, 39,
351 40, 41, 42, 43, 44, 45, 46, 47,
352 48, 49, 50, 51, 52, 53, 54, 55,
353 56, 57, 58, 59, 60, 61, 62, 63,
354 96, 97, 98, 99, 100, 101, 102, 103,
355 104, 105, 106, 107, 108, 109, 110, 111,
356 112, 113, 114, 115, 116, 117, 118, 119,
357 120, 121, 122, 123, 124, 125, 126, 127},
358 /* Bootrom looks in bytes 0 & 5 for bad blocks */
359 .oobfree = { {6, 26}, { 64, 32} }
362 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
365 32, 33, 34, 35, 36, 37, 38, 39,
366 40, 41, 42, 43, 44, 45, 46, 47,
367 48, 49, 50, 51, 52, 53, 54, 55,
368 56, 57, 58, 59, 60, 61, 62, 63,
370 96, 97, 98, 99, 100, 101, 102, 103,
371 104, 105, 106, 107, 108, 109, 110, 111,
372 112, 113, 114, 115, 116, 117, 118, 119,
373 120, 121, 122, 123, 124, 125, 126, 127,
375 160, 161, 162, 163, 164, 165, 166, 167,
376 168, 169, 170, 171, 172, 173, 174, 175,
377 176, 177, 178, 179, 180, 181, 182, 183,
378 184, 185, 186, 187, 188, 189, 190, 191,
380 224, 225, 226, 227, 228, 229, 230, 231,
381 232, 233, 234, 235, 236, 237, 238, 239,
382 240, 241, 242, 243, 244, 245, 246, 247,
383 248, 249, 250, 251, 252, 253, 254, 255},
385 /* Bootrom looks in bytes 0 & 5 for bad blocks */
386 .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
389 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
392 32, 33, 34, 35, 36, 37, 38, 39,
393 40, 41, 42, 43, 44, 45, 46, 47,
394 48, 49, 50, 51, 52, 53, 54, 55,
395 56, 57, 58, 59, 60, 61, 62, 63},
399 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
402 /* HW ECC handles all ECC data and all spare area is free for OOB */
403 .oobfree = {{0, 160} }
406 #define NDTR0_tCH(c) (min((c), 7) << 19)
407 #define NDTR0_tCS(c) (min((c), 7) << 16)
408 #define NDTR0_tWH(c) (min((c), 7) << 11)
409 #define NDTR0_tWP(c) (min((c), 7) << 8)
410 #define NDTR0_tRH(c) (min((c), 7) << 3)
411 #define NDTR0_tRP(c) (min((c), 7) << 0)
413 #define NDTR1_tR(c) (min((c), 65535) << 16)
414 #define NDTR1_tWHR(c) (min((c), 15) << 4)
415 #define NDTR1_tAR(c) (min((c), 15) << 0)
/*
 * Convert nanoseconds to NAND flash controller clock cycles.  Both macro
 * arguments and the full expansion are parenthesized so that compound
 * expressions (e.g. ns2cycle(t, a + b)) expand correctly.
 */
#define ns2cycle(ns, clk)	((int)((ns) * ((clk) / 1000000) / 1000))
/*
 * Report the controller generation.  This U-Boot port is only wired up for
 * the NFCv2 controller of Armada 370/XP/38x, so the answer is hard-coded
 * rather than probed.
 */
420 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
422 /* We only support the Armada 370/XP/38x for now */
423 return PXA3XX_NAND_VARIANT_ARMADA370;
/*
 * Program NDTR0/NDTR1 from a nanosecond timing table entry: each field is
 * converted to controller clock cycles with ns2cycle() and clamped by the
 * NDTR0_*/NDTR1_* field macros.  The values are also cached in info so they
 * can be re-written when the active chip select changes (see nand_cmdfunc()).
 */
426 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
427 const struct pxa3xx_nand_timing *t)
429 struct pxa3xx_nand_info *info = host->info_data;
430 unsigned long nand_clk = mvebu_get_nand_clock();
431 uint32_t ndtr0, ndtr1;
433 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
434 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
435 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
436 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
437 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
438 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
440 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
441 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
442 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
/* cache for later CS switches, then program the CS0 timing registers */
444 info->ndtr0cs0 = ndtr0;
445 info->ndtr1cs0 = ndtr1;
446 nand_writel(info, NDTR0CS0, ndtr0);
447 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Program NDTR0/NDTR1 from ONFI SDR timings.  The nand_sdr_timings fields
 * are in picoseconds, hence the DIV_ROUND_UP(x, 1000) conversions to ns.
 * tWP/tRP have no direct ONFI equivalent and are derived from the cycle
 * times (tWC - tWH, tRC - tREH).
 */
450 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
451 const struct nand_sdr_timings *t)
453 struct pxa3xx_nand_info *info = host->info_data;
454 struct nand_chip *chip = &host->chip;
455 unsigned long nand_clk = mvebu_get_nand_clock();
456 uint32_t ndtr0, ndtr1;
458 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
459 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
460 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
461 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
462 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
463 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
/* tR is taken from the chip's command delay (ms-scale ready timeout), in ns */
464 u32 tR = chip->chip_delay * 1000;
465 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
466 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
468 /* fallback to a default value if tR = 0 */
472 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
473 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
474 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
475 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
476 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
477 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
479 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
480 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
481 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
/* cache for later CS switches, then program the CS0 timing registers */
483 info->ndtr0cs0 = ndtr0;
484 info->ndtr1cs0 = ndtr1;
485 nand_writel(info, NDTR0CS0, ndtr0);
486 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Choose and apply flash timings.  If the chip does not advertise an ONFI
 * async timing mode, issue READID and look the 16-bit manufacturer/device
 * id up in the builtin_flash_types[] table; otherwise pick the highest
 * advertised ONFI mode and convert it to SDR timings.
 */
489 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
491 const struct nand_sdr_timings *timings;
492 struct nand_chip *chip = &host->chip;
493 struct pxa3xx_nand_info *info = host->info_data;
494 const struct pxa3xx_nand_flash *f = NULL;
495 struct mtd_info *mtd = nand_to_mtd(&host->chip);
496 int mode, id, ntypes, i;
498 mode = onfi_get_async_timing_mode(chip);
499 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
500 ntypes = ARRAY_SIZE(builtin_flash_types);
/* non-ONFI chip: identify it by the first two READID bytes */
502 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
504 id = chip->read_byte(mtd);
505 id |= chip->read_byte(mtd) << 0x8;
507 for (i = 0; i < ntypes; i++) {
508 f = &builtin_flash_types[i];
510 if (f->chip_id == id)
515 dev_err(&info->pdev->dev, "Error: timings not found\n");
519 pxa3xx_nand_set_timing(host, f->timing);
/* table entry also fixes the chip/controller bus width */
521 if (f->flash_width == 16) {
522 info->reg_ndcr |= NDCR_DWIDTH_M;
523 chip->options |= NAND_BUSWIDTH_16;
526 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
/* ONFI chip: mode is a bitmask of supported modes; use the highest one */
528 mode = fls(mode) - 1;
532 timings = onfi_async_timing_mode_to_sdr_timings(mode);
534 return PTR_ERR(timings);
536 pxa3xx_nand_set_sdr_timing(host, timings);
543 * NOTE: it is a must to set ND_RUN first, then write
544 * command buffer, otherwise, it does not work.
545 * We enable all the interrupt at the same time, and
546 * let pxa3xx_nand_irq to handle all logic.
/*
 * Arm the controller for one command sequence: build NDCR from the cached
 * reg_ndcr plus the current use_ecc/ecc_bch/use_spare flags, clear stale
 * status bits and start the state machine (ND_RUN).
 */
548 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
552 ndcr = info->reg_ndcr;
/* NDECCCTRL=1 selects the BCH engine; 0 selects Hamming (NFCv2 only) */
557 nand_writel(info, NDECCCTRL, 0x1);
559 ndcr &= ~NDCR_ECC_EN;
561 nand_writel(info, NDECCCTRL, 0x0);
/* this driver transfers data by PIO, never by DMA */
564 ndcr &= ~NDCR_DMA_EN;
567 ndcr |= NDCR_SPARE_EN;
569 ndcr &= ~NDCR_SPARE_EN;
573 /* clear status bits and run */
574 nand_writel(info, NDSR, NDSR_MASK);
575 nand_writel(info, NDCR, 0);
576 nand_writel(info, NDCR, ndcr);
/*
 * Mask (disable) the interrupts named in int_mask.  The NDCR interrupt
 * bits are mask bits: a set bit suppresses that interrupt source, which is
 * why the mask is OR-ed into the register rather than cleared.
 */
579 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
583 ndcr = nand_readl(info, NDCR);
584 nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read 'len' 32-bit words from the controller data FIFO (NDDB) into 'data'.
 * With BCH enabled (and not in raw mode) the datasheet requires polling
 * NDSR.RDDREQ between each 32-byte burst; otherwise a single readsl()
 * drains everything.
 */
587 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
589 if (info->ecc_bch && !info->force_raw) {
593 * According to the datasheet, when reading from NDDB
594 * with BCH enabled, after each 32 bytes reads, we
595 * have to make sure that the NDSR.RDDREQ bit is set.
597 * Drain the FIFO 8 32 bits reads at a time, and skip
598 * the polling on the last read.
601 readsl(info->mmio_base + NDDB, data, 8);
/* bounded poll: give up after TIMEOUT_DRAIN_FIFO ms rather than hanging */
604 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
605 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
606 dev_err(&info->pdev->dev,
607 "Timeout on RDDREQ while draining the FIFO\n");
/* non-BCH / raw path: drain the whole request in one go */
617 readsl(info->mmio_base + NDDB, data, len);
/*
 * Move one chunk of data between the controller FIFO and the driver's
 * data/OOB bounce buffers by PIO, in the direction given by info->state,
 * then advance the buffer positions for the next chunk.
 */
620 static void handle_data_pio(struct pxa3xx_nand_info *info)
622 int data_len = info->step_chunk_size;
625 * In raw mode, include the spare area and the ECC bytes that are not
626 * consumed by the controller in the data section. Do not reorganize
627 * here, do it in the ->read_page_raw() handler instead.
630 data_len += info->step_spare_size + info->ecc_size;
632 switch (info->state) {
633 case STATE_PIO_WRITING:
634 if (info->step_chunk_size)
635 writesl(info->mmio_base + NDDB,
636 info->data_buff + info->data_buff_pos,
/* FIFO is word-wide: transfer sizes are rounded up to 32-bit words */
637 DIV_ROUND_UP(data_len, 4));
639 if (info->step_spare_size)
640 writesl(info->mmio_base + NDDB,
641 info->oob_buff + info->oob_buff_pos,
642 DIV_ROUND_UP(info->step_spare_size, 4));
644 case STATE_PIO_READING:
647 info->data_buff + info->data_buff_pos,
648 DIV_ROUND_UP(data_len, 4));
653 if (info->step_spare_size)
655 info->oob_buff + info->oob_buff_pos,
656 DIV_ROUND_UP(info->step_spare_size, 4));
659 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
664 /* Update buffer pointers for multi-page read/write */
665 info->data_buff_pos += data_len;
666 info->oob_buff_pos += info->step_spare_size;
/*
 * "Threaded" half of the interrupt handler, called synchronously in U-Boot:
 * perform the PIO transfer for the current chunk, then mark the command
 * done and acknowledge the data-request status bits.
 */
669 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
671 handle_data_pio(info);
673 info->state = STATE_CMD_DONE;
674 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Polled "interrupt" handler: decode NDSR, record ECC results, run the PIO
 * transfer on RDDREQ/WRDREQ, track command-done/ready transitions, and on
 * WRCMDREQ feed the prepared NDCB0..NDCB3 command words to the controller.
 */
677 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
679 unsigned int status, is_completed = 0, is_ready = 0;
680 unsigned int ready, cmd_done;
681 irqreturn_t ret = IRQ_HANDLED;
/* pick the ready/command-done status bits for the active chip select */
684 ready = NDSR_FLASH_RDY;
685 cmd_done = NDSR_CS0_CMDD;
688 cmd_done = NDSR_CS1_CMDD;
691 /* TODO - find out why we need the delay during write operation. */
694 status = nand_readl(info, NDSR);
696 if (status & NDSR_UNCORERR)
697 info->retcode = ERR_UNCORERR;
698 if (status & NDSR_CORERR) {
699 info->retcode = ERR_CORERR;
/* only NFCv2 (with BCH) reports a per-chunk corrected-bit count in NDSR */
700 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
702 info->ecc_err_cnt = NDSR_ERR_CNT(status);
704 info->ecc_err_cnt = 1;
707 * Each chunk composing a page is corrected independently,
708 * and we need to store maximum number of corrected bitflips
709 * to return it to the MTD layer in ecc.read_page().
711 info->max_bitflips = max_t(unsigned int,
715 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
716 info->state = (status & NDSR_RDDREQ) ?
717 STATE_PIO_READING : STATE_PIO_WRITING;
718 /* Call the IRQ thread in U-Boot directly */
719 pxa3xx_nand_irq_thread(info);
722 if (status & cmd_done) {
723 info->state = STATE_CMD_DONE;
726 if (status & ready) {
727 info->state = STATE_READY;
732 * Clear all status bit before issuing the next command, which
733 * can and will alter the status bits and will deserve a new
734 * interrupt on its own. This lets the controller exit the IRQ
736 nand_writel(info, NDSR, status);
738 if (status & NDSR_WRCMDREQ) {
739 status &= ~NDSR_WRCMDREQ;
740 info->state = STATE_CMD_HANDLE;
743 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
744 * must be loaded by writing directly either 12 or 16
745 * bytes directly to NDCB0, four bytes at a time.
747 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
748 * but each NDCBx register can be read.
750 nand_writel(info, NDCB0, info->ndcb0);
751 nand_writel(info, NDCB0, info->ndcb1);
752 nand_writel(info, NDCB0, info->ndcb2);
754 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
755 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
756 nand_writel(info, NDCB0, info->ndcb3);
/* signal the cmdfunc poll loops that the command sequence finished */
760 info->cmd_complete = 1;
/*
 * Scan 'len' bytes of 'buf'; used by the program path to detect an
 * all-0xFF (erased/blank) buffer so the write can be skipped.
 */
767 static inline int is_buf_blank(uint8_t *buf, size_t len)
769 for (; len > 0; len--)
/*
 * Pack the column/page address into NDCB1/NDCB2.  Small-page devices use a
 * 1-byte column plus the page address shifted into NDCB1; large-page
 * devices use a 2-byte column, the low 16 page bits in NDCB1 and any page
 * bits above 16 in NDCB2.
 */
775 static void set_command_address(struct pxa3xx_nand_info *info,
776 unsigned int page_size, uint16_t column, int page_addr)
778 /* small page addr setting */
779 if (page_size < info->chunk_size) {
780 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
785 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
788 if (page_addr & 0xFF0000)
789 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset all per-command bookkeeping (buffer positions, chunk counters,
 * error state) before a new command is issued, and pre-fill the data
 * buffer with 0xFF for commands that will read or stage page data.
 */
795 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
797 struct pxa3xx_nand_host *host = info->host[info->cs];
798 struct mtd_info *mtd = nand_to_mtd(&host->chip);
800 /* reset data and oob column point to handle data */
803 info->data_buff_pos = 0;
804 info->oob_buff_pos = 0;
805 info->step_chunk_size = 0;
806 info->step_spare_size = 0;
810 info->retcode = ERR_NONE;
811 info->ecc_err_cnt = 0;
/* commands that transfer page data use HW ECC unless raw mode forced it off */
817 case NAND_CMD_READOOB:
818 case NAND_CMD_PAGEPROG:
819 if (!info->force_raw)
832 * If we are about to issue a read command, or about to set
833 * the write address, then clean the data buffer.
835 if (command == NAND_CMD_READ0 ||
836 command == NAND_CMD_READOOB ||
837 command == NAND_CMD_SEQIN) {
838 info->buf_count = mtd->writesize + mtd->oobsize;
839 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate an MTD command into the NDCB0..NDCB3 command words for the
 * controller and set up the per-chunk transfer sizes.  Returns whether the
 * command must actually be executed (exec_cmd) -- e.g. a PAGEPROG of an
 * all-0xFF buffer is skipped.  For chunked (large) pages, ext_cmd_type
 * selects naked/last/dispatch sub-commands across successive calls.
 */
843 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
844 int ext_cmd_type, uint16_t column, int page_addr)
846 int addr_cycle, exec_cmd;
847 struct pxa3xx_nand_host *host;
848 struct mtd_info *mtd;
850 host = info->host[info->cs];
851 mtd = nand_to_mtd(&host->chip);
856 info->ndcb0 = NDCB0_CSEL;
860 if (command == NAND_CMD_SEQIN)
863 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
864 + host->col_addr_cycles);
867 case NAND_CMD_READOOB:
869 info->buf_start = column;
870 info->ndcb0 |= NDCB0_CMD_TYPE(0)
/* OOB reads reuse the page read; data window just starts after writesize */
874 if (command == NAND_CMD_READOOB)
875 info->buf_start += mtd->writesize;
/* current chunk geometry: full chunk or the (smaller) trailing chunk */
877 if (info->cur_chunk < info->nfullchunks) {
878 info->step_chunk_size = info->chunk_size;
879 info->step_spare_size = info->spare_size;
881 info->step_chunk_size = info->last_chunk_size;
882 info->step_spare_size = info->last_spare_size;
886 * Multiple page read needs an 'extended command type' field,
887 * which is either naked-read or last-read according to the
890 if (info->force_raw) {
891 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
893 NDCB0_EXT_CMD_TYPE(ext_cmd_type);
/* raw reads also drain the ECC bytes, hence the extra ecc_size */
894 info->ndcb3 = info->step_chunk_size +
895 info->step_spare_size + info->ecc_size;
896 } else if (mtd->writesize == info->chunk_size) {
897 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
898 } else if (mtd->writesize > info->chunk_size) {
899 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
901 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
902 info->ndcb3 = info->step_chunk_size +
903 info->step_spare_size;
906 set_command_address(info, mtd->writesize, column, page_addr);
911 info->buf_start = column;
912 set_command_address(info, mtd->writesize, 0, page_addr);
915 * Multiple page programming needs to execute the initial
916 * SEQIN command that sets the page address.
918 if (mtd->writesize > info->chunk_size) {
919 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
920 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
927 case NAND_CMD_PAGEPROG:
/* nothing to program if the whole page+OOB is still erased (all 0xFF) */
928 if (is_buf_blank(info->data_buff,
929 (mtd->writesize + mtd->oobsize))) {
934 if (info->cur_chunk < info->nfullchunks) {
935 info->step_chunk_size = info->chunk_size;
936 info->step_spare_size = info->spare_size;
938 info->step_chunk_size = info->last_chunk_size;
939 info->step_spare_size = info->last_spare_size;
942 /* Second command setting for large pages */
943 if (mtd->writesize > info->chunk_size) {
945 * Multiple page write uses the 'extended command'
946 * field. This can be used to issue a command dispatch
947 * or a naked-write depending on the current stage.
949 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
951 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
952 info->ndcb3 = info->step_chunk_size +
953 info->step_spare_size;
956 * This is the command dispatch that completes a chunked
957 * page program operation.
959 if (info->cur_chunk == info->ntotalchunks) {
960 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
961 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
/* small pages: single monolithic SEQIN+PAGEPROG double-byte command */
968 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
972 | (NAND_CMD_PAGEPROG << 8)
/* PARAM: read 3 redundant ONFI/JEDEC parameter-page copies into the buffer */
979 info->buf_count = INIT_BUFFER_SIZE;
980 info->ndcb0 |= NDCB0_CMD_TYPE(0)
984 info->ndcb1 = (column & 0xFF);
985 info->ndcb3 = INIT_BUFFER_SIZE;
986 info->step_chunk_size = INIT_BUFFER_SIZE;
989 case NAND_CMD_READID:
990 info->buf_count = READ_ID_BYTES;
991 info->ndcb0 |= NDCB0_CMD_TYPE(3)
994 info->ndcb1 = (column & 0xFF);
/* controller always transfers ID/status in 8-byte FIFO granules */
996 info->step_chunk_size = 8;
998 case NAND_CMD_STATUS:
1000 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1004 info->step_chunk_size = 8;
1007 case NAND_CMD_ERASE1:
/* ERASE1+ERASE2 are issued as one double-byte-command NDCB0 entry */
1008 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1012 | (NAND_CMD_ERASE2 << 8)
1014 info->ndcb1 = page_addr;
1018 case NAND_CMD_RESET:
1019 info->ndcb0 |= NDCB0_CMD_TYPE(5)
/* ERASE2 was already folded into ERASE1 above: nothing to execute */
1024 case NAND_CMD_ERASE2:
1030 dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * MTD ->cmdfunc for non-chunked (monolithic) operation: prepare the NDCB
 * words, start the controller, then poll NDSR via pxa3xx_nand_irq() until
 * the command completes or CHIP_DELAY_TIMEOUT expires.
 */
1038 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1039 int column, int page_addr)
1041 struct nand_chip *chip = mtd_to_nand(mtd);
1042 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1043 struct pxa3xx_nand_info *info = host->info_data;
1047 * if this is a x16 device ,then convert the input
1048 * "byte" address into a "word" address appropriate
1049 * for indexing a word-oriented device
1051 if (info->reg_ndcr & NDCR_DWIDTH_M)
1055 * There may be different NAND chip hooked to
1056 * different chip select, so check whether
1057 * chip select has been changed, if yes, reset the timing
1059 if (info->cs != host->cs) {
1060 info->cs = host->cs;
1061 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1062 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1065 prepare_start_command(info, command);
1067 info->state = STATE_PREPARED;
/* ext_cmd_type is 0 here: no chunked/naked sub-commands on this path */
1068 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1073 info->cmd_complete = 0;
1074 info->dev_ready = 0;
1075 info->need_wait = 1;
1076 pxa3xx_nand_start(info);
/* busy-poll the status register; the IRQ handler does all the work */
1082 status = nand_readl(info, NDSR);
1084 pxa3xx_nand_irq(info);
1086 if (info->cmd_complete)
1089 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1090 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1095 info->state = STATE_IDLE;
/*
 * MTD ->cmdfunc for chunked (extended-command) operation on large pages:
 * loops over the page's chunks, re-issuing prepare_set_command() with the
 * appropriate extended command type (naked / last / dispatch) for each
 * step until the whole read or program sequence is complete.
 */
1098 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1099 const unsigned command,
1100 int column, int page_addr)
1102 struct nand_chip *chip = mtd_to_nand(mtd);
1103 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1104 struct pxa3xx_nand_info *info = host->info_data;
1105 int exec_cmd, ext_cmd_type;
1108 * if this is a x16 device then convert the input
1109 * "byte" address into a "word" address appropriate
1110 * for indexing a word-oriented device
1112 if (info->reg_ndcr & NDCR_DWIDTH_M)
1116 * There may be different NAND chip hooked to
1117 * different chip select, so check whether
1118 * chip select has been changed, if yes, reset the timing
1120 if (info->cs != host->cs) {
1121 info->cs = host->cs;
1122 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1123 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1126 /* Select the extended command for the first command */
1128 case NAND_CMD_READ0:
1129 case NAND_CMD_READOOB:
1130 ext_cmd_type = EXT_CMD_TYPE_MONO;
1132 case NAND_CMD_SEQIN:
1133 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1135 case NAND_CMD_PAGEPROG:
1136 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1143 prepare_start_command(info, command);
1146 * Prepare the "is ready" completion before starting a command
1147 * transaction sequence. If the command is not executed the
1148 * completion will be completed, see below.
1150 * We can do that inside the loop because the command variable
1151 * is invariant and thus so is the exec_cmd.
1153 info->need_wait = 1;
1154 info->dev_ready = 0;
1159 info->state = STATE_PREPARED;
1160 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
/* command not executable (e.g. blank-page program): report ready and stop */
1163 info->need_wait = 0;
1164 info->dev_ready = 1;
1168 info->cmd_complete = 0;
1169 pxa3xx_nand_start(info);
/* busy-poll NDSR until the controller signals this step is done */
1175 status = nand_readl(info, NDSR);
1177 pxa3xx_nand_irq(info);
1179 if (info->cmd_complete)
1182 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1183 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1188 /* Only a few commands need several steps */
1189 if (command != NAND_CMD_PAGEPROG &&
1190 command != NAND_CMD_READ0 &&
1191 command != NAND_CMD_READOOB)
1196 /* Check if the sequence is complete */
1197 if (info->cur_chunk == info->ntotalchunks &&
1198 command != NAND_CMD_PAGEPROG)
1202 * After a splitted program command sequence has issued
1203 * the command dispatch, the command sequence is complete.
1205 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1206 command == NAND_CMD_PAGEPROG &&
1207 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1210 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1211 /* Last read: issue a 'last naked read' */
1212 if (info->cur_chunk == info->ntotalchunks - 1)
1213 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1215 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1218 * If a splitted program command has no more data to transfer,
1219 * the command dispatch must be issued to complete.
1221 } else if (command == NAND_CMD_PAGEPROG &&
1222 info->cur_chunk == info->ntotalchunks) {
1223 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1227 info->state = STATE_IDLE;
/*
 * MTD ecc.write_page: stage page data and OOB into the driver's bounce
 * buffer via chip->write_buf(); the controller inserts the ECC itself when
 * PAGEPROG is issued (hence no ECC computation here).
 */
1230 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1231 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1234 chip->write_buf(mtd, buf, mtd->writesize);
1235 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * MTD ecc.read_page: copy the already-transferred page/OOB out of the
 * bounce buffer, then interpret the controller's ECC result.  Uncorrectable
 * errors on BCH are re-checked in raw mode because erased pages trigger
 * false UNCORERR; within-strength bitflips on an erased page are treated
 * as corrected.  Returns max_bitflips as the MTD contract requires.
 */
1240 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1241 struct nand_chip *chip, uint8_t *buf, int oob_required,
1244 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1245 struct pxa3xx_nand_info *info = host->info_data;
1248 chip->read_buf(mtd, buf, mtd->writesize);
1249 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1251 if (info->retcode == ERR_CORERR && info->use_ecc) {
1252 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1254 } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1256 * Empty pages will trigger uncorrectable errors. Re-read the
1257 * entire page in raw mode and check for bits not being "1".
1258 * If there are more than the supported strength, then it means
1259 * this is an actual uncorrectable error.
1261 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1262 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1263 chip->oob_poi, mtd->oobsize,
1264 NULL, 0, chip->ecc.strength);
1266 mtd->ecc_stats.failed++;
1268 mtd->ecc_stats.corrected += bf;
1269 info->max_bitflips = max_t(unsigned int,
1270 info->max_bitflips, bf);
/* page was effectively erased: downgrade the error accordingly */
1271 info->retcode = ERR_CORERR;
1273 info->retcode = ERR_NONE;
1276 } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1277 /* Raw read is not supported with Hamming ECC engine */
1278 if (is_buf_blank(buf, mtd->writesize))
1279 info->retcode = ERR_NONE;
1281 mtd->ecc_stats.failed++;
1284 return info->max_bitflips;
/*
 * MTD ecc.read_page_raw: re-read the page with ECC disabled (force_raw)
 * and de-interleave the controller's on-flash layout
 * (data|spare|ecc per chunk) into the flat buf/oob_poi layout MTD expects.
 * The ECC bytes are appended in oob_poi after all spare bytes, at
 * ecc_off_buf.  NOTE(review): 'info->ecc_size - 2' trims trailing bytes of
 * each ECC syndrome -- presumably controller padding; confirm against the
 * NFCv2 datasheet.
 */
1287 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1288 struct nand_chip *chip, uint8_t *buf,
1289 int oob_required, int page)
1291 struct pxa3xx_nand_host *host = chip->priv;
1292 struct pxa3xx_nand_info *info = host->info_data;
1293 int chunk, ecc_off_buf;
1299 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1300 * pxa3xx_nand_start(), which will actually disable the ECC engine.
1302 info->force_raw = true;
1303 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1305 ecc_off_buf = (info->nfullchunks * info->spare_size) +
1306 info->last_spare_size;
1307 for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1309 buf + (chunk * info->chunk_size),
1313 (chunk * (info->spare_size)),
1316 chip->oob_poi + ecc_off_buf +
1317 (chunk * (info->ecc_size)),
1318 info->ecc_size - 2);
/* handle the smaller trailing chunk, if the layout has one */
1321 if (info->ntotalchunks > info->nfullchunks) {
1323 buf + (info->nfullchunks * info->chunk_size),
1324 info->last_chunk_size);
1327 (info->nfullchunks * (info->spare_size)),
1328 info->last_spare_size);
1330 chip->oob_poi + ecc_off_buf +
1331 (info->nfullchunks * (info->ecc_size)),
1332 info->ecc_size - 2);
1335 info->force_raw = false;
/*
 * MTD ecc.read_oob_raw: there is no OOB-only raw transfer on this
 * controller, so read the whole page raw into chip->buffers->databuf
 * (which fills oob_poi as a side effect).
 */
1340 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1341 struct nand_chip *chip, int page)
1343 /* Invalidate page cache */
1346 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1350 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1352 struct nand_chip *chip = mtd_to_nand(mtd);
1353 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1354 struct pxa3xx_nand_info *info = host->info_data;
1357 if (info->buf_start < info->buf_count)
1358 /* Has just send a new command? */
1359 retval = info->data_buff[info->buf_start++];
1364 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1366 struct nand_chip *chip = mtd_to_nand(mtd);
1367 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1368 struct pxa3xx_nand_info *info = host->info_data;
1369 u16 retval = 0xFFFF;
1371 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1372 retval = *((u16 *)(info->data_buff+info->buf_start));
1373 info->buf_start += 2;
1378 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1380 struct nand_chip *chip = mtd_to_nand(mtd);
1381 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1382 struct pxa3xx_nand_info *info = host->info_data;
1383 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1385 memcpy(buf, info->data_buff + info->buf_start, real_len);
1386 info->buf_start += real_len;
1389 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1390 const uint8_t *buf, int len)
1392 struct nand_chip *chip = mtd_to_nand(mtd);
1393 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1394 struct pxa3xx_nand_info *info = host->info_data;
1395 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1397 memcpy(info->data_buff + info->buf_start, buf, real_len);
1398 info->buf_start += real_len;
/*
 * ->select_chip() hook: intentionally empty — chip select is encoded in
 * the command buffer when each command is issued, so there is nothing to
 * do at select time.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1406 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1408 struct nand_chip *chip = mtd_to_nand(mtd);
1409 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1410 struct pxa3xx_nand_info *info = host->info_data;
1412 if (info->need_wait) {
1415 info->need_wait = 0;
1421 status = nand_readl(info, NDSR);
1423 pxa3xx_nand_irq(info);
1425 if (info->dev_ready)
1428 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1429 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1430 return NAND_STATUS_FAIL;
1435 /* pxa3xx_nand_send_command has waited for command complete */
1436 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1437 if (info->retcode == ERR_NONE)
1440 return NAND_STATUS_FAIL;
1443 return NAND_STATUS_READY;
1446 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1448 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1450 /* Configure default flash values */
1451 info->reg_ndcr = 0x0; /* enable all interrupts */
1452 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1453 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1454 info->reg_ndcr |= NDCR_SPARE_EN;
1459 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1461 struct pxa3xx_nand_host *host = info->host[info->cs];
1462 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1463 struct nand_chip *chip = mtd_to_nand(mtd);
1465 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1466 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1467 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1470 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1472 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1473 uint32_t ndcr = nand_readl(info, NDCR);
1475 /* Set an initial chunk size */
1476 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1477 info->reg_ndcr = ndcr &
1478 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1479 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1480 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1481 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1484 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1486 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1487 if (info->data_buff == NULL)
1492 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1494 struct pxa3xx_nand_info *info = host->info_data;
1495 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1496 struct mtd_info *mtd;
1497 struct nand_chip *chip;
1498 const struct nand_sdr_timings *timings;
1501 mtd = nand_to_mtd(&info->host[info->cs]->chip);
1502 chip = mtd_to_nand(mtd);
1504 /* configure default flash values */
1505 info->reg_ndcr = 0x0; /* enable all interrupts */
1506 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1507 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1508 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1510 /* use the common timing to make a try */
1511 timings = onfi_async_timing_mode_to_sdr_timings(0);
1512 if (IS_ERR(timings))
1513 return PTR_ERR(timings);
1515 pxa3xx_nand_set_sdr_timing(host, timings);
1517 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1518 ret = chip->waitfunc(mtd, chip);
1519 if (ret & NAND_STATUS_FAIL)
1525 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1526 struct nand_ecc_ctrl *ecc,
1527 int strength, int ecc_stepsize, int page_size)
1529 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1530 info->nfullchunks = 1;
1531 info->ntotalchunks = 1;
1532 info->chunk_size = 2048;
1533 info->spare_size = 40;
1534 info->ecc_size = 24;
1535 ecc->mode = NAND_ECC_HW;
1539 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1540 info->nfullchunks = 1;
1541 info->ntotalchunks = 1;
1542 info->chunk_size = 512;
1543 info->spare_size = 8;
1545 ecc->mode = NAND_ECC_HW;
1550 * Required ECC: 4-bit correction per 512 bytes
1551 * Select: 16-bit correction per 2048 bytes
1553 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1555 info->nfullchunks = 1;
1556 info->ntotalchunks = 1;
1557 info->chunk_size = 2048;
1558 info->spare_size = 32;
1559 info->ecc_size = 32;
1560 ecc->mode = NAND_ECC_HW;
1561 ecc->size = info->chunk_size;
1562 ecc->layout = &ecc_layout_2KB_bch4bit;
1565 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1567 info->nfullchunks = 2;
1568 info->ntotalchunks = 2;
1569 info->chunk_size = 2048;
1570 info->spare_size = 32;
1571 info->ecc_size = 32;
1572 ecc->mode = NAND_ECC_HW;
1573 ecc->size = info->chunk_size;
1574 ecc->layout = &ecc_layout_4KB_bch4bit;
1577 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1579 info->nfullchunks = 4;
1580 info->ntotalchunks = 4;
1581 info->chunk_size = 2048;
1582 info->spare_size = 32;
1583 info->ecc_size = 32;
1584 ecc->mode = NAND_ECC_HW;
1585 ecc->size = info->chunk_size;
1586 ecc->layout = &ecc_layout_8KB_bch4bit;
1590 * Required ECC: 8-bit correction per 512 bytes
1591 * Select: 16-bit correction per 1024 bytes
1593 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1595 info->nfullchunks = 1;
1596 info->ntotalchunks = 2;
1597 info->chunk_size = 1024;
1598 info->spare_size = 0;
1599 info->last_chunk_size = 1024;
1600 info->last_spare_size = 32;
1601 info->ecc_size = 32;
1602 ecc->mode = NAND_ECC_HW;
1603 ecc->size = info->chunk_size;
1604 ecc->layout = &ecc_layout_2KB_bch8bit;
1607 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1609 info->nfullchunks = 4;
1610 info->ntotalchunks = 5;
1611 info->chunk_size = 1024;
1612 info->spare_size = 0;
1613 info->last_chunk_size = 0;
1614 info->last_spare_size = 64;
1615 info->ecc_size = 32;
1616 ecc->mode = NAND_ECC_HW;
1617 ecc->size = info->chunk_size;
1618 ecc->layout = &ecc_layout_4KB_bch8bit;
1621 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1623 info->nfullchunks = 8;
1624 info->ntotalchunks = 9;
1625 info->chunk_size = 1024;
1626 info->spare_size = 0;
1627 info->last_chunk_size = 0;
1628 info->last_spare_size = 160;
1629 info->ecc_size = 32;
1630 ecc->mode = NAND_ECC_HW;
1631 ecc->size = info->chunk_size;
1632 ecc->layout = &ecc_layout_8KB_bch8bit;
1636 dev_err(&info->pdev->dev,
1637 "ECC strength %d at page size %d is not supported\n",
1638 strength, page_size);
1645 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1647 struct nand_chip *chip = mtd_to_nand(mtd);
1648 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1649 struct pxa3xx_nand_info *info = host->info_data;
1650 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1652 uint16_t ecc_strength, ecc_step;
1654 if (pdata->keep_config) {
1655 pxa3xx_nand_detect_config(info);
1657 ret = pxa3xx_nand_config_ident(info);
1660 ret = pxa3xx_nand_sensing(host);
1662 dev_info(&info->pdev->dev,
1663 "There is no chip on cs %d!\n",
1669 /* Device detection must be done with ECC disabled */
1670 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1671 nand_writel(info, NDECCCTRL, 0x0);
1673 if (nand_scan_ident(mtd, 1, NULL))
1676 if (!pdata->keep_config) {
1677 ret = pxa3xx_nand_init_timings(host);
1679 dev_err(&info->pdev->dev,
1680 "Failed to set timings: %d\n", ret);
1685 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1687 * We'll use a bad block table stored in-flash and don't
1688 * allow writing the bad block marker to the flash.
1690 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1691 chip->bbt_td = &bbt_main_descr;
1692 chip->bbt_md = &bbt_mirror_descr;
1695 if (pdata->ecc_strength && pdata->ecc_step_size) {
1696 ecc_strength = pdata->ecc_strength;
1697 ecc_step = pdata->ecc_step_size;
1699 ecc_strength = chip->ecc_strength_ds;
1700 ecc_step = chip->ecc_step_ds;
1703 /* Set default ECC strength requirements on non-ONFI devices */
1704 if (ecc_strength < 1 && ecc_step < 1) {
1709 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1710 ecc_step, mtd->writesize);
1715 * If the page size is bigger than the FIFO size, let's check
1716 * we are given the right variant and then switch to the extended
1717 * (aka split) command handling,
1719 if (mtd->writesize > info->chunk_size) {
1720 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1721 chip->cmdfunc = nand_cmdfunc_extended;
1723 dev_err(&info->pdev->dev,
1724 "unsupported page size on this variant\n");
1729 /* calculate addressing information */
1730 if (mtd->writesize >= 2048)
1731 host->col_addr_cycles = 2;
1733 host->col_addr_cycles = 1;
1735 /* release the initial buffer */
1736 kfree(info->data_buff);
1738 /* allocate the real data + oob buffer */
1739 info->buf_size = mtd->writesize + mtd->oobsize;
1740 ret = pxa3xx_nand_init_buff(info);
1743 info->oob_buff = info->data_buff + mtd->writesize;
1745 if ((mtd->size >> chip->page_shift) > 65536)
1746 host->row_addr_cycles = 3;
1748 host->row_addr_cycles = 2;
1750 if (!pdata->keep_config)
1751 pxa3xx_nand_config_tail(info);
1753 return nand_scan_tail(mtd);
1756 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1758 struct pxa3xx_nand_platform_data *pdata;
1759 struct pxa3xx_nand_host *host;
1760 struct nand_chip *chip = NULL;
1761 struct mtd_info *mtd;
1764 pdata = info->pdata;
1765 if (pdata->num_cs <= 0)
1768 info->variant = pxa3xx_nand_get_variant();
1769 for (cs = 0; cs < pdata->num_cs; cs++) {
1770 chip = (struct nand_chip *)
1771 ((u8 *)&info[1] + sizeof(*host) * cs);
1772 mtd = nand_to_mtd(chip);
1773 host = (struct pxa3xx_nand_host *)chip;
1774 info->host[cs] = host;
1776 host->info_data = info;
1777 mtd->owner = THIS_MODULE;
1779 nand_set_controller_data(chip, host);
1780 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1781 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1782 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
1783 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1784 chip->controller = &info->controller;
1785 chip->waitfunc = pxa3xx_nand_waitfunc;
1786 chip->select_chip = pxa3xx_nand_select_chip;
1787 chip->read_word = pxa3xx_nand_read_word;
1788 chip->read_byte = pxa3xx_nand_read_byte;
1789 chip->read_buf = pxa3xx_nand_read_buf;
1790 chip->write_buf = pxa3xx_nand_write_buf;
1791 chip->options |= NAND_NO_SUBPAGE_WRITE;
1792 chip->cmdfunc = nand_cmdfunc;
1795 /* Allocate a buffer to allow flash detection */
1796 info->buf_size = INIT_BUFFER_SIZE;
1797 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1798 if (info->data_buff == NULL) {
1800 goto fail_disable_clk;
1803 /* initialize all interrupts to be disabled */
1804 disable_int(info, NDSR_MASK);
1808 kfree(info->data_buff);
1813 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1815 struct pxa3xx_nand_platform_data *pdata;
1816 const void *blob = gd->fdt_blob;
1819 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1823 /* Get address decoding nodes from the FDT blob */
1825 node = fdt_node_offset_by_compatible(blob, node,
1826 "marvell,mvebu-pxa3xx-nand");
1830 /* Bypass disabeld nodes */
1831 if (!fdtdec_get_is_enabled(blob, node))
1834 /* Get the first enabled NAND controler base address */
1836 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1837 blob, node, "reg", 0, NULL, true);
1839 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1840 if (pdata->num_cs != 1) {
1841 pr_err("pxa3xx driver supports single CS only\n");
1845 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1846 pdata->enable_arbiter = 1;
1848 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1849 pdata->keep_config = 1;
1853 * If these are not set, they will be selected according
1854 * to the detected flash type.
1857 pdata->ecc_strength = fdtdec_get_int(blob, node,
1858 "nand-ecc-strength", 0);
1861 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1862 "nand-ecc-step-size", 0);
1864 info->pdata = pdata;
1866 /* Currently support only a single NAND controller */
1869 } while (node >= 0);
1874 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1876 struct pxa3xx_nand_platform_data *pdata;
1877 int ret, cs, probe_success;
1879 ret = pxa3xx_nand_probe_dt(info);
1883 pdata = info->pdata;
1885 ret = alloc_nand_resource(info);
1887 dev_err(&pdev->dev, "alloc nand resource failed\n");
1892 for (cs = 0; cs < pdata->num_cs; cs++) {
1893 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1896 * The mtd name matches the one used in 'mtdparts' kernel
1897 * parameter. This name cannot be changed or otherwise
1898 * user's mtd partitions configuration would get broken.
1900 mtd->name = "pxa3xx_nand-0";
1902 ret = pxa3xx_nand_scan(mtd);
1904 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1909 if (nand_register(cs, mtd))
1922 * Main initialization routine
1924 void board_nand_init(void)
1926 struct pxa3xx_nand_info *info;
1927 struct pxa3xx_nand_host *host;
1930 info = kzalloc(sizeof(*info) +
1931 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1936 ret = pxa3xx_nand_probe(info);