// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/mtd/nand/raw/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 */
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/bug.h>
16 #include <linux/err.h>
17 #include <linux/errno.h>
19 #include <asm/arch/cpu.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/rawnand.h>
22 #include <linux/types.h>
24 #include "pxa3xx_nand.h"
26 DECLARE_GLOBAL_DATA_PTR;
28 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
29 #define CHIP_DELAY_TIMEOUT 200
30 #define NAND_STOP_DELAY 40
/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
40 #define INIT_BUFFER_SIZE 2048
42 /* registers and bit definitions */
43 #define NDCR (0x00) /* Control register */
44 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
45 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
46 #define NDSR (0x14) /* Status Register */
47 #define NDPCR (0x18) /* Page Count Register */
48 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
49 #define NDBDR1 (0x20) /* Bad Block Register 1 */
50 #define NDECCCTRL (0x28) /* ECC control */
51 #define NDDB (0x40) /* Data Buffer */
52 #define NDCB0 (0x48) /* Command Buffer0 */
53 #define NDCB1 (0x4C) /* Command Buffer1 */
54 #define NDCB2 (0x50) /* Command Buffer2 */
56 #define NDCR_SPARE_EN (0x1 << 31)
57 #define NDCR_ECC_EN (0x1 << 30)
58 #define NDCR_DMA_EN (0x1 << 29)
59 #define NDCR_ND_RUN (0x1 << 28)
60 #define NDCR_DWIDTH_C (0x1 << 27)
61 #define NDCR_DWIDTH_M (0x1 << 26)
62 #define NDCR_PAGE_SZ (0x1 << 24)
63 #define NDCR_NCSX (0x1 << 23)
64 #define NDCR_ND_MODE (0x3 << 21)
65 #define NDCR_NAND_MODE (0x0)
66 #define NDCR_CLR_PG_CNT (0x1 << 20)
67 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
68 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
69 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
71 #define NDCR_RA_START (0x1 << 15)
72 #define NDCR_PG_PER_BLK (0x1 << 14)
73 #define NDCR_ND_ARB_EN (0x1 << 12)
74 #define NDCR_INT_MASK (0xFFF)
76 #define NDSR_MASK (0xfff)
77 #define NDSR_ERR_CNT_OFF (16)
78 #define NDSR_ERR_CNT_MASK (0x1f)
79 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
80 #define NDSR_RDY (0x1 << 12)
81 #define NDSR_FLASH_RDY (0x1 << 11)
82 #define NDSR_CS0_PAGED (0x1 << 10)
83 #define NDSR_CS1_PAGED (0x1 << 9)
84 #define NDSR_CS0_CMDD (0x1 << 8)
85 #define NDSR_CS1_CMDD (0x1 << 7)
86 #define NDSR_CS0_BBD (0x1 << 6)
87 #define NDSR_CS1_BBD (0x1 << 5)
88 #define NDSR_UNCORERR (0x1 << 4)
89 #define NDSR_CORERR (0x1 << 3)
90 #define NDSR_WRDREQ (0x1 << 2)
91 #define NDSR_RDDREQ (0x1 << 1)
92 #define NDSR_WRCMDREQ (0x1)
94 #define NDCB0_LEN_OVRD (0x1 << 28)
95 #define NDCB0_ST_ROW_EN (0x1 << 26)
96 #define NDCB0_AUTO_RS (0x1 << 25)
97 #define NDCB0_CSEL (0x1 << 24)
98 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
99 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
100 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
101 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
102 #define NDCB0_NC (0x1 << 20)
103 #define NDCB0_DBC (0x1 << 19)
104 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
105 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
106 #define NDCB0_CMD2_MASK (0xff << 8)
107 #define NDCB0_CMD1_MASK (0xff)
108 #define NDCB0_ADDR_CYC_SHIFT (16)
110 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
111 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
112 #define EXT_CMD_TYPE_READ 4 /* Read */
113 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
114 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
115 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
116 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
123 #define READ_ID_BYTES 7
125 /* macros for registers read/write */
126 #define nand_writel(info, off, val) \
127 writel((val), (info)->mmio_base + (off))
129 #define nand_readl(info, off) \
130 readl((info)->mmio_base + (off))
132 /* error code and state */
155 enum pxa3xx_nand_variant {
156 PXA3XX_NAND_VARIANT_PXA,
157 PXA3XX_NAND_VARIANT_ARMADA370,
160 struct pxa3xx_nand_host {
161 struct nand_chip chip;
164 /* page size of attached chip */
168 /* calculated from pxa3xx_nand_flash data */
169 unsigned int col_addr_cycles;
170 unsigned int row_addr_cycles;
173 struct pxa3xx_nand_info {
174 struct nand_hw_control controller;
175 struct pxa3xx_nand_platform_data *pdata;
178 void __iomem *mmio_base;
179 unsigned long mmio_phys;
180 int cmd_complete, dev_ready;
182 unsigned int buf_start;
183 unsigned int buf_count;
184 unsigned int buf_size;
185 unsigned int data_buff_pos;
186 unsigned int oob_buff_pos;
188 unsigned char *data_buff;
189 unsigned char *oob_buff;
191 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
195 * This driver supports NFCv1 (as found in PXA SoC)
196 * and NFCv2 (as found in Armada 370/XP SoC).
198 enum pxa3xx_nand_variant variant;
201 int use_ecc; /* use HW ECC ? */
202 int force_raw; /* prevent use_ecc to be set */
203 int ecc_bch; /* using BCH ECC? */
204 int use_spare; /* use spare ? */
207 /* Amount of real data per full chunk */
208 unsigned int chunk_size;
210 /* Amount of spare data per full chunk */
211 unsigned int spare_size;
213 /* Number of full chunks (i.e chunk_size + spare_size) */
214 unsigned int nfullchunks;
217 * Total number of chunks. If equal to nfullchunks, then there
218 * are only full chunks. Otherwise, there is one last chunk of
219 * size (last_chunk_size + last_spare_size)
221 unsigned int ntotalchunks;
223 /* Amount of real data in the last chunk */
224 unsigned int last_chunk_size;
226 /* Amount of spare data in the last chunk */
227 unsigned int last_spare_size;
229 unsigned int ecc_size;
230 unsigned int ecc_err_cnt;
231 unsigned int max_bitflips;
235 * Variables only valid during command
236 * execution. step_chunk_size and step_spare_size is the
237 * amount of real data and spare data in the current
238 * chunk. cur_chunk is the current chunk being
241 unsigned int step_chunk_size;
242 unsigned int step_spare_size;
243 unsigned int cur_chunk;
245 /* cached register value */
250 /* generated NDCBx register values */
257 static struct pxa3xx_nand_timing timing[] = {
259 * tCH Enable signal hold time
260 * tCS Enable signal setup time
261 * tWH ND_nWE high duration
262 * tWP ND_nWE pulse time
263 * tRH ND_nRE high duration
264 * tRP ND_nRE pulse width
265 * tR ND_nWE high to ND_nRE low for read
266 * tWHR ND_nWE high to ND_nRE low for status read
267 * tAR ND_ALE low to ND_nRE low delay
269 /*ch cs wh wp rh rp r whr ar */
270 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
271 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
272 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
273 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
274 { 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
277 static struct pxa3xx_nand_flash builtin_flash_types[] = {
280 * flash_width Width of Flash memory (DWIDTH_M)
281 * dfc_width Width of flash controller(DWIDTH_C)
283 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
285 { 0x46ec, 16, 16, &timing[1] },
286 { 0xdaec, 8, 8, &timing[1] },
287 { 0xd7ec, 8, 8, &timing[1] },
288 { 0xa12c, 8, 8, &timing[2] },
289 { 0xb12c, 16, 16, &timing[2] },
290 { 0xdc2c, 8, 8, &timing[2] },
291 { 0xcc2c, 16, 16, &timing[2] },
292 { 0xba20, 16, 16, &timing[3] },
293 { 0xda98, 8, 8, &timing[4] },
296 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
297 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
298 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
300 static struct nand_bbt_descr bbt_main_descr = {
301 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
302 | NAND_BBT_2BIT | NAND_BBT_VERSION,
306 .maxblocks = 8, /* Last 8 blocks in each chip */
307 .pattern = bbt_pattern
310 static struct nand_bbt_descr bbt_mirror_descr = {
311 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
312 | NAND_BBT_2BIT | NAND_BBT_VERSION,
316 .maxblocks = 8, /* Last 8 blocks in each chip */
317 .pattern = bbt_mirror_pattern
321 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
324 32, 33, 34, 35, 36, 37, 38, 39,
325 40, 41, 42, 43, 44, 45, 46, 47,
326 48, 49, 50, 51, 52, 53, 54, 55,
327 56, 57, 58, 59, 60, 61, 62, 63},
328 .oobfree = { {2, 30} }
331 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
334 32, 33, 34, 35, 36, 37, 38, 39,
335 40, 41, 42, 43, 44, 45, 46, 47,
336 48, 49, 50, 51, 52, 53, 54, 55,
337 56, 57, 58, 59, 60, 61, 62, 63,
338 64, 65, 66, 67, 68, 69, 70, 71,
339 72, 73, 74, 75, 76, 77, 78, 79,
340 80, 81, 82, 83, 84, 85, 86, 87,
341 88, 89, 90, 91, 92, 93, 94, 95},
342 .oobfree = { {1, 4}, {6, 26} }
345 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
348 32, 33, 34, 35, 36, 37, 38, 39,
349 40, 41, 42, 43, 44, 45, 46, 47,
350 48, 49, 50, 51, 52, 53, 54, 55,
351 56, 57, 58, 59, 60, 61, 62, 63,
352 96, 97, 98, 99, 100, 101, 102, 103,
353 104, 105, 106, 107, 108, 109, 110, 111,
354 112, 113, 114, 115, 116, 117, 118, 119,
355 120, 121, 122, 123, 124, 125, 126, 127},
356 /* Bootrom looks in bytes 0 & 5 for bad blocks */
357 .oobfree = { {6, 26}, { 64, 32} }
360 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
363 32, 33, 34, 35, 36, 37, 38, 39,
364 40, 41, 42, 43, 44, 45, 46, 47,
365 48, 49, 50, 51, 52, 53, 54, 55,
366 56, 57, 58, 59, 60, 61, 62, 63,
368 96, 97, 98, 99, 100, 101, 102, 103,
369 104, 105, 106, 107, 108, 109, 110, 111,
370 112, 113, 114, 115, 116, 117, 118, 119,
371 120, 121, 122, 123, 124, 125, 126, 127,
373 160, 161, 162, 163, 164, 165, 166, 167,
374 168, 169, 170, 171, 172, 173, 174, 175,
375 176, 177, 178, 179, 180, 181, 182, 183,
376 184, 185, 186, 187, 188, 189, 190, 191,
378 224, 225, 226, 227, 228, 229, 230, 231,
379 232, 233, 234, 235, 236, 237, 238, 239,
380 240, 241, 242, 243, 244, 245, 246, 247,
381 248, 249, 250, 251, 252, 253, 254, 255},
383 /* Bootrom looks in bytes 0 & 5 for bad blocks */
384 .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
387 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
390 32, 33, 34, 35, 36, 37, 38, 39,
391 40, 41, 42, 43, 44, 45, 46, 47,
392 48, 49, 50, 51, 52, 53, 54, 55,
393 56, 57, 58, 59, 60, 61, 62, 63},
397 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
400 /* HW ECC handles all ECC data and all spare area is free for OOB */
401 .oobfree = {{0, 160} }
404 #define NDTR0_tCH(c) (min((c), 7) << 19)
405 #define NDTR0_tCS(c) (min((c), 7) << 16)
406 #define NDTR0_tWH(c) (min((c), 7) << 11)
407 #define NDTR0_tWP(c) (min((c), 7) << 8)
408 #define NDTR0_tRH(c) (min((c), 7) << 3)
409 #define NDTR0_tRP(c) (min((c), 7) << 0)
411 #define NDTR1_tR(c) (min((c), 65535) << 16)
412 #define NDTR1_tWHR(c) (min((c), 15) << 4)
413 #define NDTR1_tAR(c) (min((c), 15) << 0)
415 /* convert nano-seconds to nand flash controller clock cycles */
416 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
418 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
420 /* We only support the Armada 370/XP/38x for now */
421 return PXA3XX_NAND_VARIANT_ARMADA370;
424 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
425 const struct pxa3xx_nand_timing *t)
427 struct pxa3xx_nand_info *info = host->info_data;
428 unsigned long nand_clk = mvebu_get_nand_clock();
429 uint32_t ndtr0, ndtr1;
431 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
432 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
433 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
434 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
435 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
436 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
438 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
439 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
440 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
442 info->ndtr0cs0 = ndtr0;
443 info->ndtr1cs0 = ndtr1;
444 nand_writel(info, NDTR0CS0, ndtr0);
445 nand_writel(info, NDTR1CS0, ndtr1);
448 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
449 const struct nand_sdr_timings *t)
451 struct pxa3xx_nand_info *info = host->info_data;
452 struct nand_chip *chip = &host->chip;
453 unsigned long nand_clk = mvebu_get_nand_clock();
454 uint32_t ndtr0, ndtr1;
456 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
457 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
458 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
459 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
460 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
461 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
462 u32 tR = chip->chip_delay * 1000;
463 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
464 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
466 /* fallback to a default value if tR = 0 */
470 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
471 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
472 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
473 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
474 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
475 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
477 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
478 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
479 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
481 info->ndtr0cs0 = ndtr0;
482 info->ndtr1cs0 = ndtr1;
483 nand_writel(info, NDTR0CS0, ndtr0);
484 nand_writel(info, NDTR1CS0, ndtr1);
487 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
489 const struct nand_sdr_timings *timings;
490 struct nand_chip *chip = &host->chip;
491 struct pxa3xx_nand_info *info = host->info_data;
492 const struct pxa3xx_nand_flash *f = NULL;
493 struct mtd_info *mtd = nand_to_mtd(&host->chip);
494 int mode, id, ntypes, i;
496 mode = onfi_get_async_timing_mode(chip);
497 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
498 ntypes = ARRAY_SIZE(builtin_flash_types);
500 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
502 id = chip->read_byte(mtd);
503 id |= chip->read_byte(mtd) << 0x8;
505 for (i = 0; i < ntypes; i++) {
506 f = &builtin_flash_types[i];
508 if (f->chip_id == id)
513 dev_err(&info->pdev->dev, "Error: timings not found\n");
517 pxa3xx_nand_set_timing(host, f->timing);
519 if (f->flash_width == 16) {
520 info->reg_ndcr |= NDCR_DWIDTH_M;
521 chip->options |= NAND_BUSWIDTH_16;
524 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
526 mode = fls(mode) - 1;
530 timings = onfi_async_timing_mode_to_sdr_timings(mode);
532 return PTR_ERR(timings);
534 pxa3xx_nand_set_sdr_timing(host, timings);
541 * NOTE: it is a must to set ND_RUN first, then write
542 * command buffer, otherwise, it does not work.
543 * We enable all the interrupt at the same time, and
544 * let pxa3xx_nand_irq to handle all logic.
546 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
550 ndcr = info->reg_ndcr;
555 nand_writel(info, NDECCCTRL, 0x1);
557 ndcr &= ~NDCR_ECC_EN;
559 nand_writel(info, NDECCCTRL, 0x0);
562 ndcr &= ~NDCR_DMA_EN;
565 ndcr |= NDCR_SPARE_EN;
567 ndcr &= ~NDCR_SPARE_EN;
571 /* clear status bits and run */
572 nand_writel(info, NDSR, NDSR_MASK);
573 nand_writel(info, NDCR, 0);
574 nand_writel(info, NDCR, ndcr);
577 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
581 ndcr = nand_readl(info, NDCR);
582 nand_writel(info, NDCR, ndcr | int_mask);
585 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
587 if (info->ecc_bch && !info->force_raw) {
591 * According to the datasheet, when reading from NDDB
592 * with BCH enabled, after each 32 bytes reads, we
593 * have to make sure that the NDSR.RDDREQ bit is set.
595 * Drain the FIFO 8 32 bits reads at a time, and skip
596 * the polling on the last read.
599 readsl(info->mmio_base + NDDB, data, 8);
602 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
603 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
604 dev_err(&info->pdev->dev,
605 "Timeout on RDDREQ while draining the FIFO\n");
615 readsl(info->mmio_base + NDDB, data, len);
618 static void handle_data_pio(struct pxa3xx_nand_info *info)
620 int data_len = info->step_chunk_size;
623 * In raw mode, include the spare area and the ECC bytes that are not
624 * consumed by the controller in the data section. Do not reorganize
625 * here, do it in the ->read_page_raw() handler instead.
628 data_len += info->step_spare_size + info->ecc_size;
630 switch (info->state) {
631 case STATE_PIO_WRITING:
632 if (info->step_chunk_size)
633 writesl(info->mmio_base + NDDB,
634 info->data_buff + info->data_buff_pos,
635 DIV_ROUND_UP(data_len, 4));
637 if (info->step_spare_size)
638 writesl(info->mmio_base + NDDB,
639 info->oob_buff + info->oob_buff_pos,
640 DIV_ROUND_UP(info->step_spare_size, 4));
642 case STATE_PIO_READING:
645 info->data_buff + info->data_buff_pos,
646 DIV_ROUND_UP(data_len, 4));
651 if (info->step_spare_size)
653 info->oob_buff + info->oob_buff_pos,
654 DIV_ROUND_UP(info->step_spare_size, 4));
657 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
662 /* Update buffer pointers for multi-page read/write */
663 info->data_buff_pos += data_len;
664 info->oob_buff_pos += info->step_spare_size;
667 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
669 handle_data_pio(info);
671 info->state = STATE_CMD_DONE;
672 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
675 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
677 unsigned int status, is_completed = 0, is_ready = 0;
678 unsigned int ready, cmd_done;
679 irqreturn_t ret = IRQ_HANDLED;
682 ready = NDSR_FLASH_RDY;
683 cmd_done = NDSR_CS0_CMDD;
686 cmd_done = NDSR_CS1_CMDD;
689 /* TODO - find out why we need the delay during write operation. */
692 status = nand_readl(info, NDSR);
694 if (status & NDSR_UNCORERR)
695 info->retcode = ERR_UNCORERR;
696 if (status & NDSR_CORERR) {
697 info->retcode = ERR_CORERR;
698 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
700 info->ecc_err_cnt = NDSR_ERR_CNT(status);
702 info->ecc_err_cnt = 1;
705 * Each chunk composing a page is corrected independently,
706 * and we need to store maximum number of corrected bitflips
707 * to return it to the MTD layer in ecc.read_page().
709 info->max_bitflips = max_t(unsigned int,
713 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
714 info->state = (status & NDSR_RDDREQ) ?
715 STATE_PIO_READING : STATE_PIO_WRITING;
716 /* Call the IRQ thread in U-Boot directly */
717 pxa3xx_nand_irq_thread(info);
720 if (status & cmd_done) {
721 info->state = STATE_CMD_DONE;
724 if (status & ready) {
725 info->state = STATE_READY;
730 * Clear all status bit before issuing the next command, which
731 * can and will alter the status bits and will deserve a new
732 * interrupt on its own. This lets the controller exit the IRQ
734 nand_writel(info, NDSR, status);
736 if (status & NDSR_WRCMDREQ) {
737 status &= ~NDSR_WRCMDREQ;
738 info->state = STATE_CMD_HANDLE;
741 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
742 * must be loaded by writing directly either 12 or 16
743 * bytes directly to NDCB0, four bytes at a time.
745 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
746 * but each NDCBx register can be read.
748 nand_writel(info, NDCB0, info->ndcb0);
749 nand_writel(info, NDCB0, info->ndcb1);
750 nand_writel(info, NDCB0, info->ndcb2);
752 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
753 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
754 nand_writel(info, NDCB0, info->ndcb3);
758 info->cmd_complete = 1;
765 static inline int is_buf_blank(uint8_t *buf, size_t len)
767 for (; len > 0; len--)
773 static void set_command_address(struct pxa3xx_nand_info *info,
774 unsigned int page_size, uint16_t column, int page_addr)
776 /* small page addr setting */
777 if (page_size < info->chunk_size) {
778 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
783 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
786 if (page_addr & 0xFF0000)
787 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
793 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
795 struct pxa3xx_nand_host *host = info->host[info->cs];
796 struct mtd_info *mtd = nand_to_mtd(&host->chip);
798 /* reset data and oob column point to handle data */
801 info->data_buff_pos = 0;
802 info->oob_buff_pos = 0;
803 info->step_chunk_size = 0;
804 info->step_spare_size = 0;
808 info->retcode = ERR_NONE;
809 info->ecc_err_cnt = 0;
815 case NAND_CMD_READOOB:
816 case NAND_CMD_PAGEPROG:
817 if (!info->force_raw)
830 * If we are about to issue a read command, or about to set
831 * the write address, then clean the data buffer.
833 if (command == NAND_CMD_READ0 ||
834 command == NAND_CMD_READOOB ||
835 command == NAND_CMD_SEQIN) {
836 info->buf_count = mtd->writesize + mtd->oobsize;
837 memset(info->data_buff, 0xFF, info->buf_count);
841 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
842 int ext_cmd_type, uint16_t column, int page_addr)
844 int addr_cycle, exec_cmd;
845 struct pxa3xx_nand_host *host;
846 struct mtd_info *mtd;
848 host = info->host[info->cs];
849 mtd = nand_to_mtd(&host->chip);
854 info->ndcb0 = NDCB0_CSEL;
858 if (command == NAND_CMD_SEQIN)
861 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
862 + host->col_addr_cycles);
865 case NAND_CMD_READOOB:
867 info->buf_start = column;
868 info->ndcb0 |= NDCB0_CMD_TYPE(0)
872 if (command == NAND_CMD_READOOB)
873 info->buf_start += mtd->writesize;
875 if (info->cur_chunk < info->nfullchunks) {
876 info->step_chunk_size = info->chunk_size;
877 info->step_spare_size = info->spare_size;
879 info->step_chunk_size = info->last_chunk_size;
880 info->step_spare_size = info->last_spare_size;
884 * Multiple page read needs an 'extended command type' field,
885 * which is either naked-read or last-read according to the
888 if (info->force_raw) {
889 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
891 NDCB0_EXT_CMD_TYPE(ext_cmd_type);
892 info->ndcb3 = info->step_chunk_size +
893 info->step_spare_size + info->ecc_size;
894 } else if (mtd->writesize == info->chunk_size) {
895 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
896 } else if (mtd->writesize > info->chunk_size) {
897 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
899 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
900 info->ndcb3 = info->step_chunk_size +
901 info->step_spare_size;
904 set_command_address(info, mtd->writesize, column, page_addr);
909 info->buf_start = column;
910 set_command_address(info, mtd->writesize, 0, page_addr);
913 * Multiple page programming needs to execute the initial
914 * SEQIN command that sets the page address.
916 if (mtd->writesize > info->chunk_size) {
917 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
918 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
925 case NAND_CMD_PAGEPROG:
926 if (is_buf_blank(info->data_buff,
927 (mtd->writesize + mtd->oobsize))) {
932 if (info->cur_chunk < info->nfullchunks) {
933 info->step_chunk_size = info->chunk_size;
934 info->step_spare_size = info->spare_size;
936 info->step_chunk_size = info->last_chunk_size;
937 info->step_spare_size = info->last_spare_size;
940 /* Second command setting for large pages */
941 if (mtd->writesize > info->chunk_size) {
943 * Multiple page write uses the 'extended command'
944 * field. This can be used to issue a command dispatch
945 * or a naked-write depending on the current stage.
947 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
949 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
950 info->ndcb3 = info->step_chunk_size +
951 info->step_spare_size;
954 * This is the command dispatch that completes a chunked
955 * page program operation.
957 if (info->cur_chunk == info->ntotalchunks) {
958 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
959 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
966 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
970 | (NAND_CMD_PAGEPROG << 8)
977 info->buf_count = INIT_BUFFER_SIZE;
978 info->ndcb0 |= NDCB0_CMD_TYPE(0)
982 info->ndcb1 = (column & 0xFF);
983 info->ndcb3 = INIT_BUFFER_SIZE;
984 info->step_chunk_size = INIT_BUFFER_SIZE;
987 case NAND_CMD_READID:
988 info->buf_count = READ_ID_BYTES;
989 info->ndcb0 |= NDCB0_CMD_TYPE(3)
992 info->ndcb1 = (column & 0xFF);
994 info->step_chunk_size = 8;
996 case NAND_CMD_STATUS:
998 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1002 info->step_chunk_size = 8;
1005 case NAND_CMD_ERASE1:
1006 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1010 | (NAND_CMD_ERASE2 << 8)
1012 info->ndcb1 = page_addr;
1016 case NAND_CMD_RESET:
1017 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1022 case NAND_CMD_ERASE2:
1028 dev_err(&info->pdev->dev, "non-supported command %x\n",
1036 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1037 int column, int page_addr)
1039 struct nand_chip *chip = mtd_to_nand(mtd);
1040 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1041 struct pxa3xx_nand_info *info = host->info_data;
1045 * if this is a x16 device ,then convert the input
1046 * "byte" address into a "word" address appropriate
1047 * for indexing a word-oriented device
1049 if (info->reg_ndcr & NDCR_DWIDTH_M)
1053 * There may be different NAND chip hooked to
1054 * different chip select, so check whether
1055 * chip select has been changed, if yes, reset the timing
1057 if (info->cs != host->cs) {
1058 info->cs = host->cs;
1059 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1060 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1063 prepare_start_command(info, command);
1065 info->state = STATE_PREPARED;
1066 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1071 info->cmd_complete = 0;
1072 info->dev_ready = 0;
1073 info->need_wait = 1;
1074 pxa3xx_nand_start(info);
1080 status = nand_readl(info, NDSR);
1082 pxa3xx_nand_irq(info);
1084 if (info->cmd_complete)
1087 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1088 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1093 info->state = STATE_IDLE;
1096 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1097 const unsigned command,
1098 int column, int page_addr)
1100 struct nand_chip *chip = mtd_to_nand(mtd);
1101 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1102 struct pxa3xx_nand_info *info = host->info_data;
1103 int exec_cmd, ext_cmd_type;
1106 * if this is a x16 device then convert the input
1107 * "byte" address into a "word" address appropriate
1108 * for indexing a word-oriented device
1110 if (info->reg_ndcr & NDCR_DWIDTH_M)
1114 * There may be different NAND chip hooked to
1115 * different chip select, so check whether
1116 * chip select has been changed, if yes, reset the timing
1118 if (info->cs != host->cs) {
1119 info->cs = host->cs;
1120 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1121 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1124 /* Select the extended command for the first command */
1126 case NAND_CMD_READ0:
1127 case NAND_CMD_READOOB:
1128 ext_cmd_type = EXT_CMD_TYPE_MONO;
1130 case NAND_CMD_SEQIN:
1131 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1133 case NAND_CMD_PAGEPROG:
1134 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1141 prepare_start_command(info, command);
1144 * Prepare the "is ready" completion before starting a command
1145 * transaction sequence. If the command is not executed the
1146 * completion will be completed, see below.
1148 * We can do that inside the loop because the command variable
1149 * is invariant and thus so is the exec_cmd.
1151 info->need_wait = 1;
1152 info->dev_ready = 0;
1157 info->state = STATE_PREPARED;
1158 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1161 info->need_wait = 0;
1162 info->dev_ready = 1;
1166 info->cmd_complete = 0;
1167 pxa3xx_nand_start(info);
1173 status = nand_readl(info, NDSR);
1175 pxa3xx_nand_irq(info);
1177 if (info->cmd_complete)
1180 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1181 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1186 /* Only a few commands need several steps */
1187 if (command != NAND_CMD_PAGEPROG &&
1188 command != NAND_CMD_READ0 &&
1189 command != NAND_CMD_READOOB)
1194 /* Check if the sequence is complete */
1195 if (info->cur_chunk == info->ntotalchunks &&
1196 command != NAND_CMD_PAGEPROG)
1200 * After a splitted program command sequence has issued
1201 * the command dispatch, the command sequence is complete.
1203 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1204 command == NAND_CMD_PAGEPROG &&
1205 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1208 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1209 /* Last read: issue a 'last naked read' */
1210 if (info->cur_chunk == info->ntotalchunks - 1)
1211 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1213 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1216 * If a splitted program command has no more data to transfer,
1217 * the command dispatch must be issued to complete.
1219 } else if (command == NAND_CMD_PAGEPROG &&
1220 info->cur_chunk == info->ntotalchunks) {
1221 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1225 info->state = STATE_IDLE;
1228 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1229 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1232 chip->write_buf(mtd, buf, mtd->writesize);
1233 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1238 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1239 struct nand_chip *chip, uint8_t *buf, int oob_required,
1242 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1243 struct pxa3xx_nand_info *info = host->info_data;
1246 chip->read_buf(mtd, buf, mtd->writesize);
1247 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1249 if (info->retcode == ERR_CORERR && info->use_ecc) {
1250 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1252 } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1254 * Empty pages will trigger uncorrectable errors. Re-read the
1255 * entire page in raw mode and check for bits not being "1".
1256 * If there are more than the supported strength, then it means
1257 * this is an actual uncorrectable error.
1259 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1260 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1261 chip->oob_poi, mtd->oobsize,
1262 NULL, 0, chip->ecc.strength);
1264 mtd->ecc_stats.failed++;
1266 mtd->ecc_stats.corrected += bf;
1267 info->max_bitflips = max_t(unsigned int,
1268 info->max_bitflips, bf);
1269 info->retcode = ERR_CORERR;
1271 info->retcode = ERR_NONE;
1274 } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1275 /* Raw read is not supported with Hamming ECC engine */
1276 if (is_buf_blank(buf, mtd->writesize))
1277 info->retcode = ERR_NONE;
1279 mtd->ecc_stats.failed++;
1282 return info->max_bitflips;
1285 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1286 struct nand_chip *chip, uint8_t *buf,
1287 int oob_required, int page)
1289 struct pxa3xx_nand_host *host = chip->priv;
1290 struct pxa3xx_nand_info *info = host->info_data;
1291 int chunk, ecc_off_buf;
1297 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1298 * pxa3xx_nand_start(), which will actually disable the ECC engine.
1300 info->force_raw = true;
1301 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1303 ecc_off_buf = (info->nfullchunks * info->spare_size) +
1304 info->last_spare_size;
1305 for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1307 buf + (chunk * info->chunk_size),
1311 (chunk * (info->spare_size)),
1314 chip->oob_poi + ecc_off_buf +
1315 (chunk * (info->ecc_size)),
1316 info->ecc_size - 2);
1319 if (info->ntotalchunks > info->nfullchunks) {
1321 buf + (info->nfullchunks * info->chunk_size),
1322 info->last_chunk_size);
1325 (info->nfullchunks * (info->spare_size)),
1326 info->last_spare_size);
1328 chip->oob_poi + ecc_off_buf +
1329 (info->nfullchunks * (info->ecc_size)),
1330 info->ecc_size - 2);
1333 info->force_raw = false;
1338 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1339 struct nand_chip *chip, int page)
1341 /* Invalidate page cache */
1344 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
/*
 * nand_chip ->read_byte() hook: return the next byte of the data
 * buffered by the last command, advancing the read cursor.
 */
1348 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1350 struct nand_chip *chip = mtd_to_nand(mtd);
1351 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1352 struct pxa3xx_nand_info *info = host->info_data;
/* Only consume a byte while buffered data remains */
1355 if (info->buf_start < info->buf_count)
1356 /* Has just sent a new command? */
1357 retval = info->data_buff[info->buf_start++];
/*
 * nand_chip ->read_word() hook: return the next 16-bit word from the
 * buffered data; valid only at even (word-aligned) offsets, otherwise
 * 0xFFFF is returned and the cursor is not advanced.
 */
1362 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1364 struct nand_chip *chip = mtd_to_nand(mtd);
1365 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1366 struct pxa3xx_nand_info *info = host->info_data;
1367 u16 retval = 0xFFFF;
/* Require word alignment and data left in the buffer */
1369 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1370 retval = *((u16 *)(info->data_buff+info->buf_start));
1371 info->buf_start += 2;
/*
 * nand_chip ->read_buf() hook: copy up to @len buffered bytes out to
 * @buf, clamped to what the last command actually transferred.
 */
1376 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1378 struct nand_chip *chip = mtd_to_nand(mtd);
1379 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1380 struct pxa3xx_nand_info *info = host->info_data;
/* Never read past the end of the buffered data */
1381 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1383 memcpy(buf, info->data_buff + info->buf_start, real_len);
1384 info->buf_start += real_len;
/*
 * nand_chip ->write_buf() hook: stage up to @len bytes from @buf into
 * the controller data buffer, clamped to the remaining buffer space.
 */
1387 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1388 const uint8_t *buf, int len)
1390 struct nand_chip *chip = mtd_to_nand(mtd);
1391 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1392 struct pxa3xx_nand_info *info = host->info_data;
/* Never write past the end of the staging buffer */
1393 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1395 memcpy(info->data_buff + info->buf_start, buf, real_len);
1396 info->buf_start += real_len;
/*
 * nand_chip ->select_chip() hook. NOTE(review): body not visible in
 * this extract — presumably a no-op, with CS handling done per command
 * elsewhere in the driver; confirm against the full file.
 */
1399 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * nand_chip ->waitfunc() hook: wait for the device to become ready.
 * When a command completion is pending (info->need_wait), poll NDSR /
 * run the IRQ handler until ready or CHIP_DELAY_TIMEOUT elapses; then
 * report the outcome of a write/erase from info->retcode.
 */
1404 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1406 struct nand_chip *chip = mtd_to_nand(mtd);
1407 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1408 struct pxa3xx_nand_info *info = host->info_data;
1410 if (info->need_wait) {
1413 info->need_wait = 0;
/* Polled operation: read status and let the IRQ handler process it */
1419 status = nand_readl(info, NDSR);
1421 pxa3xx_nand_irq(info);
1423 if (info->dev_ready)
/* Bail out if the device never signals ready */
1426 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1427 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1428 return NAND_STATUS_FAIL;
1433 /* pxa3xx_nand_send_command has waited for command complete */
1434 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1435 if (info->retcode == ERR_NONE)
1438 return NAND_STATUS_FAIL;
1441 return NAND_STATUS_READY;
/*
 * Build the default NDCR value used during device identification:
 * all interrupts enabled, optional bus arbiter, READ_ID byte count,
 * and the spare (OOB) area enabled.
 */
1444 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1446 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1448 /* Configure default flash values */
1449 info->reg_ndcr = 0x0; /* enable all interrupts */
1450 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1451 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1452 info->reg_ndcr |= NDCR_SPARE_EN;
/*
 * Finalize NDCR once the flash geometry is known: number of column
 * address cycles, pages per block, and page size (512 vs 2048).
 */
1457 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1459 struct pxa3xx_nand_host *host = info->host[info->cs];
1460 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1461 struct nand_chip *chip = mtd_to_nand(mtd);
1463 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1464 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1465 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
/*
 * "keep-config" path: instead of reprogramming the controller, read
 * back the NDCR and timing registers left by the previous boot stage
 * and derive the configuration (chunk size, arbiter) from them.
 */
1468 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1470 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1471 uint32_t ndcr = nand_readl(info, NDCR);
1473 /* Set an initial chunk size */
1474 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
/* Keep the programmed NDCR but mask interrupt/arbiter control bits */
1475 info->reg_ndcr = ndcr &
1476 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1477 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1478 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1479 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/*
 * Allocate the controller data buffer (info->buf_size bytes).
 * Returns an error when the allocation fails.
 */
1482 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1484 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1485 if (info->data_buff == NULL)
/*
 * Probe whether a NAND chip answers on the current chip select:
 * program conservative defaults (ONFI async timing mode 0), issue a
 * RESET and check the resulting status.
 */
1490 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1492 struct pxa3xx_nand_info *info = host->info_data;
1493 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1494 struct mtd_info *mtd;
1495 struct nand_chip *chip;
1496 const struct nand_sdr_timings *timings;
1499 mtd = nand_to_mtd(&info->host[info->cs]->chip);
1500 chip = mtd_to_nand(mtd);
1502 /* configure default flash values */
1503 info->reg_ndcr = 0x0; /* enable all interrupts */
1504 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1505 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1506 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1508 /* use the common timing to make a try */
1509 timings = onfi_async_timing_mode_to_sdr_timings(0);
1510 if (IS_ERR(timings))
1511 return PTR_ERR(timings);
1513 pxa3xx_nand_set_sdr_timing(host, timings);
/* A RESET that fails indicates no chip on this CS */
1515 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1516 ret = chip->waitfunc(mtd, chip);
1517 if (ret & NAND_STATUS_FAIL)
/*
 * Select the controller ECC configuration (chunk layout, spare/ECC
 * byte counts, nand_ecc_ctrl fields) for the requested @strength,
 * @ecc_stepsize and @page_size combination. Unsupported combinations
 * are rejected with an error message.
 */
1523 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1524 struct nand_ecc_ctrl *ecc,
1525 int strength, int ecc_stepsize, int page_size)
/* 1-bit/512B on 2 KiB pages: single whole-page chunk */
1527 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1528 info->nfullchunks = 1;
1529 info->ntotalchunks = 1;
1530 info->chunk_size = 2048;
1531 info->spare_size = 40;
1532 info->ecc_size = 24;
1533 ecc->mode = NAND_ECC_HW;
/* 1-bit/512B on 512B pages */
1537 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1538 info->nfullchunks = 1;
1539 info->ntotalchunks = 1;
1540 info->chunk_size = 512;
1541 info->spare_size = 8;
1543 ecc->mode = NAND_ECC_HW;
1548 * Required ECC: 4-bit correction per 512 bytes
1549 * Select: 16-bit correction per 2048 bytes
1551 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1553 info->nfullchunks = 1;
1554 info->ntotalchunks = 1;
1555 info->chunk_size = 2048;
1556 info->spare_size = 32;
1557 info->ecc_size = 32;
1558 ecc->mode = NAND_ECC_HW;
1559 ecc->size = info->chunk_size;
1560 ecc->layout = &ecc_layout_2KB_bch4bit;
/* BCH-4, 4 KiB pages: two 2 KiB chunks */
1563 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1565 info->nfullchunks = 2;
1566 info->ntotalchunks = 2;
1567 info->chunk_size = 2048;
1568 info->spare_size = 32;
1569 info->ecc_size = 32;
1570 ecc->mode = NAND_ECC_HW;
1571 ecc->size = info->chunk_size;
1572 ecc->layout = &ecc_layout_4KB_bch4bit;
/* BCH-4, 8 KiB pages: four 2 KiB chunks */
1575 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1577 info->nfullchunks = 4;
1578 info->ntotalchunks = 4;
1579 info->chunk_size = 2048;
1580 info->spare_size = 32;
1581 info->ecc_size = 32;
1582 ecc->mode = NAND_ECC_HW;
1583 ecc->size = info->chunk_size;
1584 ecc->layout = &ecc_layout_8KB_bch4bit;
1588 * Required ECC: 8-bit correction per 512 bytes
1589 * Select: 16-bit correction per 1024 bytes
1591 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1593 info->nfullchunks = 1;
1594 info->ntotalchunks = 2;
1595 info->chunk_size = 1024;
1596 info->spare_size = 0;
1597 info->last_chunk_size = 1024;
1598 info->last_spare_size = 32;
1599 info->ecc_size = 32;
1600 ecc->mode = NAND_ECC_HW;
1601 ecc->size = info->chunk_size;
1602 ecc->layout = &ecc_layout_2KB_bch8bit;
/* BCH-8, 4 KiB pages: 4 full chunks + spare-only trailing chunk */
1605 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1607 info->nfullchunks = 4;
1608 info->ntotalchunks = 5;
1609 info->chunk_size = 1024;
1610 info->spare_size = 0;
1611 info->last_chunk_size = 0;
1612 info->last_spare_size = 64;
1613 info->ecc_size = 32;
1614 ecc->mode = NAND_ECC_HW;
1615 ecc->size = info->chunk_size;
1616 ecc->layout = &ecc_layout_4KB_bch8bit;
/* BCH-8, 8 KiB pages: 8 full chunks + spare-only trailing chunk */
1619 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1621 info->nfullchunks = 8;
1622 info->ntotalchunks = 9;
1623 info->chunk_size = 1024;
1624 info->spare_size = 0;
1625 info->last_chunk_size = 0;
1626 info->last_spare_size = 160;
1627 info->ecc_size = 32;
1628 ecc->mode = NAND_ECC_HW;
1629 ecc->size = info->chunk_size;
1630 ecc->layout = &ecc_layout_8KB_bch8bit;
/* No matching controller configuration */
1634 dev_err(&info->pdev->dev,
1635 "ECC strength %d at page size %d is not supported\n",
1636 strength, page_size);
/*
 * Probe and configure one NAND chip: detect (or keep) the controller
 * configuration, identify the flash, set timings, choose the ECC
 * scheme, compute addressing cycles, size the real data+OOB buffer,
 * and finish with nand_scan_tail().
 */
1643 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1645 struct nand_chip *chip = mtd_to_nand(mtd);
1646 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1647 struct pxa3xx_nand_info *info = host->info_data;
1648 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1650 uint16_t ecc_strength, ecc_step;
/* Either reuse the bootloader's setup or probe from scratch */
1652 if (pdata->keep_config) {
1653 pxa3xx_nand_detect_config(info);
1655 ret = pxa3xx_nand_config_ident(info);
1658 ret = pxa3xx_nand_sensing(host);
1660 dev_info(&info->pdev->dev,
1661 "There is no chip on cs %d!\n",
1667 /* Device detection must be done with ECC disabled */
1668 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1669 nand_writel(info, NDECCCTRL, 0x0);
1671 if (nand_scan_ident(mtd, 1, NULL))
1674 if (!pdata->keep_config) {
1675 ret = pxa3xx_nand_init_timings(host);
1677 dev_err(&info->pdev->dev,
1678 "Failed to set timings: %d\n", ret);
1683 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1685 * We'll use a bad block table stored in-flash and don't
1686 * allow writing the bad block marker to the flash.
1688 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1689 chip->bbt_td = &bbt_main_descr;
1690 chip->bbt_md = &bbt_mirror_descr;
/* ECC requirements: platform data overrides the chip's own numbers */
1693 if (pdata->ecc_strength && pdata->ecc_step_size) {
1694 ecc_strength = pdata->ecc_strength;
1695 ecc_step = pdata->ecc_step_size;
1697 ecc_strength = chip->ecc_strength_ds;
1698 ecc_step = chip->ecc_step_ds;
1701 /* Set default ECC strength requirements on non-ONFI devices */
1702 if (ecc_strength < 1 && ecc_step < 1) {
1707 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1708 ecc_step, mtd->writesize);
1713 * If the page size is bigger than the FIFO size, let's check
1714 * we are given the right variant and then switch to the extended
1715 * (aka split) command handling,
1717 if (mtd->writesize > info->chunk_size) {
1718 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1719 chip->cmdfunc = nand_cmdfunc_extended;
1721 dev_err(&info->pdev->dev,
1722 "unsupported page size on this variant\n");
1727 /* calculate addressing information */
1728 if (mtd->writesize >= 2048)
1729 host->col_addr_cycles = 2;
1731 host->col_addr_cycles = 1;
1733 /* release the initial buffer */
1734 kfree(info->data_buff);
1736 /* allocate the real data + oob buffer */
1737 info->buf_size = mtd->writesize + mtd->oobsize;
1738 ret = pxa3xx_nand_init_buff(info);
1741 info->oob_buff = info->data_buff + mtd->writesize;
/* Chips with more than 65536 pages need a third row address cycle */
1743 if ((mtd->size >> chip->page_shift) > 65536)
1744 host->row_addr_cycles = 3;
1746 host->row_addr_cycles = 2;
1748 if (!pdata->keep_config)
1749 pxa3xx_nand_config_tail(info);
1751 return nand_scan_tail(mtd);
/*
 * Per-CS setup: carve host/chip structures out of the memory allocated
 * after *info, wire up all nand_chip hooks to this driver's
 * implementations, and allocate the initial detection buffer.
 */
1754 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1756 struct pxa3xx_nand_platform_data *pdata;
1757 struct pxa3xx_nand_host *host;
1758 struct nand_chip *chip = NULL;
1759 struct mtd_info *mtd;
1762 pdata = info->pdata;
1763 if (pdata->num_cs <= 0)
1766 info->variant = pxa3xx_nand_get_variant();
1767 for (cs = 0; cs < pdata->num_cs; cs++) {
/* Hosts live in the over-allocation directly after *info */
1768 chip = (struct nand_chip *)
1769 ((u8 *)&info[1] + sizeof(*host) * cs);
1770 mtd = nand_to_mtd(chip);
1771 host = (struct pxa3xx_nand_host *)chip;
1772 info->host[cs] = host;
1774 host->info_data = info;
1775 mtd->owner = THIS_MODULE;
/* Hook up the controller-specific nand_chip operations */
1777 nand_set_controller_data(chip, host);
1778 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1779 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1780 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
1781 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1782 chip->controller = &info->controller;
1783 chip->waitfunc = pxa3xx_nand_waitfunc;
1784 chip->select_chip = pxa3xx_nand_select_chip;
1785 chip->read_word = pxa3xx_nand_read_word;
1786 chip->read_byte = pxa3xx_nand_read_byte;
1787 chip->read_buf = pxa3xx_nand_read_buf;
1788 chip->write_buf = pxa3xx_nand_write_buf;
1789 chip->options |= NAND_NO_SUBPAGE_WRITE;
1790 chip->cmdfunc = nand_cmdfunc;
1793 /* Allocate a buffer to allow flash detection */
1794 info->buf_size = INIT_BUFFER_SIZE;
1795 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1796 if (info->data_buff == NULL) {
1798 goto fail_disable_clk;
1801 /* initialize all interrupts to be disabled */
1802 disable_int(info, NDSR_MASK);
/* Error path: release the detection buffer */
1806 kfree(info->data_buff);
/*
 * Build platform data from the device tree: find the first enabled
 * "marvell,mvebu-pxa3xx-nand" node and read its register base, CS
 * count and the optional arbiter/keep-config/ECC properties.
 */
1811 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1813 struct pxa3xx_nand_platform_data *pdata;
1814 const void *blob = gd->fdt_blob;
1817 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1821 /* Get address decoding nodes from the FDT blob */
1823 node = fdt_node_offset_by_compatible(blob, node,
1824 "marvell,mvebu-pxa3xx-nand");
1828 /* Bypass disabled nodes */
1829 if (!fdtdec_get_is_enabled(blob, node))
1832 /* Get the first enabled NAND controller base address */
1834 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1835 blob, node, "reg", 0, NULL, true);
1837 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1838 if (pdata->num_cs != 1) {
1839 pr_err("pxa3xx driver supports single CS only\n");
1843 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1844 pdata->enable_arbiter = 1;
1846 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1847 pdata->keep_config = 1;
1851 * If these are not set, they will be selected according
1852 * to the detected flash type.
/* 0 means "auto": pxa3xx_nand_scan falls back to the chip's values */
1855 pdata->ecc_strength = fdtdec_get_int(blob, node,
1856 "nand-ecc-strength", 0);
1859 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1860 "nand-ecc-step-size", 0);
1862 info->pdata = pdata;
1864 /* Currently support only a single NAND controller */
1867 } while (node >= 0);
/*
 * Top-level probe: read DT configuration, allocate per-CS resources,
 * then scan and register an MTD device for each chip select.
 */
1872 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1874 struct pxa3xx_nand_platform_data *pdata;
1875 int ret, cs, probe_success;
1877 ret = pxa3xx_nand_probe_dt(info);
1881 pdata = info->pdata;
1883 ret = alloc_nand_resource(info);
1885 dev_err(&pdev->dev, "alloc nand resource failed\n");
1890 for (cs = 0; cs < pdata->num_cs; cs++) {
1891 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1894 * The mtd name matches the one used in 'mtdparts' kernel
1895 * parameter. This name cannot be changed or otherwise
1896 * user's mtd partitions configuration would get broken.
1898 mtd->name = "pxa3xx_nand-0";
1900 ret = pxa3xx_nand_scan(mtd);
1902 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1907 if (nand_register(cs, mtd))
1920 * Main initialization routine
1922 void board_nand_init(void)
1924 struct pxa3xx_nand_info *info;
1925 struct pxa3xx_nand_host *host;
1928 info = kzalloc(sizeof(*info) +
1929 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1934 ret = pxa3xx_nand_probe(info);