/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * SPDX-License-Identifier: GPL-2.0
 */
14 #include <linux/errno.h>
16 #include <asm/arch/cpu.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/nand.h>
19 #include <linux/types.h>
21 #include "pxa3xx_nand.h"
23 DECLARE_GLOBAL_DATA_PTR;
25 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
26 #define CHIP_DELAY_TIMEOUT 200
27 #define NAND_STOP_DELAY 40
28 #define PAGE_CHUNK_SIZE (2048)
/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM. The largest of these is the PARAM command,
 * needing 256 bytes.
 */
35 #define INIT_BUFFER_SIZE 256
37 /* registers and bit definitions */
38 #define NDCR (0x00) /* Control register */
39 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
40 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
41 #define NDSR (0x14) /* Status Register */
42 #define NDPCR (0x18) /* Page Count Register */
43 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
44 #define NDBDR1 (0x20) /* Bad Block Register 1 */
45 #define NDECCCTRL (0x28) /* ECC control */
46 #define NDDB (0x40) /* Data Buffer */
47 #define NDCB0 (0x48) /* Command Buffer0 */
48 #define NDCB1 (0x4C) /* Command Buffer1 */
49 #define NDCB2 (0x50) /* Command Buffer2 */
51 #define NDCR_SPARE_EN (0x1 << 31)
52 #define NDCR_ECC_EN (0x1 << 30)
53 #define NDCR_DMA_EN (0x1 << 29)
54 #define NDCR_ND_RUN (0x1 << 28)
55 #define NDCR_DWIDTH_C (0x1 << 27)
56 #define NDCR_DWIDTH_M (0x1 << 26)
57 #define NDCR_PAGE_SZ (0x1 << 24)
58 #define NDCR_NCSX (0x1 << 23)
59 #define NDCR_ND_MODE (0x3 << 21)
60 #define NDCR_NAND_MODE (0x0)
61 #define NDCR_CLR_PG_CNT (0x1 << 20)
62 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
63 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
64 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
66 #define NDCR_RA_START (0x1 << 15)
67 #define NDCR_PG_PER_BLK (0x1 << 14)
68 #define NDCR_ND_ARB_EN (0x1 << 12)
69 #define NDCR_INT_MASK (0xFFF)
71 #define NDSR_MASK (0xfff)
72 #define NDSR_ERR_CNT_OFF (16)
73 #define NDSR_ERR_CNT_MASK (0x1f)
74 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
75 #define NDSR_RDY (0x1 << 12)
76 #define NDSR_FLASH_RDY (0x1 << 11)
77 #define NDSR_CS0_PAGED (0x1 << 10)
78 #define NDSR_CS1_PAGED (0x1 << 9)
79 #define NDSR_CS0_CMDD (0x1 << 8)
80 #define NDSR_CS1_CMDD (0x1 << 7)
81 #define NDSR_CS0_BBD (0x1 << 6)
82 #define NDSR_CS1_BBD (0x1 << 5)
83 #define NDSR_UNCORERR (0x1 << 4)
84 #define NDSR_CORERR (0x1 << 3)
85 #define NDSR_WRDREQ (0x1 << 2)
86 #define NDSR_RDDREQ (0x1 << 1)
87 #define NDSR_WRCMDREQ (0x1)
89 #define NDCB0_LEN_OVRD (0x1 << 28)
90 #define NDCB0_ST_ROW_EN (0x1 << 26)
91 #define NDCB0_AUTO_RS (0x1 << 25)
92 #define NDCB0_CSEL (0x1 << 24)
93 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
94 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
95 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
96 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
97 #define NDCB0_NC (0x1 << 20)
98 #define NDCB0_DBC (0x1 << 19)
99 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
100 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
101 #define NDCB0_CMD2_MASK (0xff << 8)
102 #define NDCB0_CMD1_MASK (0xff)
103 #define NDCB0_ADDR_CYC_SHIFT (16)
105 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
106 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
107 #define EXT_CMD_TYPE_READ 4 /* Read */
108 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
109 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
110 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
111 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
113 /* macros for registers read/write */
114 #define nand_writel(info, off, val) \
115 writel((val), (info)->mmio_base + (off))
117 #define nand_readl(info, off) \
118 readl((info)->mmio_base + (off))
120 /* error code and state */
143 enum pxa3xx_nand_variant {
144 PXA3XX_NAND_VARIANT_PXA,
145 PXA3XX_NAND_VARIANT_ARMADA370,
148 struct pxa3xx_nand_host {
149 struct nand_chip chip;
150 struct mtd_info *mtd;
153 /* page size of attached chip */
157 /* calculated from pxa3xx_nand_flash data */
158 unsigned int col_addr_cycles;
159 unsigned int row_addr_cycles;
160 size_t read_id_bytes;
164 struct pxa3xx_nand_info {
165 struct nand_hw_control controller;
166 struct pxa3xx_nand_platform_data *pdata;
169 void __iomem *mmio_base;
170 unsigned long mmio_phys;
171 int cmd_complete, dev_ready;
173 unsigned int buf_start;
174 unsigned int buf_count;
175 unsigned int buf_size;
176 unsigned int data_buff_pos;
177 unsigned int oob_buff_pos;
179 unsigned char *data_buff;
180 unsigned char *oob_buff;
182 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
186 * This driver supports NFCv1 (as found in PXA SoC)
187 * and NFCv2 (as found in Armada 370/XP SoC).
189 enum pxa3xx_nand_variant variant;
192 int use_ecc; /* use HW ECC ? */
193 int ecc_bch; /* using BCH ECC? */
194 int use_spare; /* use spare ? */
197 unsigned int data_size; /* data to be read from FIFO */
198 unsigned int chunk_size; /* split commands chunk size */
199 unsigned int oob_size;
200 unsigned int spare_size;
201 unsigned int ecc_size;
202 unsigned int ecc_err_cnt;
203 unsigned int max_bitflips;
206 /* cached register value */
211 /* generated NDCBx register values */
218 static struct pxa3xx_nand_timing timing[] = {
219 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
220 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
221 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
222 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
225 static struct pxa3xx_nand_flash builtin_flash_types[] = {
226 { 0x46ec, 16, 16, &timing[1] },
227 { 0xdaec, 8, 8, &timing[1] },
228 { 0xd7ec, 8, 8, &timing[1] },
229 { 0xa12c, 8, 8, &timing[2] },
230 { 0xb12c, 16, 16, &timing[2] },
231 { 0xdc2c, 8, 8, &timing[2] },
232 { 0xcc2c, 16, 16, &timing[2] },
233 { 0xba20, 16, 16, &timing[3] },
236 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
237 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
239 static struct nand_bbt_descr bbt_main_descr = {
240 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
241 | NAND_BBT_2BIT | NAND_BBT_VERSION,
245 .maxblocks = 8, /* Last 8 blocks in each chip */
246 .pattern = bbt_pattern
249 static struct nand_bbt_descr bbt_mirror_descr = {
250 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
251 | NAND_BBT_2BIT | NAND_BBT_VERSION,
255 .maxblocks = 8, /* Last 8 blocks in each chip */
256 .pattern = bbt_mirror_pattern
259 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
262 32, 33, 34, 35, 36, 37, 38, 39,
263 40, 41, 42, 43, 44, 45, 46, 47,
264 48, 49, 50, 51, 52, 53, 54, 55,
265 56, 57, 58, 59, 60, 61, 62, 63},
266 .oobfree = { {2, 30} }
269 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
272 32, 33, 34, 35, 36, 37, 38, 39,
273 40, 41, 42, 43, 44, 45, 46, 47,
274 48, 49, 50, 51, 52, 53, 54, 55,
275 56, 57, 58, 59, 60, 61, 62, 63,
276 96, 97, 98, 99, 100, 101, 102, 103,
277 104, 105, 106, 107, 108, 109, 110, 111,
278 112, 113, 114, 115, 116, 117, 118, 119,
279 120, 121, 122, 123, 124, 125, 126, 127},
280 /* Bootrom looks in bytes 0 & 5 for bad blocks */
281 .oobfree = { {6, 26}, { 64, 32} }
284 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
287 32, 33, 34, 35, 36, 37, 38, 39,
288 40, 41, 42, 43, 44, 45, 46, 47,
289 48, 49, 50, 51, 52, 53, 54, 55,
290 56, 57, 58, 59, 60, 61, 62, 63},
/*
 * Pack a cycle count into the NDTR0/NDTR1 timing fields, clamping each
 * value to the width of its register field.
 */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/*
 * convert nano-seconds to nand flash controller clock cycles.
 * (clk) is parenthesized for macro hygiene so expression arguments
 * evaluate correctly.
 */
#define ns2cycle(ns, clk)	(int)((ns) * ((clk) / 1000000) / 1000)
308 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
310 /* We only support the Armada 370/XP/38x for now */
311 return PXA3XX_NAND_VARIANT_ARMADA370;
314 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
315 const struct pxa3xx_nand_timing *t)
317 struct pxa3xx_nand_info *info = host->info_data;
318 unsigned long nand_clk = mvebu_get_nand_clock();
319 uint32_t ndtr0, ndtr1;
321 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
322 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
323 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
324 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
325 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
326 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
328 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
329 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
330 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
332 info->ndtr0cs0 = ndtr0;
333 info->ndtr1cs0 = ndtr1;
334 nand_writel(info, NDTR0CS0, ndtr0);
335 nand_writel(info, NDTR1CS0, ndtr1);
338 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
339 const struct nand_sdr_timings *t)
341 struct pxa3xx_nand_info *info = host->info_data;
342 struct nand_chip *chip = &host->chip;
343 unsigned long nand_clk = mvebu_get_nand_clock();
344 uint32_t ndtr0, ndtr1;
346 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
347 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
348 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
349 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
350 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
351 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
352 u32 tR = chip->chip_delay * 1000;
353 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
354 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
356 /* fallback to a default value if tR = 0 */
360 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
361 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
362 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
363 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
364 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
365 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
367 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
368 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
369 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
371 info->ndtr0cs0 = ndtr0;
372 info->ndtr1cs0 = ndtr1;
373 nand_writel(info, NDTR0CS0, ndtr0);
374 nand_writel(info, NDTR1CS0, ndtr1);
377 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
379 const struct nand_sdr_timings *timings;
380 struct nand_chip *chip = &host->chip;
381 struct pxa3xx_nand_info *info = host->info_data;
382 const struct pxa3xx_nand_flash *f = NULL;
383 int mode, id, ntypes, i;
385 mode = onfi_get_async_timing_mode(chip);
386 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
387 ntypes = ARRAY_SIZE(builtin_flash_types);
389 chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);
391 id = chip->read_byte(host->mtd);
392 id |= chip->read_byte(host->mtd) << 0x8;
394 for (i = 0; i < ntypes; i++) {
395 f = &builtin_flash_types[i];
397 if (f->chip_id == id)
402 dev_err(&info->pdev->dev, "Error: timings not found\n");
406 pxa3xx_nand_set_timing(host, f->timing);
408 if (f->flash_width == 16) {
409 info->reg_ndcr |= NDCR_DWIDTH_M;
410 chip->options |= NAND_BUSWIDTH_16;
413 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
415 mode = fls(mode) - 1;
419 timings = onfi_async_timing_mode_to_sdr_timings(mode);
421 return PTR_ERR(timings);
423 pxa3xx_nand_set_sdr_timing(host, timings);
430 * Set the data and OOB size, depending on the selected
431 * spare and ECC configuration.
432 * Only applicable to READ0, READOOB and PAGEPROG commands.
434 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
435 struct mtd_info *mtd)
437 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
439 info->data_size = mtd->writesize;
443 info->oob_size = info->spare_size;
445 info->oob_size += info->ecc_size;
449 * NOTE: it is a must to set ND_RUN first, then write
450 * command buffer, otherwise, it does not work.
451 * We enable all the interrupt at the same time, and
452 * let pxa3xx_nand_irq to handle all logic.
454 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
458 ndcr = info->reg_ndcr;
463 nand_writel(info, NDECCCTRL, 0x1);
465 ndcr &= ~NDCR_ECC_EN;
467 nand_writel(info, NDECCCTRL, 0x0);
470 ndcr &= ~NDCR_DMA_EN;
473 ndcr |= NDCR_SPARE_EN;
475 ndcr &= ~NDCR_SPARE_EN;
479 /* clear status bits and run */
480 nand_writel(info, NDCR, 0);
481 nand_writel(info, NDSR, NDSR_MASK);
482 nand_writel(info, NDCR, ndcr);
485 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
489 ndcr = nand_readl(info, NDCR);
490 nand_writel(info, NDCR, ndcr | int_mask);
493 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
499 * According to the datasheet, when reading from NDDB
500 * with BCH enabled, after each 32 bytes reads, we
501 * have to make sure that the NDSR.RDDREQ bit is set.
503 * Drain the FIFO 8 32 bits reads at a time, and skip
504 * the polling on the last read.
507 readsl(info->mmio_base + NDDB, data, 8);
510 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
511 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
512 dev_err(&info->pdev->dev,
513 "Timeout on RDDREQ while draining the FIFO\n");
523 readsl(info->mmio_base + NDDB, data, len);
526 static void handle_data_pio(struct pxa3xx_nand_info *info)
528 unsigned int do_bytes = min(info->data_size, info->chunk_size);
530 switch (info->state) {
531 case STATE_PIO_WRITING:
532 writesl(info->mmio_base + NDDB,
533 info->data_buff + info->data_buff_pos,
534 DIV_ROUND_UP(do_bytes, 4));
536 if (info->oob_size > 0)
537 writesl(info->mmio_base + NDDB,
538 info->oob_buff + info->oob_buff_pos,
539 DIV_ROUND_UP(info->oob_size, 4));
541 case STATE_PIO_READING:
543 info->data_buff + info->data_buff_pos,
544 DIV_ROUND_UP(do_bytes, 4));
546 if (info->oob_size > 0)
548 info->oob_buff + info->oob_buff_pos,
549 DIV_ROUND_UP(info->oob_size, 4));
552 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
557 /* Update buffer pointers for multi-page read/write */
558 info->data_buff_pos += do_bytes;
559 info->oob_buff_pos += info->oob_size;
560 info->data_size -= do_bytes;
563 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
565 handle_data_pio(info);
567 info->state = STATE_CMD_DONE;
568 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Polled "interrupt" handler: decode NDSR, record ECC results, run the
 * PIO data phase, feed the queued NDCBx words to the controller when it
 * requests a command, then acknowledge the handled status bits.
 * NOTE(review): interior lines of this function are elided in this
 * extraction (cs!=0 branch, completion-flag updates, return path);
 * comments below annotate only the visible code.
 */
571 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
573 unsigned int status, is_completed = 0, is_ready = 0;
574 unsigned int ready, cmd_done;
575 irqreturn_t ret = IRQ_HANDLED;
/* status bits differ per chip select; CS0 variant shown here */
578 ready = NDSR_FLASH_RDY;
579 cmd_done = NDSR_CS0_CMDD;
582 cmd_done = NDSR_CS1_CMDD;
585 status = nand_readl(info, NDSR);
/* latch ECC outcome for the read-page path */
587 if (status & NDSR_UNCORERR)
588 info->retcode = ERR_UNCORERR;
589 if (status & NDSR_CORERR) {
590 info->retcode = ERR_CORERR;
/* only NFCv2 (Armada 370) reports a real corrected-bit count */
591 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
593 info->ecc_err_cnt = NDSR_ERR_CNT(status);
595 info->ecc_err_cnt = 1;
598 * Each chunk composing a page is corrected independently,
599 * and we need to store maximum number of corrected bitflips
600 * to return it to the MTD layer in ecc.read_page().
602 info->max_bitflips = max_t(unsigned int,
/* data-request: run the PIO transfer inline (no real IRQ thread) */
606 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
607 info->state = (status & NDSR_RDDREQ) ?
608 STATE_PIO_READING : STATE_PIO_WRITING;
609 /* Call the IRQ thread in U-Boot directly */
610 pxa3xx_nand_irq_thread(info);
613 if (status & cmd_done) {
614 info->state = STATE_CMD_DONE;
617 if (status & ready) {
618 info->state = STATE_READY;
622 if (status & NDSR_WRCMDREQ) {
623 nand_writel(info, NDSR, NDSR_WRCMDREQ);
624 status &= ~NDSR_WRCMDREQ;
625 info->state = STATE_CMD_HANDLE;
628 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
629 * must be loaded by writing directly either 12 or 16
630 * bytes directly to NDCB0, four bytes at a time.
632 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
633 * but each NDCBx register can be read.
635 nand_writel(info, NDCB0, info->ndcb0);
636 nand_writel(info, NDCB0, info->ndcb1);
637 nand_writel(info, NDCB0, info->ndcb2);
639 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
640 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
641 nand_writel(info, NDCB0, info->ndcb3);
644 /* clear NDSR to let the controller exit the IRQ */
645 nand_writel(info, NDSR, status);
647 info->cmd_complete = 1;
/*
 * Return 1 if the first @len bytes of @buf are all 0xFF (i.e. the page
 * looks erased), 0 otherwise.  A zero-length buffer counts as blank.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;

	return 1;
}
662 static void set_command_address(struct pxa3xx_nand_info *info,
663 unsigned int page_size, uint16_t column, int page_addr)
665 /* small page addr setting */
666 if (page_size < PAGE_CHUNK_SIZE) {
667 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
672 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
675 if (page_addr & 0xFF0000)
676 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
682 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
684 struct pxa3xx_nand_host *host = info->host[info->cs];
685 struct mtd_info *mtd = host->mtd;
687 /* reset data and oob column point to handle data */
691 info->data_buff_pos = 0;
692 info->oob_buff_pos = 0;
695 info->retcode = ERR_NONE;
696 info->ecc_err_cnt = 0;
702 case NAND_CMD_PAGEPROG:
704 case NAND_CMD_READOOB:
705 pxa3xx_set_datasize(info, mtd);
717 * If we are about to issue a read command, or about to set
718 * the write address, then clean the data buffer.
720 if (command == NAND_CMD_READ0 ||
721 command == NAND_CMD_READOOB ||
722 command == NAND_CMD_SEQIN) {
723 info->buf_count = mtd->writesize + mtd->oobsize;
724 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate a NAND command into the NDCB0..NDCB3 command-buffer words.
 * Returns non-zero (exec_cmd) when the prepared command must actually
 * be started on the controller, 0 when nothing needs to be issued.
 * NOTE(review): this extraction is missing interior lines (case labels,
 * braces, several field assignments); comments annotate visible code only.
 */
728 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
729 int ext_cmd_type, uint16_t column, int page_addr)
731 int addr_cycle, exec_cmd;
732 struct pxa3xx_nand_host *host;
733 struct mtd_info *mtd;
735 host = info->host[info->cs];
/* chip-select bit is encoded directly into NDCB0 */
741 info->ndcb0 = NDCB0_CSEL;
745 if (command == NAND_CMD_SEQIN)
748 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
749 + host->col_addr_cycles);
752 case NAND_CMD_READOOB:
754 info->buf_start = column;
755 info->ndcb0 |= NDCB0_CMD_TYPE(0)
/* OOB data lands after the page data in the shared buffer */
759 if (command == NAND_CMD_READOOB)
760 info->buf_start += mtd->writesize;
763 * Multiple page read needs an 'extended command type' field,
764 * which is either naked-read or last-read according to the
767 if (mtd->writesize == PAGE_CHUNK_SIZE) {
768 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
769 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
770 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
772 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
/* NDCB3 carries the explicit transfer length (chunk + spare) */
773 info->ndcb3 = info->chunk_size +
777 set_command_address(info, mtd->writesize, column, page_addr);
782 info->buf_start = column;
783 set_command_address(info, mtd->writesize, 0, page_addr);
786 * Multiple page programming needs to execute the initial
787 * SEQIN command that sets the page address.
789 if (mtd->writesize > PAGE_CHUNK_SIZE) {
790 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
791 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
794 /* No data transfer in this case */
800 case NAND_CMD_PAGEPROG:
/* skip programming pages that are entirely 0xFF */
801 if (is_buf_blank(info->data_buff,
802 (mtd->writesize + mtd->oobsize))) {
807 /* Second command setting for large pages */
808 if (mtd->writesize > PAGE_CHUNK_SIZE) {
810 * Multiple page write uses the 'extended command'
811 * field. This can be used to issue a command dispatch
812 * or a naked-write depending on the current stage.
814 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
816 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
817 info->ndcb3 = info->chunk_size +
821 * This is the command dispatch that completes a chunked
822 * page program operation.
824 if (info->data_size == 0) {
825 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
826 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
833 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
837 | (NAND_CMD_PAGEPROG << 8)
/* presumably NAND_CMD_PARAM: 256-byte ONFI parameter page -- TODO confirm */
844 info->buf_count = 256;
845 info->ndcb0 |= NDCB0_CMD_TYPE(0)
849 info->ndcb1 = (column & 0xFF);
851 info->data_size = 256;
854 case NAND_CMD_READID:
855 info->buf_count = host->read_id_bytes;
856 info->ndcb0 |= NDCB0_CMD_TYPE(3)
859 info->ndcb1 = (column & 0xFF);
863 case NAND_CMD_STATUS:
865 info->ndcb0 |= NDCB0_CMD_TYPE(4)
/* erase: two-cycle command, row address in NDCB1 */
872 case NAND_CMD_ERASE1:
873 info->ndcb0 |= NDCB0_CMD_TYPE(2)
877 | (NAND_CMD_ERASE2 << 8)
879 info->ndcb1 = page_addr;
884 info->ndcb0 |= NDCB0_CMD_TYPE(5)
/* ERASE2 is folded into ERASE1 above, nothing to issue here */
889 case NAND_CMD_ERASE2:
895 dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * Legacy (non-chunked) cmdfunc for pages that fit the controller FIFO:
 * prepare the NDCBx words, start the controller, then poll NDSR and
 * dispatch into pxa3xx_nand_irq() until the command completes or the
 * CHIP_DELAY_TIMEOUT expires.
 * NOTE(review): interior lines (exec_cmd guard, timer setup, closing
 * braces) are elided in this extraction; comments annotate visible code.
 */
903 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
904 int column, int page_addr)
906 struct nand_chip *chip = mtd_to_nand(mtd);
907 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
908 struct pxa3xx_nand_info *info = host->info_data;
912 * if this is a x16 device ,then convert the input
913 * "byte" address into a "word" address appropriate
914 * for indexing a word-oriented device
916 if (info->reg_ndcr & NDCR_DWIDTH_M)
920 * There may be different NAND chip hooked to
921 * different chip select, so check whether
922 * chip select has been changed, if yes, reset the timing
924 if (info->cs != host->cs) {
926 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
927 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
930 prepare_start_command(info, command);
932 info->state = STATE_PREPARED;
933 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
938 info->cmd_complete = 0;
941 pxa3xx_nand_start(info);
/* poll loop: feed every raised status into the irq handler */
947 status = nand_readl(info, NDSR);
949 pxa3xx_nand_irq(info);
951 if (info->cmd_complete)
954 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
955 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
960 info->state = STATE_IDLE;
/*
 * Extended cmdfunc for NFCv2 when the page is larger than the 2 KiB
 * FIFO: a single MTD command is split into a sequence of chunked
 * controller commands, selecting the 'extended command type' for each
 * stage (monolithic, naked, last, dispatch) until the whole page has
 * been transferred.
 * NOTE(review): interior lines (loop/brace structure, break/return
 * statements) are elided in this extraction; comments annotate visible
 * code only.
 */
963 static void nand_cmdfunc_extended(struct mtd_info *mtd,
964 const unsigned command,
965 int column, int page_addr)
967 struct nand_chip *chip = mtd_to_nand(mtd);
968 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
969 struct pxa3xx_nand_info *info = host->info_data;
970 int exec_cmd, ext_cmd_type;
973 * if this is a x16 device then convert the input
974 * "byte" address into a "word" address appropriate
975 * for indexing a word-oriented device
977 if (info->reg_ndcr & NDCR_DWIDTH_M)
981 * There may be different NAND chip hooked to
982 * different chip select, so check whether
983 * chip select has been changed, if yes, reset the timing
985 if (info->cs != host->cs) {
987 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
988 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
991 /* Select the extended command for the first command */
994 case NAND_CMD_READOOB:
995 ext_cmd_type = EXT_CMD_TYPE_MONO;
998 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1000 case NAND_CMD_PAGEPROG:
1001 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1008 prepare_start_command(info, command);
1011 * Prepare the "is ready" completion before starting a command
1012 * transaction sequence. If the command is not executed the
1013 * completion will be completed, see below.
1015 * We can do that inside the loop because the command variable
1016 * is invariant and thus so is the exec_cmd.
1018 info->need_wait = 1;
1019 info->dev_ready = 0;
1024 info->state = STATE_PREPARED;
1025 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
/* nothing to issue: mark the device ready immediately */
1028 info->need_wait = 0;
1029 info->dev_ready = 1;
1033 info->cmd_complete = 0;
1034 pxa3xx_nand_start(info);
/* poll loop: feed every raised status into the irq handler */
1040 status = nand_readl(info, NDSR);
1042 pxa3xx_nand_irq(info);
1044 if (info->cmd_complete)
1047 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1048 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1053 /* Check if the sequence is complete */
1054 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1058 * After a splitted program command sequence has issued
1059 * the command dispatch, the command sequence is complete.
1061 if (info->data_size == 0 &&
1062 command == NAND_CMD_PAGEPROG &&
1063 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1066 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1067 /* Last read: issue a 'last naked read' */
1068 if (info->data_size == info->chunk_size)
1069 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1071 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1074 * If a splitted program command has no more data to transfer,
1075 * the command dispatch must be issued to complete.
1077 } else if (command == NAND_CMD_PAGEPROG &&
1078 info->data_size == 0) {
1079 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1083 info->state = STATE_IDLE;
1086 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1087 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1090 chip->write_buf(mtd, buf, mtd->writesize);
1091 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1096 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1097 struct nand_chip *chip, uint8_t *buf, int oob_required,
1100 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1101 struct pxa3xx_nand_info *info = host->info_data;
1103 chip->read_buf(mtd, buf, mtd->writesize);
1104 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1106 if (info->retcode == ERR_CORERR && info->use_ecc) {
1107 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1109 } else if (info->retcode == ERR_UNCORERR) {
1111 * for blank page (all 0xff), HW will calculate its ECC as
1112 * 0, which is different from the ECC information within
1113 * OOB, ignore such uncorrectable errors
1115 if (is_buf_blank(buf, mtd->writesize))
1116 info->retcode = ERR_NONE;
1118 mtd->ecc_stats.failed++;
1121 return info->max_bitflips;
1124 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1126 struct nand_chip *chip = mtd_to_nand(mtd);
1127 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1128 struct pxa3xx_nand_info *info = host->info_data;
1131 if (info->buf_start < info->buf_count)
1132 /* Has just send a new command? */
1133 retval = info->data_buff[info->buf_start++];
1138 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1140 struct nand_chip *chip = mtd_to_nand(mtd);
1141 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1142 struct pxa3xx_nand_info *info = host->info_data;
1143 u16 retval = 0xFFFF;
1145 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1146 retval = *((u16 *)(info->data_buff+info->buf_start));
1147 info->buf_start += 2;
1152 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1154 struct nand_chip *chip = mtd_to_nand(mtd);
1155 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1156 struct pxa3xx_nand_info *info = host->info_data;
1157 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1159 memcpy(buf, info->data_buff + info->buf_start, real_len);
1160 info->buf_start += real_len;
1163 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1164 const uint8_t *buf, int len)
1166 struct nand_chip *chip = mtd_to_nand(mtd);
1167 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1168 struct pxa3xx_nand_info *info = host->info_data;
1169 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1171 memcpy(info->data_buff + info->buf_start, buf, real_len);
1172 info->buf_start += real_len;
/*
 * select_chip hook: intentionally a no-op.  Chip selection is handled
 * in the cmdfunc path (the NDCB0_CSEL bit and the info->cs/host->cs
 * timing restore), so there is nothing to do here.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1180 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1182 struct nand_chip *chip = mtd_to_nand(mtd);
1183 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1184 struct pxa3xx_nand_info *info = host->info_data;
1186 if (info->need_wait) {
1189 info->need_wait = 0;
1195 status = nand_readl(info, NDSR);
1197 pxa3xx_nand_irq(info);
1199 if (info->dev_ready)
1202 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1203 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1204 return NAND_STATUS_FAIL;
1209 /* pxa3xx_nand_send_command has waited for command complete */
1210 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1211 if (info->retcode == ERR_NONE)
1214 return NAND_STATUS_FAIL;
1217 return NAND_STATUS_READY;
1220 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
1222 struct pxa3xx_nand_host *host = info->host[info->cs];
1223 struct mtd_info *mtd = host->mtd;
1224 struct nand_chip *chip = mtd_to_nand(mtd);
1226 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1227 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1228 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1233 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1236 * We set 0 by hard coding here, for we don't support keep_config
1237 * when there is more than one chip attached to the controller
1239 struct pxa3xx_nand_host *host = info->host[0];
1240 uint32_t ndcr = nand_readl(info, NDCR);
1242 if (ndcr & NDCR_PAGE_SZ) {
1243 /* Controller's FIFO size */
1244 info->chunk_size = 2048;
1245 host->read_id_bytes = 4;
1247 info->chunk_size = 512;
1248 host->read_id_bytes = 2;
1251 /* Set an initial chunk size */
1252 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1253 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1254 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1258 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1260 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1261 if (info->data_buff == NULL)
1266 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1268 struct pxa3xx_nand_info *info = host->info_data;
1269 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1270 struct mtd_info *mtd;
1271 struct nand_chip *chip;
1272 const struct nand_sdr_timings *timings;
1275 mtd = info->host[info->cs]->mtd;
1276 chip = mtd_to_nand(mtd);
1278 /* configure default flash values */
1279 info->reg_ndcr = 0x0; /* enable all interrupts */
1280 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1281 info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1282 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1284 /* use the common timing to make a try */
1285 timings = onfi_async_timing_mode_to_sdr_timings(0);
1286 if (IS_ERR(timings))
1287 return PTR_ERR(timings);
1289 pxa3xx_nand_set_sdr_timing(host, timings);
1291 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1292 ret = chip->waitfunc(mtd, chip);
1293 if (ret & NAND_STATUS_FAIL)
1299 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1300 struct nand_ecc_ctrl *ecc,
1301 int strength, int ecc_stepsize, int page_size)
1303 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1304 info->chunk_size = 2048;
1305 info->spare_size = 40;
1306 info->ecc_size = 24;
1307 ecc->mode = NAND_ECC_HW;
1311 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1312 info->chunk_size = 512;
1313 info->spare_size = 8;
1315 ecc->mode = NAND_ECC_HW;
1320 * Required ECC: 4-bit correction per 512 bytes
1321 * Select: 16-bit correction per 2048 bytes
1323 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1325 info->chunk_size = 2048;
1326 info->spare_size = 32;
1327 info->ecc_size = 32;
1328 ecc->mode = NAND_ECC_HW;
1329 ecc->size = info->chunk_size;
1330 ecc->layout = &ecc_layout_2KB_bch4bit;
1333 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1335 info->chunk_size = 2048;
1336 info->spare_size = 32;
1337 info->ecc_size = 32;
1338 ecc->mode = NAND_ECC_HW;
1339 ecc->size = info->chunk_size;
1340 ecc->layout = &ecc_layout_4KB_bch4bit;
1344 * Required ECC: 8-bit correction per 512 bytes
1345 * Select: 16-bit correction per 1024 bytes
1347 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1349 info->chunk_size = 1024;
1350 info->spare_size = 0;
1351 info->ecc_size = 32;
1352 ecc->mode = NAND_ECC_HW;
1353 ecc->size = info->chunk_size;
1354 ecc->layout = &ecc_layout_4KB_bch8bit;
1357 dev_err(&info->pdev->dev,
1358 "ECC strength %d at page size %d is not supported\n",
1359 strength, page_size);
/*
 * pxa3xx_nand_scan() - detect the NAND chip behind this mtd and configure
 * the controller for it: ECC scheme, timings, command handler and address
 * cycle counts.  Finishes MTD setup via nand_scan_tail().
 *
 * NOTE(review): this excerpt is elided -- several original lines (braces,
 * gotos, early returns) are missing between the numbered statements, so the
 * comments below describe only the statements that are visible.
 */
1366 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1368 struct nand_chip *chip = mtd_to_nand(mtd);
1369 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1370 struct pxa3xx_nand_info *info = host->info_data;
1371 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1373 uint16_t ecc_strength, ecc_step;
/*
 * When asked to keep the firmware/bootloader setup, try to read the
 * configuration back from the controller registers instead of probing.
 * (Taken-branch body is elided here -- presumably it skips the sensing
 * step below; confirm against the full source.)
 */
1375 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1378 /* Set a default chunk size */
1379 info->chunk_size = 512;
/* Check that a chip actually answers on this chip select */
1381 ret = pxa3xx_nand_sensing(host);
1383 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1390 /* Device detection must be done with ECC disabled */
1391 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1392 nand_writel(info, NDECCCTRL, 0x0);
/* Standard MTD identification (READID/ONFI) of a single chip */
1394 if (nand_scan_ident(mtd, 1, NULL))
/* Program controller timings unless told to keep the existing setup */
1397 if (!pdata->keep_config) {
1398 ret = pxa3xx_nand_init_timings(host);
1400 dev_err(&info->pdev->dev,
1401 "Failed to set timings: %d\n", ret);
1406 ret = pxa3xx_nand_config_flash(info);
1410 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1412 * We'll use a bad block table stored in-flash and don't
1413 * allow writing the bad block marker to the flash.
1415 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1416 chip->bbt_td = &bbt_main_descr;
1417 chip->bbt_md = &bbt_mirror_descr;
1421 * If the page size is bigger than the FIFO size, let's check
1422 * we are given the right variant and then switch to the extended
1423 * (aka split) command handling.
1425 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1426 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1427 chip->cmdfunc = nand_cmdfunc_extended;
1429 dev_err(&info->pdev->dev,
1430 "unsupported page size on this variant\n");
/* Explicit platform-data ECC settings win over the chip-reported ones */
1435 if (pdata->ecc_strength && pdata->ecc_step_size) {
1436 ecc_strength = pdata->ecc_strength;
1437 ecc_step = pdata->ecc_step_size;
1439 ecc_strength = chip->ecc_strength_ds;
1440 ecc_step = chip->ecc_step_ds;
1443 /* Set default ECC strength requirements on non-ONFI devices */
1444 if (ecc_strength < 1 && ecc_step < 1) {
1449 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1450 ecc_step, mtd->writesize);
1454 /* calculate addressing information */
/* Large-page (>= 2 KiB) devices need two column-address cycles */
1455 if (mtd->writesize >= 2048)
1456 host->col_addr_cycles = 2;
1458 host->col_addr_cycles = 1;
1460 /* release the initial buffer */
1461 kfree(info->data_buff);
1463 /* allocate the real data + oob buffer */
1464 info->buf_size = mtd->writesize + mtd->oobsize;
1465 ret = pxa3xx_nand_init_buff(info);
1468 info->oob_buff = info->data_buff + mtd->writesize;
/* Devices with more than 64K pages need a third row-address cycle */
1470 if ((mtd->size >> chip->page_shift) > 65536)
1471 host->row_addr_cycles = 3;
1473 host->row_addr_cycles = 2;
1474 return nand_scan_tail(mtd);
/*
 * alloc_nand_resource() - set up the per-chip-select host/chip structures
 * that live in the trailing storage allocated right after *info*, wire up
 * the nand_chip callbacks, and allocate the small initial buffer used for
 * flash detection.
 *
 * NOTE(review): elided excerpt -- error returns and some statements between
 * the numbered lines are not visible here.
 */
1477 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1479 struct pxa3xx_nand_platform_data *pdata;
1480 struct pxa3xx_nand_host *host;
1481 struct nand_chip *chip = NULL;
1482 struct mtd_info *mtd;
1485 pdata = info->pdata;
/* At least one chip select is required (bail-out body elided) */
1486 if (pdata->num_cs <= 0)
1489 info->variant = pxa3xx_nand_get_variant();
1490 for (cs = 0; cs < pdata->num_cs; cs++) {
/*
 * Hosts are packed in the memory following *info*; casting the same
 * address to both nand_chip and pxa3xx_nand_host assumes the host's
 * nand_chip is its first member -- NOTE(review): confirm that layout
 * in pxa3xx_nand.h.
 */
1491 chip = (struct nand_chip *)
1492 ((u8 *)&info[1] + sizeof(*host) * cs);
1493 mtd = nand_to_mtd(chip);
1494 host = (struct pxa3xx_nand_host *)chip;
1495 info->host[cs] = host;
1498 host->info_data = info;
/* Number of ID bytes read back during device detection */
1499 host->read_id_bytes = 4;
1500 mtd->owner = THIS_MODULE;
/* Hook this driver's HW-ECC page ops and low-level accessors */
1502 nand_set_controller_data(chip, host);
1503 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1504 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1505 chip->controller = &info->controller;
1506 chip->waitfunc = pxa3xx_nand_waitfunc;
1507 chip->select_chip = pxa3xx_nand_select_chip;
1508 chip->read_word = pxa3xx_nand_read_word;
1509 chip->read_byte = pxa3xx_nand_read_byte;
1510 chip->read_buf = pxa3xx_nand_read_buf;
1511 chip->write_buf = pxa3xx_nand_write_buf;
1512 chip->options |= NAND_NO_SUBPAGE_WRITE;
1513 chip->cmdfunc = nand_cmdfunc;
1516 /* Allocate a buffer to allow flash detection */
1517 info->buf_size = INIT_BUFFER_SIZE;
1518 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1519 if (info->data_buff == NULL) {
1521 goto fail_disable_clk;
1524 /* initialize all interrupts to be disabled */
1525 disable_int(info, NDSR_MASK);
/* Error path: free the detection buffer allocated above */
1529 kfree(info->data_buff);
/*
 * pxa3xx_nand_probe_dt() - build the platform data from the device tree:
 * locate the first enabled "marvell,mvebu-pxa3xx-nand" node and read its
 * base address, chip-select count, arbiter/keep-config flags and optional
 * ECC parameters into a freshly kzalloc'ed pdata stored in info->pdata.
 *
 * NOTE(review): elided excerpt -- the do/while opening, error returns and
 * the lvalue of the base-address assignment at 1557 are not visible here.
 */
1534 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1536 struct pxa3xx_nand_platform_data *pdata;
1537 const void *blob = gd->fdt_blob;
1540 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1544 /* Get address decoding nodes from the FDT blob */
1546 node = fdt_node_offset_by_compatible(blob, node,
1547 "marvell,mvebu-pxa3xx-nand");
1551 /* Bypass disabled nodes */
1552 if (!fdtdec_get_is_enabled(blob, node))
1555 /* Get the first enabled NAND controller base address */
1557 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1558 blob, node, "reg", 0, NULL, true);
/* This driver handles exactly one chip select; "num-cs" defaults to 1 */
1560 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1561 if (pdata->num_cs != 1) {
1562 pr_err("pxa3xx driver supports single CS only\n");
/* Optional boolean DT switches, both default to off */
1566 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1567 pdata->enable_arbiter = 1;
1569 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1570 pdata->keep_config = 1;
1574 * If these are not set, they will be selected according
1575 * to the detected flash type.
1578 pdata->ecc_strength = fdtdec_get_int(blob, node,
1579 "nand-ecc-strength", 0);
1582 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1583 "nand-ecc-step-size", 0);
1585 info->pdata = pdata;
1587 /* Currently support only a single NAND controller */
1590 } while (node >= 0);
/*
 * pxa3xx_nand_probe() - top-level probe: parse the device tree, allocate
 * the controller/host resources, then scan and register each chip select.
 *
 * NOTE(review): elided excerpt -- error returns and the use of
 * 'probe_success' are not fully visible.  Also, 'pdev' is not declared in
 * this function; the dev_err()/dev_info() calls below can only build if
 * those macros discard their device argument -- verify against the full
 * source.
 */
1595 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1597 struct pxa3xx_nand_platform_data *pdata;
1598 int ret, cs, probe_success;
1600 ret = pxa3xx_nand_probe_dt(info);
1604 pdata = info->pdata;
1606 ret = alloc_nand_resource(info);
1608 dev_err(&pdev->dev, "alloc nand resource failed\n");
1613 for (cs = 0; cs < pdata->num_cs; cs++) {
1614 struct mtd_info *mtd = info->host[cs]->mtd;
1617 * The mtd name matches the one used in the 'mtdparts' kernel
1618 * parameter. This name cannot be changed, otherwise the
1619 * user's mtd partition configuration would break.
1621 mtd->name = "pxa3xx_nand-0";
/* Identify the chip and finish MTD setup; logs and skips the CS on failure */
1623 ret = pxa3xx_nand_scan(mtd);
1625 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
/* Hand the device over to the U-Boot NAND core */
1630 if (nand_register(cs, mtd))
1643 * Main initialization routine
1645 void board_nand_init(void)
1647 struct pxa3xx_nand_info *info;
1648 struct pxa3xx_nand_host *host;
1651 info = kzalloc(sizeof(*info) +
1652 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1657 ret = pxa3xx_nand_probe(info);