1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/mtd/nand/raw/pxa3xx_nand.c
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/err.h>
16 #include <linux/errno.h>
18 #include <asm/arch/cpu.h>
19 #include <linux/mtd/mtd.h>
20 #include <linux/mtd/rawnand.h>
21 #include <linux/types.h>
23 #include "pxa3xx_nand.h"
25 DECLARE_GLOBAL_DATA_PTR;
27 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
28 #define CHIP_DELAY_TIMEOUT 200
29 #define NAND_STOP_DELAY 40
32 * Define a buffer size for the initial command that detects the flash device:
33 * STATUS, READID and PARAM.
34 * ONFI param page is 256 bytes, and there are three redundant copies
35 * to be read. JEDEC param page is 512 bytes, and there are also three
36 * redundant copies to be read.
37 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
39 #define INIT_BUFFER_SIZE 2048
41 /* registers and bit definitions */
42 #define NDCR (0x00) /* Control register */
43 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
44 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
45 #define NDSR (0x14) /* Status Register */
46 #define NDPCR (0x18) /* Page Count Register */
47 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
48 #define NDBDR1 (0x20) /* Bad Block Register 1 */
49 #define NDECCCTRL (0x28) /* ECC control */
50 #define NDDB (0x40) /* Data Buffer */
51 #define NDCB0 (0x48) /* Command Buffer0 */
52 #define NDCB1 (0x4C) /* Command Buffer1 */
53 #define NDCB2 (0x50) /* Command Buffer2 */
55 #define NDCR_SPARE_EN (0x1 << 31)
56 #define NDCR_ECC_EN (0x1 << 30)
57 #define NDCR_DMA_EN (0x1 << 29)
58 #define NDCR_ND_RUN (0x1 << 28)
59 #define NDCR_DWIDTH_C (0x1 << 27)
60 #define NDCR_DWIDTH_M (0x1 << 26)
61 #define NDCR_PAGE_SZ (0x1 << 24)
62 #define NDCR_NCSX (0x1 << 23)
63 #define NDCR_ND_MODE (0x3 << 21)
64 #define NDCR_NAND_MODE (0x0)
65 #define NDCR_CLR_PG_CNT (0x1 << 20)
66 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
67 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
68 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
70 #define NDCR_RA_START (0x1 << 15)
71 #define NDCR_PG_PER_BLK (0x1 << 14)
72 #define NDCR_ND_ARB_EN (0x1 << 12)
73 #define NDCR_INT_MASK (0xFFF)
75 #define NDSR_MASK (0xfff)
76 #define NDSR_ERR_CNT_OFF (16)
77 #define NDSR_ERR_CNT_MASK (0x1f)
78 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
79 #define NDSR_RDY (0x1 << 12)
80 #define NDSR_FLASH_RDY (0x1 << 11)
81 #define NDSR_CS0_PAGED (0x1 << 10)
82 #define NDSR_CS1_PAGED (0x1 << 9)
83 #define NDSR_CS0_CMDD (0x1 << 8)
84 #define NDSR_CS1_CMDD (0x1 << 7)
85 #define NDSR_CS0_BBD (0x1 << 6)
86 #define NDSR_CS1_BBD (0x1 << 5)
87 #define NDSR_UNCORERR (0x1 << 4)
88 #define NDSR_CORERR (0x1 << 3)
89 #define NDSR_WRDREQ (0x1 << 2)
90 #define NDSR_RDDREQ (0x1 << 1)
91 #define NDSR_WRCMDREQ (0x1)
93 #define NDCB0_LEN_OVRD (0x1 << 28)
94 #define NDCB0_ST_ROW_EN (0x1 << 26)
95 #define NDCB0_AUTO_RS (0x1 << 25)
96 #define NDCB0_CSEL (0x1 << 24)
97 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
98 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
99 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
100 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
101 #define NDCB0_NC (0x1 << 20)
102 #define NDCB0_DBC (0x1 << 19)
103 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
104 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
105 #define NDCB0_CMD2_MASK (0xff << 8)
106 #define NDCB0_CMD1_MASK (0xff)
107 #define NDCB0_ADDR_CYC_SHIFT (16)
109 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
110 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
111 #define EXT_CMD_TYPE_READ 4 /* Read */
112 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
113 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
114 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
115 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
118 * This should be large enough to read 'ONFI' and 'JEDEC'.
119 * Let's use 7 bytes, which is the maximum ID count supported
120 * by the controller (see NDCR_RD_ID_CNT_MASK).
122 #define READ_ID_BYTES 7
124 /* macros for registers read/write */
125 #define nand_writel(info, off, val) \
126 writel((val), (info)->mmio_base + (off))
128 #define nand_readl(info, off) \
129 readl((info)->mmio_base + (off))
131 /* error code and state */
/*
 * The controller exists in two flavours; this driver only instantiates
 * the Armada 370/XP one (see pxa3xx_nand_get_variant()).
 */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
159 struct pxa3xx_nand_host {
160 struct nand_chip chip;
163 /* page size of attached chip */
167 /* calculated from pxa3xx_nand_flash data */
168 unsigned int col_addr_cycles;
169 unsigned int row_addr_cycles;
/*
 * Controller-wide state.
 *
 * NOTE(review): this listing was sampled and several members present in
 * the original file are missing here (at least the device handle used by
 * dev_err(&info->pdev->dev, ...), the command state machine field, the
 * retcode/need_wait flags, the cached NDCR/NDTR values and the generated
 * NDCB0..NDCB3 values referenced by the functions below) — restore from
 * the pristine source before building.
 */
172 struct pxa3xx_nand_info {
173 struct nand_hw_control controller;
174 struct pxa3xx_nand_platform_data *pdata;
177 void __iomem *mmio_base;
178 unsigned long mmio_phys;
179 int cmd_complete, dev_ready;
181 unsigned int buf_start;
182 unsigned int buf_count;
183 unsigned int buf_size;
184 unsigned int data_buff_pos;
185 unsigned int oob_buff_pos;
187 unsigned char *data_buff;
188 unsigned char *oob_buff;
190 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
194 * This driver supports NFCv1 (as found in PXA SoC)
195 * and NFCv2 (as found in Armada 370/XP SoC).
197 enum pxa3xx_nand_variant variant;
200 int use_ecc; /* use HW ECC ? */
201 int force_raw; /* prevent use_ecc to be set */
202 int ecc_bch; /* using BCH ECC? */
203 int use_spare; /* use spare ? */
206 /* Amount of real data per full chunk */
207 unsigned int chunk_size;
209 /* Amount of spare data per full chunk */
210 unsigned int spare_size;
212 /* Number of full chunks (i.e chunk_size + spare_size) */
213 unsigned int nfullchunks;
216 * Total number of chunks. If equal to nfullchunks, then there
217 * are only full chunks. Otherwise, there is one last chunk of
218 * size (last_chunk_size + last_spare_size)
220 unsigned int ntotalchunks;
222 /* Amount of real data in the last chunk */
223 unsigned int last_chunk_size;
225 /* Amount of spare data in the last chunk */
226 unsigned int last_spare_size;
228 unsigned int ecc_size;
229 unsigned int ecc_err_cnt;
230 unsigned int max_bitflips;
234 * Variables only valid during command
235 * execution. step_chunk_size and step_spare_size is the
236 * amount of real data and spare data in the current
237 * chunk. cur_chunk is the current chunk being
240 unsigned int step_chunk_size;
241 unsigned int step_spare_size;
242 unsigned int cur_chunk;
244 /* cached register value */
249 /* generated NDCBx register values */
256 static struct pxa3xx_nand_timing timing[] = {
258 * tCH Enable signal hold time
259 * tCS Enable signal setup time
260 * tWH ND_nWE high duration
261 * tWP ND_nWE pulse time
262 * tRH ND_nRE high duration
263 * tRP ND_nRE pulse width
264 * tR ND_nWE high to ND_nRE low for read
265 * tWHR ND_nWE high to ND_nRE low for status read
266 * tAR ND_ALE low to ND_nRE low delay
268 /*ch cs wh wp rh rp r whr ar */
269 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
270 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
271 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
272 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
273 { 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
276 static struct pxa3xx_nand_flash builtin_flash_types[] = {
279 * flash_width Width of Flash memory (DWIDTH_M)
280 * dfc_width Width of flash controller(DWIDTH_C)
282 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
284 { 0x46ec, 16, 16, &timing[1] },
285 { 0xdaec, 8, 8, &timing[1] },
286 { 0xd7ec, 8, 8, &timing[1] },
287 { 0xa12c, 8, 8, &timing[2] },
288 { 0xb12c, 16, 16, &timing[2] },
289 { 0xdc2c, 8, 8, &timing[2] },
290 { 0xcc2c, 16, 16, &timing[2] },
291 { 0xba20, 16, 16, &timing[3] },
292 { 0xda98, 8, 8, &timing[4] },
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* On-flash bad-block-table signature patterns (main and mirrored copy) */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif
320 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
323 32, 33, 34, 35, 36, 37, 38, 39,
324 40, 41, 42, 43, 44, 45, 46, 47,
325 48, 49, 50, 51, 52, 53, 54, 55,
326 56, 57, 58, 59, 60, 61, 62, 63},
327 .oobfree = { {2, 30} }
330 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
333 32, 33, 34, 35, 36, 37, 38, 39,
334 40, 41, 42, 43, 44, 45, 46, 47,
335 48, 49, 50, 51, 52, 53, 54, 55,
336 56, 57, 58, 59, 60, 61, 62, 63,
337 64, 65, 66, 67, 68, 69, 70, 71,
338 72, 73, 74, 75, 76, 77, 78, 79,
339 80, 81, 82, 83, 84, 85, 86, 87,
340 88, 89, 90, 91, 92, 93, 94, 95},
341 .oobfree = { {1, 4}, {6, 26} }
344 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
347 32, 33, 34, 35, 36, 37, 38, 39,
348 40, 41, 42, 43, 44, 45, 46, 47,
349 48, 49, 50, 51, 52, 53, 54, 55,
350 56, 57, 58, 59, 60, 61, 62, 63,
351 96, 97, 98, 99, 100, 101, 102, 103,
352 104, 105, 106, 107, 108, 109, 110, 111,
353 112, 113, 114, 115, 116, 117, 118, 119,
354 120, 121, 122, 123, 124, 125, 126, 127},
355 /* Bootrom looks in bytes 0 & 5 for bad blocks */
356 .oobfree = { {6, 26}, { 64, 32} }
359 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
362 32, 33, 34, 35, 36, 37, 38, 39,
363 40, 41, 42, 43, 44, 45, 46, 47,
364 48, 49, 50, 51, 52, 53, 54, 55,
365 56, 57, 58, 59, 60, 61, 62, 63,
367 96, 97, 98, 99, 100, 101, 102, 103,
368 104, 105, 106, 107, 108, 109, 110, 111,
369 112, 113, 114, 115, 116, 117, 118, 119,
370 120, 121, 122, 123, 124, 125, 126, 127,
372 160, 161, 162, 163, 164, 165, 166, 167,
373 168, 169, 170, 171, 172, 173, 174, 175,
374 176, 177, 178, 179, 180, 181, 182, 183,
375 184, 185, 186, 187, 188, 189, 190, 191,
377 224, 225, 226, 227, 228, 229, 230, 231,
378 232, 233, 234, 235, 236, 237, 238, 239,
379 240, 241, 242, 243, 244, 245, 246, 247,
380 248, 249, 250, 251, 252, 253, 254, 255},
382 /* Bootrom looks in bytes 0 & 5 for bad blocks */
383 .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
386 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
389 32, 33, 34, 35, 36, 37, 38, 39,
390 40, 41, 42, 43, 44, 45, 46, 47,
391 48, 49, 50, 51, 52, 53, 54, 55,
392 56, 57, 58, 59, 60, 61, 62, 63},
396 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
399 /* HW ECC handles all ECC data and all spare area is free for OOB */
400 .oobfree = {{0, 160} }
403 #define NDTR0_tCH(c) (min((c), 7) << 19)
404 #define NDTR0_tCS(c) (min((c), 7) << 16)
405 #define NDTR0_tWH(c) (min((c), 7) << 11)
406 #define NDTR0_tWP(c) (min((c), 7) << 8)
407 #define NDTR0_tRH(c) (min((c), 7) << 3)
408 #define NDTR0_tRP(c) (min((c), 7) << 0)
410 #define NDTR1_tR(c) (min((c), 65535) << 16)
411 #define NDTR1_tWHR(c) (min((c), 15) << 4)
412 #define NDTR1_tAR(c) (min((c), 15) << 0)
414 /* convert nano-seconds to nand flash controller clock cycles */
415 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
417 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
419 /* We only support the Armada 370/XP/38x for now */
420 return PXA3XX_NAND_VARIANT_ARMADA370;
423 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
424 const struct pxa3xx_nand_timing *t)
426 struct pxa3xx_nand_info *info = host->info_data;
427 unsigned long nand_clk = mvebu_get_nand_clock();
428 uint32_t ndtr0, ndtr1;
430 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
431 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
432 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
433 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
434 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
435 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
437 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
438 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
439 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
441 info->ndtr0cs0 = ndtr0;
442 info->ndtr1cs0 = ndtr1;
443 nand_writel(info, NDTR0CS0, ndtr0);
444 nand_writel(info, NDTR1CS0, ndtr1);
447 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
448 const struct nand_sdr_timings *t)
450 struct pxa3xx_nand_info *info = host->info_data;
451 struct nand_chip *chip = &host->chip;
452 unsigned long nand_clk = mvebu_get_nand_clock();
453 uint32_t ndtr0, ndtr1;
455 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
456 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
457 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
458 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
459 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
460 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
461 u32 tR = chip->chip_delay * 1000;
462 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
463 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
465 /* fallback to a default value if tR = 0 */
469 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
470 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
471 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
472 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
473 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
474 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
476 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
477 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
478 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
480 info->ndtr0cs0 = ndtr0;
481 info->ndtr1cs0 = ndtr1;
482 nand_writel(info, NDTR0CS0, ndtr0);
483 nand_writel(info, NDTR1CS0, ndtr1);
486 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
488 const struct nand_sdr_timings *timings;
489 struct nand_chip *chip = &host->chip;
490 struct pxa3xx_nand_info *info = host->info_data;
491 const struct pxa3xx_nand_flash *f = NULL;
492 struct mtd_info *mtd = nand_to_mtd(&host->chip);
493 int mode, id, ntypes, i;
495 mode = onfi_get_async_timing_mode(chip);
496 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
497 ntypes = ARRAY_SIZE(builtin_flash_types);
499 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
501 id = chip->read_byte(mtd);
502 id |= chip->read_byte(mtd) << 0x8;
504 for (i = 0; i < ntypes; i++) {
505 f = &builtin_flash_types[i];
507 if (f->chip_id == id)
512 dev_err(&info->pdev->dev, "Error: timings not found\n");
516 pxa3xx_nand_set_timing(host, f->timing);
518 if (f->flash_width == 16) {
519 info->reg_ndcr |= NDCR_DWIDTH_M;
520 chip->options |= NAND_BUSWIDTH_16;
523 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
525 mode = fls(mode) - 1;
529 timings = onfi_async_timing_mode_to_sdr_timings(mode);
531 return PTR_ERR(timings);
533 pxa3xx_nand_set_sdr_timing(host, timings);
540 * NOTE: it is a must to set ND_RUN first, then write
541 * command buffer, otherwise, it does not work.
542 * We enable all the interrupt at the same time, and
543 * let pxa3xx_nand_irq to handle all logic.
545 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
549 ndcr = info->reg_ndcr;
554 nand_writel(info, NDECCCTRL, 0x1);
556 ndcr &= ~NDCR_ECC_EN;
558 nand_writel(info, NDECCCTRL, 0x0);
561 ndcr &= ~NDCR_DMA_EN;
564 ndcr |= NDCR_SPARE_EN;
566 ndcr &= ~NDCR_SPARE_EN;
570 /* clear status bits and run */
571 nand_writel(info, NDSR, NDSR_MASK);
572 nand_writel(info, NDCR, 0);
573 nand_writel(info, NDCR, ndcr);
576 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
580 ndcr = nand_readl(info, NDCR);
581 nand_writel(info, NDCR, ndcr | int_mask);
584 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
586 if (info->ecc_bch && !info->force_raw) {
590 * According to the datasheet, when reading from NDDB
591 * with BCH enabled, after each 32 bytes reads, we
592 * have to make sure that the NDSR.RDDREQ bit is set.
594 * Drain the FIFO 8 32 bits reads at a time, and skip
595 * the polling on the last read.
598 readsl(info->mmio_base + NDDB, data, 8);
601 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
602 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
603 dev_err(&info->pdev->dev,
604 "Timeout on RDDREQ while draining the FIFO\n");
614 readsl(info->mmio_base + NDDB, data, len);
617 static void handle_data_pio(struct pxa3xx_nand_info *info)
619 int data_len = info->step_chunk_size;
622 * In raw mode, include the spare area and the ECC bytes that are not
623 * consumed by the controller in the data section. Do not reorganize
624 * here, do it in the ->read_page_raw() handler instead.
627 data_len += info->step_spare_size + info->ecc_size;
629 switch (info->state) {
630 case STATE_PIO_WRITING:
631 if (info->step_chunk_size)
632 writesl(info->mmio_base + NDDB,
633 info->data_buff + info->data_buff_pos,
634 DIV_ROUND_UP(data_len, 4));
636 if (info->step_spare_size)
637 writesl(info->mmio_base + NDDB,
638 info->oob_buff + info->oob_buff_pos,
639 DIV_ROUND_UP(info->step_spare_size, 4));
641 case STATE_PIO_READING:
642 if (info->step_chunk_size)
644 info->data_buff + info->data_buff_pos,
645 DIV_ROUND_UP(data_len, 4));
650 if (info->step_spare_size)
652 info->oob_buff + info->oob_buff_pos,
653 DIV_ROUND_UP(info->step_spare_size, 4));
656 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
661 /* Update buffer pointers for multi-page read/write */
662 info->data_buff_pos += data_len;
663 info->oob_buff_pos += info->step_spare_size;
666 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
668 handle_data_pio(info);
670 info->state = STATE_CMD_DONE;
671 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
674 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
676 unsigned int status, is_completed = 0, is_ready = 0;
677 unsigned int ready, cmd_done;
678 irqreturn_t ret = IRQ_HANDLED;
681 ready = NDSR_FLASH_RDY;
682 cmd_done = NDSR_CS0_CMDD;
685 cmd_done = NDSR_CS1_CMDD;
688 /* TODO - find out why we need the delay during write operation. */
691 status = nand_readl(info, NDSR);
693 if (status & NDSR_UNCORERR)
694 info->retcode = ERR_UNCORERR;
695 if (status & NDSR_CORERR) {
696 info->retcode = ERR_CORERR;
697 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
699 info->ecc_err_cnt = NDSR_ERR_CNT(status);
701 info->ecc_err_cnt = 1;
704 * Each chunk composing a page is corrected independently,
705 * and we need to store maximum number of corrected bitflips
706 * to return it to the MTD layer in ecc.read_page().
708 info->max_bitflips = max_t(unsigned int,
712 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
713 info->state = (status & NDSR_RDDREQ) ?
714 STATE_PIO_READING : STATE_PIO_WRITING;
715 /* Call the IRQ thread in U-Boot directly */
716 pxa3xx_nand_irq_thread(info);
719 if (status & cmd_done) {
720 info->state = STATE_CMD_DONE;
723 if (status & ready) {
724 info->state = STATE_READY;
729 * Clear all status bit before issuing the next command, which
730 * can and will alter the status bits and will deserve a new
731 * interrupt on its own. This lets the controller exit the IRQ
733 nand_writel(info, NDSR, status);
735 if (status & NDSR_WRCMDREQ) {
736 status &= ~NDSR_WRCMDREQ;
737 info->state = STATE_CMD_HANDLE;
740 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
741 * must be loaded by writing directly either 12 or 16
742 * bytes directly to NDCB0, four bytes at a time.
744 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
745 * but each NDCBx register can be read.
747 nand_writel(info, NDCB0, info->ndcb0);
748 nand_writel(info, NDCB0, info->ndcb1);
749 nand_writel(info, NDCB0, info->ndcb2);
751 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
752 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
753 nand_writel(info, NDCB0, info->ndcb3);
757 info->cmd_complete = 1;
/*
 * is_buf_blank - check whether a buffer contains only 0xFF (erased) bytes.
 * @buf: buffer to scan
 * @len: number of bytes to scan
 *
 * Returns 1 if every byte is 0xFF (trivially true for @len == 0),
 * 0 otherwise. Used to skip programming fully-blank pages.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--) {
		if (*buf++ != 0xff)
			return 0;
	}

	return 1;
}
772 static void set_command_address(struct pxa3xx_nand_info *info,
773 unsigned int page_size, uint16_t column, int page_addr)
775 /* small page addr setting */
776 if (page_size < info->chunk_size) {
777 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
782 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
785 if (page_addr & 0xFF0000)
786 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
792 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
794 struct pxa3xx_nand_host *host = info->host[info->cs];
795 struct mtd_info *mtd = nand_to_mtd(&host->chip);
797 /* reset data and oob column point to handle data */
800 info->data_buff_pos = 0;
801 info->oob_buff_pos = 0;
802 info->step_chunk_size = 0;
803 info->step_spare_size = 0;
807 info->retcode = ERR_NONE;
808 info->ecc_err_cnt = 0;
814 case NAND_CMD_READOOB:
815 case NAND_CMD_PAGEPROG:
816 if (!info->force_raw)
829 * If we are about to issue a read command, or about to set
830 * the write address, then clean the data buffer.
832 if (command == NAND_CMD_READ0 ||
833 command == NAND_CMD_READOOB ||
834 command == NAND_CMD_SEQIN) {
835 info->buf_count = mtd->writesize + mtd->oobsize;
836 memset(info->data_buff, 0xFF, info->buf_count);
840 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
841 int ext_cmd_type, uint16_t column, int page_addr)
843 int addr_cycle, exec_cmd;
844 struct pxa3xx_nand_host *host;
845 struct mtd_info *mtd;
847 host = info->host[info->cs];
848 mtd = nand_to_mtd(&host->chip);
853 info->ndcb0 = NDCB0_CSEL;
857 if (command == NAND_CMD_SEQIN)
860 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
861 + host->col_addr_cycles);
864 case NAND_CMD_READOOB:
866 info->buf_start = column;
867 info->ndcb0 |= NDCB0_CMD_TYPE(0)
871 if (command == NAND_CMD_READOOB)
872 info->buf_start += mtd->writesize;
874 if (info->cur_chunk < info->nfullchunks) {
875 info->step_chunk_size = info->chunk_size;
876 info->step_spare_size = info->spare_size;
878 info->step_chunk_size = info->last_chunk_size;
879 info->step_spare_size = info->last_spare_size;
883 * Multiple page read needs an 'extended command type' field,
884 * which is either naked-read or last-read according to the
887 if (info->force_raw) {
888 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
890 NDCB0_EXT_CMD_TYPE(ext_cmd_type);
891 info->ndcb3 = info->step_chunk_size +
892 info->step_spare_size + info->ecc_size;
893 } else if (mtd->writesize == info->chunk_size) {
894 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
895 } else if (mtd->writesize > info->chunk_size) {
896 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
898 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
899 info->ndcb3 = info->step_chunk_size +
900 info->step_spare_size;
903 set_command_address(info, mtd->writesize, column, page_addr);
908 info->buf_start = column;
909 set_command_address(info, mtd->writesize, 0, page_addr);
912 * Multiple page programming needs to execute the initial
913 * SEQIN command that sets the page address.
915 if (mtd->writesize > info->chunk_size) {
916 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
917 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
924 case NAND_CMD_PAGEPROG:
925 if (is_buf_blank(info->data_buff,
926 (mtd->writesize + mtd->oobsize))) {
931 if (info->cur_chunk < info->nfullchunks) {
932 info->step_chunk_size = info->chunk_size;
933 info->step_spare_size = info->spare_size;
935 info->step_chunk_size = info->last_chunk_size;
936 info->step_spare_size = info->last_spare_size;
939 /* Second command setting for large pages */
940 if (mtd->writesize > info->chunk_size) {
942 * Multiple page write uses the 'extended command'
943 * field. This can be used to issue a command dispatch
944 * or a naked-write depending on the current stage.
946 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
948 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
949 info->ndcb3 = info->step_chunk_size +
950 info->step_spare_size;
953 * This is the command dispatch that completes a chunked
954 * page program operation.
956 if (info->cur_chunk == info->ntotalchunks) {
957 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
958 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
965 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
969 | (NAND_CMD_PAGEPROG << 8)
976 info->buf_count = INIT_BUFFER_SIZE;
977 info->ndcb0 |= NDCB0_CMD_TYPE(0)
981 info->ndcb1 = (column & 0xFF);
982 info->ndcb3 = INIT_BUFFER_SIZE;
983 info->step_chunk_size = INIT_BUFFER_SIZE;
986 case NAND_CMD_READID:
987 info->buf_count = READ_ID_BYTES;
988 info->ndcb0 |= NDCB0_CMD_TYPE(3)
991 info->ndcb1 = (column & 0xFF);
993 info->step_chunk_size = 8;
995 case NAND_CMD_STATUS:
997 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1001 info->step_chunk_size = 8;
1004 case NAND_CMD_ERASE1:
1005 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1009 | (NAND_CMD_ERASE2 << 8)
1011 info->ndcb1 = page_addr;
1015 case NAND_CMD_RESET:
1016 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1021 case NAND_CMD_ERASE2:
1027 dev_err(&info->pdev->dev, "non-supported command %x\n",
1035 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1036 int column, int page_addr)
1038 struct nand_chip *chip = mtd_to_nand(mtd);
1039 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1040 struct pxa3xx_nand_info *info = host->info_data;
1044 * if this is a x16 device ,then convert the input
1045 * "byte" address into a "word" address appropriate
1046 * for indexing a word-oriented device
1048 if (info->reg_ndcr & NDCR_DWIDTH_M)
1052 * There may be different NAND chip hooked to
1053 * different chip select, so check whether
1054 * chip select has been changed, if yes, reset the timing
1056 if (info->cs != host->cs) {
1057 info->cs = host->cs;
1058 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1059 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1062 prepare_start_command(info, command);
1064 info->state = STATE_PREPARED;
1065 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1070 info->cmd_complete = 0;
1071 info->dev_ready = 0;
1072 info->need_wait = 1;
1073 pxa3xx_nand_start(info);
1079 status = nand_readl(info, NDSR);
1081 pxa3xx_nand_irq(info);
1083 if (info->cmd_complete)
1086 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1087 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1092 info->state = STATE_IDLE;
1095 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1096 const unsigned command,
1097 int column, int page_addr)
1099 struct nand_chip *chip = mtd_to_nand(mtd);
1100 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1101 struct pxa3xx_nand_info *info = host->info_data;
1102 int exec_cmd, ext_cmd_type;
1105 * if this is a x16 device then convert the input
1106 * "byte" address into a "word" address appropriate
1107 * for indexing a word-oriented device
1109 if (info->reg_ndcr & NDCR_DWIDTH_M)
1113 * There may be different NAND chip hooked to
1114 * different chip select, so check whether
1115 * chip select has been changed, if yes, reset the timing
1117 if (info->cs != host->cs) {
1118 info->cs = host->cs;
1119 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1120 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1123 /* Select the extended command for the first command */
1125 case NAND_CMD_READ0:
1126 case NAND_CMD_READOOB:
1127 ext_cmd_type = EXT_CMD_TYPE_MONO;
1129 case NAND_CMD_SEQIN:
1130 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1132 case NAND_CMD_PAGEPROG:
1133 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1140 prepare_start_command(info, command);
1143 * Prepare the "is ready" completion before starting a command
1144 * transaction sequence. If the command is not executed the
1145 * completion will be completed, see below.
1147 * We can do that inside the loop because the command variable
1148 * is invariant and thus so is the exec_cmd.
1150 info->need_wait = 1;
1151 info->dev_ready = 0;
1156 info->state = STATE_PREPARED;
1157 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1160 info->need_wait = 0;
1161 info->dev_ready = 1;
1165 info->cmd_complete = 0;
1166 pxa3xx_nand_start(info);
1172 status = nand_readl(info, NDSR);
1174 pxa3xx_nand_irq(info);
1176 if (info->cmd_complete)
1179 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1180 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1185 /* Only a few commands need several steps */
1186 if (command != NAND_CMD_PAGEPROG &&
1187 command != NAND_CMD_READ0 &&
1188 command != NAND_CMD_READOOB)
1193 /* Check if the sequence is complete */
1194 if (info->cur_chunk == info->ntotalchunks &&
1195 command != NAND_CMD_PAGEPROG)
1199 * After a splitted program command sequence has issued
1200 * the command dispatch, the command sequence is complete.
1202 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1203 command == NAND_CMD_PAGEPROG &&
1204 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1207 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1208 /* Last read: issue a 'last naked read' */
1209 if (info->cur_chunk == info->ntotalchunks - 1)
1210 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1212 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1215 * If a splitted program command has no more data to transfer,
1216 * the command dispatch must be issued to complete.
1218 } else if (command == NAND_CMD_PAGEPROG &&
1219 info->cur_chunk == info->ntotalchunks) {
1220 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1224 info->state = STATE_IDLE;
1227 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1228 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1231 chip->write_buf(mtd, buf, mtd->writesize);
1232 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1237 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1238 struct nand_chip *chip, uint8_t *buf, int oob_required,
1241 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1242 struct pxa3xx_nand_info *info = host->info_data;
1245 chip->read_buf(mtd, buf, mtd->writesize);
1246 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1248 if (info->retcode == ERR_CORERR && info->use_ecc) {
1249 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1251 } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1253 * Empty pages will trigger uncorrectable errors. Re-read the
1254 * entire page in raw mode and check for bits not being "1".
1255 * If there are more than the supported strength, then it means
1256 * this is an actual uncorrectable error.
1258 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1259 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1260 chip->oob_poi, mtd->oobsize,
1261 NULL, 0, chip->ecc.strength);
1263 mtd->ecc_stats.failed++;
1265 mtd->ecc_stats.corrected += bf;
1266 info->max_bitflips = max_t(unsigned int,
1267 info->max_bitflips, bf);
1268 info->retcode = ERR_CORERR;
1270 info->retcode = ERR_NONE;
1273 } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1274 /* Raw read is not supported with Hamming ECC engine */
1275 if (is_buf_blank(buf, mtd->writesize))
1276 info->retcode = ERR_NONE;
1278 mtd->ecc_stats.failed++;
1281 return info->max_bitflips;
1284 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1285 struct nand_chip *chip, uint8_t *buf,
1286 int oob_required, int page)
1288 struct pxa3xx_nand_host *host = chip->priv;
1289 struct pxa3xx_nand_info *info = host->info_data;
1290 int chunk, ecc_off_buf;
1296 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1297 * pxa3xx_nand_start(), which will actually disable the ECC engine.
1299 info->force_raw = true;
1300 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1302 ecc_off_buf = (info->nfullchunks * info->spare_size) +
1303 info->last_spare_size;
1304 for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1306 buf + (chunk * info->chunk_size),
1310 (chunk * (info->spare_size)),
1313 chip->oob_poi + ecc_off_buf +
1314 (chunk * (info->ecc_size)),
1315 info->ecc_size - 2);
1318 if (info->ntotalchunks > info->nfullchunks) {
1320 buf + (info->nfullchunks * info->chunk_size),
1321 info->last_chunk_size);
1324 (info->nfullchunks * (info->spare_size)),
1325 info->last_spare_size);
1327 chip->oob_poi + ecc_off_buf +
1328 (info->nfullchunks * (info->ecc_size)),
1329 info->ecc_size - 2);
1332 info->force_raw = false;
1337 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1338 struct nand_chip *chip, int page)
1340 /* Invalidate page cache */
1343 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
/*
 * Return the next byte from the driver's data buffer, which was filled by
 * the preceding command. Out-of-range reads fall through to the default
 * return value (presumably 0xFF — the initializer line is not visible here).
 */
1347 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1349 struct nand_chip *chip = mtd_to_nand(mtd);
1350 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1351 struct pxa3xx_nand_info *info = host->info_data;
1354 if (info->buf_start < info->buf_count)
1355 /* Has a new command just been sent? */
1356 retval = info->data_buff[info->buf_start++];
/*
 * Return the next 16-bit word from the data buffer. Only aligned reads
 * inside the valid window are serviced; otherwise 0xFFFF is returned.
 */
1361 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1363 struct nand_chip *chip = mtd_to_nand(mtd);
1364 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1365 struct pxa3xx_nand_info *info = host->info_data;
1366 u16 retval = 0xFFFF;
/* Serve only even offsets that are still within the buffered data */
1368 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1369 retval = *((u16 *)(info->data_buff+info->buf_start));
1370 info->buf_start += 2;
/*
 * Copy up to @len bytes out of the driver's data buffer into @buf,
 * clamped to the bytes remaining from the last command, and advance
 * the read cursor.
 */
1375 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1377 struct nand_chip *chip = mtd_to_nand(mtd);
1378 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1379 struct pxa3xx_nand_info *info = host->info_data;
/* Never copy past the end of the buffered data */
1380 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1382 memcpy(buf, info->data_buff + info->buf_start, real_len);
1383 info->buf_start += real_len;
/*
 * Stage up to @len bytes from @buf into the driver's data buffer (clamped
 * to the remaining space) and advance the write cursor; the actual
 * programming is done later by the command state machine.
 */
1386 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1387 const uint8_t *buf, int len)
1389 struct nand_chip *chip = mtd_to_nand(mtd);
1390 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1391 struct pxa3xx_nand_info *info = host->info_data;
/* Never write past the end of the staging buffer */
1392 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1394 memcpy(info->data_buff + info->buf_start, buf, real_len);
1395 info->buf_start += real_len;
/*
 * Chip selection is handled per command by the controller, so this
 * callback is intentionally empty (body not visible in this chunk —
 * presumed empty stub; confirm against the full file).
 */
1398 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * Wait for the chip to become ready after a command.
 *
 * If a completion is pending (info->need_wait), poll NDSR and feed it to
 * the IRQ handler until the device-ready condition is seen or
 * CHIP_DELAY_TIMEOUT elapses. For program/erase, a controller-detected
 * error is reported as NAND_STATUS_FAIL; otherwise NAND_STATUS_READY.
 */
1403 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1405 struct nand_chip *chip = mtd_to_nand(mtd);
1406 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1407 struct pxa3xx_nand_info *info = host->info_data;
1409 if (info->need_wait) {
1412 info->need_wait = 0;
/* Poll the status register and run the IRQ handler by hand */
1418 status = nand_readl(info, NDSR);
1420 pxa3xx_nand_irq(info);
1422 if (info->dev_ready)
1425 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1426 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1427 return NAND_STATUS_FAIL;
1432 /* pxa3xx_nand_send_command has waited for command complete */
1433 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1434 if (info->retcode == ERR_NONE)
1437 return NAND_STATUS_FAIL;
1440 return NAND_STATUS_READY;
/*
 * Program a minimal NDCR configuration suitable for device identification
 * (READID/STATUS/PARAM): all interrupts enabled, optional bus arbiter,
 * ID byte count, and spare area enabled.
 */
1443 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1445 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1447 /* Configure default flash values */
1448 info->reg_ndcr = 0x0; /* enable all interrupts */
1449 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1450 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1451 info->reg_ndcr |= NDCR_SPARE_EN;
/*
 * Finish the NDCR configuration once the flash geometry is known:
 * column/row addressing, pages-per-block and page-size bits.
 */
1456 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1458 struct pxa3xx_nand_host *host = info->host[info->cs];
1459 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1460 struct nand_chip *chip = mtd_to_nand(mtd);
1462 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
/* page_shift == 6 means 64 pages per block */
1463 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1464 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
/*
 * "keep-config" path: trust the configuration the boot loader left in the
 * controller. Snapshot NDCR (minus interrupt-mask and arbiter bits, which
 * the driver manages itself) and the CS0 timing registers.
 */
1467 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1469 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1470 uint32_t ndcr = nand_readl(info, NDCR);
1472 /* Set an initial chunk size */
1473 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1474 info->reg_ndcr = ndcr &
1475 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1476 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
/* Preserve the timings programmed by the previous boot stage */
1477 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1478 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/*
 * Allocate the driver's data/OOB staging buffer of info->buf_size bytes.
 * Returns 0 on success (visible path returns an error when the
 * allocation fails).
 */
1481 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1483 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1484 if (info->data_buff == NULL)
/*
 * Probe for the presence of a flash chip on the current chip select:
 * program a default identification configuration and conservative
 * (ONFI async mode 0) timings, then issue a RESET and check the result.
 */
1489 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1491 struct pxa3xx_nand_info *info = host->info_data;
1492 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1493 struct mtd_info *mtd;
1494 struct nand_chip *chip;
1495 const struct nand_sdr_timings *timings;
1498 mtd = nand_to_mtd(&info->host[info->cs]->chip);
1499 chip = mtd_to_nand(mtd);
1501 /* configure default flash values */
1502 info->reg_ndcr = 0x0; /* enable all interrupts */
1503 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1504 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1505 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1507 /* use the common timing to make a try */
1508 timings = onfi_async_timing_mode_to_sdr_timings(0);
1509 if (IS_ERR(timings))
1510 return PTR_ERR(timings);
1512 pxa3xx_nand_set_sdr_timing(host, timings);
/* A failed RESET means no chip answers on this chip select */
1514 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1515 ret = chip->waitfunc(mtd, chip);
1516 if (ret & NAND_STATUS_FAIL)
/*
 * Select the ECC engine configuration (Hamming or BCH) and the matching
 * page chunking from the required (strength, step size, page size) triple.
 *
 * Fills in info->{nfullchunks,ntotalchunks,chunk_size,spare_size,ecc_size}
 * (plus last_chunk_size/last_spare_size for layouts with a partial trailing
 * chunk) and the nand_ecc_ctrl fields. Unsupported combinations are
 * rejected with an error message.
 */
1522 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1523 struct nand_ecc_ctrl *ecc,
1524 int strength, int ecc_stepsize, int page_size)
/* 1-bit Hamming, 2KB page: one 2048-byte chunk */
1526 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1527 info->nfullchunks = 1;
1528 info->ntotalchunks = 1;
1529 info->chunk_size = 2048;
1530 info->spare_size = 40;
1531 info->ecc_size = 24;
1532 ecc->mode = NAND_ECC_HW;
/* 1-bit Hamming, 512B page */
1536 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1537 info->nfullchunks = 1;
1538 info->ntotalchunks = 1;
1539 info->chunk_size = 512;
1540 info->spare_size = 8;
1542 ecc->mode = NAND_ECC_HW;
1547 * Required ECC: 4-bit correction per 512 bytes
1548 * Select: 16-bit correction per 2048 bytes
1550 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1552 info->nfullchunks = 1;
1553 info->ntotalchunks = 1;
1554 info->chunk_size = 2048;
1555 info->spare_size = 32;
1556 info->ecc_size = 32;
1557 ecc->mode = NAND_ECC_HW;
1558 ecc->size = info->chunk_size;
1559 ecc->layout = &ecc_layout_2KB_bch4bit;
/* BCH-4, 4KB page: two 2048-byte chunks */
1562 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1564 info->nfullchunks = 2;
1565 info->ntotalchunks = 2;
1566 info->chunk_size = 2048;
1567 info->spare_size = 32;
1568 info->ecc_size = 32;
1569 ecc->mode = NAND_ECC_HW;
1570 ecc->size = info->chunk_size;
1571 ecc->layout = &ecc_layout_4KB_bch4bit;
/* BCH-4, 8KB page: four 2048-byte chunks */
1574 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1576 info->nfullchunks = 4;
1577 info->ntotalchunks = 4;
1578 info->chunk_size = 2048;
1579 info->spare_size = 32;
1580 info->ecc_size = 32;
1581 ecc->mode = NAND_ECC_HW;
1582 ecc->size = info->chunk_size;
1583 ecc->layout = &ecc_layout_8KB_bch4bit;
1587 * Required ECC: 8-bit correction per 512 bytes
1588 * Select: 16-bit correction per 1024 bytes
1590 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
/* BCH-8 layouts use 1024-byte chunks plus a trailing partial chunk */
1592 info->nfullchunks = 1;
1593 info->ntotalchunks = 2;
1594 info->chunk_size = 1024;
1595 info->spare_size = 0;
1596 info->last_chunk_size = 1024;
1597 info->last_spare_size = 32;
1598 info->ecc_size = 32;
1599 ecc->mode = NAND_ECC_HW;
1600 ecc->size = info->chunk_size;
1601 ecc->layout = &ecc_layout_2KB_bch8bit;
1604 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1606 info->nfullchunks = 4;
1607 info->ntotalchunks = 5;
1608 info->chunk_size = 1024;
1609 info->spare_size = 0;
/* last chunk carries spare bytes only, no data */
1610 info->last_chunk_size = 0;
1611 info->last_spare_size = 64;
1612 info->ecc_size = 32;
1613 ecc->mode = NAND_ECC_HW;
1614 ecc->size = info->chunk_size;
1615 ecc->layout = &ecc_layout_4KB_bch8bit;
1618 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1620 info->nfullchunks = 8;
1621 info->ntotalchunks = 9;
1622 info->chunk_size = 1024;
1623 info->spare_size = 0;
1624 info->last_chunk_size = 0;
1625 info->last_spare_size = 160;
1626 info->ecc_size = 32;
1627 ecc->mode = NAND_ECC_HW;
1628 ecc->size = info->chunk_size;
1629 ecc->layout = &ecc_layout_8KB_bch8bit;
/* No match: the requested ECC scheme is not supported by this engine */
1633 dev_err(&info->pdev->dev,
1634 "ECC strength %d at page size %d is not supported\n",
1635 strength, page_size);
/*
 * Full device scan: configure the controller (fresh or keep-config),
 * detect the chip, program timings, pick the ECC scheme, set up
 * addressing, and reallocate the data buffer for the real page size.
 * Ends with nand_scan_tail() to finish MTD/NAND registration.
 */
1642 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1644 struct nand_chip *chip = mtd_to_nand(mtd);
1645 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1646 struct pxa3xx_nand_info *info = host->info_data;
1647 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1649 uint16_t ecc_strength, ecc_step;
/* Either trust the existing controller setup or probe from scratch */
1651 if (pdata->keep_config) {
1652 pxa3xx_nand_detect_config(info);
1654 ret = pxa3xx_nand_config_ident(info);
1657 ret = pxa3xx_nand_sensing(host);
1659 dev_info(&info->pdev->dev,
1660 "There is no chip on cs %d!\n",
1666 /* Device detection must be done with ECC disabled */
1667 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1668 nand_writel(info, NDECCCTRL, 0x0);
1670 if (nand_scan_ident(mtd, 1, NULL))
1673 if (!pdata->keep_config) {
1674 ret = pxa3xx_nand_init_timings(host);
1676 dev_err(&info->pdev->dev,
1677 "Failed to set timings: %d\n", ret);
1682 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1684 * We'll use a bad block table stored in-flash and don't
1685 * allow writing the bad block marker to the flash.
1687 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1688 chip->bbt_td = &bbt_main_descr;
1689 chip->bbt_md = &bbt_mirror_descr;
/* Platform-provided ECC requirements win over the chip's own */
1692 if (pdata->ecc_strength && pdata->ecc_step_size) {
1693 ecc_strength = pdata->ecc_strength;
1694 ecc_step = pdata->ecc_step_size;
1696 ecc_strength = chip->ecc_strength_ds;
1697 ecc_step = chip->ecc_step_ds;
1700 /* Set default ECC strength requirements on non-ONFI devices */
1701 if (ecc_strength < 1 && ecc_step < 1) {
1706 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1707 ecc_step, mtd->writesize);
1712 * If the page size is bigger than the FIFO size, let's check
1713 * we are given the right variant and then switch to the extended
1714 * (aka split) command handling,
1716 if (mtd->writesize > info->chunk_size) {
1717 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1718 chip->cmdfunc = nand_cmdfunc_extended;
1720 dev_err(&info->pdev->dev,
1721 "unsupported page size on this variant\n");
1726 /* calculate addressing information */
1727 if (mtd->writesize >= 2048)
1728 host->col_addr_cycles = 2;
1730 host->col_addr_cycles = 1;
1732 /* release the initial buffer */
1733 kfree(info->data_buff);
1735 /* allocate the real data + oob buffer */
1736 info->buf_size = mtd->writesize + mtd->oobsize;
1737 ret = pxa3xx_nand_init_buff(info);
1740 info->oob_buff = info->data_buff + mtd->writesize;
/* More than 65536 pages needs a third row-address cycle */
1742 if ((mtd->size >> chip->page_shift) > 65536)
1743 host->row_addr_cycles = 3;
1745 host->row_addr_cycles = 2;
1747 if (!pdata->keep_config)
1748 pxa3xx_nand_config_tail(info);
1750 return nand_scan_tail(mtd);
/*
 * Allocate and wire up per-chip-select resources: the host structures
 * live in the memory allocated right after *info (see board_nand_init's
 * single allocation), and each nand_chip gets the driver's callbacks.
 * Also allocates the initial detection buffer and masks all interrupts.
 */
1753 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1755 struct pxa3xx_nand_platform_data *pdata;
1756 struct pxa3xx_nand_host *host;
1757 struct nand_chip *chip = NULL;
1758 struct mtd_info *mtd;
1761 pdata = info->pdata;
1762 if (pdata->num_cs <= 0)
1765 info->variant = pxa3xx_nand_get_variant();
1766 for (cs = 0; cs < pdata->num_cs; cs++) {
/* Hosts are laid out back-to-back just past the info struct */
1767 chip = (struct nand_chip *)
1768 ((u8 *)&info[1] + sizeof(*host) * cs);
1769 mtd = nand_to_mtd(chip);
/* host embeds the nand_chip as its first member, hence the cast */
1770 host = (struct pxa3xx_nand_host *)chip;
1771 info->host[cs] = host;
1773 host->info_data = info;
1774 mtd->owner = THIS_MODULE;
/* Install the driver's NAND callbacks */
1776 nand_set_controller_data(chip, host);
1777 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1778 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1779 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
1780 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1781 chip->controller = &info->controller;
1782 chip->waitfunc = pxa3xx_nand_waitfunc;
1783 chip->select_chip = pxa3xx_nand_select_chip;
1784 chip->read_word = pxa3xx_nand_read_word;
1785 chip->read_byte = pxa3xx_nand_read_byte;
1786 chip->read_buf = pxa3xx_nand_read_buf;
1787 chip->write_buf = pxa3xx_nand_write_buf;
1788 chip->options |= NAND_NO_SUBPAGE_WRITE;
1789 chip->cmdfunc = nand_cmdfunc;
1792 /* Allocate a buffer to allow flash detection */
1793 info->buf_size = INIT_BUFFER_SIZE;
1794 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1795 if (info->data_buff == NULL) {
1797 goto fail_disable_clk;
1800 /* initialize all interrupts to be disabled */
1801 disable_int(info, NDSR_MASK);
/* Error path: release the detection buffer */
1805 kfree(info->data_buff);
/*
 * Build the platform data from the device tree: find the first enabled
 * "marvell,mvebu-pxa3xx-nand" node, read its register base, chip-select
 * count (must be 1), arbiter/keep-config flags and optional ECC
 * requirements. Stores the result in info->pdata.
 */
1810 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1812 struct pxa3xx_nand_platform_data *pdata;
1813 const void *blob = gd->fdt_blob;
1816 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1820 /* Get address decoding nodes from the FDT blob */
1822 node = fdt_node_offset_by_compatible(blob, node,
1823 "marvell,mvebu-pxa3xx-nand");
1827 /* Bypass disabled nodes */
1828 if (!fdtdec_get_is_enabled(blob, node))
1831 /* Get the first enabled NAND controller base address */
1833 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1834 blob, node, "reg", 0, NULL, true);
1836 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1837 if (pdata->num_cs != 1) {
1838 pr_err("pxa3xx driver supports single CS only\n");
1842 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1843 pdata->enable_arbiter = 1;
1845 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1846 pdata->keep_config = 1;
1850 * If these are not set, they will be selected according
1851 * to the detected flash type.
1854 pdata->ecc_strength = fdtdec_get_int(blob, node,
1855 "nand-ecc-strength", 0);
1858 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1859 "nand-ecc-step-size", 0);
1861 info->pdata = pdata;
1863 /* Currently support only a single NAND controller */
1866 } while (node >= 0);
/*
 * Top-level probe: parse the device tree, allocate controller resources,
 * then scan and register an MTD device for each chip select.
 */
1871 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1873 struct pxa3xx_nand_platform_data *pdata;
1874 int ret, cs, probe_success;
1876 ret = pxa3xx_nand_probe_dt(info);
1880 pdata = info->pdata;
1882 ret = alloc_nand_resource(info);
1884 dev_err(&pdev->dev, "alloc nand resource failed\n");
1889 for (cs = 0; cs < pdata->num_cs; cs++) {
1890 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1893 * The mtd name matches the one used in 'mtdparts' kernel
1894 * parameter. This name cannot be changed or otherwise
1895 * user's mtd partitions configuration would get broken.
1897 mtd->name = "pxa3xx_nand-0";
1899 ret = pxa3xx_nand_scan(mtd);
1901 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1906 if (nand_register(cs, mtd))
1919 * Main initialization routine
1921 void board_nand_init(void)
1923 struct pxa3xx_nand_info *info;
1924 struct pxa3xx_nand_host *host;
1927 info = kzalloc(sizeof(*info) +
1928 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1933 ret = pxa3xx_nand_probe(info);