1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/mtd/nand/raw/pxa3xx_nand.c
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
13 #include <linux/errno.h>
15 #include <asm/arch/cpu.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/rawnand.h>
18 #include <linux/types.h>
20 #include "pxa3xx_nand.h"
22 DECLARE_GLOBAL_DATA_PTR;
24 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
25 #define CHIP_DELAY_TIMEOUT 200
26 #define NAND_STOP_DELAY 40
29 * Define a buffer size for the initial command that detects the flash device:
30 * STATUS, READID and PARAM.
31 * ONFI param page is 256 bytes, and there are three redundant copies
32 * to be read. JEDEC param page is 512 bytes, and there are also three
33 * redundant copies to be read.
34 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
36 #define INIT_BUFFER_SIZE 2048
38 /* registers and bit definitions */
39 #define NDCR (0x00) /* Control register */
40 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
41 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
42 #define NDSR (0x14) /* Status Register */
43 #define NDPCR (0x18) /* Page Count Register */
44 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
45 #define NDBDR1 (0x20) /* Bad Block Register 1 */
46 #define NDECCCTRL (0x28) /* ECC control */
47 #define NDDB (0x40) /* Data Buffer */
48 #define NDCB0 (0x48) /* Command Buffer0 */
49 #define NDCB1 (0x4C) /* Command Buffer1 */
50 #define NDCB2 (0x50) /* Command Buffer2 */
52 #define NDCR_SPARE_EN (0x1 << 31)
53 #define NDCR_ECC_EN (0x1 << 30)
54 #define NDCR_DMA_EN (0x1 << 29)
55 #define NDCR_ND_RUN (0x1 << 28)
56 #define NDCR_DWIDTH_C (0x1 << 27)
57 #define NDCR_DWIDTH_M (0x1 << 26)
58 #define NDCR_PAGE_SZ (0x1 << 24)
59 #define NDCR_NCSX (0x1 << 23)
60 #define NDCR_ND_MODE (0x3 << 21)
61 #define NDCR_NAND_MODE (0x0)
62 #define NDCR_CLR_PG_CNT (0x1 << 20)
63 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
64 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
65 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
67 #define NDCR_RA_START (0x1 << 15)
68 #define NDCR_PG_PER_BLK (0x1 << 14)
69 #define NDCR_ND_ARB_EN (0x1 << 12)
70 #define NDCR_INT_MASK (0xFFF)
72 #define NDSR_MASK (0xfff)
73 #define NDSR_ERR_CNT_OFF (16)
74 #define NDSR_ERR_CNT_MASK (0x1f)
75 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
76 #define NDSR_RDY (0x1 << 12)
77 #define NDSR_FLASH_RDY (0x1 << 11)
78 #define NDSR_CS0_PAGED (0x1 << 10)
79 #define NDSR_CS1_PAGED (0x1 << 9)
80 #define NDSR_CS0_CMDD (0x1 << 8)
81 #define NDSR_CS1_CMDD (0x1 << 7)
82 #define NDSR_CS0_BBD (0x1 << 6)
83 #define NDSR_CS1_BBD (0x1 << 5)
84 #define NDSR_UNCORERR (0x1 << 4)
85 #define NDSR_CORERR (0x1 << 3)
86 #define NDSR_WRDREQ (0x1 << 2)
87 #define NDSR_RDDREQ (0x1 << 1)
88 #define NDSR_WRCMDREQ (0x1)
90 #define NDCB0_LEN_OVRD (0x1 << 28)
91 #define NDCB0_ST_ROW_EN (0x1 << 26)
92 #define NDCB0_AUTO_RS (0x1 << 25)
93 #define NDCB0_CSEL (0x1 << 24)
94 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
95 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
96 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
97 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
98 #define NDCB0_NC (0x1 << 20)
99 #define NDCB0_DBC (0x1 << 19)
100 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
101 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
102 #define NDCB0_CMD2_MASK (0xff << 8)
103 #define NDCB0_CMD1_MASK (0xff)
104 #define NDCB0_ADDR_CYC_SHIFT (16)
106 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
107 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
108 #define EXT_CMD_TYPE_READ 4 /* Read */
109 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
110 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
111 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
112 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
115 * This should be large enough to read 'ONFI' and 'JEDEC'.
116 * Let's use 7 bytes, which is the maximum ID count supported
117 * by the controller (see NDCR_RD_ID_CNT_MASK).
119 #define READ_ID_BYTES 7
121 /* macros for registers read/write */
122 #define nand_writel(info, off, val) \
123 writel((val), (info)->mmio_base + (off))
125 #define nand_readl(info, off) \
126 readl((info)->mmio_base + (off))
128 /* error code and state */
/*
 * NOTE(review): this extraction is elided — closing braces and several
 * members are missing, and each line carries a stray leading line number.
 * Code left byte-identical; only comments added.
 */
/* Controller generations: NFCv1 (PXA SoC) vs NFCv2 (Armada 370/XP). */
151 enum pxa3xx_nand_variant {
152 	PXA3XX_NAND_VARIANT_PXA,
153 	PXA3XX_NAND_VARIANT_ARMADA370,
/* Per-chip-select state: one host per attached NAND chip. */
156 struct pxa3xx_nand_host {
157 	struct nand_chip	chip;
160 	/* page size of attached chip */
164 	/* calculated from pxa3xx_nand_flash data */
165 	unsigned int		col_addr_cycles;
166 	unsigned int		row_addr_cycles;
/* Driver-wide controller state shared by all chip selects. */
169 struct pxa3xx_nand_info {
170 	struct nand_hw_control	controller;
171 	struct pxa3xx_nand_platform_data *pdata;
174 	void __iomem		*mmio_base;
175 	unsigned long		mmio_phys;
176 	int			cmd_complete, dev_ready;
	/* cursor/extent of the bounce buffer for the current command */
178 	unsigned int		buf_start;
179 	unsigned int		buf_count;
180 	unsigned int		buf_size;
181 	unsigned int		data_buff_pos;
182 	unsigned int		oob_buff_pos;
184 	unsigned char		*data_buff;
185 	unsigned char		*oob_buff;
187 	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
191 	 * This driver supports NFCv1 (as found in PXA SoC)
192 	 * and NFCv2 (as found in Armada 370/XP SoC).
194 	enum pxa3xx_nand_variant variant;
197 	int			use_ecc;	/* use HW ECC ? */
198 	int			force_raw;	/* prevent use_ecc to be set */
199 	int			ecc_bch;	/* using BCH ECC? */
200 	int			use_spare;	/* use spare ? */
203 	/* Amount of real data per full chunk */
204 	unsigned int		chunk_size;
206 	/* Amount of spare data per full chunk */
207 	unsigned int		spare_size;
209 	/* Number of full chunks (i.e chunk_size + spare_size) */
210 	unsigned int		nfullchunks;
213 	 * Total number of chunks. If equal to nfullchunks, then there
214 	 * are only full chunks. Otherwise, there is one last chunk of
215 	 * size (last_chunk_size + last_spare_size)
217 	unsigned int		ntotalchunks;
219 	/* Amount of real data in the last chunk */
220 	unsigned int		last_chunk_size;
222 	/* Amount of spare data in the last chunk */
223 	unsigned int		last_spare_size;
225 	unsigned int		ecc_size;
226 	unsigned int		ecc_err_cnt;
227 	unsigned int		max_bitflips;
231 	 * Variables only valid during command
232 	 * execution. step_chunk_size and step_spare_size is the
233 	 * amount of real data and spare data in the current
234 	 * chunk. cur_chunk is the current chunk being
237 	unsigned int		step_chunk_size;
238 	unsigned int		step_spare_size;
239 	unsigned int		cur_chunk;
241 	/* cached register value */
246 	/* generated NDCBx register values */
/*
 * Timing parameter sets (nanoseconds) referenced by builtin_flash_types
 * below; converted to controller clock cycles in pxa3xx_nand_set_timing().
 */
253 static struct pxa3xx_nand_timing timing[] = {
255 	 * tCH	Enable signal hold time
256 	 * tCS	Enable signal setup time
257 	 * tWH	ND_nWE high duration
258 	 * tWP	ND_nWE pulse time
259 	 * tRH	ND_nRE high duration
260 	 * tRP	ND_nRE pulse width
261 	 * tR	ND_nWE high to ND_nRE low for read
262 	 * tWHR	ND_nWE high to ND_nRE low for status read
263 	 * tAR	ND_ALE low to ND_nRE low delay
265 	/*ch  cs  wh  wp   rh  rp   r      whr  ar */
266 	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
267 	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
268 	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
269 	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
270 	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
/*
 * Known chips, keyed by the 16-bit (manufacturer | device) READID value;
 * used as a fallback when ONFI timing mode detection is unavailable.
 */
273 static struct pxa3xx_nand_flash builtin_flash_types[] = {
276 	 *   flash_width	Width of Flash memory (DWIDTH_M)
277 	 *   dfc_width		Width of flash controller(DWIDTH_C)
279 	 *   http://www.linux-mtd.infradead.org/nand-data/nanddata.html
281 	{ 0x46ec, 16, 16, &timing[1] },
282 	{ 0xdaec,  8,  8, &timing[1] },
283 	{ 0xd7ec,  8,  8, &timing[1] },
284 	{ 0xa12c,  8,  8, &timing[2] },
285 	{ 0xb12c, 16, 16, &timing[2] },
286 	{ 0xdc2c,  8,  8, &timing[2] },
287 	{ 0xcc2c, 16, 16, &timing[2] },
288 	{ 0xba20, 16, 16, &timing[3] },
289 	{ 0xda98,  8,  8, &timing[4] },
292 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* On-flash bad block table signatures: mirror pattern is main reversed. */
293 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
294 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
296 static struct nand_bbt_descr bbt_main_descr = {
297 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
298 		| NAND_BBT_2BIT | NAND_BBT_VERSION,
302 	.maxblocks = 8,		/* Last 8 blocks in each chip */
303 	.pattern = bbt_pattern
306 static struct nand_bbt_descr bbt_mirror_descr = {
307 	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
308 		| NAND_BBT_2BIT | NAND_BBT_VERSION,
312 	.maxblocks = 8,		/* Last 8 blocks in each chip */
313 	.pattern = bbt_mirror_pattern
/*
 * OOB/ECC layouts per page size and BCH strength. eccpos lists the OOB
 * byte offsets holding ECC; oobfree lists {offset, length} spans usable
 * by MTD clients. NOTE(review): eccbytes/oobavail fields are elided here.
 */
317 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
320 		  32, 33, 34, 35, 36, 37, 38, 39,
321 		40, 41, 42, 43, 44, 45, 46, 47,
322 		48, 49, 50, 51, 52, 53, 54, 55,
323 		56, 57, 58, 59, 60, 61, 62, 63},
324 	.oobfree = { {2, 30} }
327 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
330 		  32, 33, 34, 35, 36, 37, 38, 39,
331 		40, 41, 42, 43, 44, 45, 46, 47,
332 		48, 49, 50, 51, 52, 53, 54, 55,
333 		56, 57, 58, 59, 60, 61, 62, 63,
334 		64, 65, 66, 67, 68, 69, 70, 71,
335 		72, 73, 74, 75, 76, 77, 78, 79,
336 		80, 81, 82, 83, 84, 85, 86, 87,
337 		88, 89, 90, 91, 92, 93, 94, 95},
338 	.oobfree = { {1, 4}, {6, 26} }
341 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
344 		  32, 33, 34, 35, 36, 37, 38, 39,
345 		40, 41, 42, 43, 44, 45, 46, 47,
346 		48, 49, 50, 51, 52, 53, 54, 55,
347 		56, 57, 58, 59, 60, 61, 62, 63,
348 		96, 97, 98, 99, 100, 101, 102, 103,
349 		104, 105, 106, 107, 108, 109, 110, 111,
350 		112, 113, 114, 115, 116, 117, 118, 119,
351 		120, 121, 122, 123, 124, 125, 126, 127},
352 	/* Bootrom looks in bytes 0 & 5 for bad blocks */
353 	.oobfree = { {6, 26}, { 64, 32} }
356 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
359 		  32, 33, 34, 35, 36, 37, 38, 39,
360 		40, 41, 42, 43, 44, 45, 46, 47,
361 		48, 49, 50, 51, 52, 53, 54, 55,
362 		56, 57, 58, 59, 60, 61, 62, 63,
364 		96, 97, 98, 99, 100, 101, 102, 103,
365 		104, 105, 106, 107, 108, 109, 110, 111,
366 		112, 113, 114, 115, 116, 117, 118, 119,
367 		120, 121, 122, 123, 124, 125, 126, 127,
369 		160, 161, 162, 163, 164, 165, 166, 167,
370 		168, 169, 170, 171, 172, 173, 174, 175,
371 		176, 177, 178, 179, 180, 181, 182, 183,
372 		184, 185, 186, 187, 188, 189, 190, 191,
374 		224, 225, 226, 227, 228, 229, 230, 231,
375 		232, 233, 234, 235, 236, 237, 238, 239,
376 		240, 241, 242, 243, 244, 245, 246, 247,
377 		248, 249, 250, 251, 252, 253, 254, 255},
379 	/* Bootrom looks in bytes 0 & 5 for bad blocks */
380 	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
383 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
386 		  32, 33, 34, 35, 36, 37, 38, 39,
387 		40, 41, 42, 43, 44, 45, 46, 47,
388 		48, 49, 50, 51, 52, 53, 54, 55,
389 		56, 57, 58, 59, 60, 61, 62, 63},
393 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
396 	/* HW ECC handles all ECC data and all spare area is free for OOB */
397 	.oobfree = {{0, 160} }
/*
 * Pack clock-cycle counts into NDTR0/NDTR1 bit fields, clamping each
 * value to its field width (3-bit fields clamp to 7, etc.).
 */
400 #define NDTR0_tCH(c)	(min((c), 7) << 19)
401 #define NDTR0_tCS(c)	(min((c), 7) << 16)
402 #define NDTR0_tWH(c)	(min((c), 7) << 11)
403 #define NDTR0_tWP(c)	(min((c), 7) << 8)
404 #define NDTR0_tRH(c)	(min((c), 7) << 3)
405 #define NDTR0_tRP(c)	(min((c), 7) << 0)
407 #define NDTR1_tR(c)	(min((c), 65535) << 16)
408 #define NDTR1_tWHR(c)	(min((c), 15) << 4)
409 #define NDTR1_tAR(c)	(min((c), 15) << 0)
411 /* convert nano-seconds to nand flash controller clock cycles */
/* NOTE(review): integer division truncates; clk is assumed to be in Hz */
412 #define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
/* Report the controller variant; this build always targets NFCv2. */
414 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
416 	/* We only support the Armada 370/XP/38x for now */
417 	return PXA3XX_NAND_VARIANT_ARMADA370;
/*
 * Program NDTR0/NDTR1 from a built-in timing table entry (values in ns),
 * converting to controller clock cycles, and cache them for later
 * chip-select switches in nand_cmdfunc().
 */
420 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
421 				   const struct pxa3xx_nand_timing *t)
423 	struct pxa3xx_nand_info *info = host->info_data;
424 	unsigned long nand_clk = mvebu_get_nand_clock();
425 	uint32_t ndtr0, ndtr1;
427 	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
428 		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
429 		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
430 		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
431 		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
432 		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
434 	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
435 		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
436 		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
	/* cache so a chip-select change can restore these without recompute */
438 	info->ndtr0cs0 = ndtr0;
439 	info->ndtr1cs0 = ndtr1;
440 	nand_writel(info, NDTR0CS0, ndtr0);
441 	nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Program NDTR0/NDTR1 from ONFI SDR timings. The nand_sdr_timings fields
 * are in picoseconds, hence the DIV_ROUND_UP(..., 1000) conversions to ns
 * before the ns->cycle conversion.
 */
444 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
445 				       const struct nand_sdr_timings *t)
447 	struct pxa3xx_nand_info *info = host->info_data;
448 	struct nand_chip *chip = &host->chip;
449 	unsigned long nand_clk = mvebu_get_nand_clock();
450 	uint32_t ndtr0, ndtr1;
452 	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
453 	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
454 	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	/* tWP derived from write-cycle minus write-high time */
455 	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
456 	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
457 	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	/* chip_delay is in us; tR here is kept in ns */
458 	u32 tR = chip->chip_delay * 1000;
459 	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
460 	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
462 	/* fallback to a default value if tR = 0 */
466 	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
467 		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
468 		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
469 		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
470 		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
471 		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
473 	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
474 		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
475 		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
477 	info->ndtr0cs0 = ndtr0;
478 	info->ndtr1cs0 = ndtr1;
479 	nand_writel(info, NDTR0CS0, ndtr0);
480 	nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Choose and apply NAND timings. Prefers ONFI async timing modes; if the
 * chip reports none, falls back to matching the 2-byte READID value
 * against the builtin_flash_types[] table. Also latches bus width into
 * reg_ndcr for non-ONFI chips. Returns 0 on success, negative on error.
 */
483 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
485 	const struct nand_sdr_timings *timings;
486 	struct nand_chip *chip = &host->chip;
487 	struct pxa3xx_nand_info *info = host->info_data;
488 	const struct pxa3xx_nand_flash *f = NULL;
489 	struct mtd_info *mtd = nand_to_mtd(&host->chip);
490 	int mode, id, ntypes, i;
492 	mode = onfi_get_async_timing_mode(chip);
493 	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
494 		ntypes = ARRAY_SIZE(builtin_flash_types);
		/* read manufacturer + device id bytes */
496 		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
498 		id = chip->read_byte(mtd);
499 		id |= chip->read_byte(mtd) << 0x8;
501 		for (i = 0; i < ntypes; i++) {
502 			f = &builtin_flash_types[i];
504 			if (f->chip_id == id)
509 			dev_err(&info->pdev->dev, "Error: timings not found\n");
513 		pxa3xx_nand_set_timing(host, f->timing);
515 		if (f->flash_width == 16) {
516 			info->reg_ndcr |= NDCR_DWIDTH_M;
517 			chip->options |= NAND_BUSWIDTH_16;
520 		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
		/* pick the highest supported ONFI mode from the bitmask */
522 		mode = fls(mode) - 1;
526 		timings = onfi_async_timing_mode_to_sdr_timings(mode);
528 			return PTR_ERR(timings);
530 		pxa3xx_nand_set_sdr_timing(host, timings);
537  * NOTE: it is a must to set ND_RUN first, then write
538  * command buffer, otherwise, it does not work.
539  * We enable all the interrupt at the same time, and
540  * let pxa3xx_nand_irq to handle all logic.
/*
 * Kick off a controller command sequence: build NDCR from the cached
 * value plus the per-command ECC/DMA/spare flags, clear stale status,
 * then start the state machine (ND_RUN is part of reg_ndcr setup).
 */
542 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
546 	ndcr = info->reg_ndcr;
		/* BCH engine on/off is controlled via NDECCCTRL bit 0 */
551 			nand_writel(info, NDECCCTRL, 0x1);
553 		ndcr &= ~NDCR_ECC_EN;
555 			nand_writel(info, NDECCCTRL, 0x0);
	/* PIO-only driver: DMA is never used here */
558 	ndcr &= ~NDCR_DMA_EN;
561 		ndcr |= NDCR_SPARE_EN;
563 		ndcr &= ~NDCR_SPARE_EN;
567 	/* clear status bits and run */
568 	nand_writel(info, NDSR, NDSR_MASK);
569 	nand_writel(info, NDCR, 0);
570 	nand_writel(info, NDCR, ndcr);
/*
 * Mask controller interrupts: in NDCR a set bit in the INT field masks
 * the corresponding interrupt, so OR-ing int_mask in disables them.
 */
573 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
577 	ndcr = nand_readl(info, NDCR);
578 	nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read 'len' 32-bit words from the controller data FIFO (NDDB) into
 * 'data'. With BCH enabled (and not in raw mode) the FIFO must be
 * drained in bursts of 8 words, re-polling NDSR.RDDREQ between bursts
 * as the datasheet requires; otherwise a single readsl() suffices.
 */
581 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
583 	if (info->ecc_bch && !info->force_raw) {
587 		 * According to the datasheet, when reading from NDDB
588 		 * with BCH enabled, after each 32 bytes reads, we
589 		 * have to make sure that the NDSR.RDDREQ bit is set.
591 		 * Drain the FIFO 8 32 bits reads at a time, and skip
592 		 * the polling on the last read.
595 			readsl(info->mmio_base + NDDB, data, 8);
598 			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
599 				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
600 					dev_err(&info->pdev->dev,
601 						"Timeout on RDDREQ while draining the FIFO\n");
611 	readsl(info->mmio_base + NDDB, data, len);
/*
 * PIO data phase for the current chunk: push data (and OOB) to NDDB on
 * writes, or drain them from NDDB on reads, then advance the data/oob
 * buffer cursors for multi-chunk pages. In raw mode the spare + ECC
 * bytes are folded into the data length instead of split out.
 */
614 static void handle_data_pio(struct pxa3xx_nand_info *info)
616 	int data_len = info->step_chunk_size;
619 	 * In raw mode, include the spare area and the ECC bytes that are not
620 	 * consumed by the controller in the data section. Do not reorganize
621 	 * here, do it in the ->read_page_raw() handler instead.
624 		data_len += info->step_spare_size + info->ecc_size;
626 	switch (info->state) {
627 	case STATE_PIO_WRITING:
628 		if (info->step_chunk_size)
629 			writesl(info->mmio_base + NDDB,
630 				info->data_buff + info->data_buff_pos,
631 				DIV_ROUND_UP(data_len, 4));
633 		if (info->step_spare_size)
634 			writesl(info->mmio_base + NDDB,
635 				info->oob_buff + info->oob_buff_pos,
636 				DIV_ROUND_UP(info->step_spare_size, 4));
638 	case STATE_PIO_READING:
639 		if (info->step_chunk_size)
641 				   info->data_buff + info->data_buff_pos,
642 				   DIV_ROUND_UP(data_len, 4));
647 		if (info->step_spare_size)
649 				   info->oob_buff + info->oob_buff_pos,
650 				   DIV_ROUND_UP(info->step_spare_size, 4));
653 		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
658 	/* Update buffer pointers for multi-page read/write */
659 	info->data_buff_pos += data_len;
660 	info->oob_buff_pos += info->step_spare_size;
/*
 * "Threaded" half of the interrupt handling (called synchronously in
 * U-Boot): move the chunk data via PIO, then mark the command done and
 * acknowledge the data-request status bits.
 */
663 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
665 	handle_data_pio(info);
667 	info->state = STATE_CMD_DONE;
668 	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Polled interrupt handler: decode NDSR, record ECC results, run the
 * PIO data phase when data is requested, and on WRCMDREQ load the
 * prepared NDCB0..3 command words into the command buffer.
 */
671 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
673 	unsigned int status, is_completed = 0, is_ready = 0;
674 	unsigned int ready, cmd_done;
675 	irqreturn_t ret = IRQ_HANDLED;
	/* pick the ready/command-done bits for the active chip select */
678 		ready           = NDSR_FLASH_RDY;
679 		cmd_done        = NDSR_CS0_CMDD;
682 		cmd_done        = NDSR_CS1_CMDD;
685 	/* TODO - find out why we need the delay during write operation. */
688 	status = nand_readl(info, NDSR);
690 	if (status & NDSR_UNCORERR)
691 		info->retcode = ERR_UNCORERR;
692 	if (status & NDSR_CORERR) {
693 		info->retcode = ERR_CORERR;
		/* only NFCv2 with BCH reports a corrected-bit count */
694 		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
696 			info->ecc_err_cnt = NDSR_ERR_CNT(status);
698 			info->ecc_err_cnt = 1;
701 		 * Each chunk composing a page is corrected independently,
702 		 * and we need to store maximum number of corrected bitflips
703 		 * to return it to the MTD layer in ecc.read_page().
705 		info->max_bitflips = max_t(unsigned int,
709 	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
710 		info->state = (status & NDSR_RDDREQ) ?
711 			STATE_PIO_READING : STATE_PIO_WRITING;
712 		/* Call the IRQ thread in U-Boot directly */
713 		pxa3xx_nand_irq_thread(info);
716 	if (status & cmd_done) {
717 		info->state = STATE_CMD_DONE;
720 	if (status & ready) {
721 		info->state = STATE_READY;
726 	 * Clear all status bit before issuing the next command, which
727 	 * can and will alter the status bits and will deserve a new
728 	 * interrupt on its own. This lets the controller exit the IRQ
730 	nand_writel(info, NDSR, status);
732 	if (status & NDSR_WRCMDREQ) {
733 		status &= ~NDSR_WRCMDREQ;
734 		info->state = STATE_CMD_HANDLE;
737 		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
738 		 * must be loaded by writing directly either 12 or 16
739 		 * bytes directly to NDCB0, four bytes at a time.
741 		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
742 		 * but each NDCBx register can be read.
744 		nand_writel(info, NDCB0, info->ndcb0);
745 		nand_writel(info, NDCB0, info->ndcb1);
746 		nand_writel(info, NDCB0, info->ndcb2);
748 		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
749 		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
750 			nand_writel(info, NDCB0, info->ndcb3);
754 		info->cmd_complete = 1;
/*
 * Return non-zero when the whole buffer is erased-state 0xFF.
 * NOTE(review): loop body and returns are elided in this extraction.
 */
761 static inline int is_buf_blank(uint8_t *buf, size_t len)
763 	for (; len > 0; len--)
/*
 * Encode column/page address into NDCB1/NDCB2. Small-page devices pack
 * the page address into NDCB1 bits [31:8]; large-page devices use a
 * 16-bit page field in NDCB1 with overflow bits in NDCB2.
 */
769 static void set_command_address(struct pxa3xx_nand_info *info,
770 		unsigned int page_size, uint16_t column, int page_addr)
772 	/* small page addr setting */
773 	if (page_size < info->chunk_size) {
774 		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
779 		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
782 		if (page_addr & 0xFF0000)
783 			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command bookkeeping (buffer cursors, chunk step sizes, ECC
 * error state) before issuing 'command', and pre-fill the bounce buffer
 * with 0xFF for reads and write-address setup.
 */
789 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
791 	struct pxa3xx_nand_host *host = info->host[info->cs];
792 	struct mtd_info *mtd = nand_to_mtd(&host->chip);
794 	/* reset data and oob column point to handle data */
797 	info->data_buff_pos	= 0;
798 	info->oob_buff_pos	= 0;
799 	info->step_chunk_size   = 0;
800 	info->step_spare_size   = 0;
804 	info->retcode		= ERR_NONE;
805 	info->ecc_err_cnt	= 0;
	/* commands that use the ECC engine unless raw mode forces it off */
811 	case NAND_CMD_READOOB:
812 	case NAND_CMD_PAGEPROG:
813 		if (!info->force_raw)
826 	 * If we are about to issue a read command, or about to set
827 	 * the write address, then clean the data buffer.
829 	if (command == NAND_CMD_READ0 ||
830 	    command == NAND_CMD_READOOB ||
831 	    command == NAND_CMD_SEQIN) {
832 		info->buf_count = mtd->writesize + mtd->oobsize;
833 		memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate an MTD-level command into NDCB0..3 command-buffer words and
 * per-command state (buf_start/buf_count, step chunk sizes). Returns
 * whether the command must actually be executed by the controller
 * (exec_cmd). ext_cmd_type selects the NFCv2 extended command used for
 * chunked (multi-step) page reads/writes.
 */
837 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
838 		int ext_cmd_type, uint16_t column, int page_addr)
840 	int addr_cycle, exec_cmd;
841 	struct pxa3xx_nand_host *host;
842 	struct mtd_info *mtd;
844 	host = info->host[info->cs];
845 	mtd = nand_to_mtd(&host->chip);
850 		info->ndcb0 = NDCB0_CSEL;
854 	if (command == NAND_CMD_SEQIN)
857 	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
858 				    + host->col_addr_cycles);
861 	case NAND_CMD_READOOB:
863 		info->buf_start = column;
864 		info->ndcb0 |= NDCB0_CMD_TYPE(0)
		/* OOB read is a full page read with the cursor past the data */
868 		if (command == NAND_CMD_READOOB)
869 			info->buf_start += mtd->writesize;
871 		if (info->cur_chunk < info->nfullchunks) {
872 			info->step_chunk_size = info->chunk_size;
873 			info->step_spare_size = info->spare_size;
875 			info->step_chunk_size = info->last_chunk_size;
876 			info->step_spare_size = info->last_spare_size;
880 		 * Multiple page read needs an 'extended command type' field,
881 		 * which is either naked-read or last-read according to the
884 		if (info->force_raw) {
885 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
887 				       NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* raw mode transfers data + spare + ECC bytes */
888 			info->ndcb3 = info->step_chunk_size +
889 				      info->step_spare_size + info->ecc_size;
890 		} else if (mtd->writesize == info->chunk_size) {
891 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
892 		} else if (mtd->writesize > info->chunk_size) {
893 			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
895 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
896 			info->ndcb3 = info->step_chunk_size +
897 				      info->step_spare_size;
900 		set_command_address(info, mtd->writesize, column, page_addr);
905 		info->buf_start = column;
906 		set_command_address(info, mtd->writesize, 0, page_addr);
909 		 * Multiple page programming needs to execute the initial
910 		 * SEQIN command that sets the page address.
912 		if (mtd->writesize > info->chunk_size) {
913 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
914 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
921 	case NAND_CMD_PAGEPROG:
		/* skip programming pages that are entirely 0xFF */
922 		if (is_buf_blank(info->data_buff,
923 				 (mtd->writesize + mtd->oobsize))) {
928 		if (info->cur_chunk < info->nfullchunks) {
929 			info->step_chunk_size = info->chunk_size;
930 			info->step_spare_size = info->spare_size;
932 			info->step_chunk_size = info->last_chunk_size;
933 			info->step_spare_size = info->last_spare_size;
936 		/* Second command setting for large pages */
937 		if (mtd->writesize > info->chunk_size) {
939 			 * Multiple page write uses the 'extended command'
940 			 * field. This can be used to issue a command dispatch
941 			 * or a naked-write depending on the current stage.
943 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
945 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
946 			info->ndcb3 = info->step_chunk_size +
947 				      info->step_spare_size;
950 			 * This is the command dispatch that completes a chunked
951 			 * page program operation.
953 			if (info->cur_chunk == info->ntotalchunks) {
954 				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
955 					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
962 			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
966 					| (NAND_CMD_PAGEPROG << 8)
	/* ONFI/JEDEC parameter page read: fixed-size naked transfer */
973 		info->buf_count = INIT_BUFFER_SIZE;
974 		info->ndcb0 |= NDCB0_CMD_TYPE(0)
978 		info->ndcb1 = (column & 0xFF);
979 		info->ndcb3 = INIT_BUFFER_SIZE;
980 		info->step_chunk_size = INIT_BUFFER_SIZE;
983 	case NAND_CMD_READID:
984 		info->buf_count = READ_ID_BYTES;
985 		info->ndcb0 |= NDCB0_CMD_TYPE(3)
988 		info->ndcb1 = (column & 0xFF);
990 		info->step_chunk_size = 8;
992 	case NAND_CMD_STATUS:
994 		info->ndcb0 |= NDCB0_CMD_TYPE(4)
998 		info->step_chunk_size = 8;
1001 	case NAND_CMD_ERASE1:
1002 		info->ndcb0 |= NDCB0_CMD_TYPE(2)
1006 				| (NAND_CMD_ERASE2 << 8)
1008 		info->ndcb1 = page_addr;
1012 	case NAND_CMD_RESET:
1013 		info->ndcb0 |= NDCB0_CMD_TYPE(5)
	/* ERASE2 is folded into ERASE1's double-byte command above */
1018 	case NAND_CMD_ERASE2:
1024 		dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * ->cmdfunc() for monolithic (non-chunked) operation: prepare the
 * command, start the controller, then busy-poll NDSR (driving the IRQ
 * handler by hand) until the command completes or CHIP_DELAY_TIMEOUT.
 */
1032 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1033 			 int column, int page_addr)
1035 	struct nand_chip *chip = mtd_to_nand(mtd);
1036 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1037 	struct pxa3xx_nand_info *info = host->info_data;
1041 	 * if this is a x16 device ,then convert the input
1042 	 * "byte" address into a "word" address appropriate
1043 	 * for indexing a word-oriented device
1045 	if (info->reg_ndcr & NDCR_DWIDTH_M)
1049 	 * There may be different NAND chip hooked to
1050 	 * different chip select, so check whether
1051 	 * chip select has been changed, if yes, reset the timing
1053 	if (info->cs != host->cs) {
1054 		info->cs = host->cs;
1055 		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1056 		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1059 	prepare_start_command(info, command);
1061 	info->state = STATE_PREPARED;
1062 	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1067 		info->cmd_complete = 0;
1068 		info->dev_ready = 0;
1069 		info->need_wait = 1;
1070 		pxa3xx_nand_start(info);
		/* poll-and-dispatch loop standing in for a real IRQ */
1076 			status = nand_readl(info, NDSR);
1078 				pxa3xx_nand_irq(info);
1080 			if (info->cmd_complete)
1083 			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1084 				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1089 	info->state = STATE_IDLE;
/*
 * ->cmdfunc() for NFCv2 extended (chunked) operation: drives multi-step
 * read/program sequences one chunk at a time, selecting the extended
 * command type (naked/last/dispatch) for each step until the whole page
 * has been transferred.
 */
1092 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1093 				  const unsigned command,
1094 				  int column, int page_addr)
1096 	struct nand_chip *chip = mtd_to_nand(mtd);
1097 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1098 	struct pxa3xx_nand_info *info = host->info_data;
1099 	int exec_cmd, ext_cmd_type;
1102 	 * if this is a x16 device then convert the input
1103 	 * "byte" address into a "word" address appropriate
1104 	 * for indexing a word-oriented device
1106 	if (info->reg_ndcr & NDCR_DWIDTH_M)
1110 	 * There may be different NAND chip hooked to
1111 	 * different chip select, so check whether
1112 	 * chip select has been changed, if yes, reset the timing
1114 	if (info->cs != host->cs) {
1115 		info->cs = host->cs;
1116 		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1117 		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1120 	/* Select the extended command for the first command */
1122 	case NAND_CMD_READ0:
1123 	case NAND_CMD_READOOB:
1124 		ext_cmd_type = EXT_CMD_TYPE_MONO;
1126 	case NAND_CMD_SEQIN:
1127 		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1129 	case NAND_CMD_PAGEPROG:
1130 		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1137 	prepare_start_command(info, command);
1140 	 * Prepare the "is ready" completion before starting a command
1141 	 * transaction sequence. If the command is not executed the
1142 	 * completion will be completed, see below.
1144 	 * We can do that inside the loop because the command variable
1145 	 * is invariant and thus so is the exec_cmd.
1147 	info->need_wait = 1;
1148 	info->dev_ready = 0;
1153 		info->state = STATE_PREPARED;
1154 		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
		/* nothing to execute: mark device ready immediately */
1157 			info->need_wait = 0;
1158 			info->dev_ready = 1;
1162 		info->cmd_complete = 0;
1163 		pxa3xx_nand_start(info);
		/* poll-and-dispatch loop standing in for a real IRQ */
1169 			status = nand_readl(info, NDSR);
1171 				pxa3xx_nand_irq(info);
1173 			if (info->cmd_complete)
1176 			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1177 				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1182 		/* Only a few commands need several steps */
1183 		if (command != NAND_CMD_PAGEPROG &&
1184 		    command != NAND_CMD_READ0 &&
1185 		    command != NAND_CMD_READOOB)
1190 		/* Check if the sequence is complete */
1191 		if (info->cur_chunk == info->ntotalchunks &&
1192 		    command != NAND_CMD_PAGEPROG)
1196 		 * After a splitted program command sequence has issued
1197 		 * the command dispatch, the command sequence is complete.
1199 		if (info->cur_chunk == (info->ntotalchunks + 1) &&
1200 		    command == NAND_CMD_PAGEPROG &&
1201 		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1204 		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1205 			/* Last read: issue a 'last naked read' */
1206 			if (info->cur_chunk == info->ntotalchunks - 1)
1207 				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1209 				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1212 		 * If a splitted program command has no more data to transfer,
1213 		 * the command dispatch must be issued to complete.
1215 		} else if (command == NAND_CMD_PAGEPROG &&
1216 			   info->cur_chunk == info->ntotalchunks) {
1217 				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1221 	info->state = STATE_IDLE;
/*
 * ecc.write_page: stage the page data and OOB into the driver bounce
 * buffer; the controller applies HW ECC when PAGEPROG is issued.
 */
1224 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1225 		struct nand_chip *chip, const uint8_t *buf, int oob_required,
1228 	chip->write_buf(mtd, buf, mtd->writesize);
1229 	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page: copy data+OOB from the bounce buffer, then interpret
 * the HW ECC result. Uncorrectable errors on BCH are re-checked in raw
 * mode to distinguish genuinely bad pages from erased (all-0xFF) pages
 * with a tolerable number of bitflips. Returns max_bitflips for MTD.
 */
1234 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1235 		struct nand_chip *chip, uint8_t *buf, int oob_required,
1238 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1239 	struct pxa3xx_nand_info *info = host->info_data;
1242 	chip->read_buf(mtd, buf, mtd->writesize);
1243 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1245 	if (info->retcode == ERR_CORERR && info->use_ecc) {
1246 		mtd->ecc_stats.corrected += info->ecc_err_cnt;
1248 	} else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1250 		 * Empty pages will trigger uncorrectable errors. Re-read the
1251 		 * entire page in raw mode and check for bits not being "1".
1252 		 * If there are more than the supported strength, then it means
1253 		 * this is an actual uncorrectable error.
1255 		chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1256 		bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1257 						 chip->oob_poi, mtd->oobsize,
1258 						 NULL, 0, chip->ecc.strength);
1260 			mtd->ecc_stats.failed++;
1262 			mtd->ecc_stats.corrected += bf;
1263 			info->max_bitflips = max_t(unsigned int,
1264 						   info->max_bitflips, bf);
1265 			info->retcode = ERR_NONE;
1267 			info->retcode = ERR_NONE;
1270 	} else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1271 		/* Raw read is not supported with Hamming ECC engine */
1272 		if (is_buf_blank(buf, mtd->writesize))
1273 			info->retcode = ERR_NONE;
1275 			mtd->ecc_stats.failed++;
1278 	return info->max_bitflips;
/*
 * ecc.read_page_raw: re-issue the read with force_raw set (ECC engine
 * off), then de-interleave the controller's chunked layout
 * (data|spare|ecc per chunk) into MTD's expected buf / oob_poi layout,
 * placing ECC bytes after all spare bytes in the OOB area.
 */
1281 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1282 				     struct nand_chip *chip, uint8_t *buf,
1283 				     int oob_required, int page)
1285 	struct pxa3xx_nand_host *host = chip->priv;
1286 	struct pxa3xx_nand_info *info = host->info_data;
1287 	int chunk, ecc_off_buf;
1293 	 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1294 	 * pxa3xx_nand_start(), which will actually disable the ECC engine.
1296 	info->force_raw = true;
1297 	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
	/* offset within oob_poi where the ECC bytes start */
1299 	ecc_off_buf = (info->nfullchunks * info->spare_size) +
1300 		      info->last_spare_size;
1301 	for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1303 			    buf + (chunk * info->chunk_size),
1307 			    (chunk * (info->spare_size)),
1310 			    chip->oob_poi + ecc_off_buf +
1311 			    (chunk * (info->ecc_size)),
1312 			    info->ecc_size - 2);
	/* handle the trailing partial chunk, if any */
1315 	if (info->ntotalchunks > info->nfullchunks) {
1317 			    buf + (info->nfullchunks * info->chunk_size),
1318 			    info->last_chunk_size);
1321 			    (info->nfullchunks * (info->spare_size)),
1322 			    info->last_spare_size);
1324 			    chip->oob_poi + ecc_off_buf +
1325 			    (info->nfullchunks * (info->ecc_size)),
1326 			    info->ecc_size - 2);
1329 	info->force_raw = false;
/*
 * ecc.read_oob_raw: implemented as a full raw page read into the chip's
 * scratch data buffer; the OOB ends up in chip->oob_poi as a side effect.
 */
1334 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1335 				    struct nand_chip *chip, int page)
1337 	/* Invalidate page cache */
1340 	return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
/*
 * ->read_byte: pop one byte from the bounce buffer filled by the last
 * command; returns a stale/default value once the buffer is exhausted.
 */
1344 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1346 	struct nand_chip *chip = mtd_to_nand(mtd);
1347 	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1348 	struct pxa3xx_nand_info *info = host->info_data;
1351 	if (info->buf_start < info->buf_count)
1352 		/* Has just send a new command? */
1353 		retval = info->data_buff[info->buf_start++];
1358 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1360 struct nand_chip *chip = mtd_to_nand(mtd);
1361 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1362 struct pxa3xx_nand_info *info = host->info_data;
1363 u16 retval = 0xFFFF;
1365 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1366 retval = *((u16 *)(info->data_buff+info->buf_start));
1367 info->buf_start += 2;
1372 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1374 struct nand_chip *chip = mtd_to_nand(mtd);
1375 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1376 struct pxa3xx_nand_info *info = host->info_data;
1377 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1379 memcpy(buf, info->data_buff + info->buf_start, real_len);
1380 info->buf_start += real_len;
1383 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1384 const uint8_t *buf, int len)
1386 struct nand_chip *chip = mtd_to_nand(mtd);
1387 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1388 struct pxa3xx_nand_info *info = host->info_data;
1389 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1391 memcpy(info->data_buff + info->buf_start, buf, real_len);
1392 info->buf_start += real_len;
1395 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1400 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1402 struct nand_chip *chip = mtd_to_nand(mtd);
1403 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1404 struct pxa3xx_nand_info *info = host->info_data;
1406 if (info->need_wait) {
1409 info->need_wait = 0;
1415 status = nand_readl(info, NDSR);
1417 pxa3xx_nand_irq(info);
1419 if (info->dev_ready)
1422 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1423 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1424 return NAND_STATUS_FAIL;
1429 /* pxa3xx_nand_send_command has waited for command complete */
1430 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1431 if (info->retcode == ERR_NONE)
1434 return NAND_STATUS_FAIL;
1437 return NAND_STATUS_READY;
1440 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1442 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1444 /* Configure default flash values */
1445 info->reg_ndcr = 0x0; /* enable all interrupts */
1446 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1447 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1448 info->reg_ndcr |= NDCR_SPARE_EN;
1453 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1455 struct pxa3xx_nand_host *host = info->host[info->cs];
1456 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1457 struct nand_chip *chip = mtd_to_nand(mtd);
1459 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1460 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1461 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1464 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1466 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1467 uint32_t ndcr = nand_readl(info, NDCR);
1469 /* Set an initial chunk size */
1470 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1471 info->reg_ndcr = ndcr &
1472 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1473 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1474 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1475 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1478 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1480 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1481 if (info->data_buff == NULL)
1486 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1488 struct pxa3xx_nand_info *info = host->info_data;
1489 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1490 struct mtd_info *mtd;
1491 struct nand_chip *chip;
1492 const struct nand_sdr_timings *timings;
1495 mtd = nand_to_mtd(&info->host[info->cs]->chip);
1496 chip = mtd_to_nand(mtd);
1498 /* configure default flash values */
1499 info->reg_ndcr = 0x0; /* enable all interrupts */
1500 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1501 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1502 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1504 /* use the common timing to make a try */
1505 timings = onfi_async_timing_mode_to_sdr_timings(0);
1506 if (IS_ERR(timings))
1507 return PTR_ERR(timings);
1509 pxa3xx_nand_set_sdr_timing(host, timings);
1511 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1512 ret = chip->waitfunc(mtd, chip);
1513 if (ret & NAND_STATUS_FAIL)
/*
 * pxa_ecc_init() - pick the chunk/spare/ECC geometry for a requested
 * ECC strength, step size and page size.
 *
 * Each branch fills in the per-chunk layout (nfullchunks/ntotalchunks,
 * chunk_size, spare_size, ecc_size, and last_* for layouts with a partial
 * trailing chunk) and configures the nand_ecc_ctrl for hardware ECC.
 * Unsupported combinations are rejected with -ENODEV (see the dev_err at
 * the end).
 *
 * NOTE(review): several lines are missing from this extract (per-branch
 * ecc->strength/ecc->size/layout assignments, returns, and braces);
 * comments below only describe what is visible.
 */
1519 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1520 struct nand_ecc_ctrl *ecc,
1521 int strength, int ecc_stepsize, int page_size)
/* 1-bit Hamming over one 2 KiB chunk */
1523 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1524 info->nfullchunks = 1;
1525 info->ntotalchunks = 1;
1526 info->chunk_size = 2048;
1527 info->spare_size = 40;
1528 info->ecc_size = 24;
1529 ecc->mode = NAND_ECC_HW;
/* 1-bit Hamming over one 512 B chunk (small-page flash) */
1533 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1534 info->nfullchunks = 1;
1535 info->ntotalchunks = 1;
1536 info->chunk_size = 512;
1537 info->spare_size = 8;
1539 ecc->mode = NAND_ECC_HW;
1544 * Required ECC: 4-bit correction per 512 bytes
1545 * Select: 16-bit correction per 2048 bytes
1547 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1549 info->nfullchunks = 1;
1550 info->ntotalchunks = 1;
1551 info->chunk_size = 2048;
1552 info->spare_size = 32;
1553 info->ecc_size = 32;
1554 ecc->mode = NAND_ECC_HW;
1555 ecc->size = info->chunk_size;
1556 ecc->layout = &ecc_layout_2KB_bch4bit;
/* BCH-4: 4 KiB page as two 2 KiB chunks */
1559 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1561 info->nfullchunks = 2;
1562 info->ntotalchunks = 2;
1563 info->chunk_size = 2048;
1564 info->spare_size = 32;
1565 info->ecc_size = 32;
1566 ecc->mode = NAND_ECC_HW;
1567 ecc->size = info->chunk_size;
1568 ecc->layout = &ecc_layout_4KB_bch4bit;
/* BCH-4: 8 KiB page as four 2 KiB chunks */
1571 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1573 info->nfullchunks = 4;
1574 info->ntotalchunks = 4;
1575 info->chunk_size = 2048;
1576 info->spare_size = 32;
1577 info->ecc_size = 32;
1578 ecc->mode = NAND_ECC_HW;
1579 ecc->size = info->chunk_size;
1580 ecc->layout = &ecc_layout_8KB_bch4bit;
1584 * Required ECC: 8-bit correction per 512 bytes
1585 * Select: 16-bit correction per 1024 bytes
/* BCH-8: 2 KiB page, one full 1 KiB chunk + one trailing chunk */
1587 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1589 info->nfullchunks = 1;
1590 info->ntotalchunks = 2;
1591 info->chunk_size = 1024;
1592 info->spare_size = 0;
1593 info->last_chunk_size = 1024;
1594 info->last_spare_size = 32;
1595 info->ecc_size = 32;
1596 ecc->mode = NAND_ECC_HW;
1597 ecc->size = info->chunk_size;
1598 ecc->layout = &ecc_layout_2KB_bch8bit;
/* BCH-8: 4 KiB page, four 1 KiB chunks + spare-only trailing chunk */
1601 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1603 info->nfullchunks = 4;
1604 info->ntotalchunks = 5;
1605 info->chunk_size = 1024;
1606 info->spare_size = 0;
1607 info->last_chunk_size = 0;
1608 info->last_spare_size = 64;
1609 info->ecc_size = 32;
1610 ecc->mode = NAND_ECC_HW;
1611 ecc->size = info->chunk_size;
1612 ecc->layout = &ecc_layout_4KB_bch8bit;
/* BCH-8: 8 KiB page, eight 1 KiB chunks + spare-only trailing chunk */
1615 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1617 info->nfullchunks = 8;
1618 info->ntotalchunks = 9;
1619 info->chunk_size = 1024;
1620 info->spare_size = 0;
1621 info->last_chunk_size = 0;
1622 info->last_spare_size = 160;
1623 info->ecc_size = 32;
1624 ecc->mode = NAND_ECC_HW;
1625 ecc->size = info->chunk_size;
1626 ecc->layout = &ecc_layout_8KB_bch8bit;
/* No matching geometry: reject the combination. */
1630 dev_err(&info->pdev->dev,
1631 "ECC strength %d at page size %d is not supported\n",
1632 strength, page_size);
/*
 * pxa3xx_nand_scan() - identify the flash and finish configuring the
 * controller for it.
 *
 * Flow: adopt or build an identification NDCR (keep_config vs. sensing),
 * disable ECC on Armada 370 for detection, run nand_scan_ident(), apply
 * real timings, select the ECC geometry via pxa_ecc_init(), switch to the
 * extended command handler for pages larger than the FIFO, compute the
 * address cycles, replace the small detection buffer with a full
 * page+OOB buffer, and finally run nand_scan_tail().
 *
 * NOTE(review): error-path lines (returns/braces) are missing from this
 * extract; comments describe only the visible statements.
 */
1639 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1641 struct nand_chip *chip = mtd_to_nand(mtd);
1642 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1643 struct pxa3xx_nand_info *info = host->info_data;
1644 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1646 uint16_t ecc_strength, ecc_step;
/* Either inherit firmware's setup or sense the chip from scratch. */
1648 if (pdata->keep_config) {
1649 pxa3xx_nand_detect_config(info);
1651 ret = pxa3xx_nand_config_ident(info);
1654 ret = pxa3xx_nand_sensing(host);
1656 dev_info(&info->pdev->dev,
1657 "There is no chip on cs %d!\n",
1663 /* Device detection must be done with ECC disabled */
1664 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1665 nand_writel(info, NDECCCTRL, 0x0);
1667 if (nand_scan_ident(mtd, 1, NULL))
/* Real timings can only be set once the chip is identified. */
1670 if (!pdata->keep_config) {
1671 ret = pxa3xx_nand_init_timings(host);
1673 dev_err(&info->pdev->dev,
1674 "Failed to set timings: %d\n", ret);
1679 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1681 * We'll use a bad block table stored in-flash and don't
1682 * allow writing the bad block marker to the flash.
1684 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1685 chip->bbt_td = &bbt_main_descr;
1686 chip->bbt_md = &bbt_mirror_descr;
/* Platform data overrides the chip's advertised ECC requirements. */
1689 if (pdata->ecc_strength && pdata->ecc_step_size) {
1690 ecc_strength = pdata->ecc_strength;
1691 ecc_step = pdata->ecc_step_size;
1693 ecc_strength = chip->ecc_strength_ds;
1694 ecc_step = chip->ecc_step_ds;
1697 /* Set default ECC strength requirements on non-ONFI devices */
1698 if (ecc_strength < 1 && ecc_step < 1) {
1703 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1704 ecc_step, mtd->writesize);
1709 * If the page size is bigger than the FIFO size, let's check
1710 * we are given the right variant and then switch to the extended
1711 * (aka split) command handling,
1713 if (mtd->writesize > info->chunk_size) {
1714 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1715 chip->cmdfunc = nand_cmdfunc_extended;
1717 dev_err(&info->pdev->dev,
1718 "unsupported page size on this variant\n");
1723 /* calculate addressing information */
1724 if (mtd->writesize >= 2048)
1725 host->col_addr_cycles = 2;
1727 host->col_addr_cycles = 1;
1729 /* release the initial buffer */
1730 kfree(info->data_buff);
1732 /* allocate the real data + oob buffer */
1733 info->buf_size = mtd->writesize + mtd->oobsize;
1734 ret = pxa3xx_nand_init_buff(info);
1737 info->oob_buff = info->data_buff + mtd->writesize;
/* Chips with more than 64K pages need a third row-address cycle. */
1739 if ((mtd->size >> chip->page_shift) > 65536)
1740 host->row_addr_cycles = 3;
1742 host->row_addr_cycles = 2;
1744 if (!pdata->keep_config)
1745 pxa3xx_nand_config_tail(info);
1747 return nand_scan_tail(mtd);
/*
 * alloc_nand_resource() - set up per-chip-select host structures and the
 * initial detection buffer.
 *
 * The hosts live in memory allocated directly after *info (note the
 * &info[1] arithmetic), with the nand_chip embedded at the start of each
 * pxa3xx_nand_host so the two pointers alias.  For every CS this wires up
 * the chip callbacks to the driver's functions, then allocates an
 * INIT_BUFFER_SIZE buffer large enough for STATUS/READID/PARAM and masks
 * all controller interrupts.
 *
 * NOTE(review): error-path lines and labels are missing from this extract
 * (e.g. the code between the kmalloc failure and fail_disable_clk).
 */
1750 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1752 struct pxa3xx_nand_platform_data *pdata;
1753 struct pxa3xx_nand_host *host;
1754 struct nand_chip *chip = NULL;
1755 struct mtd_info *mtd;
1758 pdata = info->pdata;
1759 if (pdata->num_cs <= 0)
1762 info->variant = pxa3xx_nand_get_variant();
1763 for (cs = 0; cs < pdata->num_cs; cs++) {
/* Hosts are laid out contiguously right after *info. */
1764 chip = (struct nand_chip *)
1765 ((u8 *)&info[1] + sizeof(*host) * cs);
1766 mtd = nand_to_mtd(chip);
/* chip is the first member of the host, so the cast is valid. */
1767 host = (struct pxa3xx_nand_host *)chip;
1768 info->host[cs] = host;
1770 host->info_data = info;
1771 mtd->owner = THIS_MODULE;
/* Hook the driver's accessors and ECC callbacks into the chip. */
1773 nand_set_controller_data(chip, host);
1774 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1775 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1776 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
1777 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1778 chip->controller = &info->controller;
1779 chip->waitfunc = pxa3xx_nand_waitfunc;
1780 chip->select_chip = pxa3xx_nand_select_chip;
1781 chip->read_word = pxa3xx_nand_read_word;
1782 chip->read_byte = pxa3xx_nand_read_byte;
1783 chip->read_buf = pxa3xx_nand_read_buf;
1784 chip->write_buf = pxa3xx_nand_write_buf;
1785 chip->options |= NAND_NO_SUBPAGE_WRITE;
1786 chip->cmdfunc = nand_cmdfunc;
1789 /* Allocate a buffer to allow flash detection */
1790 info->buf_size = INIT_BUFFER_SIZE;
1791 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1792 if (info->data_buff == NULL) {
1794 goto fail_disable_clk;
1797 /* initialize all interrupts to be disabled */
1798 disable_int(info, NDSR_MASK);
/* Error path: release the detection buffer. */
1802 kfree(info->data_buff);
/*
 * pxa3xx_nand_probe_dt() - build platform data from the device tree.
 *
 * Walks the FDT for "marvell,mvebu-pxa3xx-nand" nodes, skipping disabled
 * ones, and fills a freshly kzalloc'd pxa3xx_nand_platform_data from the
 * first enabled node: register base, num-cs (must be 1), arbiter and
 * keep-config flags, and optional ECC strength/step overrides (0 means
 * "choose from the detected flash").  The pdata is owned by *info.
 *
 * NOTE(review): loop scaffolding (do {, returns, braces) is missing from
 * this extract; only the visible statements are described.
 */
1807 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1809 struct pxa3xx_nand_platform_data *pdata;
1810 const void *blob = gd->fdt_blob;
1813 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1817 /* Get address decoding nodes from the FDT blob */
1819 node = fdt_node_offset_by_compatible(blob, node,
1820 "marvell,mvebu-pxa3xx-nand");
1824 /* Bypass disabled nodes */
1825 if (!fdtdec_get_is_enabled(blob, node))
1828 /* Get the first enabled NAND controller base address */
1830 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1831 blob, node, "reg", 0, NULL, true);
1833 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1834 if (pdata->num_cs != 1) {
1835 pr_err("pxa3xx driver supports single CS only\n");
1839 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1840 pdata->enable_arbiter = 1;
1842 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1843 pdata->keep_config = 1;
1847 * If these are not set, they will be selected according
1848 * to the detected flash type.
/* 0 = "not specified"; pxa3xx_nand_scan() falls back to chip defaults. */
1851 pdata->ecc_strength = fdtdec_get_int(blob, node,
1852 "nand-ecc-strength", 0);
1855 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1856 "nand-ecc-step-size", 0);
1858 info->pdata = pdata;
1860 /* Currently support only a single NAND controller */
1863 } while (node >= 0);
/*
 * pxa3xx_nand_probe() - top-level probe: parse DT, allocate resources,
 * scan and register each chip select.
 *
 * The mtd name is fixed ("pxa3xx_nand-0") because it must match the name
 * used in the kernel's 'mtdparts' argument (see comment below).
 *
 * NOTE(review): error-path lines (returns, probe_success accounting,
 * closing braces) are missing from this extract.
 */
1868 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1870 struct pxa3xx_nand_platform_data *pdata;
1871 int ret, cs, probe_success;
1873 ret = pxa3xx_nand_probe_dt(info);
1877 pdata = info->pdata;
1879 ret = alloc_nand_resource(info);
1881 dev_err(&pdev->dev, "alloc nand resource failed\n");
1886 for (cs = 0; cs < pdata->num_cs; cs++) {
1887 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1890 * The mtd name matches the one used in 'mtdparts' kernel
1891 * parameter. This name cannot be changed or otherwise
1892 * user's mtd partitions configuration would get broken.
1894 mtd->name = "pxa3xx_nand-0";
1896 ret = pxa3xx_nand_scan(mtd);
1898 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1903 if (nand_register(cs, mtd))
1916 * Main initialization routine
1918 void board_nand_init(void)
1920 struct pxa3xx_nand_info *info;
1921 struct pxa3xx_nand_host *host;
1924 info = kzalloc(sizeof(*info) +
1925 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1930 ret = pxa3xx_nand_probe(info);