1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/mtd/nand/raw/pxa3xx_nand.c
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
13 #include <dm/devres.h>
14 #include <linux/err.h>
15 #include <linux/errno.h>
17 #include <asm/arch/cpu.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/mtd/rawnand.h>
20 #include <linux/types.h>
22 #include "pxa3xx_nand.h"
24 DECLARE_GLOBAL_DATA_PTR;
/* Driver-wide timeouts (ms except where noted) and delays. */
26 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
27 #define CHIP_DELAY_TIMEOUT 200
28 #define NAND_STOP_DELAY 40
31 * Define a buffer size for the initial command that detects the flash device:
32 * STATUS, READID and PARAM.
33 * ONFI param page is 256 bytes, and there are three redundant copies
34 * to be read. JEDEC param page is 512 bytes, and there are also three
35 * redundant copies to be read.
36 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
38 #define INIT_BUFFER_SIZE 2048
40 /* registers and bit definitions */
41 #define NDCR (0x00) /* Control register */
42 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
43 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
44 #define NDSR (0x14) /* Status Register */
45 #define NDPCR (0x18) /* Page Count Register */
46 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
47 #define NDBDR1 (0x20) /* Bad Block Register 1 */
48 #define NDECCCTRL (0x28) /* ECC control */
49 #define NDDB (0x40) /* Data Buffer */
50 #define NDCB0 (0x48) /* Command Buffer0 */
51 #define NDCB1 (0x4C) /* Command Buffer1 */
52 #define NDCB2 (0x50) /* Command Buffer2 */
/* NDCR (control register) bit fields. */
54 #define NDCR_SPARE_EN (0x1 << 31)
55 #define NDCR_ECC_EN (0x1 << 30)
56 #define NDCR_DMA_EN (0x1 << 29)
57 #define NDCR_ND_RUN (0x1 << 28)
58 #define NDCR_DWIDTH_C (0x1 << 27)
59 #define NDCR_DWIDTH_M (0x1 << 26)
60 #define NDCR_PAGE_SZ (0x1 << 24)
61 #define NDCR_NCSX (0x1 << 23)
62 #define NDCR_ND_MODE (0x3 << 21)
63 #define NDCR_NAND_MODE (0x0)
64 #define NDCR_CLR_PG_CNT (0x1 << 20)
65 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
66 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
67 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
69 #define NDCR_RA_START (0x1 << 15)
70 #define NDCR_PG_PER_BLK (0x1 << 14)
71 #define NDCR_ND_ARB_EN (0x1 << 12)
72 #define NDCR_INT_MASK (0xFFF)
/* NDSR (status register) bit fields; low 12 bits double as IRQ sources. */
74 #define NDSR_MASK (0xfff)
75 #define NDSR_ERR_CNT_OFF (16)
76 #define NDSR_ERR_CNT_MASK (0x1f)
77 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
78 #define NDSR_RDY (0x1 << 12)
79 #define NDSR_FLASH_RDY (0x1 << 11)
80 #define NDSR_CS0_PAGED (0x1 << 10)
81 #define NDSR_CS1_PAGED (0x1 << 9)
82 #define NDSR_CS0_CMDD (0x1 << 8)
83 #define NDSR_CS1_CMDD (0x1 << 7)
84 #define NDSR_CS0_BBD (0x1 << 6)
85 #define NDSR_CS1_BBD (0x1 << 5)
86 #define NDSR_UNCORERR (0x1 << 4)
87 #define NDSR_CORERR (0x1 << 3)
88 #define NDSR_WRDREQ (0x1 << 2)
89 #define NDSR_RDDREQ (0x1 << 1)
90 #define NDSR_WRCMDREQ (0x1)
/* NDCB0 (command buffer word 0) bit fields. */
92 #define NDCB0_LEN_OVRD (0x1 << 28)
93 #define NDCB0_ST_ROW_EN (0x1 << 26)
94 #define NDCB0_AUTO_RS (0x1 << 25)
95 #define NDCB0_CSEL (0x1 << 24)
96 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
97 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
98 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
99 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
100 #define NDCB0_NC (0x1 << 20)
101 #define NDCB0_DBC (0x1 << 19)
102 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
103 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
104 #define NDCB0_CMD2_MASK (0xff << 8)
105 #define NDCB0_CMD1_MASK (0xff)
106 #define NDCB0_ADDR_CYC_SHIFT (16)
/* Extended command types used with NDCB0_EXT_CMD_TYPE() for chunked I/O. */
108 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
109 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
110 #define EXT_CMD_TYPE_READ 4 /* Read */
111 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
112 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
113 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
114 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
117 * This should be large enough to read 'ONFI' and 'JEDEC'.
118 * Let's use 7 bytes, which is the maximum ID count supported
119 * by the controller (see NDCR_RD_ID_CNT_MASK).
121 #define READ_ID_BYTES 7
123 /* macros for registers read/write */
124 #define nand_writel(info, off, val) \
125 writel((val), (info)->mmio_base + (off))
127 #define nand_readl(info, off) \
128 readl((info)->mmio_base + (off))
130 /* error code and state */
/*
 * Controller generations: NFCv1 (PXA SoCs) vs NFCv2 (Armada 370/XP).
 * NOTE(review): the extracted source appears to be missing lines here
 * (closing brace not visible) — confirm against the upstream file.
 */
153 enum pxa3xx_nand_variant {
154 PXA3XX_NAND_VARIANT_PXA,
155 PXA3XX_NAND_VARIANT_ARMADA370,
/* Per-chip-select state: the nand_chip plus geometry derived from flash data. */
158 struct pxa3xx_nand_host {
159 struct nand_chip chip;
162 /* page size of attached chip */
166 /* calculated from pxa3xx_nand_flash data */
167 unsigned int col_addr_cycles;
168 unsigned int row_addr_cycles;
/*
 * Controller-wide state: MMIO base, the intermediate data/OOB buffers used
 * for PIO, chunk geometry of the current layout, and per-command scratch
 * fields (step_*, cur_chunk) valid only while a command executes.
 */
171 struct pxa3xx_nand_info {
172 struct nand_hw_control controller;
173 struct pxa3xx_nand_platform_data *pdata;
176 void __iomem *mmio_base;
177 unsigned long mmio_phys;
178 int cmd_complete, dev_ready;
180 unsigned int buf_start;
181 unsigned int buf_count;
182 unsigned int buf_size;
183 unsigned int data_buff_pos;
184 unsigned int oob_buff_pos;
186 unsigned char *data_buff;
187 unsigned char *oob_buff;
189 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
193 * This driver supports NFCv1 (as found in PXA SoC)
194 * and NFCv2 (as found in Armada 370/XP SoC).
196 enum pxa3xx_nand_variant variant;
199 int use_ecc; /* use HW ECC ? */
200 int force_raw; /* prevent use_ecc to be set */
201 int ecc_bch; /* using BCH ECC? */
202 int use_spare; /* use spare ? */
205 /* Amount of real data per full chunk */
206 unsigned int chunk_size;
208 /* Amount of spare data per full chunk */
209 unsigned int spare_size;
211 /* Number of full chunks (i.e chunk_size + spare_size) */
212 unsigned int nfullchunks;
215 * Total number of chunks. If equal to nfullchunks, then there
216 * are only full chunks. Otherwise, there is one last chunk of
217 * size (last_chunk_size + last_spare_size)
219 unsigned int ntotalchunks;
221 /* Amount of real data in the last chunk */
222 unsigned int last_chunk_size;
224 /* Amount of spare data in the last chunk */
225 unsigned int last_spare_size;
227 unsigned int ecc_size;
228 unsigned int ecc_err_cnt;
229 unsigned int max_bitflips;
233 * Variables only valid during command
234 * execution. step_chunk_size and step_spare_size is the
235 * amount of real data and spare data in the current
236 * chunk. cur_chunk is the current chunk being
239 unsigned int step_chunk_size;
240 unsigned int step_spare_size;
241 unsigned int cur_chunk;
243 /* cached register value */
248 /* generated NDCBx register values */
/* Built-in timing sets (ns, except tR); indexed by builtin_flash_types[]. */
255 static struct pxa3xx_nand_timing timing[] = {
257 * tCH Enable signal hold time
258 * tCS Enable signal setup time
259 * tWH ND_nWE high duration
260 * tWP ND_nWE pulse time
261 * tRH ND_nRE high duration
262 * tRP ND_nRE pulse width
263 * tR ND_nWE high to ND_nRE low for read
264 * tWHR ND_nWE high to ND_nRE low for status read
265 * tAR ND_ALE low to ND_nRE low delay
267 /*ch cs wh wp rh rp r whr ar */
268 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
269 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
270 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
271 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
272 { 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
/*
 * Known flash chips keyed by 16-bit READID value, used as a fallback when
 * ONFI timing detection fails (see pxa3xx_nand_init_timings()).
 */
275 static struct pxa3xx_nand_flash builtin_flash_types[] = {
278 * flash_width Width of Flash memory (DWIDTH_M)
279 * dfc_width Width of flash controller(DWIDTH_C)
281 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
283 { 0x46ec, 16, 16, &timing[1] },
284 { 0xdaec, 8, 8, &timing[1] },
285 { 0xd7ec, 8, 8, &timing[1] },
286 { 0xa12c, 8, 8, &timing[2] },
287 { 0xb12c, 16, 16, &timing[2] },
288 { 0xdc2c, 8, 8, &timing[2] },
289 { 0xcc2c, 16, 16, &timing[2] },
290 { 0xba20, 16, 16, &timing[3] },
291 { 0xda98, 8, 8, &timing[4] },
/*
 * On-flash bad block table descriptors (main + mirror), stored in the last
 * blocks of each chip and identified by the 'MVBbt0' / '1tbBVM' patterns.
 */
294 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
295 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
296 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
298 static struct nand_bbt_descr bbt_main_descr = {
299 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
300 | NAND_BBT_2BIT | NAND_BBT_VERSION,
304 .maxblocks = 8, /* Last 8 blocks in each chip */
305 .pattern = bbt_pattern
308 static struct nand_bbt_descr bbt_mirror_descr = {
309 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
310 | NAND_BBT_2BIT | NAND_BBT_VERSION,
314 .maxblocks = 8, /* Last 8 blocks in each chip */
315 .pattern = bbt_mirror_pattern
/*
 * OOB layouts for the BCH engine per page size / ECC strength.
 * eccpos lists OOB byte offsets holding ECC; oobfree lists {offset, length}
 * spans usable by clients. NOTE(review): several eccpos initializer lines
 * appear to be missing from this extraction — verify against upstream.
 */
319 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
322 32, 33, 34, 35, 36, 37, 38, 39,
323 40, 41, 42, 43, 44, 45, 46, 47,
324 48, 49, 50, 51, 52, 53, 54, 55,
325 56, 57, 58, 59, 60, 61, 62, 63},
326 .oobfree = { {2, 30} }
329 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
332 32, 33, 34, 35, 36, 37, 38, 39,
333 40, 41, 42, 43, 44, 45, 46, 47,
334 48, 49, 50, 51, 52, 53, 54, 55,
335 56, 57, 58, 59, 60, 61, 62, 63,
336 64, 65, 66, 67, 68, 69, 70, 71,
337 72, 73, 74, 75, 76, 77, 78, 79,
338 80, 81, 82, 83, 84, 85, 86, 87,
339 88, 89, 90, 91, 92, 93, 94, 95},
340 .oobfree = { {1, 4}, {6, 26} }
343 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
346 32, 33, 34, 35, 36, 37, 38, 39,
347 40, 41, 42, 43, 44, 45, 46, 47,
348 48, 49, 50, 51, 52, 53, 54, 55,
349 56, 57, 58, 59, 60, 61, 62, 63,
350 96, 97, 98, 99, 100, 101, 102, 103,
351 104, 105, 106, 107, 108, 109, 110, 111,
352 112, 113, 114, 115, 116, 117, 118, 119,
353 120, 121, 122, 123, 124, 125, 126, 127},
354 /* Bootrom looks in bytes 0 & 5 for bad blocks */
355 .oobfree = { {6, 26}, { 64, 32} }
358 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
361 32, 33, 34, 35, 36, 37, 38, 39,
362 40, 41, 42, 43, 44, 45, 46, 47,
363 48, 49, 50, 51, 52, 53, 54, 55,
364 56, 57, 58, 59, 60, 61, 62, 63,
366 96, 97, 98, 99, 100, 101, 102, 103,
367 104, 105, 106, 107, 108, 109, 110, 111,
368 112, 113, 114, 115, 116, 117, 118, 119,
369 120, 121, 122, 123, 124, 125, 126, 127,
371 160, 161, 162, 163, 164, 165, 166, 167,
372 168, 169, 170, 171, 172, 173, 174, 175,
373 176, 177, 178, 179, 180, 181, 182, 183,
374 184, 185, 186, 187, 188, 189, 190, 191,
376 224, 225, 226, 227, 228, 229, 230, 231,
377 232, 233, 234, 235, 236, 237, 238, 239,
378 240, 241, 242, 243, 244, 245, 246, 247,
379 248, 249, 250, 251, 252, 253, 254, 255},
381 /* Bootrom looks in bytes 0 & 5 for bad blocks */
382 .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
385 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
388 32, 33, 34, 35, 36, 37, 38, 39,
389 40, 41, 42, 43, 44, 45, 46, 47,
390 48, 49, 50, 51, 52, 53, 54, 55,
391 56, 57, 58, 59, 60, 61, 62, 63},
395 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
398 /* HW ECC handles all ECC data and all spare area is free for OOB */
399 .oobfree = {{0, 160} }
/*
 * Pack clock-cycle counts into the NDTR0/NDTR1 timing register fields,
 * clamping each value to its field width.
 */
402 #define NDTR0_tCH(c) (min((c), 7) << 19)
403 #define NDTR0_tCS(c) (min((c), 7) << 16)
404 #define NDTR0_tWH(c) (min((c), 7) << 11)
405 #define NDTR0_tWP(c) (min((c), 7) << 8)
406 #define NDTR0_tRH(c) (min((c), 7) << 3)
407 #define NDTR0_tRP(c) (min((c), 7) << 0)
409 #define NDTR1_tR(c) (min((c), 65535) << 16)
410 #define NDTR1_tWHR(c) (min((c), 15) << 4)
411 #define NDTR1_tAR(c) (min((c), 15) << 0)
413 /* convert nano-seconds to nand flash controller clock cycles */
414 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
/* Report the controller generation; this U-Boot port is NFCv2-only. */
416 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
418 /* We only support the Armada 370/XP/38x for now */
419 return PXA3XX_NAND_VARIANT_ARMADA370;
/*
 * Program NDTR0/NDTR1 from a built-in pxa3xx_nand_timing entry (values in
 * ns, converted to controller clock cycles) and cache the register values
 * so a chip-select switch can restore them (see nand_cmdfunc()).
 */
422 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
423 const struct pxa3xx_nand_timing *t)
425 struct pxa3xx_nand_info *info = host->info_data;
426 unsigned long nand_clk = mvebu_get_nand_clock();
427 uint32_t ndtr0, ndtr1;
429 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
430 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
431 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
432 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
433 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
434 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
436 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
437 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
438 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
440 info->ndtr0cs0 = ndtr0;
441 info->ndtr1cs0 = ndtr1;
442 nand_writel(info, NDTR0CS0, ndtr0);
443 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Program NDTR0/NDTR1 from ONFI SDR timings (given in ps, hence the
 * DIV_ROUND_UP(..., 1000) conversions to ns). tR is derived from the
 * chip's chip_delay (us -> ns).
 */
446 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
447 const struct nand_sdr_timings *t)
449 struct pxa3xx_nand_info *info = host->info_data;
450 struct nand_chip *chip = &host->chip;
451 unsigned long nand_clk = mvebu_get_nand_clock();
452 uint32_t ndtr0, ndtr1;
454 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
455 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
456 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
457 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
458 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
459 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
460 u32 tR = chip->chip_delay * 1000;
461 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
462 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
464 /* fallback to a default value if tR = 0 */
468 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
469 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
470 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
471 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
472 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
473 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
475 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
476 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
477 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
479 info->ndtr0cs0 = ndtr0;
480 info->ndtr1cs0 = ndtr1;
481 nand_writel(info, NDTR0CS0, ndtr0);
482 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Choose and apply NAND timings. If the chip does not advertise ONFI
 * async timing modes, fall back to the builtin_flash_types[] table keyed
 * by a 2-byte READID; otherwise use the fastest advertised ONFI mode and
 * pxa3xx_nand_set_sdr_timing(). Also sets bus-width bits in reg_ndcr from
 * the table entry. Returns 0 on success, a negative error otherwise
 * (NOTE(review): several lines are missing in this extraction, so exact
 * return paths cannot be confirmed from here).
 */
485 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
487 const struct nand_sdr_timings *timings;
488 struct nand_chip *chip = &host->chip;
489 struct pxa3xx_nand_info *info = host->info_data;
490 const struct pxa3xx_nand_flash *f = NULL;
491 struct mtd_info *mtd = nand_to_mtd(&host->chip);
492 int mode, id, ntypes, i;
494 mode = onfi_get_async_timing_mode(chip);
495 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
496 ntypes = ARRAY_SIZE(builtin_flash_types);
498 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
500 id = chip->read_byte(mtd);
501 id |= chip->read_byte(mtd) << 0x8;
503 for (i = 0; i < ntypes; i++) {
504 f = &builtin_flash_types[i];
506 if (f->chip_id == id)
511 dev_err(&info->pdev->dev, "Error: timings not found\n");
515 pxa3xx_nand_set_timing(host, f->timing);
517 if (f->flash_width == 16) {
518 info->reg_ndcr |= NDCR_DWIDTH_M;
519 chip->options |= NAND_BUSWIDTH_16;
522 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
524 mode = fls(mode) - 1;
528 timings = onfi_async_timing_mode_to_sdr_timings(mode);
530 return PTR_ERR(timings);
532 pxa3xx_nand_set_sdr_timing(host, timings);
/*
 * Kick off a controller command: build NDCR from the cached reg_ndcr and
 * the per-command use_ecc/use_spare flags, clear all NDSR status bits,
 * then start the state machine (ND_RUN must be set before the command
 * buffer is written — see the NOTE below).
 */
539 * NOTE: it is a must to set ND_RUN first, then write
540 * command buffer, otherwise, it does not work.
541 * We enable all the interrupt at the same time, and
542 * let pxa3xx_nand_irq to handle all logic.
544 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
548 ndcr = info->reg_ndcr;
/* NDECCCTRL selects BCH (1) vs Hamming (0) ECC */
553 nand_writel(info, NDECCCTRL, 0x1);
555 ndcr &= ~NDCR_ECC_EN;
557 nand_writel(info, NDECCCTRL, 0x0);
560 ndcr &= ~NDCR_DMA_EN;
563 ndcr |= NDCR_SPARE_EN;
565 ndcr &= ~NDCR_SPARE_EN;
569 /* clear status bits and run */
570 nand_writel(info, NDSR, NDSR_MASK);
571 nand_writel(info, NDCR, 0);
572 nand_writel(info, NDCR, ndcr);
/*
 * Mask the given interrupt sources (setting a mask bit in NDCR disables
 * that interrupt — hence OR-ing int_mask in).
 */
575 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
579 ndcr = nand_readl(info, NDCR);
580 nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read 'len' 32-bit words from the NDDB data FIFO into 'data'.
 * With BCH enabled (and not in raw mode) the datasheet requires polling
 * NDSR.RDDREQ after every 32 bytes, so the FIFO is drained 8 words at a
 * time with a TIMEOUT_DRAIN_FIFO bound on each poll; otherwise a single
 * readsl() suffices.
 */
583 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
585 if (info->ecc_bch && !info->force_raw) {
589 * According to the datasheet, when reading from NDDB
590 * with BCH enabled, after each 32 bytes reads, we
591 * have to make sure that the NDSR.RDDREQ bit is set.
593 * Drain the FIFO 8 32 bits reads at a time, and skip
594 * the polling on the last read.
597 readsl(info->mmio_base + NDDB, data, 8);
600 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
601 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
602 dev_err(&info->pdev->dev,
603 "Timeout on RDDREQ while draining the FIFO\n");
613 readsl(info->mmio_base + NDDB, data, len);
/*
 * PIO transfer of the current chunk between the controller FIFO and the
 * driver's data/OOB buffers, in the direction given by info->state.
 * In raw mode the spare+ECC bytes ride along in the data section; buffer
 * positions are advanced at the end for multi-chunk operations.
 */
616 static void handle_data_pio(struct pxa3xx_nand_info *info)
618 int data_len = info->step_chunk_size;
621 * In raw mode, include the spare area and the ECC bytes that are not
622 * consumed by the controller in the data section. Do not reorganize
623 * here, do it in the ->read_page_raw() handler instead.
626 data_len += info->step_spare_size + info->ecc_size;
628 switch (info->state) {
629 case STATE_PIO_WRITING:
630 if (info->step_chunk_size)
631 writesl(info->mmio_base + NDDB,
632 info->data_buff + info->data_buff_pos,
633 DIV_ROUND_UP(data_len, 4));
635 if (info->step_spare_size)
636 writesl(info->mmio_base + NDDB,
637 info->oob_buff + info->oob_buff_pos,
638 DIV_ROUND_UP(info->step_spare_size, 4));
640 case STATE_PIO_READING:
641 if (info->step_chunk_size)
643 info->data_buff + info->data_buff_pos,
644 DIV_ROUND_UP(data_len, 4));
649 if (info->step_spare_size)
651 info->oob_buff + info->oob_buff_pos,
652 DIV_ROUND_UP(info->step_spare_size, 4));
655 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
660 /* Update buffer pointers for multi-page read/write */
661 info->data_buff_pos += data_len;
662 info->oob_buff_pos += info->step_spare_size;
/*
 * "Threaded" half of the IRQ handler (called synchronously in U-Boot):
 * perform the PIO transfer, then acknowledge the data-request bits.
 */
665 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
667 handle_data_pio(info);
669 info->state = STATE_CMD_DONE;
670 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Polled interrupt handler: decode NDSR, record ECC results, run the PIO
 * transfer on RDDREQ/WRDREQ, note command-done/ready, and on WRCMDREQ
 * load the prepared NDCB0..3 command words into the controller.
 */
673 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
675 unsigned int status, is_completed = 0, is_ready = 0;
676 unsigned int ready, cmd_done;
677 irqreturn_t ret = IRQ_HANDLED;
/* Select the ready/command-done status bits for the active chip select */
680 ready = NDSR_FLASH_RDY;
681 cmd_done = NDSR_CS0_CMDD;
684 cmd_done = NDSR_CS1_CMDD;
687 /* TODO - find out why we need the delay during write operation. */
690 status = nand_readl(info, NDSR);
692 if (status & NDSR_UNCORERR)
693 info->retcode = ERR_UNCORERR;
694 if (status & NDSR_CORERR) {
695 info->retcode = ERR_CORERR;
696 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
698 info->ecc_err_cnt = NDSR_ERR_CNT(status);
700 info->ecc_err_cnt = 1;
703 * Each chunk composing a page is corrected independently,
704 * and we need to store maximum number of corrected bitflips
705 * to return it to the MTD layer in ecc.read_page().
707 info->max_bitflips = max_t(unsigned int,
711 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
712 info->state = (status & NDSR_RDDREQ) ?
713 STATE_PIO_READING : STATE_PIO_WRITING;
714 /* Call the IRQ thread in U-Boot directly */
715 pxa3xx_nand_irq_thread(info);
718 if (status & cmd_done) {
719 info->state = STATE_CMD_DONE;
722 if (status & ready) {
723 info->state = STATE_READY;
728 * Clear all status bit before issuing the next command, which
729 * can and will alter the status bits and will deserve a new
730 * interrupt on its own. This lets the controller exit the IRQ
732 nand_writel(info, NDSR, status);
734 if (status & NDSR_WRCMDREQ) {
735 status &= ~NDSR_WRCMDREQ;
736 info->state = STATE_CMD_HANDLE;
739 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
740 * must be loaded by writing directly either 12 or 16
741 * bytes directly to NDCB0, four bytes at a time.
743 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
744 * but each NDCBx register can be read.
746 nand_writel(info, NDCB0, info->ndcb0);
747 nand_writel(info, NDCB0, info->ndcb1);
748 nand_writel(info, NDCB0, info->ndcb2);
750 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
751 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
752 nand_writel(info, NDCB0, info->ndcb3);
756 info->cmd_complete = 1;
/*
 * Return whether the buffer is all-0xFF (erased). NOTE(review): the loop
 * body is missing from this extraction — confirm against upstream.
 */
763 static inline int is_buf_blank(uint8_t *buf, size_t len)
765 for (; len > 0; len--)
/*
 * Encode column + page address into NDCB1/NDCB2. Small-page chips pack
 * the page address starting at bit 8 of NDCB1; large-page chips use a
 * 16-bit column with the page address above it, spilling bits 16-23 of
 * the page address into NDCB2.
 */
771 static void set_command_address(struct pxa3xx_nand_info *info,
772 unsigned int page_size, uint16_t column, int page_addr)
774 /* small page addr setting */
775 if (page_size < info->chunk_size) {
776 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
781 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
784 if (page_addr & 0xFF0000)
785 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command state (buffer positions, step sizes, retcode/error
 * counters) before building NDCBx words, and pre-fill the data buffer
 * with 0xFF for commands that will read or stage write data.
 */
791 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
793 struct pxa3xx_nand_host *host = info->host[info->cs];
794 struct mtd_info *mtd = nand_to_mtd(&host->chip);
796 /* reset data and oob column point to handle data */
799 info->data_buff_pos = 0;
800 info->oob_buff_pos = 0;
801 info->step_chunk_size = 0;
802 info->step_spare_size = 0;
806 info->retcode = ERR_NONE;
807 info->ecc_err_cnt = 0;
813 case NAND_CMD_READOOB:
814 case NAND_CMD_PAGEPROG:
815 if (!info->force_raw)
828 * If we are about to issue a read command, or about to set
829 * the write address, then clean the data buffer.
831 if (command == NAND_CMD_READ0 ||
832 command == NAND_CMD_READOOB ||
833 command == NAND_CMD_SEQIN) {
834 info->buf_count = mtd->writesize + mtd->oobsize;
835 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Build the NDCB0..3 command words for one controller transaction.
 * Handles monolithic and chunked (extended-command) reads/writes, READID,
 * STATUS, ERASE, RESET and the initial-detect commands. Sets the current
 * step_chunk/spare sizes for the chunk about to be transferred. Returns
 * whether a command actually needs to be executed (exec_cmd).
 */
839 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
840 int ext_cmd_type, uint16_t column, int page_addr)
842 int addr_cycle, exec_cmd;
843 struct pxa3xx_nand_host *host;
844 struct mtd_info *mtd;
846 host = info->host[info->cs];
847 mtd = nand_to_mtd(&host->chip);
852 info->ndcb0 = NDCB0_CSEL;
856 if (command == NAND_CMD_SEQIN)
859 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
860 + host->col_addr_cycles);
863 case NAND_CMD_READOOB:
865 info->buf_start = column;
866 info->ndcb0 |= NDCB0_CMD_TYPE(0)
/* READOOB is emulated as a full-page read offset past the data area */
870 if (command == NAND_CMD_READOOB)
871 info->buf_start += mtd->writesize;
873 if (info->cur_chunk < info->nfullchunks) {
874 info->step_chunk_size = info->chunk_size;
875 info->step_spare_size = info->spare_size;
877 info->step_chunk_size = info->last_chunk_size;
878 info->step_spare_size = info->last_spare_size;
882 * Multiple page read needs an 'extended command type' field,
883 * which is either naked-read or last-read according to the
886 if (info->force_raw) {
887 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
889 NDCB0_EXT_CMD_TYPE(ext_cmd_type);
890 info->ndcb3 = info->step_chunk_size +
891 info->step_spare_size + info->ecc_size;
892 } else if (mtd->writesize == info->chunk_size) {
893 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
894 } else if (mtd->writesize > info->chunk_size) {
895 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
897 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
898 info->ndcb3 = info->step_chunk_size +
899 info->step_spare_size;
902 set_command_address(info, mtd->writesize, column, page_addr);
907 info->buf_start = column;
908 set_command_address(info, mtd->writesize, 0, page_addr);
911 * Multiple page programming needs to execute the initial
912 * SEQIN command that sets the page address.
914 if (mtd->writesize > info->chunk_size) {
915 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
916 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
923 case NAND_CMD_PAGEPROG:
/* Skip programming entirely if the staged page is all-0xFF */
924 if (is_buf_blank(info->data_buff,
925 (mtd->writesize + mtd->oobsize))) {
930 if (info->cur_chunk < info->nfullchunks) {
931 info->step_chunk_size = info->chunk_size;
932 info->step_spare_size = info->spare_size;
934 info->step_chunk_size = info->last_chunk_size;
935 info->step_spare_size = info->last_spare_size;
938 /* Second command setting for large pages */
939 if (mtd->writesize > info->chunk_size) {
941 * Multiple page write uses the 'extended command'
942 * field. This can be used to issue a command dispatch
943 * or a naked-write depending on the current stage.
945 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
947 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
948 info->ndcb3 = info->step_chunk_size +
949 info->step_spare_size;
952 * This is the command dispatch that completes a chunked
953 * page program operation.
955 if (info->cur_chunk == info->ntotalchunks) {
956 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
957 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
964 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
968 | (NAND_CMD_PAGEPROG << 8)
/* PARAM (and similar detect commands): length-overridden read into the
 * INIT_BUFFER_SIZE scratch buffer */
975 info->buf_count = INIT_BUFFER_SIZE;
976 info->ndcb0 |= NDCB0_CMD_TYPE(0)
980 info->ndcb1 = (column & 0xFF);
981 info->ndcb3 = INIT_BUFFER_SIZE;
982 info->step_chunk_size = INIT_BUFFER_SIZE;
985 case NAND_CMD_READID:
986 info->buf_count = READ_ID_BYTES;
987 info->ndcb0 |= NDCB0_CMD_TYPE(3)
990 info->ndcb1 = (column & 0xFF);
992 info->step_chunk_size = 8;
994 case NAND_CMD_STATUS:
996 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1000 info->step_chunk_size = 8;
1003 case NAND_CMD_ERASE1:
1004 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1008 | (NAND_CMD_ERASE2 << 8)
1010 info->ndcb1 = page_addr;
1014 case NAND_CMD_RESET:
1015 info->ndcb0 |= NDCB0_CMD_TYPE(5)
/* ERASE2 is folded into ERASE1's double-byte command; nothing to issue */
1020 case NAND_CMD_ERASE2:
1026 dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * ->cmdfunc() for chips whose page fits in a single chunk: prepare the
 * command words, start the controller, then poll NDSR and dispatch to
 * pxa3xx_nand_irq() until the command completes or CHIP_DELAY_TIMEOUT
 * expires.
 */
1034 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1035 int column, int page_addr)
1037 struct nand_chip *chip = mtd_to_nand(mtd);
1038 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1039 struct pxa3xx_nand_info *info = host->info_data;
1043 * if this is a x16 device ,then convert the input
1044 * "byte" address into a "word" address appropriate
1045 * for indexing a word-oriented device
1047 if (info->reg_ndcr & NDCR_DWIDTH_M)
1051 * There may be different NAND chip hooked to
1052 * different chip select, so check whether
1053 * chip select has been changed, if yes, reset the timing
1055 if (info->cs != host->cs) {
1056 info->cs = host->cs;
1057 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1058 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1061 prepare_start_command(info, command);
1063 info->state = STATE_PREPARED;
1064 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1069 info->cmd_complete = 0;
1070 info->dev_ready = 0;
1071 info->need_wait = 1;
1072 pxa3xx_nand_start(info);
1078 status = nand_readl(info, NDSR);
1080 pxa3xx_nand_irq(info);
1082 if (info->cmd_complete)
1085 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1086 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1091 info->state = STATE_IDLE;
/*
 * ->cmdfunc() for chips whose page spans multiple chunks (NFCv2 extended
 * commands). Repeats the prepare/start/poll cycle once per chunk,
 * advancing ext_cmd_type between steps (naked read/write, last read,
 * final dispatch) until the whole page sequence is complete.
 */
1094 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1095 const unsigned command,
1096 int column, int page_addr)
1098 struct nand_chip *chip = mtd_to_nand(mtd);
1099 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1100 struct pxa3xx_nand_info *info = host->info_data;
1101 int exec_cmd, ext_cmd_type;
1104 * if this is a x16 device then convert the input
1105 * "byte" address into a "word" address appropriate
1106 * for indexing a word-oriented device
1108 if (info->reg_ndcr & NDCR_DWIDTH_M)
1112 * There may be different NAND chip hooked to
1113 * different chip select, so check whether
1114 * chip select has been changed, if yes, reset the timing
1116 if (info->cs != host->cs) {
1117 info->cs = host->cs;
1118 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1119 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1122 /* Select the extended command for the first command */
1124 case NAND_CMD_READ0:
1125 case NAND_CMD_READOOB:
1126 ext_cmd_type = EXT_CMD_TYPE_MONO;
1128 case NAND_CMD_SEQIN:
1129 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1131 case NAND_CMD_PAGEPROG:
1132 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1139 prepare_start_command(info, command);
1142 * Prepare the "is ready" completion before starting a command
1143 * transaction sequence. If the command is not executed the
1144 * completion will be completed, see below.
1146 * We can do that inside the loop because the command variable
1147 * is invariant and thus so is the exec_cmd.
1149 info->need_wait = 1;
1150 info->dev_ready = 0;
1155 info->state = STATE_PREPARED;
1156 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
/* Nothing to execute: mark ready and bail out of the sequence */
1159 info->need_wait = 0;
1160 info->dev_ready = 1;
1164 info->cmd_complete = 0;
1165 pxa3xx_nand_start(info);
1171 status = nand_readl(info, NDSR);
1173 pxa3xx_nand_irq(info);
1175 if (info->cmd_complete)
1178 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1179 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1184 /* Only a few commands need several steps */
1185 if (command != NAND_CMD_PAGEPROG &&
1186 command != NAND_CMD_READ0 &&
1187 command != NAND_CMD_READOOB)
1192 /* Check if the sequence is complete */
1193 if (info->cur_chunk == info->ntotalchunks &&
1194 command != NAND_CMD_PAGEPROG)
1198 * After a splitted program command sequence has issued
1199 * the command dispatch, the command sequence is complete.
1201 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1202 command == NAND_CMD_PAGEPROG &&
1203 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1206 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1207 /* Last read: issue a 'last naked read' */
1208 if (info->cur_chunk == info->ntotalchunks - 1)
1209 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1211 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1214 * If a splitted program command has no more data to transfer,
1215 * the command dispatch must be issued to complete.
1217 } else if (command == NAND_CMD_PAGEPROG &&
1218 info->cur_chunk == info->ntotalchunks) {
1219 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1223 info->state = STATE_IDLE;
/*
 * ecc.write_page with HW ECC: just stage data + OOB into the driver
 * buffer; the controller computes and appends ECC when PAGEPROG runs.
 */
1226 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1227 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1230 chip->write_buf(mtd, buf, mtd->writesize);
1231 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page with HW ECC: copy out data + OOB, then map the
 * controller's verdict to MTD stats. Uncorrectable errors on erased
 * pages are filtered — with BCH by re-reading raw and counting bitflips
 * via nand_check_erased_ecc_chunk(); with Hamming (no raw read support)
 * by a simple all-0xFF check. Returns max bitflips seen in the page.
 */
1236 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1237 struct nand_chip *chip, uint8_t *buf, int oob_required,
1240 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1241 struct pxa3xx_nand_info *info = host->info_data;
1244 chip->read_buf(mtd, buf, mtd->writesize);
1245 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1247 if (info->retcode == ERR_CORERR && info->use_ecc) {
1248 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1250 } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1252 * Empty pages will trigger uncorrectable errors. Re-read the
1253 * entire page in raw mode and check for bits not being "1".
1254 * If there are more than the supported strength, then it means
1255 * this is an actual uncorrectable error.
1257 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1258 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1259 chip->oob_poi, mtd->oobsize,
1260 NULL, 0, chip->ecc.strength);
1262 mtd->ecc_stats.failed++;
1264 mtd->ecc_stats.corrected += bf;
1265 info->max_bitflips = max_t(unsigned int,
1266 info->max_bitflips, bf);
1267 info->retcode = ERR_CORERR;
1269 info->retcode = ERR_NONE;
1272 } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1273 /* Raw read is not supported with Hamming ECC engine */
1274 if (is_buf_blank(buf, mtd->writesize))
1275 info->retcode = ERR_NONE;
1277 mtd->ecc_stats.failed++;
1280 return info->max_bitflips;
/*
 * ecc.read_page_raw: read the page with the ECC engine disabled (via the
 * force_raw flag honored by pxa3xx_nand_start()), then de-interleave the
 * controller's on-flash layout — per-chunk data, then all spare, then all
 * ECC — into the flat buf/oob_poi layout MTD expects. Handles a partial
 * last chunk when ntotalchunks > nfullchunks.
 */
1283 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1284 struct nand_chip *chip, uint8_t *buf,
1285 int oob_required, int page)
1287 struct pxa3xx_nand_host *host = chip->priv;
1288 struct pxa3xx_nand_info *info = host->info_data;
1289 int chunk, ecc_off_buf;
1295 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1296 * pxa3xx_nand_start(), which will actually disable the ECC engine.
1298 info->force_raw = true;
1299 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1301 ecc_off_buf = (info->nfullchunks * info->spare_size) +
1302 info->last_spare_size;
1303 for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1305 buf + (chunk * info->chunk_size),
1309 (chunk * (info->spare_size)),
1312 chip->oob_poi + ecc_off_buf +
1313 (chunk * (info->ecc_size)),
1314 info->ecc_size - 2);
1317 if (info->ntotalchunks > info->nfullchunks) {
1319 buf + (info->nfullchunks * info->chunk_size),
1320 info->last_chunk_size);
1323 (info->nfullchunks * (info->spare_size)),
1324 info->last_spare_size);
1326 chip->oob_poi + ecc_off_buf +
1327 (info->nfullchunks * (info->ecc_size)),
1328 info->ecc_size - 2);
1331 info->force_raw = false;
/*
 * ecc.read_oob_raw: implemented as a full raw page read into the chip's
 * scratch buffer so oob_poi gets populated as a side effect.
 */
1336 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1337 struct nand_chip *chip, int page)
1339 /* Invalidate page cache */
1342 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
/*
 * ->read_byte(): return the next byte from the driver's data buffer,
 * filled by the previous command; buf_start tracks the read cursor.
 */
1346 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1348 struct nand_chip *chip = mtd_to_nand(mtd);
1349 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1350 struct pxa3xx_nand_info *info = host->info_data;
1353 if (info->buf_start < info->buf_count)
1354 /* Has just send a new command? */
1355 retval = info->data_buff[info->buf_start++];
/*
 * pxa3xx_nand_read_word - return the next 16-bit word from the data buffer.
 *
 * Only services reads at an even buffer offset that stay within buf_count;
 * otherwise the 0xFFFF default is returned.
 */
1360 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1362 struct nand_chip *chip = mtd_to_nand(mtd);
1363 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1364 struct pxa3xx_nand_info *info = host->info_data;
1365 u16 retval = 0xFFFF;
1367 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1368 retval = *((u16 *)(info->data_buff+info->buf_start));
1369 info->buf_start += 2;
/*
 * pxa3xx_nand_read_buf - copy up to @len bytes from the controller's data
 * buffer into @buf, clamped to the bytes remaining, and advance buf_start.
 */
1374 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1376 struct nand_chip *chip = mtd_to_nand(mtd);
1377 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1378 struct pxa3xx_nand_info *info = host->info_data;
/* Never read past the end of the staged data. */
1379 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1381 memcpy(buf, info->data_buff + info->buf_start, real_len);
1382 info->buf_start += real_len;
/*
 * pxa3xx_nand_write_buf - stage up to @len bytes from @buf into the
 * controller's data buffer, clamped to the space remaining, and advance
 * buf_start.  Mirror of pxa3xx_nand_read_buf().
 */
1385 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1386 const uint8_t *buf, int len)
1388 struct nand_chip *chip = mtd_to_nand(mtd);
1389 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1390 struct pxa3xx_nand_info *info = host->info_data;
/* Never write past the end of the staging buffer. */
1391 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1393 memcpy(info->data_buff + info->buf_start, buf, real_len);
1394 info->buf_start += real_len;
/*
 * pxa3xx_nand_select_chip - nand_chip ->select_chip hook.
 * Body not visible in this extract; presumably a no-op since chip
 * selection is handled elsewhere — TODO confirm against full source.
 */
1397 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * pxa3xx_nand_waitfunc - wait for the current NAND operation to finish.
 *
 * If a wait is pending (info->need_wait), polls NDSR / the irq handler
 * until the device reports ready or CHIP_DELAY_TIMEOUT ms elapse, in
 * which case NAND_STATUS_FAIL is returned.  For program/erase, the
 * recorded info->retcode decides between success and NAND_STATUS_FAIL.
 *
 * NOTE(review): lossy extract — loop structure and some branches sit on
 * lines that are not visible here.
 */
1402 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1404 struct nand_chip *chip = mtd_to_nand(mtd);
1405 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1406 struct pxa3xx_nand_info *info = host->info_data;
1408 if (info->need_wait) {
1411 info->need_wait = 0;
1417 status = nand_readl(info, NDSR);
1419 pxa3xx_nand_irq(info);
1421 if (info->dev_ready)
1424 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1425 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1426 return NAND_STATUS_FAIL;
1431 /* pxa3xx_nand_send_command has waited for command complete */
1432 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1433 if (info->retcode == ERR_NONE)
1436 return NAND_STATUS_FAIL;
1439 return NAND_STATUS_READY;
/*
 * pxa3xx_nand_config_ident - program conservative NDCR defaults used
 * during flash identification: all interrupts enabled, optional bus
 * arbiter, READ_ID byte count, and the spare area enabled.
 */
1442 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1444 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1446 /* Configure default flash values */
1447 info->reg_ndcr = 0x0; /* enable all interrupts */
1448 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1449 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1450 info->reg_ndcr |= NDCR_SPARE_EN;
/*
 * pxa3xx_nand_config_tail - fold the geometry discovered during scan
 * into NDCR: row-address start for 2 column-address cycles, 64 pages
 * per block (page_shift == 6), and the 2 KiB page-size flag.
 */
1455 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1457 struct pxa3xx_nand_host *host = info->host[info->cs];
1458 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1459 struct nand_chip *chip = mtd_to_nand(mtd);
1461 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1462 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1463 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
/*
 * pxa3xx_nand_detect_config - inherit the controller configuration left
 * by the previous boot stage ("keep_config" mode): derive the chunk size
 * from the NDCR page-size bit, preserve NDCR minus the interrupt-mask and
 * arbiter bits, and snapshot the CS0 timing registers.
 */
1466 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1468 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1469 uint32_t ndcr = nand_readl(info, NDCR);
1471 /* Set an initial chunk size */
1472 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1473 info->reg_ndcr = ndcr &
1474 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1475 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1476 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1477 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/*
 * pxa3xx_nand_init_buff - allocate the data_buff staging buffer of
 * info->buf_size bytes.  The error-return line for a failed allocation
 * is not visible in this extract.
 */
1480 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1482 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1483 if (info->data_buff == NULL)
/*
 * pxa3xx_nand_sensing - probe whether a NAND chip responds on the
 * current chip select.
 *
 * Programs the identification-time NDCR defaults (same values as
 * pxa3xx_nand_config_ident()), applies the slowest common ONFI timing
 * mode 0, issues a RESET and checks the wait status.  A failed wait
 * indicates no chip is present.
 */
1488 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1490 struct pxa3xx_nand_info *info = host->info_data;
1491 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1492 struct mtd_info *mtd;
1493 struct nand_chip *chip;
1494 const struct nand_sdr_timings *timings;
1497 mtd = nand_to_mtd(&info->host[info->cs]->chip);
1498 chip = mtd_to_nand(mtd);
1500 /* configure default flash values */
1501 info->reg_ndcr = 0x0; /* enable all interrupts */
1502 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1503 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1504 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1506 /* use the common timing to make a try */
1507 timings = onfi_async_timing_mode_to_sdr_timings(0);
1508 if (IS_ERR(timings))
1509 return PTR_ERR(timings);
1511 pxa3xx_nand_set_sdr_timing(host, timings);
1513 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1514 ret = chip->waitfunc(mtd, chip);
1515 if (ret & NAND_STATUS_FAIL)
/*
 * pxa_ecc_init - select the controller ECC geometry for the requested
 * (strength, step size, page size) triple.
 *
 * Fills in the chunk layout (nfullchunks/ntotalchunks, chunk, spare and
 * ECC sizes, plus last_chunk/last_spare for uneven layouts) and the
 * nand_ecc_ctrl mode/size/layout.  Unsupported combinations log an
 * error.  Only 512-byte ECC steps are handled.
 *
 * NOTE(review): lossy extract — some assignments (ecc->strength, Hamming
 * layouts, return statements) sit on lines that are not visible here.
 */
1521 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1522 struct nand_ecc_ctrl *ecc,
1523 int strength, int ecc_stepsize, int page_size)
/* 1-bit Hamming, 2 KiB pages: single 2048-byte chunk, 40 spare bytes. */
1525 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1526 info->nfullchunks = 1;
1527 info->ntotalchunks = 1;
1528 info->chunk_size = 2048;
1529 info->spare_size = 40;
1530 info->ecc_size = 24;
1531 ecc->mode = NAND_ECC_HW;
/* 1-bit Hamming, 512-byte pages. */
1535 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1536 info->nfullchunks = 1;
1537 info->ntotalchunks = 1;
1538 info->chunk_size = 512;
1539 info->spare_size = 8;
1541 ecc->mode = NAND_ECC_HW;
1546 * Required ECC: 4-bit correction per 512 bytes
1547 * Select: 16-bit correction per 2048 bytes
1549 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1551 info->nfullchunks = 1;
1552 info->ntotalchunks = 1;
1553 info->chunk_size = 2048;
1554 info->spare_size = 32;
1555 info->ecc_size = 32;
1556 ecc->mode = NAND_ECC_HW;
1557 ecc->size = info->chunk_size;
1558 ecc->layout = &ecc_layout_2KB_bch4bit;
1561 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1563 info->nfullchunks = 2;
1564 info->ntotalchunks = 2;
1565 info->chunk_size = 2048;
1566 info->spare_size = 32;
1567 info->ecc_size = 32;
1568 ecc->mode = NAND_ECC_HW;
1569 ecc->size = info->chunk_size;
1570 ecc->layout = &ecc_layout_4KB_bch4bit;
1573 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1575 info->nfullchunks = 4;
1576 info->ntotalchunks = 4;
1577 info->chunk_size = 2048;
1578 info->spare_size = 32;
1579 info->ecc_size = 32;
1580 ecc->mode = NAND_ECC_HW;
1581 ecc->size = info->chunk_size;
1582 ecc->layout = &ecc_layout_8KB_bch4bit;
1586 * Required ECC: 8-bit correction per 512 bytes
1587 * Select: 16-bit correction per 1024 bytes
1589 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1591 info->nfullchunks = 1;
1592 info->ntotalchunks = 2;
1593 info->chunk_size = 1024;
1594 info->spare_size = 0;
1595 info->last_chunk_size = 1024;
1596 info->last_spare_size = 32;
1597 info->ecc_size = 32;
1598 ecc->mode = NAND_ECC_HW;
1599 ecc->size = info->chunk_size;
1600 ecc->layout = &ecc_layout_2KB_bch8bit;
1603 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1605 info->nfullchunks = 4;
1606 info->ntotalchunks = 5;
1607 info->chunk_size = 1024;
1608 info->spare_size = 0;
1609 info->last_chunk_size = 0;
1610 info->last_spare_size = 64;
1611 info->ecc_size = 32;
1612 ecc->mode = NAND_ECC_HW;
1613 ecc->size = info->chunk_size;
1614 ecc->layout = &ecc_layout_4KB_bch8bit;
1617 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1619 info->nfullchunks = 8;
1620 info->ntotalchunks = 9;
1621 info->chunk_size = 1024;
1622 info->spare_size = 0;
1623 info->last_chunk_size = 0;
1624 info->last_spare_size = 160;
1625 info->ecc_size = 32;
1626 ecc->mode = NAND_ECC_HW;
1627 ecc->size = info->chunk_size;
1628 ecc->layout = &ecc_layout_8KB_bch8bit;
/* No layout matched: report the unsupported combination. */
1632 dev_err(&info->pdev->dev,
1633 "ECC strength %d at page size %d is not supported\n",
1634 strength, page_size);
/*
 * pxa3xx_nand_scan - full device detection and setup for one mtd device.
 *
 * Flow: keep or re-program the controller config, sense the chip, disable
 * the Armada 370 ECC engine for identification, run nand_scan_ident(),
 * apply timings, optionally enable a flash-based BBT, choose the ECC
 * strength/step (platform data first, then the chip's datasheet values),
 * configure the ECC engine, switch to the extended command handler when
 * the page exceeds the FIFO, compute address cycles, reallocate the data
 * buffer for page + OOB, and finish with nand_scan_tail().
 *
 * NOTE(review): lossy extract — several error-handling and return lines
 * are not visible between the lines below.
 */
1641 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1643 struct nand_chip *chip = mtd_to_nand(mtd);
1644 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1645 struct pxa3xx_nand_info *info = host->info_data;
1646 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1648 uint16_t ecc_strength, ecc_step;
1650 if (pdata->keep_config) {
1651 pxa3xx_nand_detect_config(info);
1653 ret = pxa3xx_nand_config_ident(info);
1656 ret = pxa3xx_nand_sensing(host);
1658 dev_info(&info->pdev->dev,
1659 "There is no chip on cs %d!\n",
1665 /* Device detection must be done with ECC disabled */
1666 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1667 nand_writel(info, NDECCCTRL, 0x0);
1669 if (nand_scan_ident(mtd, 1, NULL))
1672 if (!pdata->keep_config) {
1673 ret = pxa3xx_nand_init_timings(host);
1675 dev_err(&info->pdev->dev,
1676 "Failed to set timings: %d\n", ret);
1681 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1683 * We'll use a bad block table stored in-flash and don't
1684 * allow writing the bad block marker to the flash.
1686 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1687 chip->bbt_td = &bbt_main_descr;
1688 chip->bbt_md = &bbt_mirror_descr;
/* Platform-provided ECC requirements win over the chip's own. */
1691 if (pdata->ecc_strength && pdata->ecc_step_size) {
1692 ecc_strength = pdata->ecc_strength;
1693 ecc_step = pdata->ecc_step_size;
1695 ecc_strength = chip->ecc_strength_ds;
1696 ecc_step = chip->ecc_step_ds;
1699 /* Set default ECC strength requirements on non-ONFI devices */
1700 if (ecc_strength < 1 && ecc_step < 1) {
1705 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1706 ecc_step, mtd->writesize);
1711 * If the page size is bigger than the FIFO size, let's check
1712 * we are given the right variant and then switch to the extended
1713 * (aka split) command handling,
1715 if (mtd->writesize > info->chunk_size) {
1716 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1717 chip->cmdfunc = nand_cmdfunc_extended;
1719 dev_err(&info->pdev->dev,
1720 "unsupported page size on this variant\n");
1725 /* calculate addressing information */
1726 if (mtd->writesize >= 2048)
1727 host->col_addr_cycles = 2;
1729 host->col_addr_cycles = 1;
1731 /* release the initial buffer */
1732 kfree(info->data_buff);
1734 /* allocate the real data + oob buffer */
1735 info->buf_size = mtd->writesize + mtd->oobsize;
1736 ret = pxa3xx_nand_init_buff(info);
1739 info->oob_buff = info->data_buff + mtd->writesize;
/* Devices with more than 64K pages need a third row-address cycle. */
1741 if ((mtd->size >> chip->page_shift) > 65536)
1742 host->row_addr_cycles = 3;
1744 host->row_addr_cycles = 2;
1746 if (!pdata->keep_config)
1747 pxa3xx_nand_config_tail(info);
1749 return nand_scan_tail(mtd);
/*
 * alloc_nand_resource - wire up the nand_chip for every configured chip
 * select and allocate the initial detection buffer.
 *
 * The host structures live in the memory immediately after *info (one
 * per CS); each chip gets this driver's read/write/ecc/command hooks.
 * A buffer of INIT_BUFFER_SIZE bytes is allocated for flash detection
 * and all controller interrupts are masked.
 *
 * NOTE(review): lossy extract — error paths and the final return are on
 * lines not visible here.
 */
1752 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1754 struct pxa3xx_nand_platform_data *pdata;
1755 struct pxa3xx_nand_host *host;
1756 struct nand_chip *chip = NULL;
1757 struct mtd_info *mtd;
1760 pdata = info->pdata;
1761 if (pdata->num_cs <= 0)
1764 info->variant = pxa3xx_nand_get_variant();
1765 for (cs = 0; cs < pdata->num_cs; cs++) {
/* hosts are laid out back-to-back right after the info struct */
1766 chip = (struct nand_chip *)
1767 ((u8 *)&info[1] + sizeof(*host) * cs);
1768 mtd = nand_to_mtd(chip);
1769 host = (struct pxa3xx_nand_host *)chip;
1770 info->host[cs] = host;
1772 host->info_data = info;
1773 mtd->owner = THIS_MODULE;
1775 nand_set_controller_data(chip, host);
1776 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1777 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1778 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
1779 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1780 chip->controller = &info->controller;
1781 chip->waitfunc = pxa3xx_nand_waitfunc;
1782 chip->select_chip = pxa3xx_nand_select_chip;
1783 chip->read_word = pxa3xx_nand_read_word;
1784 chip->read_byte = pxa3xx_nand_read_byte;
1785 chip->read_buf = pxa3xx_nand_read_buf;
1786 chip->write_buf = pxa3xx_nand_write_buf;
1787 chip->options |= NAND_NO_SUBPAGE_WRITE;
1788 chip->cmdfunc = nand_cmdfunc;
1791 /* Allocate a buffer to allow flash detection */
1792 info->buf_size = INIT_BUFFER_SIZE;
1793 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1794 if (info->data_buff == NULL) {
1796 goto fail_disable_clk;
1799 /* initialize all interrupts to be disabled */
1800 disable_int(info, NDSR_MASK);
1804 kfree(info->data_buff);
/*
 * pxa3xx_nand_probe_dt - build platform data from the device tree.
 *
 * Walks "marvell,mvebu-pxa3xx-nand" compatible nodes, skips disabled
 * ones, and for the first enabled controller reads the register base,
 * num-cs (only 1 supported), nand-enable-arbiter, nand-keep-config,
 * nand-ecc-strength and nand-ecc-step-size properties into a freshly
 * kzalloc'd pdata that is stored in info->pdata.
 *
 * NOTE(review): lossy extract — the allocation check, loop head and
 * return lines are not visible here.
 */
1809 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1811 struct pxa3xx_nand_platform_data *pdata;
1812 const void *blob = gd->fdt_blob;
1815 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1819 /* Get address decoding nodes from the FDT blob */
1821 node = fdt_node_offset_by_compatible(blob, node,
1822 "marvell,mvebu-pxa3xx-nand");
1826 /* Bypass disabled nodes */
1827 if (!fdtdec_get_is_enabled(blob, node))
1830 /* Get the first enabled NAND controller base address */
1832 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1833 blob, node, "reg", 0, NULL, true);
1835 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1836 if (pdata->num_cs != 1) {
1837 pr_err("pxa3xx driver supports single CS only\n");
1841 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1842 pdata->enable_arbiter = 1;
1844 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1845 pdata->keep_config = 1;
1849 * If these are not set, they will be selected according
1850 * to the detected flash type.
1853 pdata->ecc_strength = fdtdec_get_int(blob, node,
1854 "nand-ecc-strength", 0);
1857 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1858 "nand-ecc-step-size", 0);
1860 info->pdata = pdata;
1862 /* Currently support only a single NAND controller */
1865 } while (node >= 0);
/*
 * pxa3xx_nand_probe - top-level probe: parse the DT, allocate controller
 * resources, then scan and register an mtd device per chip select.
 *
 * The mtd name is fixed to "pxa3xx_nand-0" so existing 'mtdparts' kernel
 * command lines keep matching.
 *
 * NOTE(review): lossy extract — error checks between the visible calls
 * and the final return/probe_success handling are not visible here.
 */
1870 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1872 struct pxa3xx_nand_platform_data *pdata;
1873 int ret, cs, probe_success;
1875 ret = pxa3xx_nand_probe_dt(info);
1879 pdata = info->pdata;
1881 ret = alloc_nand_resource(info);
1883 dev_err(&pdev->dev, "alloc nand resource failed\n");
1888 for (cs = 0; cs < pdata->num_cs; cs++) {
1889 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1892 * The mtd name matches the one used in 'mtdparts' kernel
1893 * parameter. This name cannot be changed or otherwise
1894 * user's mtd partitions configuration would get broken.
1896 mtd->name = "pxa3xx_nand-0";
1898 ret = pxa3xx_nand_scan(mtd);
1900 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1905 if (nand_register(cs, mtd))
1918 * Main initialization routine
1920 void board_nand_init(void)
1922 struct pxa3xx_nand_info *info;
1923 struct pxa3xx_nand_host *host;
1926 info = kzalloc(sizeof(*info) +
1927 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1932 ret = pxa3xx_nand_probe(info);