1 // SPDX-License-Identifier: GPL-2.0
3 * drivers/mtd/nand/raw/pxa3xx_nand.c
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/bug.h>
16 #include <linux/delay.h>
17 #include <linux/err.h>
18 #include <linux/errno.h>
20 #include <asm/arch/cpu.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/rawnand.h>
23 #include <linux/types.h>
25 #include "pxa3xx_nand.h"
27 DECLARE_GLOBAL_DATA_PTR;
29 #define TIMEOUT_DRAIN_FIFO 5 /* in ms */
30 #define CHIP_DELAY_TIMEOUT 200
31 #define NAND_STOP_DELAY 40
34 * Define a buffer size for the initial command that detects the flash device:
35 * STATUS, READID and PARAM.
36 * ONFI param page is 256 bytes, and there are three redundant copies
37 * to be read. JEDEC param page is 512 bytes, and there are also three
38 * redundant copies to be read.
39 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
41 #define INIT_BUFFER_SIZE 2048
43 /* registers and bit definitions */
44 #define NDCR (0x00) /* Control register */
45 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
46 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
47 #define NDSR (0x14) /* Status Register */
48 #define NDPCR (0x18) /* Page Count Register */
49 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
50 #define NDBDR1 (0x20) /* Bad Block Register 1 */
51 #define NDECCCTRL (0x28) /* ECC control */
52 #define NDDB (0x40) /* Data Buffer */
53 #define NDCB0 (0x48) /* Command Buffer0 */
54 #define NDCB1 (0x4C) /* Command Buffer1 */
55 #define NDCB2 (0x50) /* Command Buffer2 */
57 #define NDCR_SPARE_EN (0x1 << 31)
58 #define NDCR_ECC_EN (0x1 << 30)
59 #define NDCR_DMA_EN (0x1 << 29)
60 #define NDCR_ND_RUN (0x1 << 28)
61 #define NDCR_DWIDTH_C (0x1 << 27)
62 #define NDCR_DWIDTH_M (0x1 << 26)
63 #define NDCR_PAGE_SZ (0x1 << 24)
64 #define NDCR_NCSX (0x1 << 23)
65 #define NDCR_ND_MODE (0x3 << 21)
66 #define NDCR_NAND_MODE (0x0)
67 #define NDCR_CLR_PG_CNT (0x1 << 20)
68 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
69 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
70 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
72 #define NDCR_RA_START (0x1 << 15)
73 #define NDCR_PG_PER_BLK (0x1 << 14)
74 #define NDCR_ND_ARB_EN (0x1 << 12)
75 #define NDCR_INT_MASK (0xFFF)
77 #define NDSR_MASK (0xfff)
78 #define NDSR_ERR_CNT_OFF (16)
79 #define NDSR_ERR_CNT_MASK (0x1f)
80 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
81 #define NDSR_RDY (0x1 << 12)
82 #define NDSR_FLASH_RDY (0x1 << 11)
83 #define NDSR_CS0_PAGED (0x1 << 10)
84 #define NDSR_CS1_PAGED (0x1 << 9)
85 #define NDSR_CS0_CMDD (0x1 << 8)
86 #define NDSR_CS1_CMDD (0x1 << 7)
87 #define NDSR_CS0_BBD (0x1 << 6)
88 #define NDSR_CS1_BBD (0x1 << 5)
89 #define NDSR_UNCORERR (0x1 << 4)
90 #define NDSR_CORERR (0x1 << 3)
91 #define NDSR_WRDREQ (0x1 << 2)
92 #define NDSR_RDDREQ (0x1 << 1)
93 #define NDSR_WRCMDREQ (0x1)
95 #define NDCB0_LEN_OVRD (0x1 << 28)
96 #define NDCB0_ST_ROW_EN (0x1 << 26)
97 #define NDCB0_AUTO_RS (0x1 << 25)
98 #define NDCB0_CSEL (0x1 << 24)
99 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
100 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
101 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
102 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
103 #define NDCB0_NC (0x1 << 20)
104 #define NDCB0_DBC (0x1 << 19)
105 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
106 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
107 #define NDCB0_CMD2_MASK (0xff << 8)
108 #define NDCB0_CMD1_MASK (0xff)
109 #define NDCB0_ADDR_CYC_SHIFT (16)
111 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
112 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
113 #define EXT_CMD_TYPE_READ 4 /* Read */
114 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
115 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
116 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
117 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
120 * This should be large enough to read 'ONFI' and 'JEDEC'.
121 * Let's use 7 bytes, which is the maximum ID count supported
122 * by the controller (see NDCR_RD_ID_CNT_MASK).
124 #define READ_ID_BYTES 7
126 /* macros for registers read/write */
127 #define nand_writel(info, off, val) \
128 writel((val), (info)->mmio_base + (off))
130 #define nand_readl(info, off) \
131 readl((info)->mmio_base + (off))
133 /* error code and state */
/*
 * Controller variants handled by this driver: NFCv1 (PXA SoCs) and
 * NFCv2 (Armada 370/XP), per the comment in struct pxa3xx_nand_info.
 * NOTE(review): this listing is line-sampled; the enum's closing
 * brace (original line 159) is not visible here.
 */
156 enum pxa3xx_nand_variant {
157 PXA3XX_NAND_VARIANT_PXA,
158 PXA3XX_NAND_VARIANT_ARMADA370,
/*
 * Per-chip-select state.  Wraps the generic nand_chip plus the address
 * cycle counts derived from the flash geometry.
 * NOTE(review): elided listing — fields referenced elsewhere in this
 * file (e.g. host->info_data, host->cs) are missing from view.
 */
161 struct pxa3xx_nand_host {
162 struct nand_chip chip;
165 /* page size of attached chip */
169 /* calculated from pxa3xx_nand_flash data */
170 unsigned int col_addr_cycles;
171 unsigned int row_addr_cycles;
/*
 * Driver-wide state: MMIO mapping, PIO data/OOB buffers and their
 * cursors, page chunking geometry, and per-command scratch variables.
 * NOTE(review): elided listing — fields used by the functions below
 * (reg_ndcr, ndtr0cs0/ndtr1cs0, ndcb0..ndcb3, state, retcode,
 * need_wait, cs, ...) are declared in lines not visible here.
 */
174 struct pxa3xx_nand_info {
175 struct nand_hw_control controller;
176 struct pxa3xx_nand_platform_data *pdata;
179 void __iomem *mmio_base;
180 unsigned long mmio_phys;
181 int cmd_complete, dev_ready;
/* PIO buffer bookkeeping: window into data_buff/oob_buff for the
 * read_byte/read_buf/write_buf accessors below. */
183 unsigned int buf_start;
184 unsigned int buf_count;
185 unsigned int buf_size;
186 unsigned int data_buff_pos;
187 unsigned int oob_buff_pos;
189 unsigned char *data_buff;
190 unsigned char *oob_buff;
192 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
196 * This driver supports NFCv1 (as found in PXA SoC)
197 * and NFCv2 (as found in Armada 370/XP SoC).
199 enum pxa3xx_nand_variant variant;
202 int use_ecc; /* use HW ECC ? */
203 int force_raw; /* prevent use_ecc to be set */
204 int ecc_bch; /* using BCH ECC? */
205 int use_spare; /* use spare ? */
208 /* Amount of real data per full chunk */
209 unsigned int chunk_size;
211 /* Amount of spare data per full chunk */
212 unsigned int spare_size;
214 /* Number of full chunks (i.e chunk_size + spare_size) */
215 unsigned int nfullchunks;
218 * Total number of chunks. If equal to nfullchunks, then there
219 * are only full chunks. Otherwise, there is one last chunk of
220 * size (last_chunk_size + last_spare_size)
222 unsigned int ntotalchunks;
224 /* Amount of real data in the last chunk */
225 unsigned int last_chunk_size;
227 /* Amount of spare data in the last chunk */
228 unsigned int last_spare_size;
230 unsigned int ecc_size;
231 unsigned int ecc_err_cnt;
232 unsigned int max_bitflips;
236 * Variables only valid during command
237 * execution. step_chunk_size and step_spare_size is the
238 * amount of real data and spare data in the current
239 * chunk. cur_chunk is the current chunk being
242 unsigned int step_chunk_size;
243 unsigned int step_spare_size;
244 unsigned int cur_chunk;
246 /* cached register value */
251 /* generated NDCBx register values */
/*
 * Legacy timing table indexed by builtin_flash_types[].timing; column
 * meanings are given by the tCH..tAR legend below (values in ns,
 * converted to controller clock cycles by ns2cycle()).
 */
258 static struct pxa3xx_nand_timing timing[] = {
260 * tCH Enable signal hold time
261 * tCS Enable signal setup time
262 * tWH ND_nWE high duration
263 * tWP ND_nWE pulse time
264 * tRH ND_nRE high duration
265 * tRP ND_nRE pulse width
266 * tR ND_nWE high to ND_nRE low for read
267 * tWHR ND_nWE high to ND_nRE low for status read
268 * tAR ND_ALE low to ND_nRE low delay
270 /*ch cs wh wp rh rp r whr ar */
271 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
272 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
273 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
274 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
275 { 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
/*
 * Fallback flash identification table, used by
 * pxa3xx_nand_init_timings() when ONFI timing mode detection fails:
 * matched against the two READID bytes (chip_id), then selects bus
 * widths and a timing[] entry.
 */
278 static struct pxa3xx_nand_flash builtin_flash_types[] = {
281 * flash_width Width of Flash memory (DWIDTH_M)
282 * dfc_width Width of flash controller(DWIDTH_C)
284 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
286 { 0x46ec, 16, 16, &timing[1] },
287 { 0xdaec, 8, 8, &timing[1] },
288 { 0xd7ec, 8, 8, &timing[1] },
289 { 0xa12c, 8, 8, &timing[2] },
290 { 0xb12c, 16, 16, &timing[2] },
291 { 0xdc2c, 8, 8, &timing[2] },
292 { 0xcc2c, 16, 16, &timing[2] },
293 { 0xba20, 16, 16, &timing[3] },
294 { 0xda98, 8, 8, &timing[4] },
/*
 * On-flash bad block table support: main and mirror BBT descriptors
 * stored in the last 8 blocks of each chip, identified by the
 * "MVBbt0" / "1tbBVM" patterns.
 * NOTE(review): elided listing — the .offs/.len/.veroffs fields and
 * the closing "};" of each descriptor are not visible here.
 */
297 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
298 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
299 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
301 static struct nand_bbt_descr bbt_main_descr = {
302 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
303 | NAND_BBT_2BIT | NAND_BBT_VERSION,
307 .maxblocks = 8, /* Last 8 blocks in each chip */
308 .pattern = bbt_pattern
311 static struct nand_bbt_descr bbt_mirror_descr = {
312 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
313 | NAND_BBT_2BIT | NAND_BBT_VERSION,
317 .maxblocks = 8, /* Last 8 blocks in each chip */
318 .pattern = bbt_mirror_pattern
/*
 * OOB layouts for the supported page-size / BCH-strength combinations.
 * NOTE(review): elided listing — each layout's .eccbytes count and the
 * ".eccpos = {" opener (and trailing "};") fall in lines not visible
 * here; only the position arrays and .oobfree entries remain.
 */
322 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
325 32, 33, 34, 35, 36, 37, 38, 39,
326 40, 41, 42, 43, 44, 45, 46, 47,
327 48, 49, 50, 51, 52, 53, 54, 55,
328 56, 57, 58, 59, 60, 61, 62, 63},
329 .oobfree = { {2, 30} }
332 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
335 32, 33, 34, 35, 36, 37, 38, 39,
336 40, 41, 42, 43, 44, 45, 46, 47,
337 48, 49, 50, 51, 52, 53, 54, 55,
338 56, 57, 58, 59, 60, 61, 62, 63,
339 64, 65, 66, 67, 68, 69, 70, 71,
340 72, 73, 74, 75, 76, 77, 78, 79,
341 80, 81, 82, 83, 84, 85, 86, 87,
342 88, 89, 90, 91, 92, 93, 94, 95},
343 .oobfree = { {1, 4}, {6, 26} }
346 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
349 32, 33, 34, 35, 36, 37, 38, 39,
350 40, 41, 42, 43, 44, 45, 46, 47,
351 48, 49, 50, 51, 52, 53, 54, 55,
352 56, 57, 58, 59, 60, 61, 62, 63,
353 96, 97, 98, 99, 100, 101, 102, 103,
354 104, 105, 106, 107, 108, 109, 110, 111,
355 112, 113, 114, 115, 116, 117, 118, 119,
356 120, 121, 122, 123, 124, 125, 126, 127},
357 /* Bootrom looks in bytes 0 & 5 for bad blocks */
358 .oobfree = { {6, 26}, { 64, 32} }
361 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
364 32, 33, 34, 35, 36, 37, 38, 39,
365 40, 41, 42, 43, 44, 45, 46, 47,
366 48, 49, 50, 51, 52, 53, 54, 55,
367 56, 57, 58, 59, 60, 61, 62, 63,
369 96, 97, 98, 99, 100, 101, 102, 103,
370 104, 105, 106, 107, 108, 109, 110, 111,
371 112, 113, 114, 115, 116, 117, 118, 119,
372 120, 121, 122, 123, 124, 125, 126, 127,
374 160, 161, 162, 163, 164, 165, 166, 167,
375 168, 169, 170, 171, 172, 173, 174, 175,
376 176, 177, 178, 179, 180, 181, 182, 183,
377 184, 185, 186, 187, 188, 189, 190, 191,
379 224, 225, 226, 227, 228, 229, 230, 231,
380 232, 233, 234, 235, 236, 237, 238, 239,
381 240, 241, 242, 243, 244, 245, 246, 247,
382 248, 249, 250, 251, 252, 253, 254, 255},
384 /* Bootrom looks in bytes 0 & 5 for bad blocks */
385 .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
388 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
391 32, 33, 34, 35, 36, 37, 38, 39,
392 40, 41, 42, 43, 44, 45, 46, 47,
393 48, 49, 50, 51, 52, 53, 54, 55,
394 56, 57, 58, 59, 60, 61, 62, 63},
398 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
401 /* HW ECC handles all ECC data and all spare area is free for OOB */
402 .oobfree = {{0, 160} }
405 #define NDTR0_tCH(c) (min((c), 7) << 19)
406 #define NDTR0_tCS(c) (min((c), 7) << 16)
407 #define NDTR0_tWH(c) (min((c), 7) << 11)
408 #define NDTR0_tWP(c) (min((c), 7) << 8)
409 #define NDTR0_tRH(c) (min((c), 7) << 3)
410 #define NDTR0_tRP(c) (min((c), 7) << 0)
412 #define NDTR1_tR(c) (min((c), 65535) << 16)
413 #define NDTR1_tWHR(c) (min((c), 15) << 4)
414 #define NDTR1_tAR(c) (min((c), 15) << 0)
416 /* convert nano-seconds to nand flash controller clock cycles */
417 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
/*
 * Return the controller variant.  Hard-wired to NFCv2
 * (PXA3XX_NAND_VARIANT_ARMADA370): this U-Boot build only targets
 * Armada 370/XP/38x, per the in-line comment.
 */
419 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
421 /* We only support the Armada 370/XP/38x for now */
422 return PXA3XX_NAND_VARIANT_ARMADA370;
/*
 * Program NDTR0CS0/NDTR1CS0 from a legacy timing[] entry.  Each ns
 * value is converted to controller clock cycles with ns2cycle() and
 * clamped by the NDTR0_*/NDTR1_* field macros.  The computed values
 * are also cached in info->ndtr0cs0/ndtr1cs0 so nand_cmdfunc() can
 * reprogram them on a chip-select change.
 */
425 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
426 const struct pxa3xx_nand_timing *t)
428 struct pxa3xx_nand_info *info = host->info_data;
429 unsigned long nand_clk = mvebu_get_nand_clock();
430 uint32_t ndtr0, ndtr1;
432 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
433 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
434 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
435 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
436 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
437 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
439 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
440 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
441 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
443 info->ndtr0cs0 = ndtr0;
444 info->ndtr1cs0 = ndtr1;
445 nand_writel(info, NDTR0CS0, ndtr0);
446 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Program NDTR0CS0/NDTR1CS0 from ONFI SDR timings.  The t*_min fields
 * are divided by 1000 before ns2cycle() — presumably they are in
 * picoseconds and are rounded up to ns here; confirm against struct
 * nand_sdr_timings.  tR is derived from chip->chip_delay (us -> ns).
 * NOTE(review): elided listing — the "fallback if tR == 0" code
 * announced by the comment (original lines 468-470) is not visible.
 */
449 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
450 const struct nand_sdr_timings *t)
452 struct pxa3xx_nand_info *info = host->info_data;
453 struct nand_chip *chip = &host->chip;
454 unsigned long nand_clk = mvebu_get_nand_clock();
455 uint32_t ndtr0, ndtr1;
457 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
458 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
459 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
460 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
461 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
462 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
463 u32 tR = chip->chip_delay * 1000;
464 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
465 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
467 /* fallback to a default value if tR = 0 */
471 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
472 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
473 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
474 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
475 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
476 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
478 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
479 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
480 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
482 info->ndtr0cs0 = ndtr0;
483 info->ndtr1cs0 = ndtr1;
484 nand_writel(info, NDTR0CS0, ndtr0);
485 nand_writel(info, NDTR1CS0, ndtr1);
/*
 * Detect and apply chip timings.  If the chip does not report an ONFI
 * async timing mode, issue READID, match the two ID bytes against
 * builtin_flash_types[] and use the legacy timing[] entry (also
 * setting the 16-bit bus flags); otherwise convert the highest ONFI
 * mode to SDR timings and apply those.
 * NOTE(review): elided listing — the loop "break", the return paths,
 * and the IS_ERR(timings) check around line 532 are not visible.
 */
488 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
490 const struct nand_sdr_timings *timings;
491 struct nand_chip *chip = &host->chip;
492 struct pxa3xx_nand_info *info = host->info_data;
493 const struct pxa3xx_nand_flash *f = NULL;
494 struct mtd_info *mtd = nand_to_mtd(&host->chip);
495 int mode, id, ntypes, i;
497 mode = onfi_get_async_timing_mode(chip);
498 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
499 ntypes = ARRAY_SIZE(builtin_flash_types);
501 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
503 id = chip->read_byte(mtd);
504 id |= chip->read_byte(mtd) << 0x8;
506 for (i = 0; i < ntypes; i++) {
507 f = &builtin_flash_types[i];
509 if (f->chip_id == id)
514 dev_err(&info->pdev->dev, "Error: timings not found\n");
518 pxa3xx_nand_set_timing(host, f->timing);
520 if (f->flash_width == 16) {
521 info->reg_ndcr |= NDCR_DWIDTH_M;
522 chip->options |= NAND_BUSWIDTH_16;
525 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
/* Use the highest supported ONFI async timing mode. */
527 mode = fls(mode) - 1;
531 timings = onfi_async_timing_mode_to_sdr_timings(mode);
533 return PTR_ERR(timings);
535 pxa3xx_nand_set_sdr_timing(host, timings);
542 * NOTE: it is a must to set ND_RUN first, then write
543 * command buffer, otherwise, it does not work.
544 * We enable all the interrupt at the same time, and
545 * let pxa3xx_nand_irq to handle all logic.
/*
 * Arm the controller for the prepared command: build NDCR from the
 * cached reg_ndcr (ECC/BCH via NDECCCTRL, spare enable, DMA off),
 * clear all NDSR status bits, then write NDCR to start.
 * NOTE(review): elided listing — the lines that set NDCR_ECC_EN and
 * NDCR_ND_RUN (and the surrounding if/else) are not visible here.
 */
547 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
551 ndcr = info->reg_ndcr;
556 nand_writel(info, NDECCCTRL, 0x1);
558 ndcr &= ~NDCR_ECC_EN;
560 nand_writel(info, NDECCCTRL, 0x0);
563 ndcr &= ~NDCR_DMA_EN;
566 ndcr |= NDCR_SPARE_EN;
568 ndcr &= ~NDCR_SPARE_EN;
572 /* clear status bits and run */
573 nand_writel(info, NDSR, NDSR_MASK);
574 nand_writel(info, NDCR, 0);
575 nand_writel(info, NDCR, ndcr);
/*
 * Mask interrupts: set the given NDCR_INT_MASK bits in NDCR
 * (read-modify-write; a set bit masks the corresponding interrupt).
 * NOTE(review): the local "uint32_t ndcr;" declaration falls in an
 * elided line of this listing.
 */
578 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
582 ndcr = nand_readl(info, NDCR);
583 nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read 'len' 32-bit words from the NDDB data FIFO into 'data'.  With
 * BCH enabled (and not in raw mode) the datasheet requires NDSR.RDDREQ
 * to be re-checked after every 32 bytes, so the FIFO is drained eight
 * words at a time with a TIMEOUT_DRAIN_FIFO-bounded poll in between;
 * otherwise a single readsl() suffices.
 * NOTE(review): elided listing — the loop framing and the timer-start
 * (ts) initialization are not visible here.
 */
586 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
588 if (info->ecc_bch && !info->force_raw) {
592 * According to the datasheet, when reading from NDDB
593 * with BCH enabled, after each 32 bytes reads, we
594 * have to make sure that the NDSR.RDDREQ bit is set.
596 * Drain the FIFO 8 32 bits reads at a time, and skip
597 * the polling on the last read.
600 readsl(info->mmio_base + NDDB, data, 8);
603 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
604 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
605 dev_err(&info->pdev->dev,
606 "Timeout on RDDREQ while draining the FIFO\n");
616 readsl(info->mmio_base + NDDB, data, len);
/*
 * PIO transfer of the current chunk between the driver buffers and the
 * NDDB FIFO, direction chosen by info->state (STATE_PIO_WRITING /
 * STATE_PIO_READING).  In raw mode the spare and ECC bytes ride along
 * in the data section; reorganization is deferred to read_page_raw().
 * Advances data_buff_pos/oob_buff_pos for multi-chunk pages.
 * NOTE(review): elided listing — the force_raw conditional, the read
 * path's drain_fifo() calls, and the break statements are partially
 * missing from view.
 */
619 static void handle_data_pio(struct pxa3xx_nand_info *info)
621 int data_len = info->step_chunk_size;
624 * In raw mode, include the spare area and the ECC bytes that are not
625 * consumed by the controller in the data section. Do not reorganize
626 * here, do it in the ->read_page_raw() handler instead.
629 data_len += info->step_spare_size + info->ecc_size;
631 switch (info->state) {
632 case STATE_PIO_WRITING:
633 if (info->step_chunk_size)
634 writesl(info->mmio_base + NDDB,
635 info->data_buff + info->data_buff_pos,
636 DIV_ROUND_UP(data_len, 4));
638 if (info->step_spare_size)
639 writesl(info->mmio_base + NDDB,
640 info->oob_buff + info->oob_buff_pos,
641 DIV_ROUND_UP(info->step_spare_size, 4));
643 case STATE_PIO_READING:
646 info->data_buff + info->data_buff_pos,
647 DIV_ROUND_UP(data_len, 4));
652 if (info->step_spare_size)
654 info->oob_buff + info->oob_buff_pos,
655 DIV_ROUND_UP(info->step_spare_size, 4));
658 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
663 /* Update buffer pointers for multi-page read/write */
664 info->data_buff_pos += data_len;
665 info->oob_buff_pos += info->step_spare_size;
/*
 * "Threaded" half of the interrupt handler, called synchronously from
 * pxa3xx_nand_irq() in U-Boot: do the PIO transfer, then mark the
 * command done and acknowledge the data-request bits in NDSR.
 */
668 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
670 handle_data_pio(info);
672 info->state = STATE_CMD_DONE;
673 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Polled interrupt handler: decode NDSR, record ECC results
 * (uncorrectable / corrected-with-count on NFCv2), run the PIO
 * transfer on RDDREQ/WRDREQ, note command-done / ready, ack the
 * handled bits, and on WRCMDREQ push the prepared NDCB0..NDCB3
 * command words into the NDCB0 FIFO (NDCB3 only on NFCv2).
 * NOTE(review): elided listing — the cs==1 branch, the is_completed /
 * is_ready bookkeeping, and the function's tail (need_wait handling,
 * return) are partially missing from view.
 */
676 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
678 unsigned int status, is_completed = 0, is_ready = 0;
679 unsigned int ready, cmd_done;
680 irqreturn_t ret = IRQ_HANDLED;
683 ready = NDSR_FLASH_RDY;
684 cmd_done = NDSR_CS0_CMDD;
687 cmd_done = NDSR_CS1_CMDD;
690 /* TODO - find out why we need the delay during write operation. */
693 status = nand_readl(info, NDSR);
695 if (status & NDSR_UNCORERR)
696 info->retcode = ERR_UNCORERR;
697 if (status & NDSR_CORERR) {
698 info->retcode = ERR_CORERR;
699 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
701 info->ecc_err_cnt = NDSR_ERR_CNT(status);
703 info->ecc_err_cnt = 1;
706 * Each chunk composing a page is corrected independently,
707 * and we need to store maximum number of corrected bitflips
708 * to return it to the MTD layer in ecc.read_page().
710 info->max_bitflips = max_t(unsigned int,
714 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
715 info->state = (status & NDSR_RDDREQ) ?
716 STATE_PIO_READING : STATE_PIO_WRITING;
717 /* Call the IRQ thread in U-Boot directly */
718 pxa3xx_nand_irq_thread(info);
721 if (status & cmd_done) {
722 info->state = STATE_CMD_DONE;
725 if (status & ready) {
726 info->state = STATE_READY;
731 * Clear all status bit before issuing the next command, which
732 * can and will alter the status bits and will deserve a new
733 * interrupt on its own. This lets the controller exit the IRQ
735 nand_writel(info, NDSR, status);
737 if (status & NDSR_WRCMDREQ) {
738 status &= ~NDSR_WRCMDREQ;
739 info->state = STATE_CMD_HANDLE;
742 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
743 * must be loaded by writing directly either 12 or 16
744 * bytes directly to NDCB0, four bytes at a time.
746 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
747 * but each NDCBx register can be read.
749 nand_writel(info, NDCB0, info->ndcb0);
750 nand_writel(info, NDCB0, info->ndcb1);
751 nand_writel(info, NDCB0, info->ndcb2);
753 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
754 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
755 nand_writel(info, NDCB0, info->ndcb3);
759 info->cmd_complete = 1;
/*
 * Return non-zero when the whole buffer is erased-flash content.
 * NOTE(review): elided listing — the loop body (the 0xff comparison
 * and the return statements, original lines 769-772) is not visible.
 */
766 static inline int is_buf_blank(uint8_t *buf, size_t len)
768 for (; len > 0; len--)
/*
 * Pack column/page address cycles into NDCB1/NDCB2.  Small pages
 * (page_size below a full chunk) use a 3-byte row address shifted past
 * the column byte; large pages put the 16-bit row low bits in NDCB1
 * and the third row byte in NDCB2.
 * NOTE(review): elided listing — the column-insertion and else-branch
 * lines between these fragments are not visible.
 */
774 static void set_command_address(struct pxa3xx_nand_info *info,
775 unsigned int page_size, uint16_t column, int page_addr)
777 /* small page addr setting */
778 if (page_size < info->chunk_size) {
779 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
784 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
787 if (page_addr & 0xFF0000)
788 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command state before building NDCBx words: buffer cursors,
 * step sizes, retcode and ECC error count; pre-fill the data buffer
 * with 0xFF for commands that will read or stage a page
 * (READ0/READOOB/SEQIN).
 * NOTE(review): elided listing — the switch framing around the
 * READOOB/PAGEPROG force_raw handling (and whatever line 818's
 * conditional guards) is not visible here.
 */
794 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
796 struct pxa3xx_nand_host *host = info->host[info->cs];
797 struct mtd_info *mtd = nand_to_mtd(&host->chip);
799 /* reset data and oob column point to handle data */
802 info->data_buff_pos = 0;
803 info->oob_buff_pos = 0;
804 info->step_chunk_size = 0;
805 info->step_spare_size = 0;
809 info->retcode = ERR_NONE;
810 info->ecc_err_cnt = 0;
816 case NAND_CMD_READOOB:
817 case NAND_CMD_PAGEPROG:
818 if (!info->force_raw)
831 * If we are about to issue a read command, or about to set
832 * the write address, then clean the data buffer.
834 if (command == NAND_CMD_READ0 ||
835 command == NAND_CMD_READOOB ||
836 command == NAND_CMD_SEQIN) {
837 info->buf_count = mtd->writesize + mtd->oobsize;
838 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate an MTD command into NDCB0..NDCB3 words and per-step chunk
 * sizes.  Returns exec_cmd (whether pxa3xx_nand_start() should run).
 * Handles READ0/READOOB (mono vs extended chunked reads), SEQIN +
 * PAGEPROG (chunked program with final dispatch), PARAM, READID,
 * STATUS, ERASE1/2, RESET.
 * NOTE(review): elided listing — switch/case framing, several
 * "exec_cmd = ..." and "break" lines, and the extended-command bits
 * between the visible fragments are missing from view.
 */
842 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
843 int ext_cmd_type, uint16_t column, int page_addr)
845 int addr_cycle, exec_cmd;
846 struct pxa3xx_nand_host *host;
847 struct mtd_info *mtd;
849 host = info->host[info->cs];
850 mtd = nand_to_mtd(&host->chip);
855 info->ndcb0 = NDCB0_CSEL;
859 if (command == NAND_CMD_SEQIN)
862 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
863 + host->col_addr_cycles);
866 case NAND_CMD_READOOB:
868 info->buf_start = column;
869 info->ndcb0 |= NDCB0_CMD_TYPE(0)
/* READOOB is emulated as a full-page read offset past the data. */
873 if (command == NAND_CMD_READOOB)
874 info->buf_start += mtd->writesize;
876 if (info->cur_chunk < info->nfullchunks) {
877 info->step_chunk_size = info->chunk_size;
878 info->step_spare_size = info->spare_size;
880 info->step_chunk_size = info->last_chunk_size;
881 info->step_spare_size = info->last_spare_size;
885 * Multiple page read needs an 'extended command type' field,
886 * which is either naked-read or last-read according to the
889 if (info->force_raw) {
890 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
892 NDCB0_EXT_CMD_TYPE(ext_cmd_type);
893 info->ndcb3 = info->step_chunk_size +
894 info->step_spare_size + info->ecc_size;
895 } else if (mtd->writesize == info->chunk_size) {
896 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
897 } else if (mtd->writesize > info->chunk_size) {
898 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
900 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
901 info->ndcb3 = info->step_chunk_size +
902 info->step_spare_size;
905 set_command_address(info, mtd->writesize, column, page_addr);
910 info->buf_start = column;
911 set_command_address(info, mtd->writesize, 0, page_addr);
914 * Multiple page programming needs to execute the initial
915 * SEQIN command that sets the page address.
917 if (mtd->writesize > info->chunk_size) {
918 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
919 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
926 case NAND_CMD_PAGEPROG:
/* Skip the program entirely if the staged page is all-0xFF. */
927 if (is_buf_blank(info->data_buff,
928 (mtd->writesize + mtd->oobsize))) {
933 if (info->cur_chunk < info->nfullchunks) {
934 info->step_chunk_size = info->chunk_size;
935 info->step_spare_size = info->spare_size;
937 info->step_chunk_size = info->last_chunk_size;
938 info->step_spare_size = info->last_spare_size;
941 /* Second command setting for large pages */
942 if (mtd->writesize > info->chunk_size) {
944 * Multiple page write uses the 'extended command'
945 * field. This can be used to issue a command dispatch
946 * or a naked-write depending on the current stage.
948 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
950 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
951 info->ndcb3 = info->step_chunk_size +
952 info->step_spare_size;
955 * This is the command dispatch that completes a chunked
956 * page program operation.
958 if (info->cur_chunk == info->ntotalchunks) {
959 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
960 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
967 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
971 | (NAND_CMD_PAGEPROG << 8)
978 info->buf_count = INIT_BUFFER_SIZE;
979 info->ndcb0 |= NDCB0_CMD_TYPE(0)
983 info->ndcb1 = (column & 0xFF);
984 info->ndcb3 = INIT_BUFFER_SIZE;
985 info->step_chunk_size = INIT_BUFFER_SIZE;
988 case NAND_CMD_READID:
989 info->buf_count = READ_ID_BYTES;
990 info->ndcb0 |= NDCB0_CMD_TYPE(3)
993 info->ndcb1 = (column & 0xFF);
995 info->step_chunk_size = 8;
997 case NAND_CMD_STATUS:
999 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1003 info->step_chunk_size = 8;
1006 case NAND_CMD_ERASE1:
1007 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1011 | (NAND_CMD_ERASE2 << 8)
1013 info->ndcb1 = page_addr;
1017 case NAND_CMD_RESET:
1018 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1023 case NAND_CMD_ERASE2:
1029 dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * ->cmdfunc for monolithic (single-step) commands: halve the column on
 * x16 chips, reprogram cached timings on a chip-select change, prepare
 * and start the command, then poll NDSR / pxa3xx_nand_irq() until
 * cmd_complete or CHIP_DELAY_TIMEOUT.
 * NOTE(review): elided listing — the column >>= 1 line, the exec_cmd
 * declaration and early-out, and the polling-loop framing are not
 * visible here.
 */
1037 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1038 int column, int page_addr)
1040 struct nand_chip *chip = mtd_to_nand(mtd);
1041 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1042 struct pxa3xx_nand_info *info = host->info_data;
1046 * if this is a x16 device ,then convert the input
1047 * "byte" address into a "word" address appropriate
1048 * for indexing a word-oriented device
1050 if (info->reg_ndcr & NDCR_DWIDTH_M)
1054 * There may be different NAND chip hooked to
1055 * different chip select, so check whether
1056 * chip select has been changed, if yes, reset the timing
1058 if (info->cs != host->cs) {
1059 info->cs = host->cs;
1060 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1061 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1064 prepare_start_command(info, command);
1066 info->state = STATE_PREPARED;
1067 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1072 info->cmd_complete = 0;
1073 info->dev_ready = 0;
1074 info->need_wait = 1;
1075 pxa3xx_nand_start(info);
1081 status = nand_readl(info, NDSR);
1083 pxa3xx_nand_irq(info);
1085 if (info->cmd_complete)
1088 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1089 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1094 info->state = STATE_IDLE;
/*
 * ->cmdfunc for NFCv2 chunked (extended) commands: like nand_cmdfunc()
 * but loops over chunks for READ0/READOOB/PAGEPROG, advancing
 * ext_cmd_type between steps (naked read/write, last-read, final
 * dispatch) until cur_chunk covers ntotalchunks.
 * NOTE(review): elided listing — the column >>= 1 line, the loop
 * framing ("do { ... } while"), cur_chunk increments, and several
 * break/goto lines are not visible here.
 */
1097 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1098 const unsigned command,
1099 int column, int page_addr)
1101 struct nand_chip *chip = mtd_to_nand(mtd);
1102 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1103 struct pxa3xx_nand_info *info = host->info_data;
1104 int exec_cmd, ext_cmd_type;
1107 * if this is a x16 device then convert the input
1108 * "byte" address into a "word" address appropriate
1109 * for indexing a word-oriented device
1111 if (info->reg_ndcr & NDCR_DWIDTH_M)
1115 * There may be different NAND chip hooked to
1116 * different chip select, so check whether
1117 * chip select has been changed, if yes, reset the timing
1119 if (info->cs != host->cs) {
1120 info->cs = host->cs;
1121 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1122 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1125 /* Select the extended command for the first command */
1127 case NAND_CMD_READ0:
1128 case NAND_CMD_READOOB:
1129 ext_cmd_type = EXT_CMD_TYPE_MONO;
1131 case NAND_CMD_SEQIN:
1132 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1134 case NAND_CMD_PAGEPROG:
1135 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1142 prepare_start_command(info, command);
1145 * Prepare the "is ready" completion before starting a command
1146 * transaction sequence. If the command is not executed the
1147 * completion will be completed, see below.
1149 * We can do that inside the loop because the command variable
1150 * is invariant and thus so is the exec_cmd.
1152 info->need_wait = 1;
1153 info->dev_ready = 0;
1158 info->state = STATE_PREPARED;
1159 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
/* Command not executed: report ready immediately. */
1162 info->need_wait = 0;
1163 info->dev_ready = 1;
1167 info->cmd_complete = 0;
1168 pxa3xx_nand_start(info);
1174 status = nand_readl(info, NDSR);
1176 pxa3xx_nand_irq(info);
1178 if (info->cmd_complete)
1181 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1182 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1187 /* Only a few commands need several steps */
1188 if (command != NAND_CMD_PAGEPROG &&
1189 command != NAND_CMD_READ0 &&
1190 command != NAND_CMD_READOOB)
1195 /* Check if the sequence is complete */
1196 if (info->cur_chunk == info->ntotalchunks &&
1197 command != NAND_CMD_PAGEPROG)
1201 * After a splitted program command sequence has issued
1202 * the command dispatch, the command sequence is complete.
1204 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1205 command == NAND_CMD_PAGEPROG &&
1206 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1209 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1210 /* Last read: issue a 'last naked read' */
1211 if (info->cur_chunk == info->ntotalchunks - 1)
1212 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1214 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1217 * If a splitted program command has no more data to transfer,
1218 * the command dispatch must be issued to complete.
1220 } else if (command == NAND_CMD_PAGEPROG &&
1221 info->cur_chunk == info->ntotalchunks) {
1222 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1226 info->state = STATE_IDLE;
/*
 * ecc.write_page with hardware ECC: stage data then OOB into the
 * driver buffer via ->write_buf; the controller computes ECC when
 * PAGEPROG is issued.  NOTE(review): the "return 0;" tail falls in
 * elided lines of this listing.
 */
1229 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1230 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1233 chip->write_buf(mtd, buf, mtd->writesize);
1234 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page with hardware ECC: copy data+OOB out of the driver
 * buffer, then interpret the retcode recorded by the IRQ path.
 * Corrected errors bump ecc_stats.corrected; a BCH "uncorrectable" is
 * re-read raw and run through nand_check_erased_ecc_chunk() to tell
 * an erased page (or one with few bitflips) from a real failure; with
 * Hamming (no raw read) a blank buffer is treated as erased.  Returns
 * info->max_bitflips per the MTD contract.
 */
1239 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1240 struct nand_chip *chip, uint8_t *buf, int oob_required,
1243 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1244 struct pxa3xx_nand_info *info = host->info_data;
1247 chip->read_buf(mtd, buf, mtd->writesize);
1248 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1250 if (info->retcode == ERR_CORERR && info->use_ecc) {
1251 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1253 } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1255 * Empty pages will trigger uncorrectable errors. Re-read the
1256 * entire page in raw mode and check for bits not being "1".
1257 * If there are more than the supported strength, then it means
1258 * this is an actual uncorrectable error.
1260 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1261 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1262 chip->oob_poi, mtd->oobsize,
1263 NULL, 0, chip->ecc.strength);
1265 mtd->ecc_stats.failed++;
1267 mtd->ecc_stats.corrected += bf;
1268 info->max_bitflips = max_t(unsigned int,
1269 info->max_bitflips, bf);
1270 info->retcode = ERR_CORERR;
1272 info->retcode = ERR_NONE;
1275 } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1276 /* Raw read is not supported with Hamming ECC engine */
1277 if (is_buf_blank(buf, mtd->writesize))
1278 info->retcode = ERR_NONE;
1280 mtd->ecc_stats.failed++;
1283 return info->max_bitflips;
/*
 * ecc.read_page_raw: set force_raw (so pxa3xx_nand_start() disables
 * the ECC engine), re-issue READ0, then demultiplex the interleaved
 * per-chunk data / spare / ECC bytes into buf and chip->oob_poi (ECC
 * bytes land after all spare bytes, at ecc_off_buf).  The last,
 * partial chunk is handled separately when ntotalchunks >
 * nfullchunks.  NOTE(review): elided listing — the memcpy()/source
 * -pointer lines between these fragments, and the reason for the
 * "ecc_size - 2" copy length, are not visible here; confirm against
 * the full driver.
 */
1286 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1287 struct nand_chip *chip, uint8_t *buf,
1288 int oob_required, int page)
1290 struct pxa3xx_nand_host *host = chip->priv;
1291 struct pxa3xx_nand_info *info = host->info_data;
1292 int chunk, ecc_off_buf;
1298 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1299 * pxa3xx_nand_start(), which will actually disable the ECC engine.
1301 info->force_raw = true;
1302 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1304 ecc_off_buf = (info->nfullchunks * info->spare_size) +
1305 info->last_spare_size;
1306 for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1308 buf + (chunk * info->chunk_size),
1312 (chunk * (info->spare_size)),
1315 chip->oob_poi + ecc_off_buf +
1316 (chunk * (info->ecc_size)),
1317 info->ecc_size - 2);
1320 if (info->ntotalchunks > info->nfullchunks) {
1322 buf + (info->nfullchunks * info->chunk_size),
1323 info->last_chunk_size);
1326 (info->nfullchunks * (info->spare_size)),
1327 info->last_spare_size);
1329 chip->oob_poi + ecc_off_buf +
1330 (info->nfullchunks * (info->ecc_size)),
1331 info->ecc_size - 2);
1334 info->force_raw = false;
/*
 * ecc.read_oob_raw: implemented as a full raw page read into the
 * chip's page buffer (which fills chip->oob_poi as a side effect).
 * NOTE(review): the "invalidate page cache" assignment mentioned by
 * the comment falls in an elided line of this listing.
 */
1339 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1340 struct nand_chip *chip, int page)
1342 /* Invalidate page cache */
1345 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1349 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1351 struct nand_chip *chip = mtd_to_nand(mtd);
1352 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1353 struct pxa3xx_nand_info *info = host->info_data;
1356 if (info->buf_start < info->buf_count)
1357 /* Has just send a new command? */
1358 retval = info->data_buff[info->buf_start++];
1363 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1365 struct nand_chip *chip = mtd_to_nand(mtd);
1366 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1367 struct pxa3xx_nand_info *info = host->info_data;
1368 u16 retval = 0xFFFF;
1370 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1371 retval = *((u16 *)(info->data_buff+info->buf_start));
1372 info->buf_start += 2;
1377 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1379 struct nand_chip *chip = mtd_to_nand(mtd);
1380 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1381 struct pxa3xx_nand_info *info = host->info_data;
1382 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1384 memcpy(buf, info->data_buff + info->buf_start, real_len);
1385 info->buf_start += real_len;
1388 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1389 const uint8_t *buf, int len)
1391 struct nand_chip *chip = mtd_to_nand(mtd);
1392 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1393 struct pxa3xx_nand_info *info = host->info_data;
1394 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1396 memcpy(info->data_buff + info->buf_start, buf, real_len);
1397 info->buf_start += real_len;
1400 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * ->waitfunc() hook: when a wait is pending, poll NDSR (feeding events
 * into the interrupt handler) until the device is ready or
 * CHIP_DELAY_TIMEOUT elapses, then translate the driver's internal result
 * code into NAND status bits for program/erase operations.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		/* Poll the status register and service controller events */
		status = nand_readl(info, NDSR);
			pxa3xx_nand_irq(info);
		if (info->dev_ready)
		/* Abort once the overall command timeout has elapsed */
		if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
			dev_err(&info->pdev->dev, "Ready timeout!!!\n");
			return NAND_STATUS_FAIL;

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		/* Report the outcome of the last program/erase operation */
		if (info->retcode == ERR_NONE)
		return NAND_STATUS_FAIL;

	return NAND_STATUS_READY;
1445 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1447 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1449 /* Configure default flash values */
1450 info->reg_ndcr = 0x0; /* enable all interrupts */
1451 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1452 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1453 info->reg_ndcr |= NDCR_SPARE_EN;
1458 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1460 struct pxa3xx_nand_host *host = info->host[info->cs];
1461 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1462 struct nand_chip *chip = mtd_to_nand(mtd);
1464 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1465 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1466 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1469 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1471 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1472 uint32_t ndcr = nand_readl(info, NDCR);
1474 /* Set an initial chunk size */
1475 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1476 info->reg_ndcr = ndcr &
1477 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1478 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1479 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1480 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1483 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1485 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1486 if (info->data_buff == NULL)
/*
 * Probe for a chip on the current chip-select: program conservative NDCR
 * and ONFI timing-mode-0 values, then issue a RESET and check that it
 * completes. A non-zero return means no working chip was sensed.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;

	mtd = nand_to_mtd(&info->host[info->cs]->chip);
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	/* A RESET that completes cleanly implies a chip is present */
	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
/*
 * Translate the required (strength, step size, page size) triple into the
 * controller's fixed chunk/spare/ECC geometry and configure the generic
 * nand_ecc_ctrl accordingly. Unsupported combinations are rejected with
 * an error message.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
	/* Hamming engine: 1-bit correction, 2 KiB pages */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;

	/* Hamming engine: 1-bit correction, 512-byte pages */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		ecc->mode = NAND_ECC_HW;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		/* BCH4, 4 KiB page: two 2 KiB chunks */
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
		/* BCH4, 8 KiB page: four 2 KiB chunks */
		info->nfullchunks = 4;
		info->ntotalchunks = 4;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch4bit;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
		/* BCH8, 2 KiB page: one full chunk plus a trailing chunk */
		info->nfullchunks = 1;
		info->ntotalchunks = 2;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 1024;
		info->last_spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch8bit;
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		/* BCH8, 4 KiB page: four full chunks plus a spare-only one */
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
		/* BCH8, 8 KiB page: eight full chunks plus a spare-only one */
		info->nfullchunks = 8;
		info->ntotalchunks = 9;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 160;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch8bit;
	/* No matching geometry: reject the requested configuration */
	dev_err(&info->pdev->dev,
		"ECC strength %d at page size %d is not supported\n",
		strength, page_size);
/*
 * Full chip bring-up for one mtd device: configure or inherit controller
 * state, identify the flash, pick ECC geometry, set up command handling
 * and addressing, allocate the real data+OOB buffer, and finish with
 * nand_scan_tail().
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	uint16_t ecc_strength, ecc_step;

	/* Either inherit the bootloader's setup or sense the chip fresh */
	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
		ret = pxa3xx_nand_config_ident(info);
		ret = pxa3xx_nand_sensing(host);
			dev_info(&info->pdev->dev,
				 "There is no chip on cs %d!\n",

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
			dev_err(&info->pdev->dev,
				"Failed to set timings: %d\n", ret);

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;

	/* Platform-provided ECC requirements win over detected ones */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling,
	 */
	if (mtd->writesize > info->chunk_size) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	info->oob_buff = info->data_buff + mtd->writesize;

	/* Chips larger than 64K pages need a third row-address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
/*
 * Allocate and wire up per-chip-select host structures: the hosts live
 * in the memory immediately after *info (allocated together by the
 * caller), and each chip's callbacks are pointed at this driver's hooks.
 * Also allocates the initial detection buffer and masks all interrupts.
 */
static int alloc_nand_resource(struct pxa3xx_nand_info *info)
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;

	pdata = info->pdata;
	if (pdata->num_cs <= 0)

	info->variant = pxa3xx_nand_get_variant();
	for (cs = 0; cs < pdata->num_cs; cs++) {
		/*
		 * Hosts are carved out of the trailing allocation after
		 * *info; nand_chip is the first member of the host struct,
		 * hence the pointer punning below.
		 */
		chip = (struct nand_chip *)
			((u8 *)&info[1] + sizeof(*host) * cs);
		mtd = nand_to_mtd(chip);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->info_data = info;
		mtd->owner = THIS_MODULE;

		/* Hook this driver's callbacks into the generic NAND core */
		nand_set_controller_data(chip, host);
		chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
		chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
		chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
		chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
		chip->controller = &info->controller;
		chip->waitfunc = pxa3xx_nand_waitfunc;
		chip->select_chip = pxa3xx_nand_select_chip;
		chip->read_word = pxa3xx_nand_read_word;
		chip->read_byte = pxa3xx_nand_read_byte;
		chip->read_buf = pxa3xx_nand_read_buf;
		chip->write_buf = pxa3xx_nand_write_buf;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc = nand_cmdfunc;

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		goto fail_disable_clk;

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	/* Error path: release the detection buffer */
	kfree(info->data_buff);
/*
 * Build platform data from the device tree: find the first enabled
 * "marvell,mvebu-pxa3xx-nand" node and read its base address, chip-select
 * count, arbiter/keep-config flags and optional ECC requirements.
 */
static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
	struct pxa3xx_nand_platform_data *pdata;
	const void *blob = gd->fdt_blob;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);

	/* Get address decoding nodes from the FDT blob */
		node = fdt_node_offset_by_compatible(blob, node,
						     "marvell,mvebu-pxa3xx-nand");

		/* Bypass disabled nodes */
		if (!fdtdec_get_is_enabled(blob, node))

		/* Get the first enabled NAND controller base address */
			(void __iomem *)fdtdec_get_addr_size_auto_noparent(
					blob, node, "reg", 0, NULL, true);

		pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
		if (pdata->num_cs != 1) {
			pr_err("pxa3xx driver supports single CS only\n");

		if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
			pdata->enable_arbiter = 1;

		if (fdtdec_get_bool(blob, node, "nand-keep-config"))
			pdata->keep_config = 1;

		/*
		 * If these are not set, they will be selected according
		 * to the detected flash type.
		 */
		pdata->ecc_strength = fdtdec_get_int(blob, node,
						     "nand-ecc-strength", 0);
		pdata->ecc_step_size = fdtdec_get_int(blob, node,
						      "nand-ecc-step-size", 0);

		info->pdata = pdata;

		/* Currently support only a single NAND controller */
	} while (node >= 0);
/*
 * Top-level probe: parse the device tree, allocate controller resources,
 * then scan and register an mtd device for each configured chip select.
 */
static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
	struct pxa3xx_nand_platform_data *pdata;
	int ret, cs, probe_success;

	ret = pxa3xx_nand_probe_dt(info);

	pdata = info->pdata;

	ret = alloc_nand_resource(info);
		dev_err(&pdev->dev, "alloc nand resource failed\n");

	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";

		ret = pxa3xx_nand_scan(mtd);
			dev_info(&pdev->dev, "failed to scan nand at cs %d\n",

		if (nand_register(cs, mtd))
1921 * Main initialization routine
1923 void board_nand_init(void)
1925 struct pxa3xx_nand_info *info;
1926 struct pxa3xx_nand_host *host;
1929 info = kzalloc(sizeof(*info) +
1930 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1935 ret = pxa3xx_nand_probe(info);