1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/mtd/nand/raw/pxa3xx_nand.c
4  *
5  * Copyright © 2005 Intel Corporation
6  * Copyright © 2006 Marvell International Ltd.
7  */
8
9 #include <common.h>
10 #include <malloc.h>
11 #include <fdtdec.h>
12 #include <nand.h>
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/err.h>
16 #include <linux/errno.h>
17 #include <asm/io.h>
18 #include <asm/arch/cpu.h>
19 #include <linux/mtd/mtd.h>
20 #include <linux/mtd/rawnand.h>
21 #include <linux/types.h>
22
23 #include "pxa3xx_nand.h"
24
25 DECLARE_GLOBAL_DATA_PTR;
26
27 #define TIMEOUT_DRAIN_FIFO      5       /* in ms */
28 #define CHIP_DELAY_TIMEOUT      200
29 #define NAND_STOP_DELAY         40
30
31 /*
32  * Define a buffer size for the initial commands that detect the flash device:
33  * STATUS, READID and PARAM.
34  * ONFI param page is 256 bytes, and there are three redundant copies
35  * to be read. JEDEC param page is 512 bytes, and there are also three
36  * redundant copies to be read.
37  * Hence this buffer should be at least 512 x 3. Let's pick 2048.
38  */
39 #define INIT_BUFFER_SIZE        2048
40
41 /* registers and bit definitions */
42 #define NDCR            (0x00) /* Control register */
43 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
44 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
45 #define NDSR            (0x14) /* Status Register */
46 #define NDPCR           (0x18) /* Page Count Register */
47 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
48 #define NDBDR1          (0x20) /* Bad Block Register 1 */
49 #define NDECCCTRL       (0x28) /* ECC control */
50 #define NDDB            (0x40) /* Data Buffer */
51 #define NDCB0           (0x48) /* Command Buffer0 */
52 #define NDCB1           (0x4C) /* Command Buffer1 */
53 #define NDCB2           (0x50) /* Command Buffer2 */
54
55 #define NDCR_SPARE_EN           (0x1 << 31)
56 #define NDCR_ECC_EN             (0x1 << 30)
57 #define NDCR_DMA_EN             (0x1 << 29)
58 #define NDCR_ND_RUN             (0x1 << 28)
59 #define NDCR_DWIDTH_C           (0x1 << 27)
60 #define NDCR_DWIDTH_M           (0x1 << 26)
61 #define NDCR_PAGE_SZ            (0x1 << 24)
62 #define NDCR_NCSX               (0x1 << 23)
63 #define NDCR_ND_MODE            (0x3 << 21)
64 #define NDCR_NAND_MODE          (0x0)
65 #define NDCR_CLR_PG_CNT         (0x1 << 20)
66 #define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
67 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
68 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
69
70 #define NDCR_RA_START           (0x1 << 15)
71 #define NDCR_PG_PER_BLK         (0x1 << 14)
72 #define NDCR_ND_ARB_EN          (0x1 << 12)
73 #define NDCR_INT_MASK           (0xFFF)
74
75 #define NDSR_MASK               (0xfff)
76 #define NDSR_ERR_CNT_OFF        (16)
77 #define NDSR_ERR_CNT_MASK       (0x1f)
78 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
79 #define NDSR_RDY                (0x1 << 12)
80 #define NDSR_FLASH_RDY          (0x1 << 11)
81 #define NDSR_CS0_PAGED          (0x1 << 10)
82 #define NDSR_CS1_PAGED          (0x1 << 9)
83 #define NDSR_CS0_CMDD           (0x1 << 8)
84 #define NDSR_CS1_CMDD           (0x1 << 7)
85 #define NDSR_CS0_BBD            (0x1 << 6)
86 #define NDSR_CS1_BBD            (0x1 << 5)
87 #define NDSR_UNCORERR           (0x1 << 4)
88 #define NDSR_CORERR             (0x1 << 3)
89 #define NDSR_WRDREQ             (0x1 << 2)
90 #define NDSR_RDDREQ             (0x1 << 1)
91 #define NDSR_WRCMDREQ           (0x1)
92
93 #define NDCB0_LEN_OVRD          (0x1 << 28)
94 #define NDCB0_ST_ROW_EN         (0x1 << 26)
95 #define NDCB0_AUTO_RS           (0x1 << 25)
96 #define NDCB0_CSEL              (0x1 << 24)
97 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
98 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
99 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
100 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
101 #define NDCB0_NC                (0x1 << 20)
102 #define NDCB0_DBC               (0x1 << 19)
103 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
104 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
105 #define NDCB0_CMD2_MASK         (0xff << 8)
106 #define NDCB0_CMD1_MASK         (0xff)
107 #define NDCB0_ADDR_CYC_SHIFT    (16)
108
109 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
110 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
111 #define EXT_CMD_TYPE_READ       4 /* Read */
112 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
113 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
114 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
115 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
116
117 /*
118  * This should be large enough to read 'ONFI' and 'JEDEC'.
119  * Let's use 7 bytes, which is the maximum ID count supported
120  * by the controller (see NDCR_RD_ID_CNT_MASK).
121  */
122 #define READ_ID_BYTES           7
123
124 /* macros for registers read/write */
125 #define nand_writel(info, off, val)     \
126         writel((val), (info)->mmio_base + (off))
127
128 #define nand_readl(info, off)           \
129         readl((info)->mmio_base + (off))
130
131 /* error code and state */
132 enum {
133         ERR_NONE        = 0,
134         ERR_DMABUSERR   = -1,
135         ERR_SENDCMD     = -2,
136         ERR_UNCORERR    = -3,
137         ERR_BBERR       = -4,
138         ERR_CORERR      = -5,
139 };
140
141 enum {
142         STATE_IDLE = 0,
143         STATE_PREPARED,
144         STATE_CMD_HANDLE,
145         STATE_DMA_READING,
146         STATE_DMA_WRITING,
147         STATE_DMA_DONE,
148         STATE_PIO_READING,
149         STATE_PIO_WRITING,
150         STATE_CMD_DONE,
151         STATE_READY,
152 };
153
154 enum pxa3xx_nand_variant {
155         PXA3XX_NAND_VARIANT_PXA,
156         PXA3XX_NAND_VARIANT_ARMADA370,
157 };
158
159 struct pxa3xx_nand_host {
160         struct nand_chip        chip;
161         void                    *info_data;
162
163         /* HW ECC usage and chip select of the attached chip */
164         int                     use_ecc;
165         int                     cs;
166
167         /* calculated from pxa3xx_nand_flash data */
168         unsigned int            col_addr_cycles;
169         unsigned int            row_addr_cycles;
170 };
171
172 struct pxa3xx_nand_info {
173         struct nand_hw_control  controller;
174         struct pxa3xx_nand_platform_data *pdata;
175
176         struct clk              *clk;
177         void __iomem            *mmio_base;
178         unsigned long           mmio_phys;
179         int                     cmd_complete, dev_ready;
180
181         unsigned int            buf_start;
182         unsigned int            buf_count;
183         unsigned int            buf_size;
184         unsigned int            data_buff_pos;
185         unsigned int            oob_buff_pos;
186
187         unsigned char           *data_buff;
188         unsigned char           *oob_buff;
189
190         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
191         unsigned int            state;
192
193         /*
194          * This driver supports NFCv1 (as found in PXA SoC)
195          * and NFCv2 (as found in Armada 370/XP SoC).
196          */
197         enum pxa3xx_nand_variant variant;
198
199         int                     cs;
200         int                     use_ecc;        /* use HW ECC ? */
201         int                     force_raw;      /* prevent use_ecc from being set */
202         int                     ecc_bch;        /* using BCH ECC? */
203         int                     use_spare;      /* use spare ? */
204         int                     need_wait;
205
206         /* Amount of real data per full chunk */
207         unsigned int            chunk_size;
208
209         /* Amount of spare data per full chunk */
210         unsigned int            spare_size;
211
212         /* Number of full chunks (i.e. chunk_size + spare_size) */
213         unsigned int            nfullchunks;
214
215         /*
216          * Total number of chunks. If equal to nfullchunks, then there
217          * are only full chunks. Otherwise, there is one last chunk of
218          * size (last_chunk_size + last_spare_size)
219          */
220         unsigned int            ntotalchunks;
221
222         /* Amount of real data in the last chunk */
223         unsigned int            last_chunk_size;
224
225         /* Amount of spare data in the last chunk */
226         unsigned int            last_spare_size;
227
228         unsigned int            ecc_size;
229         unsigned int            ecc_err_cnt;
230         unsigned int            max_bitflips;
231         int                     retcode;
232
233         /*
234          * Variables only valid during command
235          * execution. step_chunk_size and step_spare_size are the
236          * amounts of real data and spare data in the current
237          * chunk. cur_chunk is the current chunk being
238          * read/programmed.
239          */
240         unsigned int            step_chunk_size;
241         unsigned int            step_spare_size;
242         unsigned int            cur_chunk;
243
244         /* cached register value */
245         uint32_t                reg_ndcr;
246         uint32_t                ndtr0cs0;
247         uint32_t                ndtr1cs0;
248
249         /* generated NDCBx register values */
250         uint32_t                ndcb0;
251         uint32_t                ndcb1;
252         uint32_t                ndcb2;
253         uint32_t                ndcb3;
254 };
255
256 static struct pxa3xx_nand_timing timing[] = {
257         /*
258          * tCH  Enable signal hold time
259          * tCS  Enable signal setup time
260          * tWH  ND_nWE high duration
261          * tWP  ND_nWE pulse time
262          * tRH  ND_nRE high duration
263          * tRP  ND_nRE pulse width
264          * tR   ND_nWE high to ND_nRE low for read
265          * tWHR ND_nWE high to ND_nRE low for status read
266          * tAR  ND_ALE low to ND_nRE low delay
267          */
268         /*ch  cs  wh  wp   rh  rp   r      whr  ar */
269         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
270         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
271         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
272         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
273         {  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
274 };
275
276 static struct pxa3xx_nand_flash builtin_flash_types[] = {
277         /*
278          * chip_id
279          * flash_width  Width of Flash memory (DWIDTH_M)
280          * dfc_width    Width of flash controller (DWIDTH_C)
281          * *timing
282          * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
283          */
284         { 0x46ec, 16, 16, &timing[1] },
285         { 0xdaec,  8,  8, &timing[1] },
286         { 0xd7ec,  8,  8, &timing[1] },
287         { 0xa12c,  8,  8, &timing[2] },
288         { 0xb12c, 16, 16, &timing[2] },
289         { 0xdc2c,  8,  8, &timing[2] },
290         { 0xcc2c, 16, 16, &timing[2] },
291         { 0xba20, 16, 16, &timing[3] },
292         { 0xda98,  8,  8, &timing[4] },
293 };
294
295 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
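/*
 * Marker patterns ("MVBbt0" and its byte-reversed mirror) used to locate
 * the main and mirror bad block tables stored in the last blocks.
 */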
296 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
297 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
298
299 static struct nand_bbt_descr bbt_main_descr = {
300         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
301                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
302         .offs = 8,
303         .len = 6,
304         .veroffs = 14,
305         .maxblocks = 8,         /* Last 8 blocks in each chip */
306         .pattern = bbt_pattern
307 };
308
309 static struct nand_bbt_descr bbt_mirror_descr = {
310         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
311                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
312         .offs = 8,
313         .len = 6,
314         .veroffs = 14,
315         .maxblocks = 8,         /* Last 8 blocks in each chip */
316         .pattern = bbt_mirror_pattern
317 };
318 #endif
319
320 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
321         .eccbytes = 32,
322         .eccpos = {
323                 32, 33, 34, 35, 36, 37, 38, 39,
324                 40, 41, 42, 43, 44, 45, 46, 47,
325                 48, 49, 50, 51, 52, 53, 54, 55,
326                 56, 57, 58, 59, 60, 61, 62, 63},
327         .oobfree = { {2, 30} }
328 };
329
330 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
331         .eccbytes = 64,
332         .eccpos = {
333                 32, 33, 34, 35, 36, 37, 38, 39,
334                 40, 41, 42, 43, 44, 45, 46, 47,
335                 48, 49, 50, 51, 52, 53, 54, 55,
336                 56, 57, 58, 59, 60, 61, 62, 63,
337                 64, 65, 66, 67, 68, 69, 70, 71,
338                 72, 73, 74, 75, 76, 77, 78, 79,
339                 80, 81, 82, 83, 84, 85, 86, 87,
340                 88, 89, 90, 91, 92, 93, 94, 95},
341         .oobfree = { {1, 4}, {6, 26} }
342 };
343
344 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
345         .eccbytes = 64,
346         .eccpos = {
347                 32,  33,  34,  35,  36,  37,  38,  39,
348                 40,  41,  42,  43,  44,  45,  46,  47,
349                 48,  49,  50,  51,  52,  53,  54,  55,
350                 56,  57,  58,  59,  60,  61,  62,  63,
351                 96,  97,  98,  99,  100, 101, 102, 103,
352                 104, 105, 106, 107, 108, 109, 110, 111,
353                 112, 113, 114, 115, 116, 117, 118, 119,
354                 120, 121, 122, 123, 124, 125, 126, 127},
355         /* Bootrom looks in bytes 0 & 5 for bad blocks */
356         .oobfree = { {6, 26}, { 64, 32} }
357 };
358
359 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
360         .eccbytes = 128,
361         .eccpos = {
362                 32,  33,  34,  35,  36,  37,  38,  39,
363                 40,  41,  42,  43,  44,  45,  46,  47,
364                 48,  49,  50,  51,  52,  53,  54,  55,
365                 56,  57,  58,  59,  60,  61,  62,  63,
366
367                 96,  97,  98,  99,  100, 101, 102, 103,
368                 104, 105, 106, 107, 108, 109, 110, 111,
369                 112, 113, 114, 115, 116, 117, 118, 119,
370                 120, 121, 122, 123, 124, 125, 126, 127,
371
372                 160, 161, 162, 163, 164, 165, 166, 167,
373                 168, 169, 170, 171, 172, 173, 174, 175,
374                 176, 177, 178, 179, 180, 181, 182, 183,
375                 184, 185, 186, 187, 188, 189, 190, 191,
376
377                 224, 225, 226, 227, 228, 229, 230, 231,
378                 232, 233, 234, 235, 236, 237, 238, 239,
379                 240, 241, 242, 243, 244, 245, 246, 247,
380                 248, 249, 250, 251, 252, 253, 254, 255},
381
382         /* Bootrom looks in bytes 0 & 5 for bad blocks */
383         .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
384 };
385
386 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
387         .eccbytes = 128,
388         .eccpos = {
389                 32,  33,  34,  35,  36,  37,  38,  39,
390                 40,  41,  42,  43,  44,  45,  46,  47,
391                 48,  49,  50,  51,  52,  53,  54,  55,
392                 56,  57,  58,  59,  60,  61,  62,  63},
393         .oobfree = { }
394 };
395
396 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
397         .eccbytes = 256,
398         .eccpos = {},
399         /* HW ECC handles all ECC data and all spare area is free for OOB */
400         .oobfree = {{0, 160} }
401 };
402
403 #define NDTR0_tCH(c)    (min((c), 7) << 19)
404 #define NDTR0_tCS(c)    (min((c), 7) << 16)
405 #define NDTR0_tWH(c)    (min((c), 7) << 11)
406 #define NDTR0_tWP(c)    (min((c), 7) << 8)
407 #define NDTR0_tRH(c)    (min((c), 7) << 3)
408 #define NDTR0_tRP(c)    (min((c), 7) << 0)
409
410 #define NDTR1_tR(c)     (min((c), 65535) << 16)
411 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
412 #define NDTR1_tAR(c)    (min((c), 15) << 0)
413
414 /* convert nanoseconds to NAND flash controller clock cycles */
415 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
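/*
 * Dividing the clock down to MHz before multiplying keeps the intermediate
 * product (ns * MHz) within 32-bit range even for long delays such as tR.
 */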
416
417 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
418 {
419         /* We only support the Armada 370/XP/38x for now */
420         return PXA3XX_NAND_VARIANT_ARMADA370;
421 }
422
423 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
424                                    const struct pxa3xx_nand_timing *t)
425 {
426         struct pxa3xx_nand_info *info = host->info_data;
427         unsigned long nand_clk = mvebu_get_nand_clock();
428         uint32_t ndtr0, ndtr1;
429
430         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
431                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
432                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
433                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
434                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
435                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
436
437         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
438                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
439                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
440
441         info->ndtr0cs0 = ndtr0;
442         info->ndtr1cs0 = ndtr1;
443         nand_writel(info, NDTR0CS0, ndtr0);
444         nand_writel(info, NDTR1CS0, ndtr1);
445 }
446
447 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
448                                        const struct nand_sdr_timings *t)
449 {
450         struct pxa3xx_nand_info *info = host->info_data;
451         struct nand_chip *chip = &host->chip;
452         unsigned long nand_clk = mvebu_get_nand_clock();
453         uint32_t ndtr0, ndtr1;
454
455         u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
456         u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
457         u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
458         u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
459         u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
460         u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
461         u32 tR = chip->chip_delay * 1000;
462         u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
463         u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
464
465         /* fallback to a default value if tR = 0 */
466         if (!tR)
467                 tR = 20000;
468
469         ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
470                 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
471                 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
472                 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
473                 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
474                 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
475
476         ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
477                 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
478                 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
479
480         info->ndtr0cs0 = ndtr0;
481         info->ndtr1cs0 = ndtr1;
482         nand_writel(info, NDTR0CS0, ndtr0);
483         nand_writel(info, NDTR1CS0, ndtr1);
484 }
485
486 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
487 {
488         const struct nand_sdr_timings *timings;
489         struct nand_chip *chip = &host->chip;
490         struct pxa3xx_nand_info *info = host->info_data;
491         const struct pxa3xx_nand_flash *f = NULL;
492         struct mtd_info *mtd = nand_to_mtd(&host->chip);
493         int mode, id, ntypes, i;
494
495         mode = onfi_get_async_timing_mode(chip);
496         if (mode == ONFI_TIMING_MODE_UNKNOWN) {
497                 ntypes = ARRAY_SIZE(builtin_flash_types);
498
499                 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
500
501                 id = chip->read_byte(mtd);
502                 id |= chip->read_byte(mtd) << 0x8;
503
504                 for (i = 0; i < ntypes; i++) {
505                         f = &builtin_flash_types[i];
506
507                         if (f->chip_id == id)
508                                 break;
509                 }
510
511                 if (i == ntypes) {
512                         dev_err(&info->pdev->dev, "Error: timings not found\n");
513                         return -EINVAL;
514                 }
515
516                 pxa3xx_nand_set_timing(host, f->timing);
517
518                 if (f->flash_width == 16) {
519                         info->reg_ndcr |= NDCR_DWIDTH_M;
520                         chip->options |= NAND_BUSWIDTH_16;
521                 }
522
523                 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
524         } else {
525                 mode = fls(mode) - 1;
526                 if (mode < 0)
527                         mode = 0;
528
529                 timings = onfi_async_timing_mode_to_sdr_timings(mode);
530                 if (IS_ERR(timings))
531                         return PTR_ERR(timings);
532
533                 pxa3xx_nand_set_sdr_timing(host, timings);
534         }
535
536         return 0;
537 }
538
539 /**
540  * NOTE: ND_RUN must be set first and the command buffer written
541  * afterwards, otherwise the controller does not work.
542  * We enable all the interrupts at the same time and let
543  * pxa3xx_nand_irq() handle all the logic.
544  */
545 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
546 {
547         uint32_t ndcr;
548
549         ndcr = info->reg_ndcr;
550
551         if (info->use_ecc) {
552                 ndcr |= NDCR_ECC_EN;
553                 if (info->ecc_bch)
554                         nand_writel(info, NDECCCTRL, 0x1);
555         } else {
556                 ndcr &= ~NDCR_ECC_EN;
557                 if (info->ecc_bch)
558                         nand_writel(info, NDECCCTRL, 0x0);
559         }
560
561         ndcr &= ~NDCR_DMA_EN;
562
563         if (info->use_spare)
564                 ndcr |= NDCR_SPARE_EN;
565         else
566                 ndcr &= ~NDCR_SPARE_EN;
567
568         ndcr |= NDCR_ND_RUN;
569
570         /* clear status bits and run */
571         nand_writel(info, NDSR, NDSR_MASK);
572         nand_writel(info, NDCR, 0);
573         nand_writel(info, NDCR, ndcr);
574 }
575
576 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
577 {
578         uint32_t ndcr;
579
580         ndcr = nand_readl(info, NDCR);
581         nand_writel(info, NDCR, ndcr | int_mask);
582 }
583
584 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
585 {
586         if (info->ecc_bch && !info->force_raw) {
587                 u32 ts;
588
589                 /*
590                  * According to the datasheet, when reading from NDDB
591                  * with BCH enabled, after each 32-byte read we have
592                  * to make sure that the NDSR.RDDREQ bit is set.
593                  *
594                  * Drain the FIFO eight 32-bit reads at a time, and skip
595                  * the polling on the last read.
596                  */
597                 while (len > 8) {
598                         readsl(info->mmio_base + NDDB, data, 8);
599
600                         ts = get_timer(0);
601                         while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
602                                 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
603                                         dev_err(&info->pdev->dev,
604                                                 "Timeout on RDDREQ while draining the FIFO\n");
605                                         return;
606                                 }
607                         }
608
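                        /* 8 words (32 bytes) were just read from the FIFO */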
609                         data += 32;
610                         len -= 8;
611                 }
612         }
613
614         readsl(info->mmio_base + NDDB, data, len);
615 }
616
617 static void handle_data_pio(struct pxa3xx_nand_info *info)
618 {
619         int data_len = info->step_chunk_size;
620
621         /*
622          * In raw mode, include the spare area and the ECC bytes that are not
623          * consumed by the controller in the data section. Do not reorganize
624          * here, do it in the ->read_page_raw() handler instead.
625          */
626         if (info->force_raw)
627                 data_len += info->step_spare_size + info->ecc_size;
628
629         switch (info->state) {
630         case STATE_PIO_WRITING:
631                 if (info->step_chunk_size)
632                         writesl(info->mmio_base + NDDB,
633                                 info->data_buff + info->data_buff_pos,
634                                 DIV_ROUND_UP(data_len, 4));
635
636                 if (info->step_spare_size)
637                         writesl(info->mmio_base + NDDB,
638                                 info->oob_buff + info->oob_buff_pos,
639                                 DIV_ROUND_UP(info->step_spare_size, 4));
640                 break;
641         case STATE_PIO_READING:
642                 if (info->step_chunk_size)
643                         drain_fifo(info,
644                                    info->data_buff + info->data_buff_pos,
645                                    DIV_ROUND_UP(data_len, 4));
646
647                 if (info->force_raw)
648                         break;
649
650                 if (info->step_spare_size)
651                         drain_fifo(info,
652                                    info->oob_buff + info->oob_buff_pos,
653                                    DIV_ROUND_UP(info->step_spare_size, 4));
654                 break;
655         default:
656                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
657                                 info->state);
658                 BUG();
659         }
660
661         /* Update buffer pointers for multi-page read/write */
662         info->data_buff_pos += data_len;
663         info->oob_buff_pos += info->step_spare_size;
664 }
665
666 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
667 {
668         handle_data_pio(info);
669
670         info->state = STATE_CMD_DONE;
671         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
672 }
673
674 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
675 {
676         unsigned int status, is_completed = 0, is_ready = 0;
677         unsigned int ready, cmd_done;
678         irqreturn_t ret = IRQ_HANDLED;
679
680         if (info->cs == 0) {
681                 ready           = NDSR_FLASH_RDY;
682                 cmd_done        = NDSR_CS0_CMDD;
683         } else {
684                 ready           = NDSR_RDY;
685                 cmd_done        = NDSR_CS1_CMDD;
686         }
687
688         /* TODO - find out why we need the delay during write operation. */
689         ndelay(1);
690
691         status = nand_readl(info, NDSR);
692
693         if (status & NDSR_UNCORERR)
694                 info->retcode = ERR_UNCORERR;
695         if (status & NDSR_CORERR) {
696                 info->retcode = ERR_CORERR;
697                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
698                     info->ecc_bch)
699                         info->ecc_err_cnt = NDSR_ERR_CNT(status);
700                 else
701                         info->ecc_err_cnt = 1;
702
703                 /*
704                  * Each chunk composing a page is corrected independently,
705                  * and we need to store the maximum number of corrected bitflips
706                  * to return it to the MTD layer in ecc.read_page().
707                  */
708                 info->max_bitflips = max_t(unsigned int,
709                                            info->max_bitflips,
710                                            info->ecc_err_cnt);
711         }
712         if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
713                 info->state = (status & NDSR_RDDREQ) ?
714                         STATE_PIO_READING : STATE_PIO_WRITING;
715                 /* Call the IRQ thread in U-Boot directly */
716                 pxa3xx_nand_irq_thread(info);
717                 return 0;
718         }
719         if (status & cmd_done) {
720                 info->state = STATE_CMD_DONE;
721                 is_completed = 1;
722         }
723         if (status & ready) {
724                 info->state = STATE_READY;
725                 is_ready = 1;
726         }
727
728         /*
729          * Clear all status bits before issuing the next command, which
730          * can and will alter the status bits and will warrant a new
731          * interrupt on its own. This lets the controller exit the IRQ.
732          */
733         nand_writel(info, NDSR, status);
734
735         if (status & NDSR_WRCMDREQ) {
736                 status &= ~NDSR_WRCMDREQ;
737                 info->state = STATE_CMD_HANDLE;
738
739                 /*
740                  * Command buffer registers NDCB{0-2} (and optionally NDCB3)
741                  * must be loaded by writing either 12 or 16 bytes
742                  * directly to NDCB0, four bytes at a time.
743                  *
744                  * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
745                  * but each NDCBx register can be read.
746                  */
747                 nand_writel(info, NDCB0, info->ndcb0);
748                 nand_writel(info, NDCB0, info->ndcb1);
749                 nand_writel(info, NDCB0, info->ndcb2);
750
751                 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
752                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
753                         nand_writel(info, NDCB0, info->ndcb3);
754         }
755
756         if (is_completed)
757                 info->cmd_complete = 1;
758         if (is_ready)
759                 info->dev_ready = 1;
760
761         return ret;
762 }
763
764 static inline int is_buf_blank(uint8_t *buf, size_t len)
765 {
766         for (; len > 0; len--)
767                 if (*buf++ != 0xff)
768                         return 0;
769         return 1;
770 }
771
772 static void set_command_address(struct pxa3xx_nand_info *info,
773                 unsigned int page_size, uint16_t column, int page_addr)
774 {
775         /* small page: pack the column byte and row address into NDCB1 */
776         if (page_size < info->chunk_size) {
777                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
778                                 | (column & 0xFF);
779
780                 info->ndcb2 = 0;
781         } else {
782                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
783                                 | (column & 0xFFFF);
784
785                 if (page_addr & 0xFF0000)
786                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
787                 else
788                         info->ndcb2 = 0;
789         }
790 }
791
792 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
793 {
794         struct pxa3xx_nand_host *host = info->host[info->cs];
795         struct mtd_info *mtd = nand_to_mtd(&host->chip);
796
797         /* reset data and oob column pointers before handling data */
798         info->buf_start         = 0;
799         info->buf_count         = 0;
800         info->data_buff_pos     = 0;
801         info->oob_buff_pos      = 0;
802         info->step_chunk_size   = 0;
803         info->step_spare_size   = 0;
804         info->cur_chunk         = 0;
805         info->use_ecc           = 0;
806         info->use_spare         = 1;
807         info->retcode           = ERR_NONE;
808         info->ecc_err_cnt       = 0;
809         info->ndcb3             = 0;
810         info->need_wait         = 0;
811
812         switch (command) {
813         case NAND_CMD_READ0:
814         case NAND_CMD_READOOB:
815         case NAND_CMD_PAGEPROG:
816                 if (!info->force_raw)
817                         info->use_ecc = 1;
818                 break;
819         case NAND_CMD_PARAM:
820                 info->use_spare = 0;
821                 break;
822         default:
823                 info->ndcb1 = 0;
824                 info->ndcb2 = 0;
825                 break;
826         }
827
828         /*
829          * If we are about to issue a read command, or about to set
830          * the write address, then clean the data buffer.
831          */
832         if (command == NAND_CMD_READ0 ||
833             command == NAND_CMD_READOOB ||
834             command == NAND_CMD_SEQIN) {
835                 info->buf_count = mtd->writesize + mtd->oobsize;
836                 memset(info->data_buff, 0xFF, info->buf_count);
837         }
838 }
839
840 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
841                 int ext_cmd_type, uint16_t column, int page_addr)
842 {
843         int addr_cycle, exec_cmd;
844         struct pxa3xx_nand_host *host;
845         struct mtd_info *mtd;
846
847         host = info->host[info->cs];
848         mtd = nand_to_mtd(&host->chip);
849         addr_cycle = 0;
850         exec_cmd = 1;
851
852         if (info->cs != 0)
853                 info->ndcb0 = NDCB0_CSEL;
854         else
855                 info->ndcb0 = 0;
856
857         if (command == NAND_CMD_SEQIN)
858                 exec_cmd = 0;
859
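        /* total number of address cycles (column + row) needed by this chip */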
860         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
861                                     + host->col_addr_cycles);
862
863         switch (command) {
864         case NAND_CMD_READOOB:
865         case NAND_CMD_READ0:
866                 info->buf_start = column;
867                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
868                                 | addr_cycle
869                                 | NAND_CMD_READ0;
870
871                 if (command == NAND_CMD_READOOB)
872                         info->buf_start += mtd->writesize;
873
874                 if (info->cur_chunk < info->nfullchunks) {
875                         info->step_chunk_size = info->chunk_size;
876                         info->step_spare_size = info->spare_size;
877                 } else {
878                         info->step_chunk_size = info->last_chunk_size;
879                         info->step_spare_size = info->last_spare_size;
880                 }
881
882                 /*
883                  * Multiple page read needs an 'extended command type' field,
884                  * which is either naked-read or last-read according to the
885                  * state.
886                  */
887                 if (info->force_raw) {
888                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
889                                        NDCB0_LEN_OVRD |
890                                        NDCB0_EXT_CMD_TYPE(ext_cmd_type);
891                         info->ndcb3 = info->step_chunk_size +
892                                       info->step_spare_size + info->ecc_size;
893                 } else if (mtd->writesize == info->chunk_size) {
894                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
895                 } else if (mtd->writesize > info->chunk_size) {
896                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
897                                         | NDCB0_LEN_OVRD
898                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
899                         info->ndcb3 = info->step_chunk_size +
900                                 info->step_spare_size;
901                 }
902
903                 set_command_address(info, mtd->writesize, column, page_addr);
904                 break;
905
906         case NAND_CMD_SEQIN:
907
908                 info->buf_start = column;
909                 set_command_address(info, mtd->writesize, 0, page_addr);
910
911                 /*
912                  * Multiple page programming needs to execute the initial
913                  * SEQIN command that sets the page address.
914                  */
915                 if (mtd->writesize > info->chunk_size) {
916                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
917                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
918                                 | addr_cycle
919                                 | command;
920                         exec_cmd = 1;
921                 }
922                 break;
923
924         case NAND_CMD_PAGEPROG:
925                 if (is_buf_blank(info->data_buff,
926                                  (mtd->writesize + mtd->oobsize))) {
927                         exec_cmd = 0;
928                         break;
929                 }
930
931                 if (info->cur_chunk < info->nfullchunks) {
932                         info->step_chunk_size = info->chunk_size;
933                         info->step_spare_size = info->spare_size;
934                 } else {
935                         info->step_chunk_size = info->last_chunk_size;
936                         info->step_spare_size = info->last_spare_size;
937                 }
938
939                 /* Second command setting for large pages */
940                 if (mtd->writesize > info->chunk_size) {
941                         /*
942                          * Multiple page write uses the 'extended command'
943                          * field. This can be used to issue a command dispatch
944                          * or a naked-write depending on the current stage.
945                          */
946                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
947                                         | NDCB0_LEN_OVRD
948                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
949                         info->ndcb3 = info->step_chunk_size +
950                                       info->step_spare_size;
951
952                         /*
953                          * This is the command dispatch that completes a chunked
954                          * page program operation.
955                          */
956                         if (info->cur_chunk == info->ntotalchunks) {
957                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
958                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
959                                         | command;
960                                 info->ndcb1 = 0;
961                                 info->ndcb2 = 0;
962                                 info->ndcb3 = 0;
963                         }
964                 } else {
965                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
966                                         | NDCB0_AUTO_RS
967                                         | NDCB0_ST_ROW_EN
968                                         | NDCB0_DBC
969                                         | (NAND_CMD_PAGEPROG << 8)
970                                         | NAND_CMD_SEQIN
971                                         | addr_cycle;
972                 }
973                 break;
974
975         case NAND_CMD_PARAM:
976                 info->buf_count = INIT_BUFFER_SIZE;
977                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
978                                 | NDCB0_ADDR_CYC(1)
979                                 | NDCB0_LEN_OVRD
980                                 | command;
981                 info->ndcb1 = (column & 0xFF);
982                 info->ndcb3 = INIT_BUFFER_SIZE;
983                 info->step_chunk_size = INIT_BUFFER_SIZE;
984                 break;
985
986         case NAND_CMD_READID:
987                 info->buf_count = READ_ID_BYTES;
988                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
989                                 | NDCB0_ADDR_CYC(1)
990                                 | command;
991                 info->ndcb1 = (column & 0xFF);
992
993                 info->step_chunk_size = 8;
994                 break;
995         case NAND_CMD_STATUS:
996                 info->buf_count = 1;
997                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
998                                 | NDCB0_ADDR_CYC(1)
999                                 | command;
1000
1001                 info->step_chunk_size = 8;
1002                 break;
1003
1004         case NAND_CMD_ERASE1:
1005                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1006                                 | NDCB0_AUTO_RS
1007                                 | NDCB0_ADDR_CYC(3)
1008                                 | NDCB0_DBC
1009                                 | (NAND_CMD_ERASE2 << 8)
1010                                 | NAND_CMD_ERASE1;
1011                 info->ndcb1 = page_addr;
1012                 info->ndcb2 = 0;
1013
1014                 break;
1015         case NAND_CMD_RESET:
1016                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1017                                 | command;
1018
1019                 break;
1020
1021         case NAND_CMD_ERASE2:
1022                 exec_cmd = 0;
1023                 break;
1024
1025         default:
1026                 exec_cmd = 0;
1027                 dev_err(&info->pdev->dev, "non-supported command %x\n",
1028                         command);
1029                 break;
1030         }
1031
1032         return exec_cmd;
1033 }
1034
1035 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1036                          int column, int page_addr)
1037 {
1038         struct nand_chip *chip = mtd_to_nand(mtd);
1039         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1040         struct pxa3xx_nand_info *info = host->info_data;
1041         int exec_cmd;
1042
1043         /*
1044          * if this is an x16 device, then convert the input
1045          * "byte" address into a "word" address appropriate
1046          * for indexing a word-oriented device
1047          */
1048         if (info->reg_ndcr & NDCR_DWIDTH_M)
1049                 column /= 2;
1050
1051         /*
1052          * There may be different NAND chips hooked to
1053          * different chip selects, so check whether the
1054          * chip select has changed and, if so, reload the timings
1055          */
1056         if (info->cs != host->cs) {
1057                 info->cs = host->cs;
1058                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1059                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1060         }
1061
1062         prepare_start_command(info, command);
1063
1064         info->state = STATE_PREPARED;
1065         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1066
1067         if (exec_cmd) {
1068                 u32 ts;
1069
1070                 info->cmd_complete = 0;
1071                 info->dev_ready = 0;
1072                 info->need_wait = 1;
1073                 pxa3xx_nand_start(info);
1074
1075                 ts = get_timer(0);
1076                 while (1) {
1077                         u32 status;
1078
1079                         status = nand_readl(info, NDSR);
1080                         if (status)
1081                                 pxa3xx_nand_irq(info);
1082
1083                         if (info->cmd_complete)
1084                                 break;
1085
1086                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1087                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1088                                 return;
1089                         }
1090                 }
1091         }
1092         info->state = STATE_IDLE;
1093 }
1094
1095 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1096                                   const unsigned command,
1097                                   int column, int page_addr)
1098 {
1099         struct nand_chip *chip = mtd_to_nand(mtd);
1100         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1101         struct pxa3xx_nand_info *info = host->info_data;
1102         int exec_cmd, ext_cmd_type;
1103
1104         /*
1105          * if this is an x16 device, then convert the input
1106          * "byte" address into a "word" address appropriate
1107          * for indexing a word-oriented device
1108          */
1109         if (info->reg_ndcr & NDCR_DWIDTH_M)
1110                 column /= 2;
1111
1112         /*
1113          * There may be different NAND chips hooked to
1114          * different chip selects, so check whether the
1115          * chip select has changed and, if so, reload the timings
1116          */
1117         if (info->cs != host->cs) {
1118                 info->cs = host->cs;
1119                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1120                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1121         }
1122
1123         /* Select the extended command for the first command */
1124         switch (command) {
1125         case NAND_CMD_READ0:
1126         case NAND_CMD_READOOB:
1127                 ext_cmd_type = EXT_CMD_TYPE_MONO;
1128                 break;
1129         case NAND_CMD_SEQIN:
1130                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1131                 break;
1132         case NAND_CMD_PAGEPROG:
1133                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1134                 break;
1135         default:
1136                 ext_cmd_type = 0;
1137                 break;
1138         }
1139
1140         prepare_start_command(info, command);
1141
1142         /*
1143          * Prepare the "is ready" completion before starting a command
1144          * transaction sequence. If the command is not executed, the
1145          * completion is marked as done right away, see below.
1146          *
1147          * We can do that inside the loop because the command variable
1148          * is invariant and thus so is the exec_cmd.
1149          */
1150         info->need_wait = 1;
1151         info->dev_ready = 0;
1152
1153         do {
1154                 u32 ts;
1155
1156                 info->state = STATE_PREPARED;
1157                 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1158                                                column, page_addr);
1159                 if (!exec_cmd) {
1160                         info->need_wait = 0;
1161                         info->dev_ready = 1;
1162                         break;
1163                 }
1164
1165                 info->cmd_complete = 0;
1166                 pxa3xx_nand_start(info);
1167
1168                 ts = get_timer(0);
1169                 while (1) {
1170                         u32 status;
1171
1172                         status = nand_readl(info, NDSR);
1173                         if (status)
1174                                 pxa3xx_nand_irq(info);
1175
1176                         if (info->cmd_complete)
1177                                 break;
1178
1179                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1180                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1181                                 return;
1182                         }
1183                 }
1184
1185                 /* Only a few commands need several steps */
1186                 if (command != NAND_CMD_PAGEPROG &&
1187                     command != NAND_CMD_READ0    &&
1188                     command != NAND_CMD_READOOB)
1189                         break;
1190
1191                 info->cur_chunk++;
1192
1193                 /* Check if the sequence is complete */
1194                 if (info->cur_chunk == info->ntotalchunks &&
1195                     command != NAND_CMD_PAGEPROG)
1196                         break;
1197
1198                 /*
1199                  * After a split program command sequence has issued
1200                  * the command dispatch, the command sequence is complete.
1201                  */
1202                 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1203                     command == NAND_CMD_PAGEPROG &&
1204                     ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1205                         break;
1206
1207                 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1208                         /* Last read: issue a 'last naked read' */
1209                         if (info->cur_chunk == info->ntotalchunks - 1)
1210                                 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1211                         else
1212                                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1213
1214                 /*
1215                  * If a split program command has no more data to transfer,
1216                  * the command dispatch must be issued to complete it.
1217                  */
1218                 } else if (command == NAND_CMD_PAGEPROG &&
1219                            info->cur_chunk == info->ntotalchunks) {
1220                                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1221                 }
1222         } while (1);
1223
1224         info->state = STATE_IDLE;
1225 }
1226
1227 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1228                 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1229                 int page)
1230 {
1231         chip->write_buf(mtd, buf, mtd->writesize);
1232         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1233
1234         return 0;
1235 }
1236
1237 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1238                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1239                 int page)
1240 {
1241         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1242         struct pxa3xx_nand_info *info = host->info_data;
1243         int bf;
1244
1245         chip->read_buf(mtd, buf, mtd->writesize);
1246         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1247
1248         if (info->retcode == ERR_CORERR && info->use_ecc) {
1249                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1250
1251         } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1252                 /*
1253                  * Empty pages will trigger uncorrectable errors. Re-read the
1254                  * entire page in raw mode and check for bits not being "1".
1255                  * If there are more such bits than the supported ECC strength,
1256                  * then it is an actual uncorrectable error.
1257                  */
1258                 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1259                 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1260                                                  chip->oob_poi, mtd->oobsize,
1261                                                  NULL, 0, chip->ecc.strength);
1262                 if (bf < 0) {
1263                         mtd->ecc_stats.failed++;
1264                 } else if (bf) {
1265                         mtd->ecc_stats.corrected += bf;
1266                         info->max_bitflips = max_t(unsigned int,
1267                                                    info->max_bitflips, bf);
1268                         info->retcode = ERR_CORERR;
1269                 } else {
1270                         info->retcode = ERR_NONE;
1271                 }
1272
1273         } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1274                 /* Raw read is not supported with Hamming ECC engine */
1275                 if (is_buf_blank(buf, mtd->writesize))
1276                         info->retcode = ERR_NONE;
1277                 else
1278                         mtd->ecc_stats.failed++;
1279         }
1280
1281         return info->max_bitflips;
1282 }
1283
1284 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1285                                      struct nand_chip *chip, uint8_t *buf,
1286                                      int oob_required, int page)
1287 {
1288         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1289         struct pxa3xx_nand_info *info = host->info_data;
1290         int chunk, ecc_off_buf;
1291
1292         if (!info->ecc_bch)
1293                 return -ENOTSUPP;
1294
1295         /*
1296          * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1297          * pxa3xx_nand_start(), which will actually disable the ECC engine.
1298          */
1299         info->force_raw = true;
1300         chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1301
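        /* ECC bytes are stored in the OOB buffer after all the spare areas */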
1302         ecc_off_buf = (info->nfullchunks * info->spare_size) +
1303                       info->last_spare_size;
1304         for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1305                 chip->read_buf(mtd,
1306                                buf + (chunk * info->chunk_size),
1307                                info->chunk_size);
1308                 chip->read_buf(mtd,
1309                                chip->oob_poi +
1310                                (chunk * (info->spare_size)),
1311                                info->spare_size);
1312                 chip->read_buf(mtd,
1313                                chip->oob_poi + ecc_off_buf +
1314                                (chunk * (info->ecc_size)),
1315                                info->ecc_size - 2);
1316         }
1317
1318         if (info->ntotalchunks > info->nfullchunks) {
1319                 chip->read_buf(mtd,
1320                                buf + (info->nfullchunks * info->chunk_size),
1321                                info->last_chunk_size);
1322                 chip->read_buf(mtd,
1323                                chip->oob_poi +
1324                                (info->nfullchunks * (info->spare_size)),
1325                                info->last_spare_size);
1326                 chip->read_buf(mtd,
1327                                chip->oob_poi + ecc_off_buf +
1328                                (info->nfullchunks * (info->ecc_size)),
1329                                info->ecc_size - 2);
1330         }
1331
1332         info->force_raw = false;
1333
1334         return 0;
1335 }
1336
1337 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1338                                     struct nand_chip *chip, int page)
1339 {
1340         /* Invalidate page cache */
1341         chip->pagebuf = -1;
1342
1343         return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1344                                        page);
1345 }
1346
1347 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1348 {
1349         struct nand_chip *chip = mtd_to_nand(mtd);
1350         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1351         struct pxa3xx_nand_info *info = host->info_data;
1352         char retval = 0xFF;
1353
1354         if (info->buf_start < info->buf_count)
1355                 /* Has a new command just been sent? */
1356                 retval = info->data_buff[info->buf_start++];
1357
1358         return retval;
1359 }
1360
1361 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1362 {
1363         struct nand_chip *chip = mtd_to_nand(mtd);
1364         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1365         struct pxa3xx_nand_info *info = host->info_data;
1366         u16 retval = 0xFFFF;
1367
1368         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1369                 retval = *((u16 *)(info->data_buff+info->buf_start));
1370                 info->buf_start += 2;
1371         }
1372         return retval;
1373 }
1374
1375 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1376 {
1377         struct nand_chip *chip = mtd_to_nand(mtd);
1378         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1379         struct pxa3xx_nand_info *info = host->info_data;
1380         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1381
1382         memcpy(buf, info->data_buff + info->buf_start, real_len);
1383         info->buf_start += real_len;
1384 }
1385
1386 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1387                 const uint8_t *buf, int len)
1388 {
1389         struct nand_chip *chip = mtd_to_nand(mtd);
1390         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1391         struct pxa3xx_nand_info *info = host->info_data;
1392         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1393
1394         memcpy(info->data_buff + info->buf_start, buf, real_len);
1395         info->buf_start += real_len;
1396 }
1397
1398 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1399 {
1400         return;
1401 }
1402
1403 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1404 {
1405         struct nand_chip *chip = mtd_to_nand(mtd);
1406         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1407         struct pxa3xx_nand_info *info = host->info_data;
1408
1409         if (info->need_wait) {
1410                 u32 ts;
1411
1412                 info->need_wait = 0;
1413
1414                 ts = get_timer(0);
1415                 while (1) {
1416                         u32 status;
1417
1418                         status = nand_readl(info, NDSR);
1419                         if (status)
1420                                 pxa3xx_nand_irq(info);
1421
1422                         if (info->dev_ready)
1423                                 break;
1424
1425                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1426                                 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1427                                 return NAND_STATUS_FAIL;
1428                         }
1429                 }
1430         }
1431
1432         /* pxa3xx_nand_send_command has already waited for command completion */
1433         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1434                 if (info->retcode == ERR_NONE)
1435                         return 0;
1436                 else
1437                         return NAND_STATUS_FAIL;
1438         }
1439
1440         return NAND_STATUS_READY;
1441 }
1442
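/*
 * Program a minimal default NDCR configuration, sufficient for the
 * initial identification commands (READID, STATUS, PARAM).
 */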
1443 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1444 {
1445         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1446
1447         /* Configure default flash values */
1448         info->reg_ndcr = 0x0; /* enable all interrupts */
1449         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1450         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1451         info->reg_ndcr |= NDCR_SPARE_EN;
1452
1453         return 0;
1454 }
1455
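/*
 * Complete the NDCR setup once the flash geometry is known: row address
 * start, pages-per-block and page-size bits.
 */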
1456 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1457 {
1458         struct pxa3xx_nand_host *host = info->host[info->cs];
1459         struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1460         struct nand_chip *chip = mtd_to_nand(mtd);
1461
1462         info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1463         info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1464         info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1465 }
1466
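/*
 * Keep-config case: read back the current NDCR and timing register
 * settings instead of reconfiguring the controller.
 */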
1467 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1468 {
1469         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1470         uint32_t ndcr = nand_readl(info, NDCR);
1471
1472         /* Set an initial chunk size */
1473         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1474         info->reg_ndcr = ndcr &
1475                 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1476         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1477         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1478         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1479 }
1480
1481 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1482 {
1483         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1484         if (info->data_buff == NULL)
1485                 return -ENOMEM;
1486         return 0;
1487 }
1488
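/*
 * Probe for a chip on the current chip select: apply default settings
 * and conservative ONFI mode 0 timings, then issue a RESET and check
 * that the device answers.
 */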
1489 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1490 {
1491         struct pxa3xx_nand_info *info = host->info_data;
1492         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1493         struct mtd_info *mtd;
1494         struct nand_chip *chip;
1495         const struct nand_sdr_timings *timings;
1496         int ret;
1497
1498         mtd = nand_to_mtd(&info->host[info->cs]->chip);
1499         chip = mtd_to_nand(mtd);
1500
1501         /* configure default flash values */
1502         info->reg_ndcr = 0x0; /* enable all interrupts */
1503         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1504         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1505         info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1506
1507         /* try the slowest common (ONFI mode 0) timings first */
1508         timings = onfi_async_timing_mode_to_sdr_timings(0);
1509         if (IS_ERR(timings))
1510                 return PTR_ERR(timings);
1511
1512         pxa3xx_nand_set_sdr_timing(host, timings);
1513
1514         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1515         ret = chip->waitfunc(mtd, chip);
1516         if (ret & NAND_STATUS_FAIL)
1517                 return -ENODEV;
1518
1519         return 0;
1520 }
1521
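/*
 * Translate the requested ECC strength/step size and the page size into
 * the controller's chunk layout (data, spare and ECC bytes per chunk)
 * and fill the nand_ecc_ctrl accordingly.
 */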
1522 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1523                         struct nand_ecc_ctrl *ecc,
1524                         int strength, int ecc_stepsize, int page_size)
1525 {
1526         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1527                 info->nfullchunks = 1;
1528                 info->ntotalchunks = 1;
1529                 info->chunk_size = 2048;
1530                 info->spare_size = 40;
1531                 info->ecc_size = 24;
1532                 ecc->mode = NAND_ECC_HW;
1533                 ecc->size = 512;
1534                 ecc->strength = 1;
1535
1536         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1537                 info->nfullchunks = 1;
1538                 info->ntotalchunks = 1;
1539                 info->chunk_size = 512;
1540                 info->spare_size = 8;
1541                 info->ecc_size = 8;
1542                 ecc->mode = NAND_ECC_HW;
1543                 ecc->size = 512;
1544                 ecc->strength = 1;
1545
1546         /*
1547          * Required ECC: 4-bit correction per 512 bytes
1548          * Select: 16-bit correction per 2048 bytes
1549          */
1550         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1551                 info->ecc_bch = 1;
1552                 info->nfullchunks = 1;
1553                 info->ntotalchunks = 1;
1554                 info->chunk_size = 2048;
1555                 info->spare_size = 32;
1556                 info->ecc_size = 32;
1557                 ecc->mode = NAND_ECC_HW;
1558                 ecc->size = info->chunk_size;
1559                 ecc->layout = &ecc_layout_2KB_bch4bit;
1560                 ecc->strength = 16;
1561
1562         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1563                 info->ecc_bch = 1;
1564                 info->nfullchunks = 2;
1565                 info->ntotalchunks = 2;
1566                 info->chunk_size = 2048;
1567                 info->spare_size = 32;
1568                 info->ecc_size = 32;
1569                 ecc->mode = NAND_ECC_HW;
1570                 ecc->size = info->chunk_size;
1571                 ecc->layout = &ecc_layout_4KB_bch4bit;
1572                 ecc->strength = 16;
1573
1574         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1575                 info->ecc_bch = 1;
1576                 info->nfullchunks = 4;
1577                 info->ntotalchunks = 4;
1578                 info->chunk_size = 2048;
1579                 info->spare_size = 32;
1580                 info->ecc_size = 32;
1581                 ecc->mode = NAND_ECC_HW;
1582                 ecc->size = info->chunk_size;
1583                 ecc->layout = &ecc_layout_8KB_bch4bit;
1584                 ecc->strength = 16;
1585
1586         /*
1587          * Required ECC: 8-bit correction per 512 bytes
1588          * Select: 16-bit correction per 1024 bytes
1589          */
1590         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1591                 info->ecc_bch = 1;
1592                 info->nfullchunks = 1;
1593                 info->ntotalchunks = 2;
1594                 info->chunk_size = 1024;
1595                 info->spare_size = 0;
1596                 info->last_chunk_size = 1024;
1597                 info->last_spare_size = 32;
1598                 info->ecc_size = 32;
1599                 ecc->mode = NAND_ECC_HW;
1600                 ecc->size = info->chunk_size;
1601                 ecc->layout = &ecc_layout_2KB_bch8bit;
1602                 ecc->strength = 16;
1603
1604         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1605                 info->ecc_bch = 1;
1606                 info->nfullchunks = 4;
1607                 info->ntotalchunks = 5;
1608                 info->chunk_size = 1024;
1609                 info->spare_size = 0;
1610                 info->last_chunk_size = 0;
1611                 info->last_spare_size = 64;
1612                 info->ecc_size = 32;
1613                 ecc->mode = NAND_ECC_HW;
1614                 ecc->size = info->chunk_size;
1615                 ecc->layout = &ecc_layout_4KB_bch8bit;
1616                 ecc->strength = 16;
1617
1618         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1619                 info->ecc_bch = 1;
1620                 info->nfullchunks = 8;
1621                 info->ntotalchunks = 9;
1622                 info->chunk_size = 1024;
1623                 info->spare_size = 0;
1624                 info->last_chunk_size = 0;
1625                 info->last_spare_size = 160;
1626                 info->ecc_size = 32;
1627                 ecc->mode = NAND_ECC_HW;
1628                 ecc->size = info->chunk_size;
1629                 ecc->layout = &ecc_layout_8KB_bch8bit;
1630                 ecc->strength = 16;
1631
1632         } else {
1633                 dev_err(&info->pdev->dev,
1634                         "ECC strength %d at page size %d is not supported\n",
1635                         strength, page_size);
1636                 return -ENODEV;
1637         }
1638
1639         return 0;
1640 }
1641
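/*
 * Controller-specific flash scan: configure (or detect) NDCR, identify
 * the flash, set up timings, ECC and buffers, then finish with
 * nand_scan_tail().
 */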
1642 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1643 {
1644         struct nand_chip *chip = mtd_to_nand(mtd);
1645         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1646         struct pxa3xx_nand_info *info = host->info_data;
1647         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1648         int ret;
1649         uint16_t ecc_strength, ecc_step;
1650
1651         if (pdata->keep_config) {
1652                 pxa3xx_nand_detect_config(info);
1653         } else {
1654                 ret = pxa3xx_nand_config_ident(info);
1655                 if (ret)
1656                         return ret;
1657                 ret = pxa3xx_nand_sensing(host);
1658                 if (ret) {
1659                         dev_info(&info->pdev->dev,
1660                                  "There is no chip on cs %d!\n",
1661                                  info->cs);
1662                         return ret;
1663                 }
1664         }
1665
1666         /* Device detection must be done with ECC disabled */
1667         if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1668                 nand_writel(info, NDECCCTRL, 0x0);
1669
1670         if (nand_scan_ident(mtd, 1, NULL))
1671                 return -ENODEV;
1672
1673         if (!pdata->keep_config) {
1674                 ret = pxa3xx_nand_init_timings(host);
1675                 if (ret) {
1676                         dev_err(&info->pdev->dev,
1677                                 "Failed to set timings: %d\n", ret);
1678                         return ret;
1679                 }
1680         }
1681
1682 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1683         /*
1684          * We'll use a bad block table stored in-flash and don't
1685          * allow writing the bad block marker to the flash.
1686          */
1687         chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1688         chip->bbt_td = &bbt_main_descr;
1689         chip->bbt_md = &bbt_mirror_descr;
1690 #endif
1691
1692         if (pdata->ecc_strength && pdata->ecc_step_size) {
1693                 ecc_strength = pdata->ecc_strength;
1694                 ecc_step = pdata->ecc_step_size;
1695         } else {
1696                 ecc_strength = chip->ecc_strength_ds;
1697                 ecc_step = chip->ecc_step_ds;
1698         }
1699
1700         /* Set default ECC strength requirements on non-ONFI devices */
1701         if (ecc_strength < 1 && ecc_step < 1) {
1702                 ecc_strength = 1;
1703                 ecc_step = 512;
1704         }
1705
1706         ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1707                            ecc_step, mtd->writesize);
1708         if (ret)
1709                 return ret;
1710
1711         /*
1712          * If the page size is bigger than the FIFO size, check that we
1713          * are running on the right variant and then switch to the
1714          * extended (aka split) command handling.
1715          */
1716         if (mtd->writesize > info->chunk_size) {
1717                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1718                         chip->cmdfunc = nand_cmdfunc_extended;
1719                 } else {
1720                         dev_err(&info->pdev->dev,
1721                                 "unsupported page size on this variant\n");
1722                         return -ENODEV;
1723                 }
1724         }
1725
1726         /* calculate addressing information */
1727         if (mtd->writesize >= 2048)
1728                 host->col_addr_cycles = 2;
1729         else
1730                 host->col_addr_cycles = 1;
1731
1732         /* release the initial buffer */
1733         kfree(info->data_buff);
1734
1735         /* allocate the real data + oob buffer */
1736         info->buf_size = mtd->writesize + mtd->oobsize;
1737         ret = pxa3xx_nand_init_buff(info);
1738         if (ret)
1739                 return ret;
1740         info->oob_buff = info->data_buff + mtd->writesize;
1741
1742         if ((mtd->size >> chip->page_shift) > 65536)
1743                 host->row_addr_cycles = 3;
1744         else
1745                 host->row_addr_cycles = 2;
1746
1747         if (!pdata->keep_config)
1748                 pxa3xx_nand_config_tail(info);
1749
1750         return nand_scan_tail(mtd);
1751 }
1752
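/*
 * Set up the per-chip-select host structures, hook up the nand_chip
 * callbacks and allocate the initial identification buffer.
 */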
1753 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1754 {
1755         struct pxa3xx_nand_platform_data *pdata;
1756         struct pxa3xx_nand_host *host;
1757         struct nand_chip *chip = NULL;
1758         struct mtd_info *mtd;
1759         int ret, cs;
1760
1761         pdata = info->pdata;
1762         if (pdata->num_cs <= 0)
1763                 return -ENODEV;
1764
1765         info->variant = pxa3xx_nand_get_variant();
1766         for (cs = 0; cs < pdata->num_cs; cs++) {
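                /*
                 * Hosts are laid out right after the info structure; the
                 * cast below relies on the nand_chip being the first member
                 * of struct pxa3xx_nand_host.
                 */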
1767                 chip = (struct nand_chip *)
1768                         ((u8 *)&info[1] + sizeof(*host) * cs);
1769                 mtd = nand_to_mtd(chip);
1770                 host = (struct pxa3xx_nand_host *)chip;
1771                 info->host[cs] = host;
1772                 host->cs = cs;
1773                 host->info_data = info;
1774                 mtd->owner = THIS_MODULE;
1775
1776                 nand_set_controller_data(chip, host);
1777                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1778                 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1779                 chip->ecc.read_oob_raw  = pxa3xx_nand_read_oob_raw;
1780                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1781                 chip->controller        = &info->controller;
1782                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1783                 chip->select_chip       = pxa3xx_nand_select_chip;
1784                 chip->read_word         = pxa3xx_nand_read_word;
1785                 chip->read_byte         = pxa3xx_nand_read_byte;
1786                 chip->read_buf          = pxa3xx_nand_read_buf;
1787                 chip->write_buf         = pxa3xx_nand_write_buf;
1788                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1789                 chip->cmdfunc           = nand_cmdfunc;
1790         }
1791
1792         /* Allocate a buffer to allow flash detection */
1793         info->buf_size = INIT_BUFFER_SIZE;
1794         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1795         if (info->data_buff == NULL) {
1796                 ret = -ENOMEM;
1797                 goto fail_disable_clk;
1798         }
1799
1800         /* start with all interrupts disabled */
1801         disable_int(info, NDSR_MASK);
1802
1803         return 0;
1804
1806 fail_disable_clk:
1807         return ret;
1808 }
1809
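/*
 * Build the platform data from the first enabled
 * "marvell,mvebu-pxa3xx-nand" node in the device tree: controller base
 * address, number of chip selects, arbiter/keep-config flags and
 * optional ECC strength/step-size hints.
 */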
1810 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1811 {
1812         struct pxa3xx_nand_platform_data *pdata;
1813         const void *blob = gd->fdt_blob;
1814         int node = -1;
1815
1816         pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1817         if (!pdata)
1818                 return -ENOMEM;
1819
1820         /* Get address decoding nodes from the FDT blob */
1821         do {
1822                 node = fdt_node_offset_by_compatible(blob, node,
1823                                                      "marvell,mvebu-pxa3xx-nand");
1824                 if (node < 0)
1825                         break;
1826
1827                 /* Bypass disabled nodes */
1828                 if (!fdtdec_get_is_enabled(blob, node))
1829                         continue;
1830
1831                 /* Get the first enabled NAND controller base address */
1832                 info->mmio_base =
1833                         (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1834                                         blob, node, "reg", 0, NULL, true);
1835
1836                 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1837                 if (pdata->num_cs != 1) {
1838                         pr_err("pxa3xx driver supports single CS only\n");
1839                         break;
1840                 }
1841
1842                 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1843                         pdata->enable_arbiter = 1;
1844
1845                 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1846                         pdata->keep_config = 1;
1847
1848                 /*
1849                  * ECC parameters.
1850                  * If these are not set, they will be selected according
1851                  * to the detected flash type.
1852                  */
1853                 /* ECC strength */
1854                 pdata->ecc_strength = fdtdec_get_int(blob, node,
1855                                                      "nand-ecc-strength", 0);
1856
1857                 /* ECC step size */
1858                 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1859                                                       "nand-ecc-step-size", 0);
1860
1861                 info->pdata = pdata;
1862
1863                 /* Currently support only a single NAND controller */
1864                 return 0;
1865
1866         } while (node >= 0);
1867
1868         return -EINVAL;
1869 }
1870
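/*
 * Top-level probe: parse the device tree, allocate controller resources,
 * then scan and register an mtd device for each configured chip select.
 */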
1871 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1872 {
1873         struct pxa3xx_nand_platform_data *pdata;
1874         int ret, cs, probe_success;
1875
1876         ret = pxa3xx_nand_probe_dt(info);
1877         if (ret)
1878                 return ret;
1879
1880         pdata = info->pdata;
1881
1882         ret = alloc_nand_resource(info);
1883         if (ret) {
1884                 dev_err(&info->pdev->dev, "alloc nand resource failed\n");
1885                 return ret;
1886         }
1887
1888         probe_success = 0;
1889         for (cs = 0; cs < pdata->num_cs; cs++) {
1890                 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1891
1892                 /*
1893                  * The mtd name matches the one used in the 'mtdparts'
1894                  * kernel parameter. It must not be changed, otherwise the
1895                  * user's mtd partition configuration would break.
1896                  */
1897                 mtd->name = "pxa3xx_nand-0";
1898                 info->cs = cs;
1899                 ret = pxa3xx_nand_scan(mtd);
1900                 if (ret) {
1901                         dev_info(&info->pdev->dev, "failed to scan nand at cs %d\n",
1902                                  cs);
1903                         continue;
1904                 }
1905
1906                 if (nand_register(cs, mtd))
1907                         continue;
1908
1909                 probe_success = 1;
1910         }
1911
1912         if (!probe_success)
1913                 return -ENODEV;
1914
1915         return 0;
1916 }
1917
1918 /*
1919  * Main initialization routine
1920  */
1921 void board_nand_init(void)
1922 {
1923         struct pxa3xx_nand_info *info;
1924         struct pxa3xx_nand_host *host;
1925         int ret;
1926
1927         info = kzalloc(sizeof(*info) +
1928                        sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1929                        GFP_KERNEL);
1930         if (!info)
1931                 return;
1932
1933         ret = pxa3xx_nand_probe(info);
1934         if (ret)
1935                 kfree(info);
1936 }