dm: core: Require users of devres to include the header
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/mtd/nand/raw/pxa3xx_nand.c
4  *
5  * Copyright © 2005 Intel Corporation
6  * Copyright © 2006 Marvell International Ltd.
7  */
8
9 #include <common.h>
10 #include <malloc.h>
11 #include <fdtdec.h>
12 #include <nand.h>
13 #include <dm/devres.h>
14 #include <linux/err.h>
15 #include <linux/errno.h>
16 #include <asm/io.h>
17 #include <asm/arch/cpu.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/mtd/rawnand.h>
20 #include <linux/types.h>
21
22 #include "pxa3xx_nand.h"
23
24 DECLARE_GLOBAL_DATA_PTR;
25
26 #define TIMEOUT_DRAIN_FIFO      5       /* in ms */
27 #define CHIP_DELAY_TIMEOUT      200
28 #define NAND_STOP_DELAY         40
29
30 /*
31  * Define a buffer size for the initial command that detects the flash device:
32  * STATUS, READID and PARAM.
33  * ONFI param page is 256 bytes, and there are three redundant copies
34  * to be read. JEDEC param page is 512 bytes, and there are also three
35  * redundant copies to be read.
36  * Hence this buffer should be at least 512 x 3. Let's pick 2048.
37  */
38 #define INIT_BUFFER_SIZE        2048
39
40 /* registers and bit definitions */
41 #define NDCR            (0x00) /* Control register */
42 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
43 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
44 #define NDSR            (0x14) /* Status Register */
45 #define NDPCR           (0x18) /* Page Count Register */
46 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
47 #define NDBDR1          (0x20) /* Bad Block Register 1 */
48 #define NDECCCTRL       (0x28) /* ECC control */
49 #define NDDB            (0x40) /* Data Buffer */
50 #define NDCB0           (0x48) /* Command Buffer0 */
51 #define NDCB1           (0x4C) /* Command Buffer1 */
52 #define NDCB2           (0x50) /* Command Buffer2 */
53
54 #define NDCR_SPARE_EN           (0x1 << 31)
55 #define NDCR_ECC_EN             (0x1 << 30)
56 #define NDCR_DMA_EN             (0x1 << 29)
57 #define NDCR_ND_RUN             (0x1 << 28)
58 #define NDCR_DWIDTH_C           (0x1 << 27)
59 #define NDCR_DWIDTH_M           (0x1 << 26)
60 #define NDCR_PAGE_SZ            (0x1 << 24)
61 #define NDCR_NCSX               (0x1 << 23)
62 #define NDCR_ND_MODE            (0x3 << 21)
63 #define NDCR_NAND_MODE          (0x0)
64 #define NDCR_CLR_PG_CNT         (0x1 << 20)
65 #define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
66 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
67 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
68
69 #define NDCR_RA_START           (0x1 << 15)
70 #define NDCR_PG_PER_BLK         (0x1 << 14)
71 #define NDCR_ND_ARB_EN          (0x1 << 12)
72 #define NDCR_INT_MASK           (0xFFF)
73
74 #define NDSR_MASK               (0xfff)
75 #define NDSR_ERR_CNT_OFF        (16)
76 #define NDSR_ERR_CNT_MASK       (0x1f)
77 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
78 #define NDSR_RDY                (0x1 << 12)
79 #define NDSR_FLASH_RDY          (0x1 << 11)
80 #define NDSR_CS0_PAGED          (0x1 << 10)
81 #define NDSR_CS1_PAGED          (0x1 << 9)
82 #define NDSR_CS0_CMDD           (0x1 << 8)
83 #define NDSR_CS1_CMDD           (0x1 << 7)
84 #define NDSR_CS0_BBD            (0x1 << 6)
85 #define NDSR_CS1_BBD            (0x1 << 5)
86 #define NDSR_UNCORERR           (0x1 << 4)
87 #define NDSR_CORERR             (0x1 << 3)
88 #define NDSR_WRDREQ             (0x1 << 2)
89 #define NDSR_RDDREQ             (0x1 << 1)
90 #define NDSR_WRCMDREQ           (0x1)
91
92 #define NDCB0_LEN_OVRD          (0x1 << 28)
93 #define NDCB0_ST_ROW_EN         (0x1 << 26)
94 #define NDCB0_AUTO_RS           (0x1 << 25)
95 #define NDCB0_CSEL              (0x1 << 24)
96 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
97 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
98 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
99 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
100 #define NDCB0_NC                (0x1 << 20)
101 #define NDCB0_DBC               (0x1 << 19)
102 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
103 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
104 #define NDCB0_CMD2_MASK         (0xff << 8)
105 #define NDCB0_CMD1_MASK         (0xff)
106 #define NDCB0_ADDR_CYC_SHIFT    (16)
107
108 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
109 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
110 #define EXT_CMD_TYPE_READ       4 /* Read */
111 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
112 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
113 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
114 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
115
116 /*
117  * This should be large enough to read 'ONFI' and 'JEDEC'.
118  * Let's use 7 bytes, which is the maximum ID count supported
119  * by the controller (see NDCR_RD_ID_CNT_MASK).
120  */
121 #define READ_ID_BYTES           7
122
123 /* macros for registers read/write */
124 #define nand_writel(info, off, val)     \
125         writel((val), (info)->mmio_base + (off))
126
127 #define nand_readl(info, off)           \
128         readl((info)->mmio_base + (off))
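/*
 * Usage sketch (illustrative only): the accessors above are meant for
 * read-modify-write sequences on the controller registers, e.g.
 *
 *	uint32_t ndcr = nand_readl(info, NDCR);
 *	nand_writel(info, NDCR, ndcr | NDCR_ND_RUN);
 *
 * which is the pattern used by disable_int() and pxa3xx_nand_start()
 * below.
 */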
129
130 /* error code and state */
131 enum {
132         ERR_NONE        = 0,
133         ERR_DMABUSERR   = -1,
134         ERR_SENDCMD     = -2,
135         ERR_UNCORERR    = -3,
136         ERR_BBERR       = -4,
137         ERR_CORERR      = -5,
138 };
139
140 enum {
141         STATE_IDLE = 0,
142         STATE_PREPARED,
143         STATE_CMD_HANDLE,
144         STATE_DMA_READING,
145         STATE_DMA_WRITING,
146         STATE_DMA_DONE,
147         STATE_PIO_READING,
148         STATE_PIO_WRITING,
149         STATE_CMD_DONE,
150         STATE_READY,
151 };
152
153 enum pxa3xx_nand_variant {
154         PXA3XX_NAND_VARIANT_PXA,
155         PXA3XX_NAND_VARIANT_ARMADA370,
156 };
157
158 struct pxa3xx_nand_host {
159         struct nand_chip        chip;
160         void                    *info_data;
161
162         /* page size of attached chip */
163         int                     use_ecc;
164         int                     cs;
165
166         /* calculated from pxa3xx_nand_flash data */
167         unsigned int            col_addr_cycles;
168         unsigned int            row_addr_cycles;
169 };
170
171 struct pxa3xx_nand_info {
172         struct nand_hw_control  controller;
173         struct pxa3xx_nand_platform_data *pdata;
174
175         struct clk              *clk;
176         void __iomem            *mmio_base;
177         unsigned long           mmio_phys;
178         int                     cmd_complete, dev_ready;
179
180         unsigned int            buf_start;
181         unsigned int            buf_count;
182         unsigned int            buf_size;
183         unsigned int            data_buff_pos;
184         unsigned int            oob_buff_pos;
185
186         unsigned char           *data_buff;
187         unsigned char           *oob_buff;
188
189         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
190         unsigned int            state;
191
192         /*
193          * This driver supports NFCv1 (as found in PXA SoC)
194          * and NFCv2 (as found in Armada 370/XP SoC).
195          */
196         enum pxa3xx_nand_variant variant;
197
198         int                     cs;
199         int                     use_ecc;        /* use HW ECC ? */
200         int                     force_raw;      /* prevent use_ecc from being set */
201         int                     ecc_bch;        /* using BCH ECC? */
202         int                     use_spare;      /* use spare ? */
203         int                     need_wait;
204
205         /* Amount of real data per full chunk */
206         unsigned int            chunk_size;
207
208         /* Amount of spare data per full chunk */
209         unsigned int            spare_size;
210
211         /* Number of full chunks (i.e. chunk_size + spare_size) */
212         unsigned int            nfullchunks;
213
214         /*
215          * Total number of chunks. If equal to nfullchunks, then there
216          * are only full chunks. Otherwise, there is one last chunk of
217          * size (last_chunk_size + last_spare_size)
218          */
219         unsigned int            ntotalchunks;
220
221         /* Amount of real data in the last chunk */
222         unsigned int            last_chunk_size;
223
224         /* Amount of spare data in the last chunk */
225         unsigned int            last_spare_size;
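        /*
         * Illustrative example (hypothetical geometry): a page split
         * into two full chunks of (chunk_size + spare_size) bytes plus
         * one smaller trailing chunk gives nfullchunks = 2 and
         * ntotalchunks = 3, with the last chunk covering
         * last_chunk_size + last_spare_size bytes.
         */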
226
227         unsigned int            ecc_size;
228         unsigned int            ecc_err_cnt;
229         unsigned int            max_bitflips;
230         int                     retcode;
231
232         /*
233          * Variables only valid during command
234          * execution. step_chunk_size and step_spare_size are the
235          * amounts of real data and spare data in the current
236          * chunk. cur_chunk is the current chunk being
237          * read/programmed.
238          */
239         unsigned int            step_chunk_size;
240         unsigned int            step_spare_size;
241         unsigned int            cur_chunk;
242
243         /* cached register value */
244         uint32_t                reg_ndcr;
245         uint32_t                ndtr0cs0;
246         uint32_t                ndtr1cs0;
247
248         /* generated NDCBx register values */
249         uint32_t                ndcb0;
250         uint32_t                ndcb1;
251         uint32_t                ndcb2;
252         uint32_t                ndcb3;
253 };
254
255 static struct pxa3xx_nand_timing timing[] = {
256         /*
257          * tCH  Enable signal hold time
258          * tCS  Enable signal setup time
259          * tWH  ND_nWE high duration
260          * tWP  ND_nWE pulse time
261          * tRH  ND_nRE high duration
262          * tRP  ND_nRE pulse width
263          * tR   ND_nWE high to ND_nRE low for read
264          * tWHR ND_nWE high to ND_nRE low for status read
265          * tAR  ND_ALE low to ND_nRE low delay
266          */
267         /*ch  cs  wh  wp   rh  rp   r      whr  ar */
268         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
269         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
270         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
271         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
272         {  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
273 };
274
275 static struct pxa3xx_nand_flash builtin_flash_types[] = {
276         /*
277          * chip_id
278          * flash_width  Width of Flash memory (DWIDTH_M)
279          * dfc_width    Width of flash controller (DWIDTH_C)
280          * *timing
281          * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
282          */
283         { 0x46ec, 16, 16, &timing[1] },
284         { 0xdaec,  8,  8, &timing[1] },
285         { 0xd7ec,  8,  8, &timing[1] },
286         { 0xa12c,  8,  8, &timing[2] },
287         { 0xb12c, 16, 16, &timing[2] },
288         { 0xdc2c,  8,  8, &timing[2] },
289         { 0xcc2c, 16, 16, &timing[2] },
290         { 0xba20, 16, 16, &timing[3] },
291         { 0xda98,  8,  8, &timing[4] },
292 };
293
294 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
295 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
296 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
297
298 static struct nand_bbt_descr bbt_main_descr = {
299         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
300                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
301         .offs = 8,
302         .len = 6,
303         .veroffs = 14,
304         .maxblocks = 8,         /* Last 8 blocks in each chip */
305         .pattern = bbt_pattern
306 };
307
308 static struct nand_bbt_descr bbt_mirror_descr = {
309         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
310                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
311         .offs = 8,
312         .len = 6,
313         .veroffs = 14,
314         .maxblocks = 8,         /* Last 8 blocks in each chip */
315         .pattern = bbt_mirror_pattern
316 };
317 #endif
318
319 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
320         .eccbytes = 32,
321         .eccpos = {
322                 32, 33, 34, 35, 36, 37, 38, 39,
323                 40, 41, 42, 43, 44, 45, 46, 47,
324                 48, 49, 50, 51, 52, 53, 54, 55,
325                 56, 57, 58, 59, 60, 61, 62, 63},
326         .oobfree = { {2, 30} }
327 };
328
329 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
330         .eccbytes = 64,
331         .eccpos = {
332                 32, 33, 34, 35, 36, 37, 38, 39,
333                 40, 41, 42, 43, 44, 45, 46, 47,
334                 48, 49, 50, 51, 52, 53, 54, 55,
335                 56, 57, 58, 59, 60, 61, 62, 63,
336                 64, 65, 66, 67, 68, 69, 70, 71,
337                 72, 73, 74, 75, 76, 77, 78, 79,
338                 80, 81, 82, 83, 84, 85, 86, 87,
339                 88, 89, 90, 91, 92, 93, 94, 95},
340         .oobfree = { {1, 4}, {6, 26} }
341 };
342
343 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
344         .eccbytes = 64,
345         .eccpos = {
346                 32,  33,  34,  35,  36,  37,  38,  39,
347                 40,  41,  42,  43,  44,  45,  46,  47,
348                 48,  49,  50,  51,  52,  53,  54,  55,
349                 56,  57,  58,  59,  60,  61,  62,  63,
350                 96,  97,  98,  99,  100, 101, 102, 103,
351                 104, 105, 106, 107, 108, 109, 110, 111,
352                 112, 113, 114, 115, 116, 117, 118, 119,
353                 120, 121, 122, 123, 124, 125, 126, 127},
354         /* Bootrom looks in bytes 0 & 5 for bad blocks */
355         .oobfree = { {6, 26}, { 64, 32} }
356 };
357
358 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
359         .eccbytes = 128,
360         .eccpos = {
361                 32,  33,  34,  35,  36,  37,  38,  39,
362                 40,  41,  42,  43,  44,  45,  46,  47,
363                 48,  49,  50,  51,  52,  53,  54,  55,
364                 56,  57,  58,  59,  60,  61,  62,  63,
365
366                 96,  97,  98,  99,  100, 101, 102, 103,
367                 104, 105, 106, 107, 108, 109, 110, 111,
368                 112, 113, 114, 115, 116, 117, 118, 119,
369                 120, 121, 122, 123, 124, 125, 126, 127,
370
371                 160, 161, 162, 163, 164, 165, 166, 167,
372                 168, 169, 170, 171, 172, 173, 174, 175,
373                 176, 177, 178, 179, 180, 181, 182, 183,
374                 184, 185, 186, 187, 188, 189, 190, 191,
375
376                 224, 225, 226, 227, 228, 229, 230, 231,
377                 232, 233, 234, 235, 236, 237, 238, 239,
378                 240, 241, 242, 243, 244, 245, 246, 247,
379                 248, 249, 250, 251, 252, 253, 254, 255},
380
381         /* Bootrom looks in bytes 0 & 5 for bad blocks */
382         .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
383 };
384
385 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
386         .eccbytes = 128,
387         .eccpos = {
388                 32,  33,  34,  35,  36,  37,  38,  39,
389                 40,  41,  42,  43,  44,  45,  46,  47,
390                 48,  49,  50,  51,  52,  53,  54,  55,
391                 56,  57,  58,  59,  60,  61,  62,  63},
392         .oobfree = { }
393 };
394
395 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
396         .eccbytes = 256,
397         .eccpos = {},
398         /* HW ECC handles all ECC data and all spare area is free for OOB */
399         .oobfree = {{0, 160} }
400 };
401
402 #define NDTR0_tCH(c)    (min((c), 7) << 19)
403 #define NDTR0_tCS(c)    (min((c), 7) << 16)
404 #define NDTR0_tWH(c)    (min((c), 7) << 11)
405 #define NDTR0_tWP(c)    (min((c), 7) << 8)
406 #define NDTR0_tRH(c)    (min((c), 7) << 3)
407 #define NDTR0_tRP(c)    (min((c), 7) << 0)
408
409 #define NDTR1_tR(c)     (min((c), 65535) << 16)
410 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
411 #define NDTR1_tAR(c)    (min((c), 15) << 0)
412
413 /* convert nanoseconds to NAND flash controller clock cycles */
414 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
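/*
 * Worked example (clock value hypothetical, for illustration only):
 * with a 250 MHz NAND controller clock, a 25 ns constraint converts to
 * ns2cycle(25, 250000000) = (25 * 250) / 1000 = 6 cycles.
 */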
415
416 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
417 {
418         /* We only support the Armada 370/XP/38x for now */
419         return PXA3XX_NAND_VARIANT_ARMADA370;
420 }
421
422 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
423                                    const struct pxa3xx_nand_timing *t)
424 {
425         struct pxa3xx_nand_info *info = host->info_data;
426         unsigned long nand_clk = mvebu_get_nand_clock();
427         uint32_t ndtr0, ndtr1;
428
429         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
430                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
431                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
432                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
433                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
434                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
435
436         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
437                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
438                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
439
440         info->ndtr0cs0 = ndtr0;
441         info->ndtr1cs0 = ndtr1;
442         nand_writel(info, NDTR0CS0, ndtr0);
443         nand_writel(info, NDTR1CS0, ndtr1);
444 }
445
446 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
447                                        const struct nand_sdr_timings *t)
448 {
449         struct pxa3xx_nand_info *info = host->info_data;
450         struct nand_chip *chip = &host->chip;
451         unsigned long nand_clk = mvebu_get_nand_clock();
452         uint32_t ndtr0, ndtr1;
453
454         u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
455         u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
456         u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
457         u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
458         u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
459         u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
460         u32 tR = chip->chip_delay * 1000;
461         u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
462         u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
463
464         /* fall back to a default value if tR = 0 */
465         if (!tR)
466                 tR = 20000;
467
468         ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
469                 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
470                 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
471                 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
472                 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
473                 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
474
475         ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
476                 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
477                 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
478
479         info->ndtr0cs0 = ndtr0;
480         info->ndtr1cs0 = ndtr1;
481         nand_writel(info, NDTR0CS0, ndtr0);
482         nand_writel(info, NDTR1CS0, ndtr1);
483 }
484
485 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
486 {
487         const struct nand_sdr_timings *timings;
488         struct nand_chip *chip = &host->chip;
489         struct pxa3xx_nand_info *info = host->info_data;
490         const struct pxa3xx_nand_flash *f = NULL;
491         struct mtd_info *mtd = nand_to_mtd(&host->chip);
492         int mode, id, ntypes, i;
493
494         mode = onfi_get_async_timing_mode(chip);
495         if (mode == ONFI_TIMING_MODE_UNKNOWN) {
496                 ntypes = ARRAY_SIZE(builtin_flash_types);
497
498                 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
499
500                 id = chip->read_byte(mtd);
501                 id |= chip->read_byte(mtd) << 0x8;
502
503                 for (i = 0; i < ntypes; i++) {
504                         f = &builtin_flash_types[i];
505
506                         if (f->chip_id == id)
507                                 break;
508                 }
509
510                 if (i == ntypes) {
511                         dev_err(&info->pdev->dev, "Error: timings not found\n");
512                         return -EINVAL;
513                 }
514
515                 pxa3xx_nand_set_timing(host, f->timing);
516
517                 if (f->flash_width == 16) {
518                         info->reg_ndcr |= NDCR_DWIDTH_M;
519                         chip->options |= NAND_BUSWIDTH_16;
520                 }
521
522                 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
523         } else {
524                 mode = fls(mode) - 1;
525                 if (mode < 0)
526                         mode = 0;
527
528                 timings = onfi_async_timing_mode_to_sdr_timings(mode);
529                 if (IS_ERR(timings))
530                         return PTR_ERR(timings);
531
532                 pxa3xx_nand_set_sdr_timing(host, timings);
533         }
534
535         return 0;
536 }
537
538 /**
539  * NOTE: ND_RUN must be set first and the command buffer
540  * written afterwards, otherwise it does not work.
541  * We enable all the interrupts at the same time and
542  * let pxa3xx_nand_irq() handle all the logic.
543  */
544 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
545 {
546         uint32_t ndcr;
547
548         ndcr = info->reg_ndcr;
549
550         if (info->use_ecc) {
551                 ndcr |= NDCR_ECC_EN;
552                 if (info->ecc_bch)
553                         nand_writel(info, NDECCCTRL, 0x1);
554         } else {
555                 ndcr &= ~NDCR_ECC_EN;
556                 if (info->ecc_bch)
557                         nand_writel(info, NDECCCTRL, 0x0);
558         }
559
560         ndcr &= ~NDCR_DMA_EN;
561
562         if (info->use_spare)
563                 ndcr |= NDCR_SPARE_EN;
564         else
565                 ndcr &= ~NDCR_SPARE_EN;
566
567         ndcr |= NDCR_ND_RUN;
568
569         /* clear status bits and run */
570         nand_writel(info, NDSR, NDSR_MASK);
571         nand_writel(info, NDCR, 0);
572         nand_writel(info, NDCR, ndcr);
573 }
574
575 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
576 {
577         uint32_t ndcr;
578
579         ndcr = nand_readl(info, NDCR);
580         nand_writel(info, NDCR, ndcr | int_mask);
581 }
582
583 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
584 {
585         if (info->ecc_bch && !info->force_raw) {
586                 u32 ts;
587
588                 /*
589                  * According to the datasheet, when reading from NDDB
590                  * with BCH enabled, after each 32-byte read, we
591                  * have to make sure that the NDSR.RDDREQ bit is set.
592                  *
593                  * Drain the FIFO eight 32-bit reads at a time, and skip
594                  * the polling on the last read.
595                  */
596                 while (len > 8) {
597                         readsl(info->mmio_base + NDDB, data, 8);
598
599                         ts = get_timer(0);
600                         while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
601                                 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
602                                         dev_err(&info->pdev->dev,
603                                                 "Timeout on RDDREQ while draining the FIFO\n");
604                                         return;
605                                 }
606                         }
607
608                         data += 32;
609                         len -= 8;
610                 }
611         }
612
613         readsl(info->mmio_base + NDDB, data, len);
614 }
615
616 static void handle_data_pio(struct pxa3xx_nand_info *info)
617 {
618         int data_len = info->step_chunk_size;
619
620         /*
621          * In raw mode, include the spare area and the ECC bytes that are not
622          * consumed by the controller in the data section. Do not reorganize
623          * here, do it in the ->read_page_raw() handler instead.
624          */
625         if (info->force_raw)
626                 data_len += info->step_spare_size + info->ecc_size;
627
628         switch (info->state) {
629         case STATE_PIO_WRITING:
630                 if (info->step_chunk_size)
631                         writesl(info->mmio_base + NDDB,
632                                 info->data_buff + info->data_buff_pos,
633                                 DIV_ROUND_UP(data_len, 4));
634
635                 if (info->step_spare_size)
636                         writesl(info->mmio_base + NDDB,
637                                 info->oob_buff + info->oob_buff_pos,
638                                 DIV_ROUND_UP(info->step_spare_size, 4));
639                 break;
640         case STATE_PIO_READING:
641                 if (info->step_chunk_size)
642                         drain_fifo(info,
643                                    info->data_buff + info->data_buff_pos,
644                                    DIV_ROUND_UP(data_len, 4));
645
646                 if (info->force_raw)
647                         break;
648
649                 if (info->step_spare_size)
650                         drain_fifo(info,
651                                    info->oob_buff + info->oob_buff_pos,
652                                    DIV_ROUND_UP(info->step_spare_size, 4));
653                 break;
654         default:
655                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
656                                 info->state);
657                 BUG();
658         }
659
660         /* Update buffer pointers for multi-page read/write */
661         info->data_buff_pos += data_len;
662         info->oob_buff_pos += info->step_spare_size;
663 }
664
665 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
666 {
667         handle_data_pio(info);
668
669         info->state = STATE_CMD_DONE;
670         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
671 }
672
673 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
674 {
675         unsigned int status, is_completed = 0, is_ready = 0;
676         unsigned int ready, cmd_done;
677         irqreturn_t ret = IRQ_HANDLED;
678
679         if (info->cs == 0) {
680                 ready           = NDSR_FLASH_RDY;
681                 cmd_done        = NDSR_CS0_CMDD;
682         } else {
683                 ready           = NDSR_RDY;
684                 cmd_done        = NDSR_CS1_CMDD;
685         }
686
687         /* TODO - find out why we need the delay during write operation. */
688         ndelay(1);
689
690         status = nand_readl(info, NDSR);
691
692         if (status & NDSR_UNCORERR)
693                 info->retcode = ERR_UNCORERR;
694         if (status & NDSR_CORERR) {
695                 info->retcode = ERR_CORERR;
696                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
697                     info->ecc_bch)
698                         info->ecc_err_cnt = NDSR_ERR_CNT(status);
699                 else
700                         info->ecc_err_cnt = 1;
701
702                 /*
703                  * Each chunk composing a page is corrected independently,
704                  * and we need to store the maximum number of corrected bitflips
705                  * to return it to the MTD layer in ecc.read_page().
706                  */
707                 info->max_bitflips = max_t(unsigned int,
708                                            info->max_bitflips,
709                                            info->ecc_err_cnt);
710         }
711         if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
712                 info->state = (status & NDSR_RDDREQ) ?
713                         STATE_PIO_READING : STATE_PIO_WRITING;
714                 /* Call the IRQ thread in U-Boot directly */
715                 pxa3xx_nand_irq_thread(info);
716                 return 0;
717         }
718         if (status & cmd_done) {
719                 info->state = STATE_CMD_DONE;
720                 is_completed = 1;
721         }
722         if (status & ready) {
723                 info->state = STATE_READY;
724                 is_ready = 1;
725         }
726
727         /*
728          * Clear all status bits before issuing the next command, which
729          * can and will alter the status bits and will raise a new
730          * interrupt of its own. This lets the controller exit the IRQ.
731          */
732         nand_writel(info, NDSR, status);
733
734         if (status & NDSR_WRCMDREQ) {
735                 status &= ~NDSR_WRCMDREQ;
736                 info->state = STATE_CMD_HANDLE;
737
738                 /*
739                  * Command buffer registers NDCB{0-2} (and optionally NDCB3)
740                  * must be loaded by writing either 12 or 16 bytes
741                  * directly to NDCB0, four bytes at a time.
742                  *
743                  * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
744                  * but each NDCBx register can be read.
745                  */
746                 nand_writel(info, NDCB0, info->ndcb0);
747                 nand_writel(info, NDCB0, info->ndcb1);
748                 nand_writel(info, NDCB0, info->ndcb2);
749
750                 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
751                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
752                         nand_writel(info, NDCB0, info->ndcb3);
753         }
754
755         if (is_completed)
756                 info->cmd_complete = 1;
757         if (is_ready)
758                 info->dev_ready = 1;
759
760         return ret;
761 }
762
763 static inline int is_buf_blank(uint8_t *buf, size_t len)
764 {
765         for (; len > 0; len--)
766                 if (*buf++ != 0xff)
767                         return 0;
768         return 1;
769 }
770
771 static void set_command_address(struct pxa3xx_nand_info *info,
772                 unsigned int page_size, uint16_t column, int page_addr)
773 {
774         /* small page addr setting */
775         if (page_size < info->chunk_size) {
776                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
777                                 | (column & 0xFF);
778
779                 info->ndcb2 = 0;
780         } else {
781                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
782                                 | (column & 0xFFFF);
783
784                 if (page_addr & 0xFF0000)
785                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
786                 else
787                         info->ndcb2 = 0;
788         }
789 }
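/*
 * Worked example (values hypothetical): for a 2048-byte page device,
 * set_command_address(info, 2048, 0, 0x1234) takes the large-page
 * branch above and packs ndcb1 = 0x12340000 and ndcb2 = 0x00.
 */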
790
791 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
792 {
793         struct pxa3xx_nand_host *host = info->host[info->cs];
794         struct mtd_info *mtd = nand_to_mtd(&host->chip);
795
796         /* reset the data and OOB column pointers used to handle data */
797         info->buf_start         = 0;
798         info->buf_count         = 0;
799         info->data_buff_pos     = 0;
800         info->oob_buff_pos      = 0;
801         info->step_chunk_size   = 0;
802         info->step_spare_size   = 0;
803         info->cur_chunk         = 0;
804         info->use_ecc           = 0;
805         info->use_spare         = 1;
806         info->retcode           = ERR_NONE;
807         info->ecc_err_cnt       = 0;
808         info->ndcb3             = 0;
809         info->need_wait         = 0;
810
811         switch (command) {
812         case NAND_CMD_READ0:
813         case NAND_CMD_READOOB:
814         case NAND_CMD_PAGEPROG:
815                 if (!info->force_raw)
816                         info->use_ecc = 1;
817                 break;
818         case NAND_CMD_PARAM:
819                 info->use_spare = 0;
820                 break;
821         default:
822                 info->ndcb1 = 0;
823                 info->ndcb2 = 0;
824                 break;
825         }
826
827         /*
828          * If we are about to issue a read command, or about to set
829          * the write address, then clean the data buffer.
830          */
831         if (command == NAND_CMD_READ0 ||
832             command == NAND_CMD_READOOB ||
833             command == NAND_CMD_SEQIN) {
834                 info->buf_count = mtd->writesize + mtd->oobsize;
835                 memset(info->data_buff, 0xFF, info->buf_count);
836         }
837 }
838
839 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
840                 int ext_cmd_type, uint16_t column, int page_addr)
841 {
842         int addr_cycle, exec_cmd;
843         struct pxa3xx_nand_host *host;
844         struct mtd_info *mtd;
845
846         host = info->host[info->cs];
847         mtd = nand_to_mtd(&host->chip);
848         addr_cycle = 0;
849         exec_cmd = 1;
850
851         if (info->cs != 0)
852                 info->ndcb0 = NDCB0_CSEL;
853         else
854                 info->ndcb0 = 0;
855
856         if (command == NAND_CMD_SEQIN)
857                 exec_cmd = 0;
858
859         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
860                                     + host->col_addr_cycles);
861
862         switch (command) {
863         case NAND_CMD_READOOB:
864         case NAND_CMD_READ0:
865                 info->buf_start = column;
866                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
867                                 | addr_cycle
868                                 | NAND_CMD_READ0;
869
870                 if (command == NAND_CMD_READOOB)
871                         info->buf_start += mtd->writesize;
872
873                 if (info->cur_chunk < info->nfullchunks) {
874                         info->step_chunk_size = info->chunk_size;
875                         info->step_spare_size = info->spare_size;
876                 } else {
877                         info->step_chunk_size = info->last_chunk_size;
878                         info->step_spare_size = info->last_spare_size;
879                 }
880
881                 /*
882                  * Multiple page read needs an 'extended command type' field,
883                  * which is either naked-read or last-read according to the
884                  * state.
885                  */
886                 if (info->force_raw) {
887                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
888                                        NDCB0_LEN_OVRD |
889                                        NDCB0_EXT_CMD_TYPE(ext_cmd_type);
890                         info->ndcb3 = info->step_chunk_size +
891                                       info->step_spare_size + info->ecc_size;
892                 } else if (mtd->writesize == info->chunk_size) {
893                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
894                 } else if (mtd->writesize > info->chunk_size) {
895                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
896                                         | NDCB0_LEN_OVRD
897                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
898                         info->ndcb3 = info->step_chunk_size +
899                                 info->step_spare_size;
900                 }
901
902                 set_command_address(info, mtd->writesize, column, page_addr);
903                 break;
904
905         case NAND_CMD_SEQIN:
906
907                 info->buf_start = column;
908                 set_command_address(info, mtd->writesize, 0, page_addr);
909
910                 /*
911                  * Multiple page programming needs to execute the initial
912                  * SEQIN command that sets the page address.
913                  */
914                 if (mtd->writesize > info->chunk_size) {
915                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
916                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
917                                 | addr_cycle
918                                 | command;
919                         exec_cmd = 1;
920                 }
921                 break;
922
923         case NAND_CMD_PAGEPROG:
924                 if (is_buf_blank(info->data_buff,
925                                  (mtd->writesize + mtd->oobsize))) {
926                         exec_cmd = 0;
927                         break;
928                 }
929
930                 if (info->cur_chunk < info->nfullchunks) {
931                         info->step_chunk_size = info->chunk_size;
932                         info->step_spare_size = info->spare_size;
933                 } else {
934                         info->step_chunk_size = info->last_chunk_size;
935                         info->step_spare_size = info->last_spare_size;
936                 }
937
938                 /* Second command setting for large pages */
939                 if (mtd->writesize > info->chunk_size) {
940                         /*
941                          * Multiple page write uses the 'extended command'
942                          * field. This can be used to issue a command dispatch
943                          * or a naked-write depending on the current stage.
944                          */
945                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
946                                         | NDCB0_LEN_OVRD
947                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
948                         info->ndcb3 = info->step_chunk_size +
949                                       info->step_spare_size;
950
951                         /*
952                          * This is the command dispatch that completes a chunked
953                          * page program operation.
954                          */
955                         if (info->cur_chunk == info->ntotalchunks) {
956                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
957                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
958                                         | command;
959                                 info->ndcb1 = 0;
960                                 info->ndcb2 = 0;
961                                 info->ndcb3 = 0;
962                         }
963                 } else {
964                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
965                                         | NDCB0_AUTO_RS
966                                         | NDCB0_ST_ROW_EN
967                                         | NDCB0_DBC
968                                         | (NAND_CMD_PAGEPROG << 8)
969                                         | NAND_CMD_SEQIN
970                                         | addr_cycle;
971                 }
972                 break;
973
974         case NAND_CMD_PARAM:
975                 info->buf_count = INIT_BUFFER_SIZE;
976                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
977                                 | NDCB0_ADDR_CYC(1)
978                                 | NDCB0_LEN_OVRD
979                                 | command;
980                 info->ndcb1 = (column & 0xFF);
981                 info->ndcb3 = INIT_BUFFER_SIZE;
982                 info->step_chunk_size = INIT_BUFFER_SIZE;
983                 break;
984
985         case NAND_CMD_READID:
986                 info->buf_count = READ_ID_BYTES;
987                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
988                                 | NDCB0_ADDR_CYC(1)
989                                 | command;
990                 info->ndcb1 = (column & 0xFF);
991
992                 info->step_chunk_size = 8;
993                 break;
994         case NAND_CMD_STATUS:
995                 info->buf_count = 1;
996                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
997                                 | NDCB0_ADDR_CYC(1)
998                                 | command;
999
1000                 info->step_chunk_size = 8;
1001                 break;
1002
1003         case NAND_CMD_ERASE1:
1004                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1005                                 | NDCB0_AUTO_RS
1006                                 | NDCB0_ADDR_CYC(3)
1007                                 | NDCB0_DBC
1008                                 | (NAND_CMD_ERASE2 << 8)
1009                                 | NAND_CMD_ERASE1;
1010                 info->ndcb1 = page_addr;
1011                 info->ndcb2 = 0;
1012
1013                 break;
1014         case NAND_CMD_RESET:
1015                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1016                                 | command;
1017
1018                 break;
1019
1020         case NAND_CMD_ERASE2:
1021                 exec_cmd = 0;
1022                 break;
1023
1024         default:
1025                 exec_cmd = 0;
1026                 dev_err(&info->pdev->dev, "non-supported command %x\n",
1027                         command);
1028                 break;
1029         }
1030
1031         return exec_cmd;
1032 }
1033
1034 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1035                          int column, int page_addr)
1036 {
1037         struct nand_chip *chip = mtd_to_nand(mtd);
1038         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1039         struct pxa3xx_nand_info *info = host->info_data;
1040         int exec_cmd;
1041
1042         /*
1043          * if this is an x16 device, then convert the input
1044          * "byte" address into a "word" address appropriate
1045          * for indexing a word-oriented device
1046          */
1047         if (info->reg_ndcr & NDCR_DWIDTH_M)
1048                 column /= 2;
1049
1050         /*
1051          * There may be different NAND chips hooked to
1052          * different chip selects, so check whether the
1053          * chip select has changed and, if so, reset the timing
1054          */
1055         if (info->cs != host->cs) {
1056                 info->cs = host->cs;
1057                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1058                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1059         }
1060
1061         prepare_start_command(info, command);
1062
1063         info->state = STATE_PREPARED;
1064         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1065
1066         if (exec_cmd) {
1067                 u32 ts;
1068
1069                 info->cmd_complete = 0;
1070                 info->dev_ready = 0;
1071                 info->need_wait = 1;
1072                 pxa3xx_nand_start(info);
1073
1074                 ts = get_timer(0);
1075                 while (1) {
1076                         u32 status;
1077
1078                         status = nand_readl(info, NDSR);
1079                         if (status)
1080                                 pxa3xx_nand_irq(info);
1081
1082                         if (info->cmd_complete)
1083                                 break;
1084
1085                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1086                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1087                                 return;
1088                         }
1089                 }
1090         }
1091         info->state = STATE_IDLE;
1092 }
1093
1094 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1095                                   const unsigned command,
1096                                   int column, int page_addr)
1097 {
1098         struct nand_chip *chip = mtd_to_nand(mtd);
1099         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1100         struct pxa3xx_nand_info *info = host->info_data;
1101         int exec_cmd, ext_cmd_type;
1102
1103         /*
1104          * if this is an x16 device, then convert the input
1105          * "byte" address into a "word" address appropriate
1106          * for indexing a word-oriented device
1107          */
1108         if (info->reg_ndcr & NDCR_DWIDTH_M)
1109                 column /= 2;
1110
1111         /*
1112          * There may be different NAND chips hooked to
1113          * different chip selects, so check whether the
1114          * chip select has changed and, if so, reset the timing
1115          */
1116         if (info->cs != host->cs) {
1117                 info->cs = host->cs;
1118                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1119                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1120         }
1121
1122         /* Select the extended command for the first command */
1123         switch (command) {
1124         case NAND_CMD_READ0:
1125         case NAND_CMD_READOOB:
1126                 ext_cmd_type = EXT_CMD_TYPE_MONO;
1127                 break;
1128         case NAND_CMD_SEQIN:
1129                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1130                 break;
1131         case NAND_CMD_PAGEPROG:
1132                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1133                 break;
1134         default:
1135                 ext_cmd_type = 0;
1136                 break;
1137         }
1138
1139         prepare_start_command(info, command);
1140
1141         /*
1142          * Prepare the "is ready" completion before starting a command
1143          * transaction sequence. If the command is not executed, the
1144          * completion is marked as done immediately, see below.
1145          *
1146          * We can do that inside the loop because the command variable
1147          * is invariant and thus so is exec_cmd.
1148          */
1149         info->need_wait = 1;
1150         info->dev_ready = 0;
1151
1152         do {
1153                 u32 ts;
1154
1155                 info->state = STATE_PREPARED;
1156                 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1157                                                column, page_addr);
1158                 if (!exec_cmd) {
1159                         info->need_wait = 0;
1160                         info->dev_ready = 1;
1161                         break;
1162                 }
1163
1164                 info->cmd_complete = 0;
1165                 pxa3xx_nand_start(info);
1166
1167                 ts = get_timer(0);
1168                 while (1) {
1169                         u32 status;
1170
1171                         status = nand_readl(info, NDSR);
1172                         if (status)
1173                                 pxa3xx_nand_irq(info);
1174
1175                         if (info->cmd_complete)
1176                                 break;
1177
1178                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1179                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1180                                 return;
1181                         }
1182                 }
1183
1184                 /* Only a few commands need several steps */
1185                 if (command != NAND_CMD_PAGEPROG &&
1186                     command != NAND_CMD_READ0    &&
1187                     command != NAND_CMD_READOOB)
1188                         break;
1189
1190                 info->cur_chunk++;
1191
1192                 /* Check if the sequence is complete */
1193                 if (info->cur_chunk == info->ntotalchunks &&
1194                     command != NAND_CMD_PAGEPROG)
1195                         break;
1196
1197                 /*
1198                  * After a split program command sequence has issued
1199                  * the command dispatch, the command sequence is complete.
1200                  */
1201                 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1202                     command == NAND_CMD_PAGEPROG &&
1203                     ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1204                         break;
1205
1206                 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1207                         /* Last read: issue a 'last naked read' */
1208                         if (info->cur_chunk == info->ntotalchunks - 1)
1209                                 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1210                         else
1211                                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1212
1213                 /*
1214                  * If a split program command has no more data to transfer,
1215                  * the command dispatch must be issued to complete it.
1216                  */
1217                 } else if (command == NAND_CMD_PAGEPROG &&
1218                            info->cur_chunk == info->ntotalchunks) {
1219                                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1220                 }
1221         } while (1);
1222
1223         info->state = STATE_IDLE;
1224 }
1225
1226 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1227                 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1228                 int page)
1229 {
1230         chip->write_buf(mtd, buf, mtd->writesize);
1231         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1232
1233         return 0;
1234 }
1235
1236 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1237                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1238                 int page)
1239 {
1240         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1241         struct pxa3xx_nand_info *info = host->info_data;
1242         int bf;
1243
1244         chip->read_buf(mtd, buf, mtd->writesize);
1245         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1246
1247         if (info->retcode == ERR_CORERR && info->use_ecc) {
1248                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1249
1250         } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1251                 /*
1252                  * Empty pages will trigger uncorrectable errors. Re-read the
1253                  * entire page in raw mode and check for bits not being "1".
1254                  * If there are more bitflips than the supported ECC strength,
1255                  * then this is an actual uncorrectable error.
1256                  */
1257                 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1258                 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1259                                                  chip->oob_poi, mtd->oobsize,
1260                                                  NULL, 0, chip->ecc.strength);
1261                 if (bf < 0) {
1262                         mtd->ecc_stats.failed++;
1263                 } else if (bf) {
1264                         mtd->ecc_stats.corrected += bf;
1265                         info->max_bitflips = max_t(unsigned int,
1266                                                    info->max_bitflips, bf);
1267                         info->retcode = ERR_CORERR;
1268                 } else {
1269                         info->retcode = ERR_NONE;
1270                 }
1271
1272         } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1273                 /* Raw read is not supported with Hamming ECC engine */
1274                 if (is_buf_blank(buf, mtd->writesize))
1275                         info->retcode = ERR_NONE;
1276                 else
1277                         mtd->ecc_stats.failed++;
1278         }
1279
1280         return info->max_bitflips;
1281 }
1282
1283 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1284                                      struct nand_chip *chip, uint8_t *buf,
1285                                      int oob_required, int page)
1286 {
1287         struct pxa3xx_nand_host *host = chip->priv;
1288         struct pxa3xx_nand_info *info = host->info_data;
1289         int chunk, ecc_off_buf;
1290
1291         if (!info->ecc_bch)
1292                 return -ENOTSUPP;
1293
1294         /*
1295          * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1296          * pxa3xx_nand_start(), which will actually disable the ECC engine.
1297          */
1298         info->force_raw = true;
1299         chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1300
1301         ecc_off_buf = (info->nfullchunks * info->spare_size) +
1302                       info->last_spare_size;
1303         for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1304                 chip->read_buf(mtd,
1305                                buf + (chunk * info->chunk_size),
1306                                info->chunk_size);
1307                 chip->read_buf(mtd,
1308                                chip->oob_poi +
1309                                (chunk * (info->spare_size)),
1310                                info->spare_size);
1311                 chip->read_buf(mtd,
1312                                chip->oob_poi + ecc_off_buf +
1313                                (chunk * (info->ecc_size)),
1314                                info->ecc_size - 2);
1315         }
1316
1317         if (info->ntotalchunks > info->nfullchunks) {
1318                 chip->read_buf(mtd,
1319                                buf + (info->nfullchunks * info->chunk_size),
1320                                info->last_chunk_size);
1321                 chip->read_buf(mtd,
1322                                chip->oob_poi +
1323                                (info->nfullchunks * (info->spare_size)),
1324                                info->last_spare_size);
1325                 chip->read_buf(mtd,
1326                                chip->oob_poi + ecc_off_buf +
1327                                (info->nfullchunks * (info->ecc_size)),
1328                                info->ecc_size - 2);
1329         }
1330
1331         info->force_raw = false;
1332
1333         return 0;
1334 }
1335
1336 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1337                                     struct nand_chip *chip, int page)
1338 {
1339         /* Invalidate page cache */
1340         chip->pagebuf = -1;
1341
1342         return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1343                                        page);
1344 }
1345
1346 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1347 {
1348         struct nand_chip *chip = mtd_to_nand(mtd);
1349         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1350         struct pxa3xx_nand_info *info = host->info_data;
1351         char retval = 0xFF;
1352
1353         if (info->buf_start < info->buf_count)
1354                 /* Has a new command just been sent? */
1355                 retval = info->data_buff[info->buf_start++];
1356
1357         return retval;
1358 }
1359
1360 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1361 {
1362         struct nand_chip *chip = mtd_to_nand(mtd);
1363         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1364         struct pxa3xx_nand_info *info = host->info_data;
1365         u16 retval = 0xFFFF;
1366
1367         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1368                 retval = *((u16 *)(info->data_buff+info->buf_start));
1369                 info->buf_start += 2;
1370         }
1371         return retval;
1372 }
1373
1374 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1375 {
1376         struct nand_chip *chip = mtd_to_nand(mtd);
1377         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1378         struct pxa3xx_nand_info *info = host->info_data;
1379         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1380
1381         memcpy(buf, info->data_buff + info->buf_start, real_len);
1382         info->buf_start += real_len;
1383 }
1384
1385 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1386                 const uint8_t *buf, int len)
1387 {
1388         struct nand_chip *chip = mtd_to_nand(mtd);
1389         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1390         struct pxa3xx_nand_info *info = host->info_data;
1391         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1392
1393         memcpy(info->data_buff + info->buf_start, buf, real_len);
1394         info->buf_start += real_len;
1395 }
1396
1397 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1398 {
1399         return;
1400 }
1401
1402 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1403 {
1404         struct nand_chip *chip = mtd_to_nand(mtd);
1405         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1406         struct pxa3xx_nand_info *info = host->info_data;
1407
1408         if (info->need_wait) {
1409                 u32 ts;
1410
1411                 info->need_wait = 0;
1412
1413                 ts = get_timer(0);
1414                 while (1) {
1415                         u32 status;
1416
1417                         status = nand_readl(info, NDSR);
1418                         if (status)
1419                                 pxa3xx_nand_irq(info);
1420
1421                         if (info->dev_ready)
1422                                 break;
1423
1424                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1425                                 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1426                                 return NAND_STATUS_FAIL;
1427                         }
1428                 }
1429         }
1430
1431         /* pxa3xx_nand_send_command has waited for command complete */
1432         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1433                 if (info->retcode == ERR_NONE)
1434                         return 0;
1435                 else
1436                         return NAND_STATUS_FAIL;
1437         }
1438
1439         return NAND_STATUS_READY;
1440 }
1441
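/*
 * Program a minimal NDCR configuration for device identification:
 * interrupts unmasked, spare area enabled, the number of ID bytes to
 * read set, and the bus arbiter enabled if the platform asks for it.
 */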
1442 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1443 {
1444         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1445
1446         /* Configure default flash values */
1447         info->reg_ndcr = 0x0; /* enable all interrupts */
1448         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1449         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1450         info->reg_ndcr |= NDCR_SPARE_EN;
1451
1452         return 0;
1453 }
1454
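/*
 * Once the chip geometry is known, fold the geometry-dependent bits
 * into NDCR: the row address start bit when two column address cycles
 * are used, the pages-per-block flag derived from the page shift, and
 * the 2 KiB page size flag.
 */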
1455 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1456 {
1457         struct pxa3xx_nand_host *host = info->host[info->cs];
1458         struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1459         struct nand_chip *chip = mtd_to_nand(mtd);
1460
1461         info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1462         info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1463         info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1464 }
1465
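/*
 * "keep config" mode: reuse the NDCR and timing registers left behind
 * by the previous boot stage instead of reprogramming them, masking
 * out the interrupt and arbiter control bits first.
 */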
1466 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1467 {
1468         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1469         uint32_t ndcr = nand_readl(info, NDCR);
1470
1471         /* Set an initial chunk size */
1472         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1473         info->reg_ndcr = ndcr &
1474                 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1475         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1476         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1477         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1478 }
1479
1480 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1481 {
1482         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1483         if (info->data_buff == NULL)
1484                 return -ENOMEM;
1485         return 0;
1486 }
1487
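/*
 * Check whether a chip actually answers on the current chip select:
 * program the default identification NDCR, apply the slowest common
 * timings (ONFI async mode 0) and issue a RESET.  A failed status
 * afterwards is taken to mean that no chip is present.
 */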
1488 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1489 {
1490         struct pxa3xx_nand_info *info = host->info_data;
1491         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1492         struct mtd_info *mtd;
1493         struct nand_chip *chip;
1494         const struct nand_sdr_timings *timings;
1495         int ret;
1496
1497         mtd = nand_to_mtd(&info->host[info->cs]->chip);
1498         chip = mtd_to_nand(mtd);
1499
1500         /* configure default flash values */
1501         info->reg_ndcr = 0x0; /* enable all interrupts */
1502         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1503         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1504         info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1505
1506         /* try the most conservative common timings first (ONFI async mode 0) */
1507         timings = onfi_async_timing_mode_to_sdr_timings(0);
1508         if (IS_ERR(timings))
1509                 return PTR_ERR(timings);
1510
1511         pxa3xx_nand_set_sdr_timing(host, timings);
1512
1513         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1514         ret = chip->waitfunc(mtd, chip);
1515         if (ret & NAND_STATUS_FAIL)
1516                 return -ENODEV;
1517
1518         return 0;
1519 }
1520
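/*
 * Map the requested ECC strength/step size and the page size onto one
 * of the controller's fixed ECC configurations.  Besides the
 * nand_ecc_ctrl parameters this fills in the chunk layout used by the
 * read/write paths: e.g. 8-bit ECC on a 4 KiB page becomes four full
 * chunks of 1024 data + 32 ECC bytes plus a final chunk carrying only
 * the 64 spare bytes.  The 4-bit and 8-bit requirements are both met
 * with 16-bit BCH over a larger chunk, hence ecc->strength = 16 there.
 */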
1521 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1522                         struct nand_ecc_ctrl *ecc,
1523                         int strength, int ecc_stepsize, int page_size)
1524 {
1525         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1526                 info->nfullchunks = 1;
1527                 info->ntotalchunks = 1;
1528                 info->chunk_size = 2048;
1529                 info->spare_size = 40;
1530                 info->ecc_size = 24;
1531                 ecc->mode = NAND_ECC_HW;
1532                 ecc->size = 512;
1533                 ecc->strength = 1;
1534
1535         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1536                 info->nfullchunks = 1;
1537                 info->ntotalchunks = 1;
1538                 info->chunk_size = 512;
1539                 info->spare_size = 8;
1540                 info->ecc_size = 8;
1541                 ecc->mode = NAND_ECC_HW;
1542                 ecc->size = 512;
1543                 ecc->strength = 1;
1544
1545         /*
1546          * Required ECC: 4-bit correction per 512 bytes
1547          * Select: 16-bit correction per 2048 bytes
1548          */
1549         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1550                 info->ecc_bch = 1;
1551                 info->nfullchunks = 1;
1552                 info->ntotalchunks = 1;
1553                 info->chunk_size = 2048;
1554                 info->spare_size = 32;
1555                 info->ecc_size = 32;
1556                 ecc->mode = NAND_ECC_HW;
1557                 ecc->size = info->chunk_size;
1558                 ecc->layout = &ecc_layout_2KB_bch4bit;
1559                 ecc->strength = 16;
1560
1561         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1562                 info->ecc_bch = 1;
1563                 info->nfullchunks = 2;
1564                 info->ntotalchunks = 2;
1565                 info->chunk_size = 2048;
1566                 info->spare_size = 32;
1567                 info->ecc_size = 32;
1568                 ecc->mode = NAND_ECC_HW;
1569                 ecc->size = info->chunk_size;
1570                 ecc->layout = &ecc_layout_4KB_bch4bit;
1571                 ecc->strength = 16;
1572
1573         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1574                 info->ecc_bch = 1;
1575                 info->nfullchunks = 4;
1576                 info->ntotalchunks = 4;
1577                 info->chunk_size = 2048;
1578                 info->spare_size = 32;
1579                 info->ecc_size = 32;
1580                 ecc->mode = NAND_ECC_HW;
1581                 ecc->size = info->chunk_size;
1582                 ecc->layout = &ecc_layout_8KB_bch4bit;
1583                 ecc->strength = 16;
1584
1585         /*
1586          * Required ECC: 8-bit correction per 512 bytes
1587          * Select: 16-bit correction per 1024 bytes
1588          */
1589         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1590                 info->ecc_bch = 1;
1591                 info->nfullchunks = 1;
1592                 info->ntotalchunks = 2;
1593                 info->chunk_size = 1024;
1594                 info->spare_size = 0;
1595                 info->last_chunk_size = 1024;
1596                 info->last_spare_size = 32;
1597                 info->ecc_size = 32;
1598                 ecc->mode = NAND_ECC_HW;
1599                 ecc->size = info->chunk_size;
1600                 ecc->layout = &ecc_layout_2KB_bch8bit;
1601                 ecc->strength = 16;
1602
1603         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1604                 info->ecc_bch = 1;
1605                 info->nfullchunks = 4;
1606                 info->ntotalchunks = 5;
1607                 info->chunk_size = 1024;
1608                 info->spare_size = 0;
1609                 info->last_chunk_size = 0;
1610                 info->last_spare_size = 64;
1611                 info->ecc_size = 32;
1612                 ecc->mode = NAND_ECC_HW;
1613                 ecc->size = info->chunk_size;
1614                 ecc->layout = &ecc_layout_4KB_bch8bit;
1615                 ecc->strength = 16;
1616
1617         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1618                 info->ecc_bch = 1;
1619                 info->nfullchunks = 8;
1620                 info->ntotalchunks = 9;
1621                 info->chunk_size = 1024;
1622                 info->spare_size = 0;
1623                 info->last_chunk_size = 0;
1624                 info->last_spare_size = 160;
1625                 info->ecc_size = 32;
1626                 ecc->mode = NAND_ECC_HW;
1627                 ecc->size = info->chunk_size;
1628                 ecc->layout = &ecc_layout_8KB_bch8bit;
1629                 ecc->strength = 16;
1630
1631         } else {
1632                 dev_err(&info->pdev->dev,
1633                         "ECC strength %d at page size %d is not supported\n",
1634                         strength, page_size);
1635                 return -ENODEV;
1636         }
1637
1638         return 0;
1639 }
1640
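/*
 * Driver-specific replacement for nand_scan(): configure the controller
 * (either keeping the setup inherited from the previous boot stage or
 * sensing the chip), identify the flash, program the timings, pick an
 * ECC scheme, switch to the extended command handler when the page is
 * larger than the FIFO (PXA3XX_NAND_VARIANT_ARMADA370 only), work out
 * the address cycles and reallocate the data buffer for the real
 * page + OOB size before calling nand_scan_tail().
 */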
1641 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1642 {
1643         struct nand_chip *chip = mtd_to_nand(mtd);
1644         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1645         struct pxa3xx_nand_info *info = host->info_data;
1646         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1647         int ret;
1648         uint16_t ecc_strength, ecc_step;
1649
1650         if (pdata->keep_config) {
1651                 pxa3xx_nand_detect_config(info);
1652         } else {
1653                 ret = pxa3xx_nand_config_ident(info);
1654                 if (ret)
1655                         return ret;
1656                 ret = pxa3xx_nand_sensing(host);
1657                 if (ret) {
1658                         dev_info(&info->pdev->dev,
1659                                  "There is no chip on cs %d!\n",
1660                                  info->cs);
1661                         return ret;
1662                 }
1663         }
1664
1665         /* Device detection must be done with ECC disabled */
1666         if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1667                 nand_writel(info, NDECCCTRL, 0x0);
1668
1669         if (nand_scan_ident(mtd, 1, NULL))
1670                 return -ENODEV;
1671
1672         if (!pdata->keep_config) {
1673                 ret = pxa3xx_nand_init_timings(host);
1674                 if (ret) {
1675                         dev_err(&info->pdev->dev,
1676                                 "Failed to set timings: %d\n", ret);
1677                         return ret;
1678                 }
1679         }
1680
1681 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1682         /*
1683          * We'll use a bad block table stored in-flash and don't
1684          * allow writing the bad block marker to the flash.
1685          */
1686         chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1687         chip->bbt_td = &bbt_main_descr;
1688         chip->bbt_md = &bbt_mirror_descr;
1689 #endif
1690
1691         if (pdata->ecc_strength && pdata->ecc_step_size) {
1692                 ecc_strength = pdata->ecc_strength;
1693                 ecc_step = pdata->ecc_step_size;
1694         } else {
1695                 ecc_strength = chip->ecc_strength_ds;
1696                 ecc_step = chip->ecc_step_ds;
1697         }
1698
1699         /* Set default ECC strength requirements on non-ONFI devices */
1700         if (ecc_strength < 1 && ecc_step < 1) {
1701                 ecc_strength = 1;
1702                 ecc_step = 512;
1703         }
1704
1705         ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1706                            ecc_step, mtd->writesize);
1707         if (ret)
1708                 return ret;
1709
1710         /*
1711          * If the page size is bigger than the FIFO size, check that we
1712          * are given the right variant and then switch to the extended
1713          * (aka split) command handling.
1714          */
1715         if (mtd->writesize > info->chunk_size) {
1716                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1717                         chip->cmdfunc = nand_cmdfunc_extended;
1718                 } else {
1719                         dev_err(&info->pdev->dev,
1720                                 "unsupported page size on this variant\n");
1721                         return -ENODEV;
1722                 }
1723         }
1724
1725         /* calculate addressing information */
1726         if (mtd->writesize >= 2048)
1727                 host->col_addr_cycles = 2;
1728         else
1729                 host->col_addr_cycles = 1;
1730
1731         /* release the initial buffer */
1732         kfree(info->data_buff);
1733
1734         /* allocate the real data + oob buffer */
1735         info->buf_size = mtd->writesize + mtd->oobsize;
1736         ret = pxa3xx_nand_init_buff(info);
1737         if (ret)
1738                 return ret;
1739         info->oob_buff = info->data_buff + mtd->writesize;
1740
1741         if ((mtd->size >> chip->page_shift) > 65536)
1742                 host->row_addr_cycles = 3;
1743         else
1744                 host->row_addr_cycles = 2;
1745
1746         if (!pdata->keep_config)
1747                 pxa3xx_nand_config_tail(info);
1748
1749         return nand_scan_tail(mtd);
1750 }
1751
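/*
 * Set up the per chip-select host structures, which live in the memory
 * allocated directly behind *info, wire up the driver's nand_chip
 * hooks, allocate the initial identification buffer and mask all
 * interrupts.
 */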
1752 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1753 {
1754         struct pxa3xx_nand_platform_data *pdata;
1755         struct pxa3xx_nand_host *host;
1756         struct nand_chip *chip = NULL;
1757         struct mtd_info *mtd;
1758         int ret, cs;
1759
1760         pdata = info->pdata;
1761         if (pdata->num_cs <= 0)
1762                 return -ENODEV;
1763
1764         info->variant = pxa3xx_nand_get_variant();
1765         for (cs = 0; cs < pdata->num_cs; cs++) {
1766                 chip = (struct nand_chip *)
1767                         ((u8 *)&info[1] + sizeof(*host) * cs);
1768                 mtd = nand_to_mtd(chip);
1769                 host = (struct pxa3xx_nand_host *)chip;
1770                 info->host[cs] = host;
1771                 host->cs = cs;
1772                 host->info_data = info;
1773                 mtd->owner = THIS_MODULE;
1774
1775                 nand_set_controller_data(chip, host);
1776                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1777                 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1778                 chip->ecc.read_oob_raw  = pxa3xx_nand_read_oob_raw;
1779                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1780                 chip->controller        = &info->controller;
1781                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1782                 chip->select_chip       = pxa3xx_nand_select_chip;
1783                 chip->read_word         = pxa3xx_nand_read_word;
1784                 chip->read_byte         = pxa3xx_nand_read_byte;
1785                 chip->read_buf          = pxa3xx_nand_read_buf;
1786                 chip->write_buf         = pxa3xx_nand_write_buf;
1787                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1788                 chip->cmdfunc           = nand_cmdfunc;
1789         }
1790
1791         /* Allocate a buffer to allow flash detection */
1792         info->buf_size = INIT_BUFFER_SIZE;
1793         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1794         if (info->data_buff == NULL) {
1795                 ret = -ENOMEM;
1796                 goto fail_disable_clk;
1797         }
1798
1799         /* initialize all interrupts to be disabled */
1800         disable_int(info, NDSR_MASK);
1801
1802         return 0;
1803
1805 fail_disable_clk:
1806         return ret;
1807 }
1808
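/*
 * Build the platform data from the device tree.  The first enabled node
 * compatible with "marvell,mvebu-pxa3xx-nand" is used; only a single
 * controller and a single chip select are supported.  A node is
 * expected to look roughly like this (illustrative values only):
 *
 *	nand-controller@d0000 {
 *		compatible = "marvell,mvebu-pxa3xx-nand";
 *		reg = <0xd0000 0x54>;
 *		num-cs = <1>;
 *		nand-keep-config;
 *		nand-ecc-strength = <4>;
 *		nand-ecc-step-size = <512>;
 *	};
 */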
1809 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1810 {
1811         struct pxa3xx_nand_platform_data *pdata;
1812         const void *blob = gd->fdt_blob;
1813         int node = -1;
1814
1815         pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1816         if (!pdata)
1817                 return -ENOMEM;
1818
1819         /* Get address decoding nodes from the FDT blob */
1820         do {
1821                 node = fdt_node_offset_by_compatible(blob, node,
1822                                                      "marvell,mvebu-pxa3xx-nand");
1823                 if (node < 0)
1824                         break;
1825
1826                 /* Bypass disabled nodes */
1827                 if (!fdtdec_get_is_enabled(blob, node))
1828                         continue;
1829
1830                 /* Get the first enabled NAND controller base address */
1831                 info->mmio_base =
1832                         (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1833                                         blob, node, "reg", 0, NULL, true);
1834
1835                 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1836                 if (pdata->num_cs != 1) {
1837                         pr_err("pxa3xx driver supports single CS only\n");
1838                         break;
1839                 }
1840
1841                 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1842                         pdata->enable_arbiter = 1;
1843
1844                 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1845                         pdata->keep_config = 1;
1846
1847                 /*
1848                  * ECC parameters.
1849                  * If these are not set, they will be selected according
1850                  * to the detected flash type.
1851                  */
1852                 /* ECC strength */
1853                 pdata->ecc_strength = fdtdec_get_int(blob, node,
1854                                                      "nand-ecc-strength", 0);
1855
1856                 /* ECC step size */
1857                 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1858                                                       "nand-ecc-step-size", 0);
1859
1860                 info->pdata = pdata;
1861
1862                 /* Currently support only a single NAND controller */
1863                 return 0;
1864
1865         } while (node >= 0);
1866
1867         return -EINVAL;
1868 }
1869
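/*
 * Probe the controller: parse the device tree, allocate the driver
 * state, then scan and register an MTD device for every chip select.
 * The probe succeeds as long as at least one chip select comes up.
 */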
1870 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1871 {
1872         struct pxa3xx_nand_platform_data *pdata;
1873         int ret, cs, probe_success;
1874
1875         ret = pxa3xx_nand_probe_dt(info);
1876         if (ret)
1877                 return ret;
1878
1879         pdata = info->pdata;
1880
1881         ret = alloc_nand_resource(info);
1882         if (ret) {
1883                 dev_err(&info->pdev->dev, "alloc nand resource failed\n");
1884                 return ret;
1885         }
1886
1887         probe_success = 0;
1888         for (cs = 0; cs < pdata->num_cs; cs++) {
1889                 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1890
1891                 /*
1892                  * The mtd name matches the one used in 'mtdparts' kernel
1893                  * parameter. This name cannot be changed or otherwise
1894                  * user's mtd partitions configuration would get broken.
1895                  */
1896                 mtd->name = "pxa3xx_nand-0";
1897                 info->cs = cs;
1898                 ret = pxa3xx_nand_scan(mtd);
1899                 if (ret) {
1900                         dev_info(&info->pdev->dev, "failed to scan nand at cs %d\n",
1901                                  cs);
1902                         continue;
1903                 }
1904
1905                 if (nand_register(cs, mtd))
1906                         continue;
1907
1908                 probe_success = 1;
1909         }
1910
1911         if (!probe_success)
1912                 return -ENODEV;
1913
1914         return 0;
1915 }
1916
1917 /*
1918  * Main initialization routine
1919  */
1920 void board_nand_init(void)
1921 {
1922         struct pxa3xx_nand_info *info;
1923         struct pxa3xx_nand_host *host;
1924         int ret;
1925
1926         info = kzalloc(sizeof(*info) +
1927                        sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1928                        GFP_KERNEL);
1929         if (!info)
1930                 return;
1931
1932         ret = pxa3xx_nand_probe(info);
1933         if (ret)
1934                 kfree(info);
1935 }