1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/mtd/nand/raw/pxa3xx_nand.c
4  *
5  * Copyright © 2005 Intel Corporation
6  * Copyright © 2006 Marvell International Ltd.
7  */
8
9 #include <common.h>
10 #include <malloc.h>
11 #include <fdtdec.h>
12 #include <nand.h>
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/bug.h>
16 #include <linux/err.h>
17 #include <linux/errno.h>
18 #include <asm/io.h>
19 #include <asm/arch/cpu.h>
20 #include <linux/mtd/mtd.h>
21 #include <linux/mtd/rawnand.h>
22 #include <linux/types.h>
23
24 #include "pxa3xx_nand.h"
25
26 DECLARE_GLOBAL_DATA_PTR;
27
28 #define TIMEOUT_DRAIN_FIFO      5       /* in ms */
29 #define CHIP_DELAY_TIMEOUT      200
30 #define NAND_STOP_DELAY         40
31
32 /*
33  * Define a buffer size for the initial command that detects the flash device:
34  * STATUS, READID and PARAM.
35  * ONFI param page is 256 bytes, and there are three redundant copies
36  * to be read. JEDEC param page is 512 bytes, and there are also three
37  * redundant copies to be read.
38  * Hence this buffer should be at least 512 x 3. Let's pick 2048.
39  */
40 #define INIT_BUFFER_SIZE        2048
41
42 /* registers and bit definitions */
43 #define NDCR            (0x00) /* Control register */
44 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
45 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
46 #define NDSR            (0x14) /* Status Register */
47 #define NDPCR           (0x18) /* Page Count Register */
48 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
49 #define NDBDR1          (0x20) /* Bad Block Register 1 */
50 #define NDECCCTRL       (0x28) /* ECC control */
51 #define NDDB            (0x40) /* Data Buffer */
52 #define NDCB0           (0x48) /* Command Buffer0 */
53 #define NDCB1           (0x4C) /* Command Buffer1 */
54 #define NDCB2           (0x50) /* Command Buffer2 */
55
56 #define NDCR_SPARE_EN           (0x1 << 31)
57 #define NDCR_ECC_EN             (0x1 << 30)
58 #define NDCR_DMA_EN             (0x1 << 29)
59 #define NDCR_ND_RUN             (0x1 << 28)
60 #define NDCR_DWIDTH_C           (0x1 << 27)
61 #define NDCR_DWIDTH_M           (0x1 << 26)
62 #define NDCR_PAGE_SZ            (0x1 << 24)
63 #define NDCR_NCSX               (0x1 << 23)
64 #define NDCR_ND_MODE            (0x3 << 21)
65 #define NDCR_NAND_MODE          (0x0)
66 #define NDCR_CLR_PG_CNT         (0x1 << 20)
67 #define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
68 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
69 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
70
71 #define NDCR_RA_START           (0x1 << 15)
72 #define NDCR_PG_PER_BLK         (0x1 << 14)
73 #define NDCR_ND_ARB_EN          (0x1 << 12)
74 #define NDCR_INT_MASK           (0xFFF)
75
76 #define NDSR_MASK               (0xfff)
77 #define NDSR_ERR_CNT_OFF        (16)
78 #define NDSR_ERR_CNT_MASK       (0x1f)
79 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
80 #define NDSR_RDY                (0x1 << 12)
81 #define NDSR_FLASH_RDY          (0x1 << 11)
82 #define NDSR_CS0_PAGED          (0x1 << 10)
83 #define NDSR_CS1_PAGED          (0x1 << 9)
84 #define NDSR_CS0_CMDD           (0x1 << 8)
85 #define NDSR_CS1_CMDD           (0x1 << 7)
86 #define NDSR_CS0_BBD            (0x1 << 6)
87 #define NDSR_CS1_BBD            (0x1 << 5)
88 #define NDSR_UNCORERR           (0x1 << 4)
89 #define NDSR_CORERR             (0x1 << 3)
90 #define NDSR_WRDREQ             (0x1 << 2)
91 #define NDSR_RDDREQ             (0x1 << 1)
92 #define NDSR_WRCMDREQ           (0x1)
93
94 #define NDCB0_LEN_OVRD          (0x1 << 28)
95 #define NDCB0_ST_ROW_EN         (0x1 << 26)
96 #define NDCB0_AUTO_RS           (0x1 << 25)
97 #define NDCB0_CSEL              (0x1 << 24)
98 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
99 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
100 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
101 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
102 #define NDCB0_NC                (0x1 << 20)
103 #define NDCB0_DBC               (0x1 << 19)
104 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
105 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
106 #define NDCB0_CMD2_MASK         (0xff << 8)
107 #define NDCB0_CMD1_MASK         (0xff)
108 #define NDCB0_ADDR_CYC_SHIFT    (16)
109
110 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
111 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
112 #define EXT_CMD_TYPE_READ       4 /* Read */
113 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
114 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
115 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
116 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
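
/*
 * Note (summary of how the code below uses these values): in the chunked
 * multi-step read/write sequences, middle chunks use EXT_CMD_TYPE_NAKED_RW,
 * the final chunk of a read uses EXT_CMD_TYPE_LAST_RW, and a chunked
 * program is terminated by an EXT_CMD_TYPE_DISPATCH command (see
 * nand_cmdfunc_extended()).
 */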
117
118 /*
119  * This should be large enough to read 'ONFI' and 'JEDEC'.
120  * Let's use 7 bytes, which is the maximum ID count supported
121  * by the controller (see NDCR_RD_ID_CNT_MASK).
122  */
123 #define READ_ID_BYTES           7
124
125 /* macros for registers read/write */
126 #define nand_writel(info, off, val)     \
127         writel((val), (info)->mmio_base + (off))
128
129 #define nand_readl(info, off)           \
130         readl((info)->mmio_base + (off))
131
132 /* error code and state */
133 enum {
134         ERR_NONE        = 0,
135         ERR_DMABUSERR   = -1,
136         ERR_SENDCMD     = -2,
137         ERR_UNCORERR    = -3,
138         ERR_BBERR       = -4,
139         ERR_CORERR      = -5,
140 };
141
142 enum {
143         STATE_IDLE = 0,
144         STATE_PREPARED,
145         STATE_CMD_HANDLE,
146         STATE_DMA_READING,
147         STATE_DMA_WRITING,
148         STATE_DMA_DONE,
149         STATE_PIO_READING,
150         STATE_PIO_WRITING,
151         STATE_CMD_DONE,
152         STATE_READY,
153 };
154
155 enum pxa3xx_nand_variant {
156         PXA3XX_NAND_VARIANT_PXA,
157         PXA3XX_NAND_VARIANT_ARMADA370,
158 };
159
160 struct pxa3xx_nand_host {
161         struct nand_chip        chip;
162         void                    *info_data;
163
164         /* ECC usage flag and chip select of the attached chip */
165         int                     use_ecc;
166         int                     cs;
167
168         /* calculated from pxa3xx_nand_flash data */
169         unsigned int            col_addr_cycles;
170         unsigned int            row_addr_cycles;
171 };
172
173 struct pxa3xx_nand_info {
174         struct nand_hw_control  controller;
175         struct pxa3xx_nand_platform_data *pdata;
176
177         struct clk              *clk;
178         void __iomem            *mmio_base;
179         unsigned long           mmio_phys;
180         int                     cmd_complete, dev_ready;
181
182         unsigned int            buf_start;
183         unsigned int            buf_count;
184         unsigned int            buf_size;
185         unsigned int            data_buff_pos;
186         unsigned int            oob_buff_pos;
187
188         unsigned char           *data_buff;
189         unsigned char           *oob_buff;
190
191         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
192         unsigned int            state;
193
194         /*
195          * This driver supports NFCv1 (as found in PXA SoC)
196          * and NFCv2 (as found in Armada 370/XP SoC).
197          */
198         enum pxa3xx_nand_variant variant;
199
200         int                     cs;
201         int                     use_ecc;        /* use HW ECC ? */
202         int                     force_raw;      /* prevent use_ecc to be set */
203         int                     ecc_bch;        /* using BCH ECC? */
204         int                     use_spare;      /* use spare ? */
205         int                     need_wait;
206
207         /* Amount of real data per full chunk */
208         unsigned int            chunk_size;
209
210         /* Amount of spare data per full chunk */
211         unsigned int            spare_size;
212
213         /* Number of full chunks (i.e chunk_size + spare_size) */
214         unsigned int            nfullchunks;
215
216         /*
217          * Total number of chunks. If equal to nfullchunks, then there
218          * are only full chunks. Otherwise, there is one last chunk of
219          * size (last_chunk_size + last_spare_size)
220          */
221         unsigned int            ntotalchunks;
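        /*
         * Illustrative example (not tied to a specific chip): a 4 KiB page
         * split into 2048-byte chunks with 32 spare bytes each gives
         * chunk_size = 2048, spare_size = 32, nfullchunks = 2 and
         * ntotalchunks = 2; a layout with a smaller trailing chunk would
         * instead have ntotalchunks = nfullchunks + 1.
         */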
222
223         /* Amount of real data in the last chunk */
224         unsigned int            last_chunk_size;
225
226         /* Amount of spare data in the last chunk */
227         unsigned int            last_spare_size;
228
229         unsigned int            ecc_size;
230         unsigned int            ecc_err_cnt;
231         unsigned int            max_bitflips;
232         int                     retcode;
233
234         /*
235          * Variables only valid during command
236          * execution. step_chunk_size and step_spare_size is the
237          * amount of real data and spare data in the current
238          * chunk. cur_chunk is the current chunk being
239          * read/programmed.
240          */
241         unsigned int            step_chunk_size;
242         unsigned int            step_spare_size;
243         unsigned int            cur_chunk;
244
245         /* cached register value */
246         uint32_t                reg_ndcr;
247         uint32_t                ndtr0cs0;
248         uint32_t                ndtr1cs0;
249
250         /* generated NDCBx register values */
251         uint32_t                ndcb0;
252         uint32_t                ndcb1;
253         uint32_t                ndcb2;
254         uint32_t                ndcb3;
255 };
256
257 static struct pxa3xx_nand_timing timing[] = {
258         /*
259          * tCH  Enable signal hold time
260          * tCS  Enable signal setup time
261          * tWH  ND_nWE high duration
262          * tWP  ND_nWE pulse time
263          * tRH  ND_nRE high duration
264          * tRP  ND_nRE pulse width
265          * tR   ND_nWE high to ND_nRE low for read
266          * tWHR ND_nWE high to ND_nRE low for status read
267          * tAR  ND_ALE low to ND_nRE low delay
268          */
269         /*ch  cs  wh  wp   rh  rp   r      whr  ar */
270         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
271         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
272         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
273         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
274         {  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
275 };
276
277 static struct pxa3xx_nand_flash builtin_flash_types[] = {
278         /*
279          * chip_id
280          * flash_width  Width of Flash memory (DWIDTH_M)
281          * dfc_width    Width of flash controller (DWIDTH_C)
282          * *timing
283          * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
284          */
285         { 0x46ec, 16, 16, &timing[1] },
286         { 0xdaec,  8,  8, &timing[1] },
287         { 0xd7ec,  8,  8, &timing[1] },
288         { 0xa12c,  8,  8, &timing[2] },
289         { 0xb12c, 16, 16, &timing[2] },
290         { 0xdc2c,  8,  8, &timing[2] },
291         { 0xcc2c, 16, 16, &timing[2] },
292         { 0xba20, 16, 16, &timing[3] },
293         { 0xda98,  8,  8, &timing[4] },
294 };
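
/*
 * Note: the chip_id values above combine the JEDEC manufacturer ID in the
 * low byte with the device ID in the high byte, matching how
 * pxa3xx_nand_init_timings() assembles the ID from the two READID bytes
 * (e.g. 0xdaec is device 0xda from manufacturer 0xec, Samsung).
 */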
295
296 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
297 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
298 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
299
300 static struct nand_bbt_descr bbt_main_descr = {
301         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
302                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
303         .offs = 8,
304         .len = 6,
305         .veroffs = 14,
306         .maxblocks = 8,         /* Last 8 blocks in each chip */
307         .pattern = bbt_pattern
308 };
309
310 static struct nand_bbt_descr bbt_mirror_descr = {
311         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
312                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
313         .offs = 8,
314         .len = 6,
315         .veroffs = 14,
316         .maxblocks = 8,         /* Last 8 blocks in each chip */
317         .pattern = bbt_mirror_pattern
318 };
319 #endif
320
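/*
 * In the ECC layouts below, eccpos[] lists the OOB byte offsets that hold
 * the controller's ECC bytes, while oobfree[] lists {offset, length} pairs
 * of OOB bytes left available to the upper layers (bad block markers,
 * file system clean markers, ...).
 */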
321 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
322         .eccbytes = 32,
323         .eccpos = {
324                 32, 33, 34, 35, 36, 37, 38, 39,
325                 40, 41, 42, 43, 44, 45, 46, 47,
326                 48, 49, 50, 51, 52, 53, 54, 55,
327                 56, 57, 58, 59, 60, 61, 62, 63},
328         .oobfree = { {2, 30} }
329 };
330
331 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
332         .eccbytes = 64,
333         .eccpos = {
334                 32, 33, 34, 35, 36, 37, 38, 39,
335                 40, 41, 42, 43, 44, 45, 46, 47,
336                 48, 49, 50, 51, 52, 53, 54, 55,
337                 56, 57, 58, 59, 60, 61, 62, 63,
338                 64, 65, 66, 67, 68, 69, 70, 71,
339                 72, 73, 74, 75, 76, 77, 78, 79,
340                 80, 81, 82, 83, 84, 85, 86, 87,
341                 88, 89, 90, 91, 92, 93, 94, 95},
342         .oobfree = { {1, 4}, {6, 26} }
343 };
344
345 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
346         .eccbytes = 64,
347         .eccpos = {
348                 32,  33,  34,  35,  36,  37,  38,  39,
349                 40,  41,  42,  43,  44,  45,  46,  47,
350                 48,  49,  50,  51,  52,  53,  54,  55,
351                 56,  57,  58,  59,  60,  61,  62,  63,
352                 96,  97,  98,  99,  100, 101, 102, 103,
353                 104, 105, 106, 107, 108, 109, 110, 111,
354                 112, 113, 114, 115, 116, 117, 118, 119,
355                 120, 121, 122, 123, 124, 125, 126, 127},
356         /* Bootrom looks in bytes 0 & 5 for bad blocks */
357         .oobfree = { {6, 26}, { 64, 32} }
358 };
359
360 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
361         .eccbytes = 128,
362         .eccpos = {
363                 32,  33,  34,  35,  36,  37,  38,  39,
364                 40,  41,  42,  43,  44,  45,  46,  47,
365                 48,  49,  50,  51,  52,  53,  54,  55,
366                 56,  57,  58,  59,  60,  61,  62,  63,
367
368                 96,  97,  98,  99,  100, 101, 102, 103,
369                 104, 105, 106, 107, 108, 109, 110, 111,
370                 112, 113, 114, 115, 116, 117, 118, 119,
371                 120, 121, 122, 123, 124, 125, 126, 127,
372
373                 160, 161, 162, 163, 164, 165, 166, 167,
374                 168, 169, 170, 171, 172, 173, 174, 175,
375                 176, 177, 178, 179, 180, 181, 182, 183,
376                 184, 185, 186, 187, 188, 189, 190, 191,
377
378                 224, 225, 226, 227, 228, 229, 230, 231,
379                 232, 233, 234, 235, 236, 237, 238, 239,
380                 240, 241, 242, 243, 244, 245, 246, 247,
381                 248, 249, 250, 251, 252, 253, 254, 255},
382
383         /* Bootrom looks in bytes 0 & 5 for bad blocks */
384         .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
385 };
386
387 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
388         .eccbytes = 128,
389         .eccpos = {
390                 32,  33,  34,  35,  36,  37,  38,  39,
391                 40,  41,  42,  43,  44,  45,  46,  47,
392                 48,  49,  50,  51,  52,  53,  54,  55,
393                 56,  57,  58,  59,  60,  61,  62,  63},
394         .oobfree = { }
395 };
396
397 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
398         .eccbytes = 256,
399         .eccpos = {},
400         /* HW ECC handles all ECC data and all spare area is free for OOB */
401         .oobfree = {{0, 160} }
402 };
403
404 #define NDTR0_tCH(c)    (min((c), 7) << 19)
405 #define NDTR0_tCS(c)    (min((c), 7) << 16)
406 #define NDTR0_tWH(c)    (min((c), 7) << 11)
407 #define NDTR0_tWP(c)    (min((c), 7) << 8)
408 #define NDTR0_tRH(c)    (min((c), 7) << 3)
409 #define NDTR0_tRP(c)    (min((c), 7) << 0)
410
411 #define NDTR1_tR(c)     (min((c), 65535) << 16)
412 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
413 #define NDTR1_tAR(c)    (min((c), 15) << 0)
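
/*
 * Each cycle count above is clamped to the width of its register field:
 * the NDTR0 fields are 3 bits wide (max 7), NDTR1 tR is 16 bits wide and
 * tWHR/tAR are 4 bits wide each.
 */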
414
415 /* convert nano-seconds to nand flash controller clock cycles */
416 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
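/*
 * For example, with an illustrative 250 MHz controller clock,
 * ns2cycle(25, 250000000) = 25 * 250 / 1000 = 6 cycles, the division
 * truncating towards zero.
 */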
417
418 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
419 {
420         /* We only support the Armada 370/XP/38x for now */
421         return PXA3XX_NAND_VARIANT_ARMADA370;
422 }
423
424 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
425                                    const struct pxa3xx_nand_timing *t)
426 {
427         struct pxa3xx_nand_info *info = host->info_data;
428         unsigned long nand_clk = mvebu_get_nand_clock();
429         uint32_t ndtr0, ndtr1;
430
431         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
432                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
433                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
434                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
435                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
436                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
437
438         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
439                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
440                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
441
442         info->ndtr0cs0 = ndtr0;
443         info->ndtr1cs0 = ndtr1;
444         nand_writel(info, NDTR0CS0, ndtr0);
445         nand_writel(info, NDTR1CS0, ndtr1);
446 }
447
448 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
449                                        const struct nand_sdr_timings *t)
450 {
451         struct pxa3xx_nand_info *info = host->info_data;
452         struct nand_chip *chip = &host->chip;
453         unsigned long nand_clk = mvebu_get_nand_clock();
454         uint32_t ndtr0, ndtr1;
455
456         u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
457         u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
458         u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
459         u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
460         u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
461         u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
462         u32 tR = chip->chip_delay * 1000;
463         u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
464         u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
465
466         /* fallback to a default value if tR = 0 */
467         if (!tR)
468                 tR = 20000;
469
470         ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
471                 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
472                 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
473                 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
474                 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
475                 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
476
477         ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
478                 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
479                 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
480
481         info->ndtr0cs0 = ndtr0;
482         info->ndtr1cs0 = ndtr1;
483         nand_writel(info, NDTR0CS0, ndtr0);
484         nand_writel(info, NDTR1CS0, ndtr1);
485 }
486
487 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
488 {
489         const struct nand_sdr_timings *timings;
490         struct nand_chip *chip = &host->chip;
491         struct pxa3xx_nand_info *info = host->info_data;
492         const struct pxa3xx_nand_flash *f = NULL;
493         struct mtd_info *mtd = nand_to_mtd(&host->chip);
494         int mode, id, ntypes, i;
495
496         mode = onfi_get_async_timing_mode(chip);
497         if (mode == ONFI_TIMING_MODE_UNKNOWN) {
498                 ntypes = ARRAY_SIZE(builtin_flash_types);
499
500                 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
501
502                 id = chip->read_byte(mtd);
503                 id |= chip->read_byte(mtd) << 0x8;
504
505                 for (i = 0; i < ntypes; i++) {
506                         f = &builtin_flash_types[i];
507
508                         if (f->chip_id == id)
509                                 break;
510                 }
511
512                 if (i == ntypes) {
513                         dev_err(&info->pdev->dev, "Error: timings not found\n");
514                         return -EINVAL;
515                 }
516
517                 pxa3xx_nand_set_timing(host, f->timing);
518
519                 if (f->flash_width == 16) {
520                         info->reg_ndcr |= NDCR_DWIDTH_M;
521                         chip->options |= NAND_BUSWIDTH_16;
522                 }
523
524                 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
525         } else {
526                 mode = fls(mode) - 1;
527                 if (mode < 0)
528                         mode = 0;
529
530                 timings = onfi_async_timing_mode_to_sdr_timings(mode);
531                 if (IS_ERR(timings))
532                         return PTR_ERR(timings);
533
534                 pxa3xx_nand_set_sdr_timing(host, timings);
535         }
536
537         return 0;
538 }
539
540 /**
541  * NOTE: ND_RUN must be set first, and only then may the command
542  * buffer be written; otherwise the controller does not work.
543  * We enable all the interrupts at the same time, and
544  * let pxa3xx_nand_irq() handle all the logic.
545  */
546 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
547 {
548         uint32_t ndcr;
549
550         ndcr = info->reg_ndcr;
551
552         if (info->use_ecc) {
553                 ndcr |= NDCR_ECC_EN;
554                 if (info->ecc_bch)
555                         nand_writel(info, NDECCCTRL, 0x1);
556         } else {
557                 ndcr &= ~NDCR_ECC_EN;
558                 if (info->ecc_bch)
559                         nand_writel(info, NDECCCTRL, 0x0);
560         }
561
562         ndcr &= ~NDCR_DMA_EN;
563
564         if (info->use_spare)
565                 ndcr |= NDCR_SPARE_EN;
566         else
567                 ndcr &= ~NDCR_SPARE_EN;
568
569         ndcr |= NDCR_ND_RUN;
570
571         /* clear status bits and run */
572         nand_writel(info, NDSR, NDSR_MASK);
573         nand_writel(info, NDCR, 0);
574         nand_writel(info, NDCR, ndcr);
575 }
576
577 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
578 {
579         uint32_t ndcr;
580
581         ndcr = nand_readl(info, NDCR);
582         nand_writel(info, NDCR, ndcr | int_mask);
583 }
584
585 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
586 {
587         if (info->ecc_bch && !info->force_raw) {
588                 u32 ts;
589
590                 /*
591                  * According to the datasheet, when reading from NDDB
592                  * with BCH enabled, after every 32 bytes read we
593                  * have to make sure that the NDSR.RDDREQ bit is set.
594                  *
595                  * Drain the FIFO eight 32-bit reads at a time, and skip
596                  * the polling on the last read.
597                  */
598                 while (len > 8) {
599                         readsl(info->mmio_base + NDDB, data, 8);
600
601                         ts = get_timer(0);
602                         while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
603                                 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
604                                         dev_err(&info->pdev->dev,
605                                                 "Timeout on RDDREQ while draining the FIFO\n");
606                                         return;
607                                 }
608                         }
609
610                         data += 32;
611                         len -= 8;
612                 }
613         }
614
615         readsl(info->mmio_base + NDDB, data, len);
616 }
617
618 static void handle_data_pio(struct pxa3xx_nand_info *info)
619 {
620         int data_len = info->step_chunk_size;
621
622         /*
623          * In raw mode, include the spare area and the ECC bytes that are not
624          * consumed by the controller in the data section. Do not reorganize
625          * here, do it in the ->read_page_raw() handler instead.
626          */
627         if (info->force_raw)
628                 data_len += info->step_spare_size + info->ecc_size;
629
630         switch (info->state) {
631         case STATE_PIO_WRITING:
632                 if (info->step_chunk_size)
633                         writesl(info->mmio_base + NDDB,
634                                 info->data_buff + info->data_buff_pos,
635                                 DIV_ROUND_UP(data_len, 4));
636
637                 if (info->step_spare_size)
638                         writesl(info->mmio_base + NDDB,
639                                 info->oob_buff + info->oob_buff_pos,
640                                 DIV_ROUND_UP(info->step_spare_size, 4));
641                 break;
642         case STATE_PIO_READING:
643                 if (data_len)
644                         drain_fifo(info,
645                                    info->data_buff + info->data_buff_pos,
646                                    DIV_ROUND_UP(data_len, 4));
647
648                 if (info->force_raw)
649                         break;
650
651                 if (info->step_spare_size)
652                         drain_fifo(info,
653                                    info->oob_buff + info->oob_buff_pos,
654                                    DIV_ROUND_UP(info->step_spare_size, 4));
655                 break;
656         default:
657                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
658                                 info->state);
659                 BUG();
660         }
661
662         /* Update buffer pointers for multi-page read/write */
663         info->data_buff_pos += data_len;
664         info->oob_buff_pos += info->step_spare_size;
665 }
666
667 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
668 {
669         handle_data_pio(info);
670
671         info->state = STATE_CMD_DONE;
672         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
673 }
674
675 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
676 {
677         unsigned int status, is_completed = 0, is_ready = 0;
678         unsigned int ready, cmd_done;
679         irqreturn_t ret = IRQ_HANDLED;
680
681         if (info->cs == 0) {
682                 ready           = NDSR_FLASH_RDY;
683                 cmd_done        = NDSR_CS0_CMDD;
684         } else {
685                 ready           = NDSR_RDY;
686                 cmd_done        = NDSR_CS1_CMDD;
687         }
688
689         /* TODO - find out why we need the delay during write operation. */
690         ndelay(1);
691
692         status = nand_readl(info, NDSR);
693
694         if (status & NDSR_UNCORERR)
695                 info->retcode = ERR_UNCORERR;
696         if (status & NDSR_CORERR) {
697                 info->retcode = ERR_CORERR;
698                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
699                     info->ecc_bch)
700                         info->ecc_err_cnt = NDSR_ERR_CNT(status);
701                 else
702                         info->ecc_err_cnt = 1;
703
704                 /*
705                  * Each chunk composing a page is corrected independently,
706                  * and we need to store the maximum number of corrected bitflips
707                  * to return it to the MTD layer in ecc.read_page().
708                  */
709                 info->max_bitflips = max_t(unsigned int,
710                                            info->max_bitflips,
711                                            info->ecc_err_cnt);
712         }
713         if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
714                 info->state = (status & NDSR_RDDREQ) ?
715                         STATE_PIO_READING : STATE_PIO_WRITING;
716                 /* Call the IRQ thread in U-Boot directly */
717                 pxa3xx_nand_irq_thread(info);
718                 return 0;
719         }
720         if (status & cmd_done) {
721                 info->state = STATE_CMD_DONE;
722                 is_completed = 1;
723         }
724         if (status & ready) {
725                 info->state = STATE_READY;
726                 is_ready = 1;
727         }
728
729         /*
730          * Clear all status bits before issuing the next command, which
731          * can and will alter the status bits and will raise a new
732          * interrupt on its own. This lets the controller exit the IRQ.
733          */
734         nand_writel(info, NDSR, status);
735
736         if (status & NDSR_WRCMDREQ) {
737                 status &= ~NDSR_WRCMDREQ;
738                 info->state = STATE_CMD_HANDLE;
739
740                 /*
741                  * Command buffer registers NDCB{0-2} (and optionally NDCB3)
742                  * must be loaded by writing either 12 or 16 bytes
743                  * to NDCB0, four bytes at a time.
744                  *
745                  * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored,
746                  * but each NDCBx register can be read back.
747                  */
748                 nand_writel(info, NDCB0, info->ndcb0);
749                 nand_writel(info, NDCB0, info->ndcb1);
750                 nand_writel(info, NDCB0, info->ndcb2);
751
752                 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
753                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
754                         nand_writel(info, NDCB0, info->ndcb3);
755         }
756
757         if (is_completed)
758                 info->cmd_complete = 1;
759         if (is_ready)
760                 info->dev_ready = 1;
761
762         return ret;
763 }
764
765 static inline int is_buf_blank(uint8_t *buf, size_t len)
766 {
767         for (; len > 0; len--)
768                 if (*buf++ != 0xff)
769                         return 0;
770         return 1;
771 }
772
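/*
 * Illustrative example (hypothetical values): for a large-page device,
 * column = 0 and page_addr = 0x12345 are packed below as
 * NDCB1 = 0x23450000 and NDCB2 = 0x01, i.e. the low 16 page-address bits
 * and the column go into NDCB1 while the upper page-address byte spills
 * into NDCB2.
 */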
773 static void set_command_address(struct pxa3xx_nand_info *info,
774                 unsigned int page_size, uint16_t column, int page_addr)
775 {
776         /* small page addr setting */
777         if (page_size < info->chunk_size) {
778                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
779                                 | (column & 0xFF);
780
781                 info->ndcb2 = 0;
782         } else {
783                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
784                                 | (column & 0xFFFF);
785
786                 if (page_addr & 0xFF0000)
787                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
788                 else
789                         info->ndcb2 = 0;
790         }
791 }
792
793 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
794 {
795         struct pxa3xx_nand_host *host = info->host[info->cs];
796         struct mtd_info *mtd = nand_to_mtd(&host->chip);
797
798         /* reset the data and oob column pointers used to handle data */
799         info->buf_start         = 0;
800         info->buf_count         = 0;
801         info->data_buff_pos     = 0;
802         info->oob_buff_pos      = 0;
803         info->step_chunk_size   = 0;
804         info->step_spare_size   = 0;
805         info->cur_chunk         = 0;
806         info->use_ecc           = 0;
807         info->use_spare         = 1;
808         info->retcode           = ERR_NONE;
809         info->ecc_err_cnt       = 0;
810         info->ndcb3             = 0;
811         info->need_wait         = 0;
812
813         switch (command) {
814         case NAND_CMD_READ0:
815         case NAND_CMD_READOOB:
816         case NAND_CMD_PAGEPROG:
817                 if (!info->force_raw)
818                         info->use_ecc = 1;
819                 break;
820         case NAND_CMD_PARAM:
821                 info->use_spare = 0;
822                 break;
823         default:
824                 info->ndcb1 = 0;
825                 info->ndcb2 = 0;
826                 break;
827         }
828
829         /*
830          * If we are about to issue a read command, or about to set
831          * the write address, then clean the data buffer.
832          */
833         if (command == NAND_CMD_READ0 ||
834             command == NAND_CMD_READOOB ||
835             command == NAND_CMD_SEQIN) {
836                 info->buf_count = mtd->writesize + mtd->oobsize;
837                 memset(info->data_buff, 0xFF, info->buf_count);
838         }
839 }
840
841 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
842                 int ext_cmd_type, uint16_t column, int page_addr)
843 {
844         int addr_cycle, exec_cmd;
845         struct pxa3xx_nand_host *host;
846         struct mtd_info *mtd;
847
848         host = info->host[info->cs];
849         mtd = nand_to_mtd(&host->chip);
850         addr_cycle = 0;
851         exec_cmd = 1;
852
853         if (info->cs != 0)
854                 info->ndcb0 = NDCB0_CSEL;
855         else
856                 info->ndcb0 = 0;
857
858         if (command == NAND_CMD_SEQIN)
859                 exec_cmd = 0;
860
861         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
862                                     + host->col_addr_cycles);
863
864         switch (command) {
865         case NAND_CMD_READOOB:
866         case NAND_CMD_READ0:
867                 info->buf_start = column;
868                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
869                                 | addr_cycle
870                                 | NAND_CMD_READ0;
871
872                 if (command == NAND_CMD_READOOB)
873                         info->buf_start += mtd->writesize;
874
875                 if (info->cur_chunk < info->nfullchunks) {
876                         info->step_chunk_size = info->chunk_size;
877                         info->step_spare_size = info->spare_size;
878                 } else {
879                         info->step_chunk_size = info->last_chunk_size;
880                         info->step_spare_size = info->last_spare_size;
881                 }
882
883                 /*
884                  * Multiple page read needs an 'extended command type' field,
885                  * which is either naked-read or last-read according to the
886                  * state.
887                  */
888                 if (info->force_raw) {
889                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
890                                        NDCB0_LEN_OVRD |
891                                        NDCB0_EXT_CMD_TYPE(ext_cmd_type);
892                         info->ndcb3 = info->step_chunk_size +
893                                       info->step_spare_size + info->ecc_size;
894                 } else if (mtd->writesize == info->chunk_size) {
895                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
896                 } else if (mtd->writesize > info->chunk_size) {
897                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
898                                         | NDCB0_LEN_OVRD
899                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
900                         info->ndcb3 = info->step_chunk_size +
901                                 info->step_spare_size;
902                 }
903
904                 set_command_address(info, mtd->writesize, column, page_addr);
905                 break;
906
907         case NAND_CMD_SEQIN:
908
909                 info->buf_start = column;
910                 set_command_address(info, mtd->writesize, 0, page_addr);
911
912                 /*
913                  * Multiple page programming needs to execute the initial
914                  * SEQIN command that sets the page address.
915                  */
916                 if (mtd->writesize > info->chunk_size) {
917                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
918                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
919                                 | addr_cycle
920                                 | command;
921                         exec_cmd = 1;
922                 }
923                 break;
924
925         case NAND_CMD_PAGEPROG:
926                 if (is_buf_blank(info->data_buff,
927                                  (mtd->writesize + mtd->oobsize))) {
928                         exec_cmd = 0;
929                         break;
930                 }
931
932                 if (info->cur_chunk < info->nfullchunks) {
933                         info->step_chunk_size = info->chunk_size;
934                         info->step_spare_size = info->spare_size;
935                 } else {
936                         info->step_chunk_size = info->last_chunk_size;
937                         info->step_spare_size = info->last_spare_size;
938                 }
939
940                 /* Second command setting for large pages */
941                 if (mtd->writesize > info->chunk_size) {
942                         /*
943                          * Multiple page write uses the 'extended command'
944                          * field. This can be used to issue a command dispatch
945                          * or a naked-write depending on the current stage.
946                          */
947                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
948                                         | NDCB0_LEN_OVRD
949                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
950                         info->ndcb3 = info->step_chunk_size +
951                                       info->step_spare_size;
952
953                         /*
954                          * This is the command dispatch that completes a chunked
955                          * page program operation.
956                          */
957                         if (info->cur_chunk == info->ntotalchunks) {
958                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
959                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
960                                         | command;
961                                 info->ndcb1 = 0;
962                                 info->ndcb2 = 0;
963                                 info->ndcb3 = 0;
964                         }
965                 } else {
966                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
967                                         | NDCB0_AUTO_RS
968                                         | NDCB0_ST_ROW_EN
969                                         | NDCB0_DBC
970                                         | (NAND_CMD_PAGEPROG << 8)
971                                         | NAND_CMD_SEQIN
972                                         | addr_cycle;
973                 }
974                 break;
975
976         case NAND_CMD_PARAM:
977                 info->buf_count = INIT_BUFFER_SIZE;
978                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
979                                 | NDCB0_ADDR_CYC(1)
980                                 | NDCB0_LEN_OVRD
981                                 | command;
982                 info->ndcb1 = (column & 0xFF);
983                 info->ndcb3 = INIT_BUFFER_SIZE;
984                 info->step_chunk_size = INIT_BUFFER_SIZE;
985                 break;
986
987         case NAND_CMD_READID:
988                 info->buf_count = READ_ID_BYTES;
989                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
990                                 | NDCB0_ADDR_CYC(1)
991                                 | command;
992                 info->ndcb1 = (column & 0xFF);
993
994                 info->step_chunk_size = 8;
995                 break;
996         case NAND_CMD_STATUS:
997                 info->buf_count = 1;
998                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
999                                 | NDCB0_ADDR_CYC(1)
1000                                 | command;
1001
1002                 info->step_chunk_size = 8;
1003                 break;
1004
1005         case NAND_CMD_ERASE1:
1006                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1007                                 | NDCB0_AUTO_RS
1008                                 | NDCB0_ADDR_CYC(3)
1009                                 | NDCB0_DBC
1010                                 | (NAND_CMD_ERASE2 << 8)
1011                                 | NAND_CMD_ERASE1;
1012                 info->ndcb1 = page_addr;
1013                 info->ndcb2 = 0;
1014
1015                 break;
1016         case NAND_CMD_RESET:
1017                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1018                                 | command;
1019
1020                 break;
1021
1022         case NAND_CMD_ERASE2:
1023                 exec_cmd = 0;
1024                 break;
1025
1026         default:
1027                 exec_cmd = 0;
1028                 dev_err(&info->pdev->dev, "non-supported command %x\n",
1029                         command);
1030                 break;
1031         }
1032
1033         return exec_cmd;
1034 }
1035
1036 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1037                          int column, int page_addr)
1038 {
1039         struct nand_chip *chip = mtd_to_nand(mtd);
1040         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1041         struct pxa3xx_nand_info *info = host->info_data;
1042         int exec_cmd;
1043
1044         /*
1045          * if this is a x16 device, then convert the input
1046          * "byte" address into a "word" address appropriate
1047          * for indexing a word-oriented device
1048          */
1049         if (info->reg_ndcr & NDCR_DWIDTH_M)
1050                 column /= 2;
1051
1052         /*
1053          * Different NAND chips may be hooked to different chip
1054          * selects, so check whether the chip select has changed;
1055          * if so, reset the timing registers.
1056          */
1057         if (info->cs != host->cs) {
1058                 info->cs = host->cs;
1059                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1060                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1061         }
1062
1063         prepare_start_command(info, command);
1064
1065         info->state = STATE_PREPARED;
1066         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1067
1068         if (exec_cmd) {
1069                 u32 ts;
1070
1071                 info->cmd_complete = 0;
1072                 info->dev_ready = 0;
1073                 info->need_wait = 1;
1074                 pxa3xx_nand_start(info);
1075
1076                 ts = get_timer(0);
1077                 while (1) {
1078                         u32 status;
1079
1080                         status = nand_readl(info, NDSR);
1081                         if (status)
1082                                 pxa3xx_nand_irq(info);
1083
1084                         if (info->cmd_complete)
1085                                 break;
1086
1087                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1088                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1089                                 return;
1090                         }
1091                 }
1092         }
1093         info->state = STATE_IDLE;
1094 }
1095
1096 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1097                                   const unsigned command,
1098                                   int column, int page_addr)
1099 {
1100         struct nand_chip *chip = mtd_to_nand(mtd);
1101         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1102         struct pxa3xx_nand_info *info = host->info_data;
1103         int exec_cmd, ext_cmd_type;
1104
1105         /*
1106          * if this is a x16 device, then convert the input
1107          * "byte" address into a "word" address appropriate
1108          * for indexing a word-oriented device
1109          */
1110         if (info->reg_ndcr & NDCR_DWIDTH_M)
1111                 column /= 2;
1112
1113         /*
1114          * Different NAND chips may be hooked to different chip
1115          * selects, so check whether the chip select has changed;
1116          * if so, reset the timing registers.
1117          */
1118         if (info->cs != host->cs) {
1119                 info->cs = host->cs;
1120                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1121                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1122         }
1123
1124         /* Select the extended command for the first command */
1125         switch (command) {
1126         case NAND_CMD_READ0:
1127         case NAND_CMD_READOOB:
1128                 ext_cmd_type = EXT_CMD_TYPE_MONO;
1129                 break;
1130         case NAND_CMD_SEQIN:
1131                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1132                 break;
1133         case NAND_CMD_PAGEPROG:
1134                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1135                 break;
1136         default:
1137                 ext_cmd_type = 0;
1138                 break;
1139         }
1140
1141         prepare_start_command(info, command);
1142
1143         /*
1144          * Prepare the "is ready" completion before starting a command
1145          * transaction sequence. If the command is not executed, the
1146          * completion is completed right away, see below.
1147          *
1148          * We can do that inside the loop because the command variable
1149          * is invariant and thus so is the exec_cmd.
1150          */
1151         info->need_wait = 1;
1152         info->dev_ready = 0;
1153
1154         do {
1155                 u32 ts;
1156
1157                 info->state = STATE_PREPARED;
1158                 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1159                                                column, page_addr);
1160                 if (!exec_cmd) {
1161                         info->need_wait = 0;
1162                         info->dev_ready = 1;
1163                         break;
1164                 }
1165
1166                 info->cmd_complete = 0;
1167                 pxa3xx_nand_start(info);
1168
1169                 ts = get_timer(0);
1170                 while (1) {
1171                         u32 status;
1172
1173                         status = nand_readl(info, NDSR);
1174                         if (status)
1175                                 pxa3xx_nand_irq(info);
1176
1177                         if (info->cmd_complete)
1178                                 break;
1179
1180                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1181                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1182                                 return;
1183                         }
1184                 }
1185
1186                 /* Only a few commands need several steps */
1187                 if (command != NAND_CMD_PAGEPROG &&
1188                     command != NAND_CMD_READ0    &&
1189                     command != NAND_CMD_READOOB)
1190                         break;
1191
1192                 info->cur_chunk++;
1193
1194                 /* Check if the sequence is complete */
1195                 if (info->cur_chunk == info->ntotalchunks &&
1196                     command != NAND_CMD_PAGEPROG)
1197                         break;
1198
1199                 /*
1200                  * After a split program command sequence has issued
1201                  * the command dispatch, the command sequence is complete.
1202                  */
1203                 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1204                     command == NAND_CMD_PAGEPROG &&
1205                     ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1206                         break;
1207
1208                 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1209                         /* Last read: issue a 'last naked read' */
1210                         if (info->cur_chunk == info->ntotalchunks - 1)
1211                                 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1212                         else
1213                                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1214
1215                 /*
1216                  * If a split program command has no more data to transfer,
1217                  * the command dispatch must be issued to complete it.
1218                  */
1219                 } else if (command == NAND_CMD_PAGEPROG &&
1220                            info->cur_chunk == info->ntotalchunks) {
1221                                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1222                 }
1223         } while (1);
1224
1225         info->state = STATE_IDLE;
1226 }
1227
1228 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1229                 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1230                 int page)
1231 {
1232         chip->write_buf(mtd, buf, mtd->writesize);
1233         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1234
1235         return 0;
1236 }
1237
1238 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1239                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1240                 int page)
1241 {
1242         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1243         struct pxa3xx_nand_info *info = host->info_data;
1244         int bf;
1245
1246         chip->read_buf(mtd, buf, mtd->writesize);
1247         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1248
1249         if (info->retcode == ERR_CORERR && info->use_ecc) {
1250                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1251
1252         } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1253                 /*
1254                  * Empty pages will trigger uncorrectable errors. Re-read the
1255                  * entire page in raw mode and check for bits not being "1".
1256                  * If there are more such bitflips than the supported ECC
1257                  * strength, then this is an actual uncorrectable error.
1258                  */
1259                 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1260                 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1261                                                  chip->oob_poi, mtd->oobsize,
1262                                                  NULL, 0, chip->ecc.strength);
1263                 if (bf < 0) {
1264                         mtd->ecc_stats.failed++;
1265                 } else if (bf) {
1266                         mtd->ecc_stats.corrected += bf;
1267                         info->max_bitflips = max_t(unsigned int,
1268                                                    info->max_bitflips, bf);
1269                         info->retcode = ERR_CORERR;
1270                 } else {
1271                         info->retcode = ERR_NONE;
1272                 }
1273
1274         } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1275                 /* Raw read is not supported with Hamming ECC engine */
1276                 if (is_buf_blank(buf, mtd->writesize))
1277                         info->retcode = ERR_NONE;
1278                 else
1279                         mtd->ecc_stats.failed++;
1280         }
1281
1282         return info->max_bitflips;
1283 }
1284
1285 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1286                                      struct nand_chip *chip, uint8_t *buf,
1287                                      int oob_required, int page)
1288 {
1289         struct pxa3xx_nand_host *host = chip->priv;
1290         struct pxa3xx_nand_info *info = host->info_data;
1291         int chunk, ecc_off_buf;
1292
1293         if (!info->ecc_bch)
1294                 return -ENOTSUPP;
1295
1296         /*
1297          * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1298          * pxa3xx_nand_start(), which will actually disable the ECC engine.
1299          */
1300         info->force_raw = true;
1301         chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1302
1303         ecc_off_buf = (info->nfullchunks * info->spare_size) +
1304                       info->last_spare_size;
1305         for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1306                 chip->read_buf(mtd,
1307                                buf + (chunk * info->chunk_size),
1308                                info->chunk_size);
1309                 chip->read_buf(mtd,
1310                                chip->oob_poi +
1311                                (chunk * (info->spare_size)),
1312                                info->spare_size);
1313                 chip->read_buf(mtd,
1314                                chip->oob_poi + ecc_off_buf +
1315                                (chunk * (info->ecc_size)),
1316                                info->ecc_size - 2);
1317         }
1318
1319         if (info->ntotalchunks > info->nfullchunks) {
1320                 chip->read_buf(mtd,
1321                                buf + (info->nfullchunks * info->chunk_size),
1322                                info->last_chunk_size);
1323                 chip->read_buf(mtd,
1324                                chip->oob_poi +
1325                                (info->nfullchunks * (info->spare_size)),
1326                                info->last_spare_size);
1327                 chip->read_buf(mtd,
1328                                chip->oob_poi + ecc_off_buf +
1329                                (info->nfullchunks * (info->ecc_size)),
1330                                info->ecc_size - 2);
1331         }
1332
1333         info->force_raw = false;
1334
1335         return 0;
1336 }
1337
1338 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1339                                     struct nand_chip *chip, int page)
1340 {
1341         /* Invalidate page cache */
1342         chip->pagebuf = -1;
1343
1344         return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1345                                        page);
1346 }
1347
1348 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1349 {
1350         struct nand_chip *chip = mtd_to_nand(mtd);
1351         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1352         struct pxa3xx_nand_info *info = host->info_data;
1353         char retval = 0xFF;
1354
1355         if (info->buf_start < info->buf_count)
1356                 /* Has a new command just been sent? */
1357                 retval = info->data_buff[info->buf_start++];
1358
1359         return retval;
1360 }
1361
1362 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1363 {
1364         struct nand_chip *chip = mtd_to_nand(mtd);
1365         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1366         struct pxa3xx_nand_info *info = host->info_data;
1367         u16 retval = 0xFFFF;
1368
1369         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1370                 retval = *((u16 *)(info->data_buff+info->buf_start));
1371                 info->buf_start += 2;
1372         }
1373         return retval;
1374 }
1375
1376 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1377 {
1378         struct nand_chip *chip = mtd_to_nand(mtd);
1379         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1380         struct pxa3xx_nand_info *info = host->info_data;
1381         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1382
1383         memcpy(buf, info->data_buff + info->buf_start, real_len);
1384         info->buf_start += real_len;
1385 }
1386
1387 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1388                 const uint8_t *buf, int len)
1389 {
1390         struct nand_chip *chip = mtd_to_nand(mtd);
1391         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1392         struct pxa3xx_nand_info *info = host->info_data;
1393         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1394
1395         memcpy(info->data_buff + info->buf_start, buf, real_len);
1396         info->buf_start += real_len;
1397 }
1398
1399 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1400 {
1401         return;
1402 }
1403
1404 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1405 {
1406         struct nand_chip *chip = mtd_to_nand(mtd);
1407         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1408         struct pxa3xx_nand_info *info = host->info_data;
1409
1410         if (info->need_wait) {
1411                 u32 ts;
1412
1413                 info->need_wait = 0;
1414
1415                 ts = get_timer(0);
1416                 while (1) {
1417                         u32 status;
1418
1419                         status = nand_readl(info, NDSR);
1420                         if (status)
1421                                 pxa3xx_nand_irq(info);
1422
1423                         if (info->dev_ready)
1424                                 break;
1425
1426                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1427                                 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1428                                 return NAND_STATUS_FAIL;
1429                         }
1430                 }
1431         }
1432
1433         /* pxa3xx_nand_send_command has already waited for command completion */
1434         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1435                 if (info->retcode == ERR_NONE)
1436                         return 0;
1437                 else
1438                         return NAND_STATUS_FAIL;
1439         }
1440
1441         return NAND_STATUS_READY;
1442 }
1443
1444 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1445 {
1446         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1447
1448         /* Configure default flash values */
1449         info->reg_ndcr = 0x0; /* enable all interrupts */
1450         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1451         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1452         info->reg_ndcr |= NDCR_SPARE_EN;
1453
1454         return 0;
1455 }
1456
1457 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1458 {
1459         struct pxa3xx_nand_host *host = info->host[info->cs];
1460         struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1461         struct nand_chip *chip = mtd_to_nand(mtd);
1462
1463         info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1464         info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1465         info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1466 }
1467
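/*
 * With the "nand-keep-config" property set, inherit NDCR and the NDTRx
 * timing registers as programmed by the earlier boot stage instead of
 * reconfiguring the controller from scratch.
 */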
1468 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1469 {
1470         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1471         uint32_t ndcr = nand_readl(info, NDCR);
1472
1473         /* Set an initial chunk size */
1474         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1475         info->reg_ndcr = ndcr &
1476                 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1477         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1478         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1479         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1480 }
1481
1482 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1483 {
1484         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1485         if (info->data_buff == NULL)
1486                 return -ENOMEM;
1487         return 0;
1488 }
1489
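/*
 * Check that a NAND chip actually responds on the current chip select:
 * program conservative (ONFI mode 0) timings, issue a RESET command and
 * look at the resulting status.
 */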
1490 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1491 {
1492         struct pxa3xx_nand_info *info = host->info_data;
1493         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1494         struct mtd_info *mtd;
1495         struct nand_chip *chip;
1496         const struct nand_sdr_timings *timings;
1497         int ret;
1498
1499         mtd = nand_to_mtd(&info->host[info->cs]->chip);
1500         chip = mtd_to_nand(mtd);
1501
1502         /* configure default flash values */
1503         info->reg_ndcr = 0x0; /* enable all interrupts */
1504         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1505         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1506         info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1507
1508         /* Use the common (ONFI mode 0) timings for a first try */
1509         timings = onfi_async_timing_mode_to_sdr_timings(0);
1510         if (IS_ERR(timings))
1511                 return PTR_ERR(timings);
1512
1513         pxa3xx_nand_set_sdr_timing(host, timings);
1514
1515         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1516         ret = chip->waitfunc(mtd, chip);
1517         if (ret & NAND_STATUS_FAIL)
1518                 return -ENODEV;
1519
1520         return 0;
1521 }
1522
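/*
 * Pick the ECC engine configuration and the matching page layout (number
 * of chunks plus chunk/spare/ECC sizes) for the requested strength, step
 * size and page size. Unsupported combinations are rejected.
 */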
1523 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1524                         struct nand_ecc_ctrl *ecc,
1525                         int strength, int ecc_stepsize, int page_size)
1526 {
1527         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1528                 info->nfullchunks = 1;
1529                 info->ntotalchunks = 1;
1530                 info->chunk_size = 2048;
1531                 info->spare_size = 40;
1532                 info->ecc_size = 24;
1533                 ecc->mode = NAND_ECC_HW;
1534                 ecc->size = 512;
1535                 ecc->strength = 1;
1536
1537         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1538                 info->nfullchunks = 1;
1539                 info->ntotalchunks = 1;
1540                 info->chunk_size = 512;
1541                 info->spare_size = 8;
1542                 info->ecc_size = 8;
1543                 ecc->mode = NAND_ECC_HW;
1544                 ecc->size = 512;
1545                 ecc->strength = 1;
1546
1547         /*
1548          * Required ECC: 4-bit correction per 512 bytes
1549          * Select: 16-bit correction per 2048 bytes
1550          */
1551         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1552                 info->ecc_bch = 1;
1553                 info->nfullchunks = 1;
1554                 info->ntotalchunks = 1;
1555                 info->chunk_size = 2048;
1556                 info->spare_size = 32;
1557                 info->ecc_size = 32;
1558                 ecc->mode = NAND_ECC_HW;
1559                 ecc->size = info->chunk_size;
1560                 ecc->layout = &ecc_layout_2KB_bch4bit;
1561                 ecc->strength = 16;
1562
1563         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1564                 info->ecc_bch = 1;
1565                 info->nfullchunks = 2;
1566                 info->ntotalchunks = 2;
1567                 info->chunk_size = 2048;
1568                 info->spare_size = 32;
1569                 info->ecc_size = 32;
1570                 ecc->mode = NAND_ECC_HW;
1571                 ecc->size = info->chunk_size;
1572                 ecc->layout = &ecc_layout_4KB_bch4bit;
1573                 ecc->strength = 16;
1574
1575         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1576                 info->ecc_bch = 1;
1577                 info->nfullchunks = 4;
1578                 info->ntotalchunks = 4;
1579                 info->chunk_size = 2048;
1580                 info->spare_size = 32;
1581                 info->ecc_size = 32;
1582                 ecc->mode = NAND_ECC_HW;
1583                 ecc->size = info->chunk_size;
1584                 ecc->layout = &ecc_layout_8KB_bch4bit;
1585                 ecc->strength = 16;
1586
1587         /*
1588          * Required ECC: 8-bit correction per 512 bytes
1589          * Select: 16-bit correction per 1024 bytes
1590          */
1591         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1592                 info->ecc_bch = 1;
1593                 info->nfullchunks = 1;
1594                 info->ntotalchunks = 2;
1595                 info->chunk_size = 1024;
1596                 info->spare_size = 0;
1597                 info->last_chunk_size = 1024;
1598                 info->last_spare_size = 32;
1599                 info->ecc_size = 32;
1600                 ecc->mode = NAND_ECC_HW;
1601                 ecc->size = info->chunk_size;
1602                 ecc->layout = &ecc_layout_2KB_bch8bit;
1603                 ecc->strength = 16;
1604
1605         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1606                 info->ecc_bch = 1;
1607                 info->nfullchunks = 4;
1608                 info->ntotalchunks = 5;
1609                 info->chunk_size = 1024;
1610                 info->spare_size = 0;
1611                 info->last_chunk_size = 0;
1612                 info->last_spare_size = 64;
1613                 info->ecc_size = 32;
1614                 ecc->mode = NAND_ECC_HW;
1615                 ecc->size = info->chunk_size;
1616                 ecc->layout = &ecc_layout_4KB_bch8bit;
1617                 ecc->strength = 16;
1618
1619         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1620                 info->ecc_bch = 1;
1621                 info->nfullchunks = 8;
1622                 info->ntotalchunks = 9;
1623                 info->chunk_size = 1024;
1624                 info->spare_size = 0;
1625                 info->last_chunk_size = 0;
1626                 info->last_spare_size = 160;
1627                 info->ecc_size = 32;
1628                 ecc->mode = NAND_ECC_HW;
1629                 ecc->size = info->chunk_size;
1630                 ecc->layout = &ecc_layout_8KB_bch8bit;
1631                 ecc->strength = 16;
1632
1633         } else {
1634                 dev_err(&info->pdev->dev,
1635                         "ECC strength %d at page size %d is not supported\n",
1636                         strength, page_size);
1637                 return -ENODEV;
1638         }
1639
1640         return 0;
1641 }
1642
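/*
 * Identify the flash, set up timings and the ECC layout, size the
 * data/OOB buffer and compute the addressing cycles before handing over
 * to nand_scan_tail().
 */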
1643 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1644 {
1645         struct nand_chip *chip = mtd_to_nand(mtd);
1646         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1647         struct pxa3xx_nand_info *info = host->info_data;
1648         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1649         int ret;
1650         uint16_t ecc_strength, ecc_step;
1651
1652         if (pdata->keep_config) {
1653                 pxa3xx_nand_detect_config(info);
1654         } else {
1655                 ret = pxa3xx_nand_config_ident(info);
1656                 if (ret)
1657                         return ret;
1658                 ret = pxa3xx_nand_sensing(host);
1659                 if (ret) {
1660                         dev_info(&info->pdev->dev,
1661                                  "There is no chip on cs %d!\n",
1662                                  info->cs);
1663                         return ret;
1664                 }
1665         }
1666
1667         /* Device detection must be done with ECC disabled */
1668         if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1669                 nand_writel(info, NDECCCTRL, 0x0);
1670
1671         if (nand_scan_ident(mtd, 1, NULL))
1672                 return -ENODEV;
1673
1674         if (!pdata->keep_config) {
1675                 ret = pxa3xx_nand_init_timings(host);
1676                 if (ret) {
1677                         dev_err(&info->pdev->dev,
1678                                 "Failed to set timings: %d\n", ret);
1679                         return ret;
1680                 }
1681         }
1682
1683 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1684         /*
1685          * We'll use a bad block table stored in flash and won't
1686          * allow writing the bad block marker to the flash.
1687          */
1688         chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1689         chip->bbt_td = &bbt_main_descr;
1690         chip->bbt_md = &bbt_mirror_descr;
1691 #endif
1692
1693         if (pdata->ecc_strength && pdata->ecc_step_size) {
1694                 ecc_strength = pdata->ecc_strength;
1695                 ecc_step = pdata->ecc_step_size;
1696         } else {
1697                 ecc_strength = chip->ecc_strength_ds;
1698                 ecc_step = chip->ecc_step_ds;
1699         }
1700
1701         /* Set default ECC strength requirements on non-ONFI devices */
1702         if (ecc_strength < 1 && ecc_step < 1) {
1703                 ecc_strength = 1;
1704                 ecc_step = 512;
1705         }
1706
1707         ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1708                            ecc_step, mtd->writesize);
1709         if (ret)
1710                 return ret;
1711
1712         /*
1713          * If the page size is bigger than the FIFO size, check that we
1714          * are using the right variant and then switch to the extended
1715          * (aka split) command handling.
1716          */
1717         if (mtd->writesize > info->chunk_size) {
1718                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1719                         chip->cmdfunc = nand_cmdfunc_extended;
1720                 } else {
1721                         dev_err(&info->pdev->dev,
1722                                 "unsupported page size on this variant\n");
1723                         return -ENODEV;
1724                 }
1725         }
1726
1727         /* calculate addressing information */
1728         if (mtd->writesize >= 2048)
1729                 host->col_addr_cycles = 2;
1730         else
1731                 host->col_addr_cycles = 1;
1732
1733         /* release the initial buffer */
1734         kfree(info->data_buff);
1735
1736         /* allocate the real data + oob buffer */
1737         info->buf_size = mtd->writesize + mtd->oobsize;
1738         ret = pxa3xx_nand_init_buff(info);
1739         if (ret)
1740                 return ret;
1741         info->oob_buff = info->data_buff + mtd->writesize;
1742
1743         if ((mtd->size >> chip->page_shift) > 65536)
1744                 host->row_addr_cycles = 3;
1745         else
1746                 host->row_addr_cycles = 2;
1747
1748         if (!pdata->keep_config)
1749                 pxa3xx_nand_config_tail(info);
1750
1751         return nand_scan_tail(mtd);
1752 }
1753
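/*
 * The per chip-select hosts live in the same allocation as the info
 * structure (see board_nand_init()), with the nand_chip expected to be
 * the first member of each host, which is what makes the casts below
 * valid.
 */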
1754 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1755 {
1756         struct pxa3xx_nand_platform_data *pdata;
1757         struct pxa3xx_nand_host *host;
1758         struct nand_chip *chip = NULL;
1759         struct mtd_info *mtd;
1760         int ret, cs;
1761
1762         pdata = info->pdata;
1763         if (pdata->num_cs <= 0)
1764                 return -ENODEV;
1765
1766         info->variant = pxa3xx_nand_get_variant();
1767         for (cs = 0; cs < pdata->num_cs; cs++) {
1768                 chip = (struct nand_chip *)
1769                         ((u8 *)&info[1] + sizeof(*host) * cs);
1770                 mtd = nand_to_mtd(chip);
1771                 host = (struct pxa3xx_nand_host *)chip;
1772                 info->host[cs] = host;
1773                 host->cs = cs;
1774                 host->info_data = info;
1775                 mtd->owner = THIS_MODULE;
1776
1777                 nand_set_controller_data(chip, host);
1778                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1779                 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1780                 chip->ecc.read_oob_raw  = pxa3xx_nand_read_oob_raw;
1781                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1782                 chip->controller        = &info->controller;
1783                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1784                 chip->select_chip       = pxa3xx_nand_select_chip;
1785                 chip->read_word         = pxa3xx_nand_read_word;
1786                 chip->read_byte         = pxa3xx_nand_read_byte;
1787                 chip->read_buf          = pxa3xx_nand_read_buf;
1788                 chip->write_buf         = pxa3xx_nand_write_buf;
1789                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1790                 chip->cmdfunc           = nand_cmdfunc;
1791         }
1792
1793         /* Allocate a buffer to allow flash detection */
1794         info->buf_size = INIT_BUFFER_SIZE;
1795         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1796         if (info->data_buff == NULL) {
1797                 ret = -ENOMEM;
1798                 goto fail_disable_clk;
1799         }
1800
1801         /* Disable all interrupts initially */
1802         disable_int(info, NDSR_MASK);
1803
1804         return 0;
1805
1807 fail_disable_clk:
1808         return ret;
1809 }
1810
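/*
 * Build the platform data from the device tree: find the first enabled
 * "marvell,mvebu-pxa3xx-nand" node, take its register base and pick up
 * the optional chip-select, arbiter, keep-config and ECC properties.
 */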
1811 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1812 {
1813         struct pxa3xx_nand_platform_data *pdata;
1814         const void *blob = gd->fdt_blob;
1815         int node = -1;
1816
1817         pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1818         if (!pdata)
1819                 return -ENOMEM;
1820
1821         /* Walk the NAND controller nodes in the FDT blob */
1822         do {
1823                 node = fdt_node_offset_by_compatible(blob, node,
1824                                                      "marvell,mvebu-pxa3xx-nand");
1825                 if (node < 0)
1826                         break;
1827
1828                 /* Skip disabled nodes */
1829                 if (!fdtdec_get_is_enabled(blob, node))
1830                         continue;
1831
1832                 /* Get the first enabled NAND controller base address */
1833                 info->mmio_base =
1834                         (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1835                                         blob, node, "reg", 0, NULL, true);
1836
1837                 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1838                 if (pdata->num_cs != 1) {
1839                         pr_err("pxa3xx driver supports single CS only\n");
1840                         break;
1841                 }
1842
1843                 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1844                         pdata->enable_arbiter = 1;
1845
1846                 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1847                         pdata->keep_config = 1;
1848
1849                 /*
1850                  * ECC parameters.
1851                  * If these are not set, they will be selected according
1852                  * to the detected flash type.
1853                  */
1854                 /* ECC strength */
1855                 pdata->ecc_strength = fdtdec_get_int(blob, node,
1856                                                      "nand-ecc-strength", 0);
1857
1858                 /* ECC step size */
1859                 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1860                                                       "nand-ecc-step-size", 0);
1861
1862                 info->pdata = pdata;
1863
1864                 /* Currently support only a single NAND controller */
1865                 return 0;
1866
1867         } while (node >= 0);
1868
1869         return -EINVAL;
1870 }
1871
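/*
 * Parse the device tree, allocate the controller resources, then scan
 * and register one mtd device per populated chip select.
 */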
1872 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1873 {
1874         struct pxa3xx_nand_platform_data *pdata;
1875         int ret, cs, probe_success;
1876
1877         ret = pxa3xx_nand_probe_dt(info);
1878         if (ret)
1879                 return ret;
1880
1881         pdata = info->pdata;
1882
1883         ret = alloc_nand_resource(info);
1884         if (ret) {
1885                 dev_err(&info->pdev->dev, "alloc nand resource failed\n");
1886                 return ret;
1887         }
1888
1889         probe_success = 0;
1890         for (cs = 0; cs < pdata->num_cs; cs++) {
1891                 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1892
1893                 /*
1894                  * The mtd name matches the one used in 'mtdparts' kernel
1895                  * parameter. This name must not be changed, otherwise the
1896                  * user's mtd partition configuration would break.
1897                  */
1898                 mtd->name = "pxa3xx_nand-0";
1899                 info->cs = cs;
1900                 ret = pxa3xx_nand_scan(mtd);
1901                 if (ret) {
1902                         dev_info(&info->pdev->dev,
1903                                  "failed to scan nand at cs %d\n", cs);
1904                         continue;
1905                 }
1906
1907                 if (nand_register(cs, mtd))
1908                         continue;
1909
1910                 probe_success = 1;
1911         }
1912
1913         if (!probe_success)
1914                 return -ENODEV;
1915
1916         return 0;
1917 }
1918
1919 /*
1920  * Main initialization routine
1921  */
1922 void board_nand_init(void)
1923 {
1924         struct pxa3xx_nand_info *info;
1925         struct pxa3xx_nand_host *host;
1926         int ret;
1927
1928         info = kzalloc(sizeof(*info) +
1929                        sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1930                        GFP_KERNEL);
1931         if (!info)
1932                 return;
1933
1934         ret = pxa3xx_nand_probe(info);
1935         if (ret)
1936                 kfree(info);
1937 }