1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/mtd/nand/raw/pxa3xx_nand.c
4  *
5  * Copyright © 2005 Intel Corporation
6  * Copyright © 2006 Marvell International Ltd.
7  */
8
9 #include <common.h>
10 #include <malloc.h>
11 #include <fdtdec.h>
12 #include <nand.h>
13 #include <dm/device_compat.h>
14 #include <dm/devres.h>
15 #include <linux/bitops.h>
16 #include <linux/bug.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/errno.h>
20 #include <asm/io.h>
21 #include <asm/arch/cpu.h>
22 #include <linux/mtd/mtd.h>
23 #include <linux/mtd/rawnand.h>
24 #include <linux/types.h>
25
26 #include "pxa3xx_nand.h"
27
28 DECLARE_GLOBAL_DATA_PTR;
29
30 #define TIMEOUT_DRAIN_FIFO      5       /* in ms */
31 #define CHIP_DELAY_TIMEOUT      200
32 #define NAND_STOP_DELAY         40
33
34 /*
35  * Define a buffer size for the initial command that detects the flash device:
36  * STATUS, READID and PARAM.
37  * ONFI param page is 256 bytes, and there are three redundant copies
38  * to be read. JEDEC param page is 512 bytes, and there are also three
39  * redundant copies to be read.
40  * Hence this buffer should be at least 512 x 3. Let's pick 2048.
41  */
42 #define INIT_BUFFER_SIZE        2048
43
44 /* registers and bit definitions */
45 #define NDCR            (0x00) /* Control register */
46 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
47 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
48 #define NDSR            (0x14) /* Status Register */
49 #define NDPCR           (0x18) /* Page Count Register */
50 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
51 #define NDBDR1          (0x20) /* Bad Block Register 1 */
52 #define NDECCCTRL       (0x28) /* ECC control */
53 #define NDDB            (0x40) /* Data Buffer */
54 #define NDCB0           (0x48) /* Command Buffer0 */
55 #define NDCB1           (0x4C) /* Command Buffer1 */
56 #define NDCB2           (0x50) /* Command Buffer2 */
57
58 #define NDCR_SPARE_EN           (0x1 << 31)
59 #define NDCR_ECC_EN             (0x1 << 30)
60 #define NDCR_DMA_EN             (0x1 << 29)
61 #define NDCR_ND_RUN             (0x1 << 28)
62 #define NDCR_DWIDTH_C           (0x1 << 27)
63 #define NDCR_DWIDTH_M           (0x1 << 26)
64 #define NDCR_PAGE_SZ            (0x1 << 24)
65 #define NDCR_NCSX               (0x1 << 23)
66 #define NDCR_ND_MODE            (0x3 << 21)
67 #define NDCR_NAND_MODE          (0x0)
68 #define NDCR_CLR_PG_CNT         (0x1 << 20)
69 #define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
70 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
71 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
72
73 #define NDCR_RA_START           (0x1 << 15)
74 #define NDCR_PG_PER_BLK         (0x1 << 14)
75 #define NDCR_ND_ARB_EN          (0x1 << 12)
76 #define NDCR_INT_MASK           (0xFFF)
77
78 #define NDSR_MASK               (0xfff)
79 #define NDSR_ERR_CNT_OFF        (16)
80 #define NDSR_ERR_CNT_MASK       (0x1f)
81 #define NDSR_ERR_CNT(sr)        (((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
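/*
 * Worked example (NFCv2 with BCH enabled): a status word of 0x00030008 has
 * NDSR_CORERR set, and NDSR_ERR_CNT() evaluates to (0x00030008 >> 16) & 0x1f,
 * i.e. 3 bitflips were corrected in the current chunk.
 */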
82 #define NDSR_RDY                (0x1 << 12)
83 #define NDSR_FLASH_RDY          (0x1 << 11)
84 #define NDSR_CS0_PAGED          (0x1 << 10)
85 #define NDSR_CS1_PAGED          (0x1 << 9)
86 #define NDSR_CS0_CMDD           (0x1 << 8)
87 #define NDSR_CS1_CMDD           (0x1 << 7)
88 #define NDSR_CS0_BBD            (0x1 << 6)
89 #define NDSR_CS1_BBD            (0x1 << 5)
90 #define NDSR_UNCORERR           (0x1 << 4)
91 #define NDSR_CORERR             (0x1 << 3)
92 #define NDSR_WRDREQ             (0x1 << 2)
93 #define NDSR_RDDREQ             (0x1 << 1)
94 #define NDSR_WRCMDREQ           (0x1)
95
96 #define NDCB0_LEN_OVRD          (0x1 << 28)
97 #define NDCB0_ST_ROW_EN         (0x1 << 26)
98 #define NDCB0_AUTO_RS           (0x1 << 25)
99 #define NDCB0_CSEL              (0x1 << 24)
100 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
101 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
102 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
103 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
104 #define NDCB0_NC                (0x1 << 20)
105 #define NDCB0_DBC               (0x1 << 19)
106 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
107 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
108 #define NDCB0_CMD2_MASK         (0xff << 8)
109 #define NDCB0_CMD1_MASK         (0xff)
110 #define NDCB0_ADDR_CYC_SHIFT    (16)
111
112 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
113 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
114 #define EXT_CMD_TYPE_READ       4 /* Read */
115 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
116 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
117 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
118 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
119
120 /*
121  * This should be large enough to read 'ONFI' and 'JEDEC'.
122  * Let's use 7 bytes, which is the maximum ID count supported
123  * by the controller (see NDCR_RD_ID_CNT_MASK).
124  */
125 #define READ_ID_BYTES           7
126
127 /* macros for registers read/write */
128 #define nand_writel(info, off, val)     \
129         writel((val), (info)->mmio_base + (off))
130
131 #define nand_readl(info, off)           \
132         readl((info)->mmio_base + (off))
133
134 /* error code and state */
135 enum {
136         ERR_NONE        = 0,
137         ERR_DMABUSERR   = -1,
138         ERR_SENDCMD     = -2,
139         ERR_UNCORERR    = -3,
140         ERR_BBERR       = -4,
141         ERR_CORERR      = -5,
142 };
143
144 enum {
145         STATE_IDLE = 0,
146         STATE_PREPARED,
147         STATE_CMD_HANDLE,
148         STATE_DMA_READING,
149         STATE_DMA_WRITING,
150         STATE_DMA_DONE,
151         STATE_PIO_READING,
152         STATE_PIO_WRITING,
153         STATE_CMD_DONE,
154         STATE_READY,
155 };
156
157 enum pxa3xx_nand_variant {
158         PXA3XX_NAND_VARIANT_PXA,
159         PXA3XX_NAND_VARIANT_ARMADA370,
160 };
161
162 struct pxa3xx_nand_host {
163         struct nand_chip        chip;
164         void                    *info_data;
165
166         /* ECC usage and chip select of the attached chip */
167         int                     use_ecc;
168         int                     cs;
169
170         /* calculated from pxa3xx_nand_flash data */
171         unsigned int            col_addr_cycles;
172         unsigned int            row_addr_cycles;
173 };
174
175 struct pxa3xx_nand_info {
176         struct nand_hw_control  controller;
177         struct pxa3xx_nand_platform_data *pdata;
178
179         struct clk              *clk;
180         void __iomem            *mmio_base;
181         unsigned long           mmio_phys;
182         int                     cmd_complete, dev_ready;
183
184         unsigned int            buf_start;
185         unsigned int            buf_count;
186         unsigned int            buf_size;
187         unsigned int            data_buff_pos;
188         unsigned int            oob_buff_pos;
189
190         unsigned char           *data_buff;
191         unsigned char           *oob_buff;
192
193         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
194         unsigned int            state;
195
196         /*
197          * This driver supports NFCv1 (as found in PXA SoC)
198          * and NFCv2 (as found in Armada 370/XP SoC).
199          */
200         enum pxa3xx_nand_variant variant;
201
202         int                     cs;
203         int                     use_ecc;        /* use HW ECC ? */
204         int                     force_raw;      /* prevent use_ecc from being set */
205         int                     ecc_bch;        /* using BCH ECC? */
206         int                     use_spare;      /* use spare ? */
207         int                     need_wait;
208
209         /* Amount of real data per full chunk */
210         unsigned int            chunk_size;
211
212         /* Amount of spare data per full chunk */
213         unsigned int            spare_size;
214
215         /* Number of full chunks (i.e. chunks of chunk_size + spare_size bytes) */
216         unsigned int            nfullchunks;
217
218         /*
219          * Total number of chunks. If equal to nfullchunks, then there
220          * are only full chunks. Otherwise, there is one last chunk of
221          * size (last_chunk_size + last_spare_size)
222          */
223         unsigned int            ntotalchunks;
224
225         /* Amount of real data in the last chunk */
226         unsigned int            last_chunk_size;
227
228         /* Amount of spare data in the last chunk */
229         unsigned int            last_spare_size;
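        /*
         * For instance, with the 4 KiB page, 4-bit BCH layout used by this
         * driver, a page is transferred as two full chunks of 2048 data
         * bytes plus 32 spare bytes each, so nfullchunks == ntotalchunks
         * and no partial last chunk is needed. The exact values depend on
         * the ECC configuration selected at probe time.
         */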
230
231         unsigned int            ecc_size;
232         unsigned int            ecc_err_cnt;
233         unsigned int            max_bitflips;
234         int                     retcode;
235
236         /*
237          * Variables only valid during command
238          * execution. step_chunk_size and step_spare_size are the
239          * amounts of real data and spare data in the current
240          * chunk. cur_chunk is the current chunk being
241          * read/programmed.
242          */
243         unsigned int            step_chunk_size;
244         unsigned int            step_spare_size;
245         unsigned int            cur_chunk;
246
247         /* cached register value */
248         uint32_t                reg_ndcr;
249         uint32_t                ndtr0cs0;
250         uint32_t                ndtr1cs0;
251
252         /* generated NDCBx register values */
253         uint32_t                ndcb0;
254         uint32_t                ndcb1;
255         uint32_t                ndcb2;
256         uint32_t                ndcb3;
257 };
258
259 static struct pxa3xx_nand_timing timing[] = {
260         /*
261          * tCH  Enable signal hold time
262          * tCS  Enable signal setup time
263          * tWH  ND_nWE high duration
264          * tWP  ND_nWE pulse time
265          * tRH  ND_nRE high duration
266          * tRP  ND_nRE pulse width
267          * tR   ND_nWE high to ND_nRE low for read
268          * tWHR ND_nWE high to ND_nRE low for status read
269          * tAR  ND_ALE low to ND_nRE low delay
270          */
271         /*ch  cs  wh  wp   rh  rp   r      whr  ar */
272         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
273         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
274         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
275         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
276         {  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
277 };
278
279 static struct pxa3xx_nand_flash builtin_flash_types[] = {
280         /*
281          * chip_id
282          * flash_width  Width of Flash memory (DWIDTH_M)
283          * dfc_width    Width of flash controller (DWIDTH_C)
284          * *timing
285          * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
286          */
287         { 0x46ec, 16, 16, &timing[1] },
288         { 0xdaec,  8,  8, &timing[1] },
289         { 0xd7ec,  8,  8, &timing[1] },
290         { 0xa12c,  8,  8, &timing[2] },
291         { 0xb12c, 16, 16, &timing[2] },
292         { 0xdc2c,  8,  8, &timing[2] },
293         { 0xcc2c, 16, 16, &timing[2] },
294         { 0xba20, 16, 16, &timing[3] },
295         { 0xda98,  8,  8, &timing[4] },
296 };
297
298 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
299 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
300 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
301
302 static struct nand_bbt_descr bbt_main_descr = {
303         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
304                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
305         .offs = 8,
306         .len = 6,
307         .veroffs = 14,
308         .maxblocks = 8,         /* Last 8 blocks in each chip */
309         .pattern = bbt_pattern
310 };
311
312 static struct nand_bbt_descr bbt_mirror_descr = {
313         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
314                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
315         .offs = 8,
316         .len = 6,
317         .veroffs = 14,
318         .maxblocks = 8,         /* Last 8 blocks in each chip */
319         .pattern = bbt_mirror_pattern
320 };
321 #endif
322
323 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
324         .eccbytes = 32,
325         .eccpos = {
326                 32, 33, 34, 35, 36, 37, 38, 39,
327                 40, 41, 42, 43, 44, 45, 46, 47,
328                 48, 49, 50, 51, 52, 53, 54, 55,
329                 56, 57, 58, 59, 60, 61, 62, 63},
330         .oobfree = { {2, 30} }
331 };
332
333 static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
334         .eccbytes = 64,
335         .eccpos = {
336                 32, 33, 34, 35, 36, 37, 38, 39,
337                 40, 41, 42, 43, 44, 45, 46, 47,
338                 48, 49, 50, 51, 52, 53, 54, 55,
339                 56, 57, 58, 59, 60, 61, 62, 63,
340                 64, 65, 66, 67, 68, 69, 70, 71,
341                 72, 73, 74, 75, 76, 77, 78, 79,
342                 80, 81, 82, 83, 84, 85, 86, 87,
343                 88, 89, 90, 91, 92, 93, 94, 95},
344         .oobfree = { {1, 4}, {6, 26} }
345 };
346
347 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
348         .eccbytes = 64,
349         .eccpos = {
350                 32,  33,  34,  35,  36,  37,  38,  39,
351                 40,  41,  42,  43,  44,  45,  46,  47,
352                 48,  49,  50,  51,  52,  53,  54,  55,
353                 56,  57,  58,  59,  60,  61,  62,  63,
354                 96,  97,  98,  99,  100, 101, 102, 103,
355                 104, 105, 106, 107, 108, 109, 110, 111,
356                 112, 113, 114, 115, 116, 117, 118, 119,
357                 120, 121, 122, 123, 124, 125, 126, 127},
358         /* Bootrom looks in bytes 0 & 5 for bad blocks */
359         .oobfree = { {6, 26}, { 64, 32} }
360 };
361
362 static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
363         .eccbytes = 128,
364         .eccpos = {
365                 32,  33,  34,  35,  36,  37,  38,  39,
366                 40,  41,  42,  43,  44,  45,  46,  47,
367                 48,  49,  50,  51,  52,  53,  54,  55,
368                 56,  57,  58,  59,  60,  61,  62,  63,
369
370                 96,  97,  98,  99,  100, 101, 102, 103,
371                 104, 105, 106, 107, 108, 109, 110, 111,
372                 112, 113, 114, 115, 116, 117, 118, 119,
373                 120, 121, 122, 123, 124, 125, 126, 127,
374
375                 160, 161, 162, 163, 164, 165, 166, 167,
376                 168, 169, 170, 171, 172, 173, 174, 175,
377                 176, 177, 178, 179, 180, 181, 182, 183,
378                 184, 185, 186, 187, 188, 189, 190, 191,
379
380                 224, 225, 226, 227, 228, 229, 230, 231,
381                 232, 233, 234, 235, 236, 237, 238, 239,
382                 240, 241, 242, 243, 244, 245, 246, 247,
383                 248, 249, 250, 251, 252, 253, 254, 255},
384
385         /* Bootrom looks in bytes 0 & 5 for bad blocks */
386         .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
387 };
388
389 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
390         .eccbytes = 128,
391         .eccpos = {
392                 32,  33,  34,  35,  36,  37,  38,  39,
393                 40,  41,  42,  43,  44,  45,  46,  47,
394                 48,  49,  50,  51,  52,  53,  54,  55,
395                 56,  57,  58,  59,  60,  61,  62,  63},
396         .oobfree = { }
397 };
398
399 static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
400         .eccbytes = 256,
401         .eccpos = {},
402         /* HW ECC handles all ECC data and all spare area is free for OOB */
403         .oobfree = {{0, 160} }
404 };
405
406 #define NDTR0_tCH(c)    (min((c), 7) << 19)
407 #define NDTR0_tCS(c)    (min((c), 7) << 16)
408 #define NDTR0_tWH(c)    (min((c), 7) << 11)
409 #define NDTR0_tWP(c)    (min((c), 7) << 8)
410 #define NDTR0_tRH(c)    (min((c), 7) << 3)
411 #define NDTR0_tRP(c)    (min((c), 7) << 0)
412
413 #define NDTR1_tR(c)     (min((c), 65535) << 16)
414 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
415 #define NDTR1_tAR(c)    (min((c), 15) << 0)
416
417 /* convert nanoseconds to NAND flash controller clock cycles */
418 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
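/*
 * Example: with a (hypothetical) 250 MHz controller clock, a 25 ns
 * constraint maps to ns2cycle(25, 250000000) = (25 * 250) / 1000 = 6
 * cycles; note that the integer division truncates.
 */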
419
420 static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
421 {
422         /* We only support the Armada 370/XP/38x for now */
423         return PXA3XX_NAND_VARIANT_ARMADA370;
424 }
425
426 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
427                                    const struct pxa3xx_nand_timing *t)
428 {
429         struct pxa3xx_nand_info *info = host->info_data;
430         unsigned long nand_clk = mvebu_get_nand_clock();
431         uint32_t ndtr0, ndtr1;
432
433         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
434                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
435                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
436                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
437                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
438                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
439
440         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
441                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
442                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
443
444         info->ndtr0cs0 = ndtr0;
445         info->ndtr1cs0 = ndtr1;
446         nand_writel(info, NDTR0CS0, ndtr0);
447         nand_writel(info, NDTR1CS0, ndtr1);
448 }
449
450 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
451                                        const struct nand_sdr_timings *t)
452 {
453         struct pxa3xx_nand_info *info = host->info_data;
454         struct nand_chip *chip = &host->chip;
455         unsigned long nand_clk = mvebu_get_nand_clock();
456         uint32_t ndtr0, ndtr1;
457
458         u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
459         u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
460         u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
461         u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
462         u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
463         u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
464         u32 tR = chip->chip_delay * 1000;
465         u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
466         u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
467
468         /* fallback to a default value if tR = 0 */
469         if (!tR)
470                 tR = 20000;
471
472         ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
473                 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
474                 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
475                 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
476                 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
477                 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
478
479         ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
480                 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
481                 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
482
483         info->ndtr0cs0 = ndtr0;
484         info->ndtr1cs0 = ndtr1;
485         nand_writel(info, NDTR0CS0, ndtr0);
486         nand_writel(info, NDTR1CS0, ndtr1);
487 }
488
489 static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
490 {
491         const struct nand_sdr_timings *timings;
492         struct nand_chip *chip = &host->chip;
493         struct pxa3xx_nand_info *info = host->info_data;
494         const struct pxa3xx_nand_flash *f = NULL;
495         struct mtd_info *mtd = nand_to_mtd(&host->chip);
496         int mode, id, ntypes, i;
497
498         mode = onfi_get_async_timing_mode(chip);
499         if (mode == ONFI_TIMING_MODE_UNKNOWN) {
500                 ntypes = ARRAY_SIZE(builtin_flash_types);
501
502                 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
503
504                 id = chip->read_byte(mtd);
505                 id |= chip->read_byte(mtd) << 0x8;
506
507                 for (i = 0; i < ntypes; i++) {
508                         f = &builtin_flash_types[i];
509
510                         if (f->chip_id == id)
511                                 break;
512                 }
513
514                 if (i == ntypes) {
515                         dev_err(&info->pdev->dev, "Error: timings not found\n");
516                         return -EINVAL;
517                 }
518
519                 pxa3xx_nand_set_timing(host, f->timing);
520
521                 if (f->flash_width == 16) {
522                         info->reg_ndcr |= NDCR_DWIDTH_M;
523                         chip->options |= NAND_BUSWIDTH_16;
524                 }
525
526                 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
527         } else {
528                 mode = fls(mode) - 1;
529                 if (mode < 0)
530                         mode = 0;
531
532                 timings = onfi_async_timing_mode_to_sdr_timings(mode);
533                 if (IS_ERR(timings))
534                         return PTR_ERR(timings);
535
536                 pxa3xx_nand_set_sdr_timing(host, timings);
537         }
538
539         return 0;
540 }
541
542 /**
543  * NOTE: ND_RUN must be set first, then the command buffer
544  * written; otherwise the controller does not start.
545  * We enable all the interrupts at the same time and
546  * let pxa3xx_nand_irq() handle all the logic.
547  */
548 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
549 {
550         uint32_t ndcr;
551
552         ndcr = info->reg_ndcr;
553
554         if (info->use_ecc) {
555                 ndcr |= NDCR_ECC_EN;
556                 if (info->ecc_bch)
557                         nand_writel(info, NDECCCTRL, 0x1);
558         } else {
559                 ndcr &= ~NDCR_ECC_EN;
560                 if (info->ecc_bch)
561                         nand_writel(info, NDECCCTRL, 0x0);
562         }
563
564         ndcr &= ~NDCR_DMA_EN;
565
566         if (info->use_spare)
567                 ndcr |= NDCR_SPARE_EN;
568         else
569                 ndcr &= ~NDCR_SPARE_EN;
570
571         ndcr |= NDCR_ND_RUN;
572
573         /* clear status bits and run */
574         nand_writel(info, NDSR, NDSR_MASK);
575         nand_writel(info, NDCR, 0);
576         nand_writel(info, NDCR, ndcr);
577 }
578
579 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
580 {
581         uint32_t ndcr;
582
583         ndcr = nand_readl(info, NDCR);
584         nand_writel(info, NDCR, ndcr | int_mask);
585 }
586
587 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
588 {
589         if (info->ecc_bch && !info->force_raw) {
590                 u32 ts;
591
592                 /*
593                  * According to the datasheet, when reading from NDDB
594                  * with BCH enabled, after each 32-byte read, we
595                  * have to make sure that the NDSR.RDDREQ bit is set.
596                  *
597                  * Drain the FIFO eight 32-bit reads at a time, and skip
598                  * the polling on the last read.
599                  */
600                 while (len > 8) {
601                         readsl(info->mmio_base + NDDB, data, 8);
602
603                         ts = get_timer(0);
604                         while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
605                                 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
606                                         dev_err(&info->pdev->dev,
607                                                 "Timeout on RDDREQ while draining the FIFO\n");
608                                         return;
609                                 }
610                         }
611
612                         data += 32;
613                         len -= 8;
614                 }
615         }
616
617         readsl(info->mmio_base + NDDB, data, len);
618 }
619
620 static void handle_data_pio(struct pxa3xx_nand_info *info)
621 {
622         int data_len = info->step_chunk_size;
623
624         /*
625          * In raw mode, include the spare area and the ECC bytes that are not
626          * consumed by the controller in the data section. Do not reorganize
627          * here, do it in the ->read_page_raw() handler instead.
628          */
629         if (info->force_raw)
630                 data_len += info->step_spare_size + info->ecc_size;
631
632         switch (info->state) {
633         case STATE_PIO_WRITING:
634                 if (info->step_chunk_size)
635                         writesl(info->mmio_base + NDDB,
636                                 info->data_buff + info->data_buff_pos,
637                                 DIV_ROUND_UP(data_len, 4));
638
639                 if (info->step_spare_size)
640                         writesl(info->mmio_base + NDDB,
641                                 info->oob_buff + info->oob_buff_pos,
642                                 DIV_ROUND_UP(info->step_spare_size, 4));
643                 break;
644         case STATE_PIO_READING:
645                 if (data_len)
646                         drain_fifo(info,
647                                    info->data_buff + info->data_buff_pos,
648                                    DIV_ROUND_UP(data_len, 4));
649
650                 if (info->force_raw)
651                         break;
652
653                 if (info->step_spare_size)
654                         drain_fifo(info,
655                                    info->oob_buff + info->oob_buff_pos,
656                                    DIV_ROUND_UP(info->step_spare_size, 4));
657                 break;
658         default:
659                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
660                                 info->state);
661                 BUG();
662         }
663
664         /* Update buffer pointers for multi-page read/write */
665         info->data_buff_pos += data_len;
666         info->oob_buff_pos += info->step_spare_size;
667 }
668
669 static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
670 {
671         handle_data_pio(info);
672
673         info->state = STATE_CMD_DONE;
674         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
675 }
676
677 static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
678 {
679         unsigned int status, is_completed = 0, is_ready = 0;
680         unsigned int ready, cmd_done;
681         irqreturn_t ret = IRQ_HANDLED;
682
683         if (info->cs == 0) {
684                 ready           = NDSR_FLASH_RDY;
685                 cmd_done        = NDSR_CS0_CMDD;
686         } else {
687                 ready           = NDSR_RDY;
688                 cmd_done        = NDSR_CS1_CMDD;
689         }
690
691         /* TODO - find out why we need the delay during write operation. */
692         ndelay(1);
693
694         status = nand_readl(info, NDSR);
695
696         if (status & NDSR_UNCORERR)
697                 info->retcode = ERR_UNCORERR;
698         if (status & NDSR_CORERR) {
699                 info->retcode = ERR_CORERR;
700                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
701                     info->ecc_bch)
702                         info->ecc_err_cnt = NDSR_ERR_CNT(status);
703                 else
704                         info->ecc_err_cnt = 1;
705
706                 /*
707                  * Each chunk composing a page is corrected independently,
708                  * and we need to store the maximum number of corrected bitflips
709                  * to return it to the MTD layer in ecc.read_page().
710                  */
711                 info->max_bitflips = max_t(unsigned int,
712                                            info->max_bitflips,
713                                            info->ecc_err_cnt);
714         }
715         if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
716                 info->state = (status & NDSR_RDDREQ) ?
717                         STATE_PIO_READING : STATE_PIO_WRITING;
718                 /* Call the IRQ thread in U-Boot directly */
719                 pxa3xx_nand_irq_thread(info);
720                 return 0;
721         }
722         if (status & cmd_done) {
723                 info->state = STATE_CMD_DONE;
724                 is_completed = 1;
725         }
726         if (status & ready) {
727                 info->state = STATE_READY;
728                 is_ready = 1;
729         }
730
731         /*
732          * Clear all status bits before issuing the next command, which
733          * can and will alter the status bits and will raise a new
734          * interrupt of its own. This lets the controller exit the IRQ.
735          */
736         nand_writel(info, NDSR, status);
737
738         if (status & NDSR_WRCMDREQ) {
739                 status &= ~NDSR_WRCMDREQ;
740                 info->state = STATE_CMD_HANDLE;
741
742                 /*
743                  * Command buffer registers NDCB{0-2} (and optionally NDCB3)
744                  * must be loaded by writing either 12 or 16
745                  * bytes to NDCB0, four bytes at a time.
746                  *
747                  * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
748                  * but each NDCBx register can be read.
749                  */
750                 nand_writel(info, NDCB0, info->ndcb0);
751                 nand_writel(info, NDCB0, info->ndcb1);
752                 nand_writel(info, NDCB0, info->ndcb2);
753
754                 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
755                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
756                         nand_writel(info, NDCB0, info->ndcb3);
757         }
758
759         if (is_completed)
760                 info->cmd_complete = 1;
761         if (is_ready)
762                 info->dev_ready = 1;
763
764         return ret;
765 }
766
767 static inline int is_buf_blank(uint8_t *buf, size_t len)
768 {
769         for (; len > 0; len--)
770                 if (*buf++ != 0xff)
771                         return 0;
772         return 1;
773 }
774
775 static void set_command_address(struct pxa3xx_nand_info *info,
776                 unsigned int page_size, uint16_t column, int page_addr)
777 {
778         /* small page addr setting */
779         if (page_size < info->chunk_size) {
780                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
781                                 | (column & 0xFF);
782
783                 info->ndcb2 = 0;
784         } else {
785                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
786                                 | (column & 0xFFFF);
787
788                 if (page_addr & 0xFF0000)
789                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
790                 else
791                         info->ndcb2 = 0;
792         }
793 }
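/*
 * Example of the large-page encoding above: accessing column 0 of page
 * 0x12345 yields ndcb1 = 0x23450000 and ndcb2 = 0x01 (the third row
 * address byte).
 */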
794
795 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
796 {
797         struct pxa3xx_nand_host *host = info->host[info->cs];
798         struct mtd_info *mtd = nand_to_mtd(&host->chip);
799
800         /* reset data and OOB column pointers before handling data */
801         info->buf_start         = 0;
802         info->buf_count         = 0;
803         info->data_buff_pos     = 0;
804         info->oob_buff_pos      = 0;
805         info->step_chunk_size   = 0;
806         info->step_spare_size   = 0;
807         info->cur_chunk         = 0;
808         info->use_ecc           = 0;
809         info->use_spare         = 1;
810         info->retcode           = ERR_NONE;
811         info->ecc_err_cnt       = 0;
812         info->ndcb3             = 0;
813         info->need_wait         = 0;
814
815         switch (command) {
816         case NAND_CMD_READ0:
817         case NAND_CMD_READOOB:
818         case NAND_CMD_PAGEPROG:
819                 if (!info->force_raw)
820                         info->use_ecc = 1;
821                 break;
822         case NAND_CMD_PARAM:
823                 info->use_spare = 0;
824                 break;
825         default:
826                 info->ndcb1 = 0;
827                 info->ndcb2 = 0;
828                 break;
829         }
830
831         /*
832          * If we are about to issue a read command, or about to set
833          * the write address, then clean the data buffer.
834          */
835         if (command == NAND_CMD_READ0 ||
836             command == NAND_CMD_READOOB ||
837             command == NAND_CMD_SEQIN) {
838                 info->buf_count = mtd->writesize + mtd->oobsize;
839                 memset(info->data_buff, 0xFF, info->buf_count);
840         }
841 }
842
843 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
844                 int ext_cmd_type, uint16_t column, int page_addr)
845 {
846         int addr_cycle, exec_cmd;
847         struct pxa3xx_nand_host *host;
848         struct mtd_info *mtd;
849
850         host = info->host[info->cs];
851         mtd = nand_to_mtd(&host->chip);
852         addr_cycle = 0;
853         exec_cmd = 1;
854
855         if (info->cs != 0)
856                 info->ndcb0 = NDCB0_CSEL;
857         else
858                 info->ndcb0 = 0;
859
860         if (command == NAND_CMD_SEQIN)
861                 exec_cmd = 0;
862
863         addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
864                                     + host->col_addr_cycles);
865
866         switch (command) {
867         case NAND_CMD_READOOB:
868         case NAND_CMD_READ0:
869                 info->buf_start = column;
870                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
871                                 | addr_cycle
872                                 | NAND_CMD_READ0;
873
874                 if (command == NAND_CMD_READOOB)
875                         info->buf_start += mtd->writesize;
876
877                 if (info->cur_chunk < info->nfullchunks) {
878                         info->step_chunk_size = info->chunk_size;
879                         info->step_spare_size = info->spare_size;
880                 } else {
881                         info->step_chunk_size = info->last_chunk_size;
882                         info->step_spare_size = info->last_spare_size;
883                 }
884
885                 /*
886                  * Multiple page read needs an 'extended command type' field,
887                  * which is either naked-read or last-read according to the
888                  * state.
889                  */
890                 if (info->force_raw) {
891                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
892                                        NDCB0_LEN_OVRD |
893                                        NDCB0_EXT_CMD_TYPE(ext_cmd_type);
894                         info->ndcb3 = info->step_chunk_size +
895                                       info->step_spare_size + info->ecc_size;
896                 } else if (mtd->writesize == info->chunk_size) {
897                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
898                 } else if (mtd->writesize > info->chunk_size) {
899                         info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
900                                         | NDCB0_LEN_OVRD
901                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
902                         info->ndcb3 = info->step_chunk_size +
903                                 info->step_spare_size;
904                 }
905
906                 set_command_address(info, mtd->writesize, column, page_addr);
907                 break;
908
909         case NAND_CMD_SEQIN:
910
911                 info->buf_start = column;
912                 set_command_address(info, mtd->writesize, 0, page_addr);
913
914                 /*
915                  * Multiple page programming needs to execute the initial
916                  * SEQIN command that sets the page address.
917                  */
918                 if (mtd->writesize > info->chunk_size) {
919                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
920                                 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
921                                 | addr_cycle
922                                 | command;
923                         exec_cmd = 1;
924                 }
925                 break;
926
927         case NAND_CMD_PAGEPROG:
928                 if (is_buf_blank(info->data_buff,
929                                  (mtd->writesize + mtd->oobsize))) {
930                         exec_cmd = 0;
931                         break;
932                 }
933
934                 if (info->cur_chunk < info->nfullchunks) {
935                         info->step_chunk_size = info->chunk_size;
936                         info->step_spare_size = info->spare_size;
937                 } else {
938                         info->step_chunk_size = info->last_chunk_size;
939                         info->step_spare_size = info->last_spare_size;
940                 }
941
942                 /* Second command setting for large pages */
943                 if (mtd->writesize > info->chunk_size) {
944                         /*
945                          * Multiple page write uses the 'extended command'
946                          * field. This can be used to issue a command dispatch
947                          * or a naked-write depending on the current stage.
948                          */
949                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
950                                         | NDCB0_LEN_OVRD
951                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
952                         info->ndcb3 = info->step_chunk_size +
953                                       info->step_spare_size;
954
955                         /*
956                          * This is the command dispatch that completes a chunked
957                          * page program operation.
958                          */
959                         if (info->cur_chunk == info->ntotalchunks) {
960                                 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
961                                         | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
962                                         | command;
963                                 info->ndcb1 = 0;
964                                 info->ndcb2 = 0;
965                                 info->ndcb3 = 0;
966                         }
967                 } else {
968                         info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
969                                         | NDCB0_AUTO_RS
970                                         | NDCB0_ST_ROW_EN
971                                         | NDCB0_DBC
972                                         | (NAND_CMD_PAGEPROG << 8)
973                                         | NAND_CMD_SEQIN
974                                         | addr_cycle;
975                 }
976                 break;
977
978         case NAND_CMD_PARAM:
979                 info->buf_count = INIT_BUFFER_SIZE;
980                 info->ndcb0 |= NDCB0_CMD_TYPE(0)
981                                 | NDCB0_ADDR_CYC(1)
982                                 | NDCB0_LEN_OVRD
983                                 | command;
984                 info->ndcb1 = (column & 0xFF);
985                 info->ndcb3 = INIT_BUFFER_SIZE;
986                 info->step_chunk_size = INIT_BUFFER_SIZE;
987                 break;
988
989         case NAND_CMD_READID:
990                 info->buf_count = READ_ID_BYTES;
991                 info->ndcb0 |= NDCB0_CMD_TYPE(3)
992                                 | NDCB0_ADDR_CYC(1)
993                                 | command;
994                 info->ndcb1 = (column & 0xFF);
995
996                 info->step_chunk_size = 8;
997                 break;
998         case NAND_CMD_STATUS:
999                 info->buf_count = 1;
1000                 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1001                                 | NDCB0_ADDR_CYC(1)
1002                                 | command;
1003
1004                 info->step_chunk_size = 8;
1005                 break;
1006
1007         case NAND_CMD_ERASE1:
1008                 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1009                                 | NDCB0_AUTO_RS
1010                                 | NDCB0_ADDR_CYC(3)
1011                                 | NDCB0_DBC
1012                                 | (NAND_CMD_ERASE2 << 8)
1013                                 | NAND_CMD_ERASE1;
1014                 info->ndcb1 = page_addr;
1015                 info->ndcb2 = 0;
1016
1017                 break;
1018         case NAND_CMD_RESET:
1019                 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1020                                 | command;
1021
1022                 break;
1023
1024         case NAND_CMD_ERASE2:
1025                 exec_cmd = 0;
1026                 break;
1027
1028         default:
1029                 exec_cmd = 0;
1030                 dev_err(&info->pdev->dev, "non-supported command %x\n",
1031                         command);
1032                 break;
1033         }
1034
1035         return exec_cmd;
1036 }
1037
1038 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1039                          int column, int page_addr)
1040 {
1041         struct nand_chip *chip = mtd_to_nand(mtd);
1042         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1043         struct pxa3xx_nand_info *info = host->info_data;
1044         int exec_cmd;
1045
1046         /*
1047          * if this is a x16 device, then convert the input
1048          * "byte" address into a "word" address appropriate
1049          * for indexing a word-oriented device
1050          */
1051         if (info->reg_ndcr & NDCR_DWIDTH_M)
1052                 column /= 2;
1053
1054         /*
1055          * Different NAND chips may be hooked to different
1056          * chip selects, so check whether the chip select has
1057          * changed; if so, reload the timing registers.
1058          */
1059         if (info->cs != host->cs) {
1060                 info->cs = host->cs;
1061                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1062                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1063         }
1064
1065         prepare_start_command(info, command);
1066
1067         info->state = STATE_PREPARED;
1068         exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1069
1070         if (exec_cmd) {
1071                 u32 ts;
1072
1073                 info->cmd_complete = 0;
1074                 info->dev_ready = 0;
1075                 info->need_wait = 1;
1076                 pxa3xx_nand_start(info);
1077
1078                 ts = get_timer(0);
1079                 while (1) {
1080                         u32 status;
1081
1082                         status = nand_readl(info, NDSR);
1083                         if (status)
1084                                 pxa3xx_nand_irq(info);
1085
1086                         if (info->cmd_complete)
1087                                 break;
1088
1089                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1090                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1091                                 return;
1092                         }
1093                 }
1094         }
1095         info->state = STATE_IDLE;
1096 }
1097
1098 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1099                                   const unsigned command,
1100                                   int column, int page_addr)
1101 {
1102         struct nand_chip *chip = mtd_to_nand(mtd);
1103         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1104         struct pxa3xx_nand_info *info = host->info_data;
1105         int exec_cmd, ext_cmd_type;
1106
1107         /*
1108          * if this is a x16 device then convert the input
1109          * "byte" address into a "word" address appropriate
1110          * for indexing a word-oriented device
1111          */
1112         if (info->reg_ndcr & NDCR_DWIDTH_M)
1113                 column /= 2;
1114
1115         /*
1116          * Different NAND chips may be hooked to different
1117          * chip selects, so check whether the chip select has
1118          * changed; if so, reload the timing registers.
1119          */
1120         if (info->cs != host->cs) {
1121                 info->cs = host->cs;
1122                 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1123                 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1124         }
1125
1126         /* Select the extended command for the first command */
1127         switch (command) {
1128         case NAND_CMD_READ0:
1129         case NAND_CMD_READOOB:
1130                 ext_cmd_type = EXT_CMD_TYPE_MONO;
1131                 break;
1132         case NAND_CMD_SEQIN:
1133                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1134                 break;
1135         case NAND_CMD_PAGEPROG:
1136                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1137                 break;
1138         default:
1139                 ext_cmd_type = 0;
1140                 break;
1141         }
1142
1143         prepare_start_command(info, command);
1144
1145         /*
1146          * Prepare the "is ready" completion before starting a command
1147          * transaction sequence. If the command is not executed the
1148          * completion is marked as done right away, see below.
1149          *
1150          * We can do that inside the loop because the command variable
1151          * is invariant and thus so is the exec_cmd.
1152          */
1153         info->need_wait = 1;
1154         info->dev_ready = 0;
1155
1156         do {
1157                 u32 ts;
1158
1159                 info->state = STATE_PREPARED;
1160                 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1161                                                column, page_addr);
1162                 if (!exec_cmd) {
1163                         info->need_wait = 0;
1164                         info->dev_ready = 1;
1165                         break;
1166                 }
1167
1168                 info->cmd_complete = 0;
1169                 pxa3xx_nand_start(info);
1170
1171                 ts = get_timer(0);
1172                 while (1) {
1173                         u32 status;
1174
1175                         status = nand_readl(info, NDSR);
1176                         if (status)
1177                                 pxa3xx_nand_irq(info);
1178
1179                         if (info->cmd_complete)
1180                                 break;
1181
1182                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1183                                 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1184                                 return;
1185                         }
1186                 }
1187
1188                 /* Only a few commands need several steps */
1189                 if (command != NAND_CMD_PAGEPROG &&
1190                     command != NAND_CMD_READ0    &&
1191                     command != NAND_CMD_READOOB)
1192                         break;
1193
1194                 info->cur_chunk++;
1195
1196                 /* Check if the sequence is complete */
1197                 if (info->cur_chunk == info->ntotalchunks &&
1198                     command != NAND_CMD_PAGEPROG)
1199                         break;
1200
1201                 /*
1202                  * After a split program command sequence has issued
1203                  * the command dispatch, the command sequence is complete.
1204                  */
1205                 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1206                     command == NAND_CMD_PAGEPROG &&
1207                     ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1208                         break;
1209
1210                 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1211                         /* Last read: issue a 'last naked read' */
1212                         if (info->cur_chunk == info->ntotalchunks - 1)
1213                                 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1214                         else
1215                                 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1216
1217                 /*
1218                  * If a split program command has no more data to transfer,
1219                  * the command dispatch must be issued to complete it.
1220                  */
1221                 } else if (command == NAND_CMD_PAGEPROG &&
1222                            info->cur_chunk == info->ntotalchunks) {
1223                                 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1224                 }
1225         } while (1);
1226
1227         info->state = STATE_IDLE;
1228 }
1229
1230 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1231                 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1232                 int page)
1233 {
1234         chip->write_buf(mtd, buf, mtd->writesize);
1235         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1236
1237         return 0;
1238 }
1239
1240 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1241                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1242                 int page)
1243 {
1244         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1245         struct pxa3xx_nand_info *info = host->info_data;
1246         int bf;
1247
1248         chip->read_buf(mtd, buf, mtd->writesize);
1249         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1250
1251         if (info->retcode == ERR_CORERR && info->use_ecc) {
1252                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1253
1254         } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
1255                 /*
1256                  * Empty pages will trigger uncorrectable errors. Re-read the
1257                  * entire page in raw mode and check for bits not being "1".
1258                  * If there are more bitflips than the ECC strength can
1259                  * correct, then this is an actual uncorrectable error.
1260                  */
1261                 chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
1262                 bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
1263                                                  chip->oob_poi, mtd->oobsize,
1264                                                  NULL, 0, chip->ecc.strength);
1265                 if (bf < 0) {
1266                         mtd->ecc_stats.failed++;
1267                 } else if (bf) {
1268                         mtd->ecc_stats.corrected += bf;
1269                         info->max_bitflips = max_t(unsigned int,
1270                                                    info->max_bitflips, bf);
1271                         info->retcode = ERR_CORERR;
1272                 } else {
1273                         info->retcode = ERR_NONE;
1274                 }
1275
1276         } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
1277                 /* Raw read is not supported with Hamming ECC engine */
1278                 if (is_buf_blank(buf, mtd->writesize))
1279                         info->retcode = ERR_NONE;
1280                 else
1281                         mtd->ecc_stats.failed++;
1282         }
1283
1284         return info->max_bitflips;
1285 }
1286
1287 static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1288                                      struct nand_chip *chip, uint8_t *buf,
1289                                      int oob_required, int page)
1290 {
1291         struct pxa3xx_nand_host *host = chip->priv;
1292         struct pxa3xx_nand_info *info = host->info_data;
1293         int chunk, ecc_off_buf;
1294
1295         if (!info->ecc_bch)
1296                 return -ENOTSUPP;
1297
1298         /*
1299          * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1300          * pxa3xx_nand_start(), which will actually disable the ECC engine.
1301          */
1302         info->force_raw = true;
1303         chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1304
1305         ecc_off_buf = (info->nfullchunks * info->spare_size) +
1306                       info->last_spare_size;
1307         for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1308                 chip->read_buf(mtd,
1309                                buf + (chunk * info->chunk_size),
1310                                info->chunk_size);
1311                 chip->read_buf(mtd,
1312                                chip->oob_poi +
1313                                (chunk * (info->spare_size)),
1314                                info->spare_size);
1315                 chip->read_buf(mtd,
1316                                chip->oob_poi + ecc_off_buf +
1317                                (chunk * (info->ecc_size)),
1318                                info->ecc_size - 2);
1319         }
1320
1321         if (info->ntotalchunks > info->nfullchunks) {
1322                 chip->read_buf(mtd,
1323                                buf + (info->nfullchunks * info->chunk_size),
1324                                info->last_chunk_size);
1325                 chip->read_buf(mtd,
1326                                chip->oob_poi +
1327                                (info->nfullchunks * (info->spare_size)),
1328                                info->last_spare_size);
1329                 chip->read_buf(mtd,
1330                                chip->oob_poi + ecc_off_buf +
1331                                (info->nfullchunks * (info->ecc_size)),
1332                                info->ecc_size - 2);
1333         }
1334
1335         info->force_raw = false;
1336
1337         return 0;
1338 }
1339
1340 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1341                                     struct nand_chip *chip, int page)
1342 {
1343         /* Invalidate page cache */
1344         chip->pagebuf = -1;
1345
1346         return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1347                                        page);
1348 }
1349
1350 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1351 {
1352         struct nand_chip *chip = mtd_to_nand(mtd);
1353         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1354         struct pxa3xx_nand_info *info = host->info_data;
1355         char retval = 0xFF;
1356
1357         if (info->buf_start < info->buf_count)
1358                 /* Has a new command just been sent? */
1359                 retval = info->data_buff[info->buf_start++];
1360
1361         return retval;
1362 }
1363
1364 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1365 {
1366         struct nand_chip *chip = mtd_to_nand(mtd);
1367         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1368         struct pxa3xx_nand_info *info = host->info_data;
1369         u16 retval = 0xFFFF;
1370
1371         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1372                 retval = *((u16 *)(info->data_buff+info->buf_start));
1373                 info->buf_start += 2;
1374         }
1375         return retval;
1376 }
1377
1378 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1379 {
1380         struct nand_chip *chip = mtd_to_nand(mtd);
1381         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1382         struct pxa3xx_nand_info *info = host->info_data;
1383         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1384
1385         memcpy(buf, info->data_buff + info->buf_start, real_len);
1386         info->buf_start += real_len;
1387 }
1388
1389 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1390                 const uint8_t *buf, int len)
1391 {
1392         struct nand_chip *chip = mtd_to_nand(mtd);
1393         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1394         struct pxa3xx_nand_info *info = host->info_data;
1395         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1396
1397         memcpy(info->data_buff + info->buf_start, buf, real_len);
1398         info->buf_start += real_len;
1399 }
1400
1401 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1402 {
1403         return;
1404 }
1405
1406 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1407 {
1408         struct nand_chip *chip = mtd_to_nand(mtd);
1409         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1410         struct pxa3xx_nand_info *info = host->info_data;
1411
1412         if (info->need_wait) {
1413                 u32 ts;
1414
1415                 info->need_wait = 0;
1416
1417                 ts = get_timer(0);
1418                 while (1) {
1419                         u32 status;
1420
1421                         status = nand_readl(info, NDSR);
1422                         if (status)
1423                                 pxa3xx_nand_irq(info);
1424
1425                         if (info->dev_ready)
1426                                 break;
1427
1428                         if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1429                                 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1430                                 return NAND_STATUS_FAIL;
1431                         }
1432                 }
1433         }
1434
1435         /* pxa3xx_nand_send_command has waited for command complete */
1436         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1437                 if (info->retcode == ERR_NONE)
1438                         return 0;
1439                 else
1440                         return NAND_STATUS_FAIL;
1441         }
1442
1443         return NAND_STATUS_READY;
1444 }
1445
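/*
 * Build the default NDCR value used while identifying the flash: all
 * interrupts enabled, the bus arbiter set per platform data, READ_ID_BYTES
 * ID bytes and the spare area enabled.
 */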
1446 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1447 {
1448         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1449
1450         /* Configure default flash values */
1451         info->reg_ndcr = 0x0; /* enable all interrupts */
1452         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1453         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1454         info->reg_ndcr |= NDCR_SPARE_EN;
1455
1456         return 0;
1457 }
1458
1459 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1460 {
1461         struct pxa3xx_nand_host *host = info->host[info->cs];
1462         struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1463         struct nand_chip *chip = mtd_to_nand(mtd);
1464
1465         info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1466         info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1467         info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1468 }
1469
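/*
 * 'nand-keep-config' path: instead of reprogramming the controller, read
 * back the NDCR and timing registers already set up (presumably by an
 * earlier boot stage) and derive an initial chunk size from the page-size
 * bit. The interrupt mask is cleared and the arbiter bit is taken from
 * platform data.
 */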
1470 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1471 {
1472         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1473         uint32_t ndcr = nand_readl(info, NDCR);
1474
1475         /* Set an initial chunk size */
1476         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1477         info->reg_ndcr = ndcr &
1478                 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1479         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1480         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1481         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1482 }
1483
1484 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1485 {
1486         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1487         if (info->data_buff == NULL)
1488                 return -ENOMEM;
1489         return 0;
1490 }
1491
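/*
 * Minimal probe for a chip on the current chip select: program the default
 * NDCR, apply the conservative ONFI timing mode 0 and issue a RESET. If the
 * chip does not report success, assume nothing is connected on this CS.
 */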
1492 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1493 {
1494         struct pxa3xx_nand_info *info = host->info_data;
1495         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1496         struct mtd_info *mtd;
1497         struct nand_chip *chip;
1498         const struct nand_sdr_timings *timings;
1499         int ret;
1500
1501         mtd = nand_to_mtd(&info->host[info->cs]->chip);
1502         chip = mtd_to_nand(mtd);
1503
1504         /* configure default flash values */
1505         info->reg_ndcr = 0x0; /* enable all interrupts */
1506         info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1507         info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1508         info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1509
1510         /* try the conservative ONFI timing mode 0 first */
1511         timings = onfi_async_timing_mode_to_sdr_timings(0);
1512         if (IS_ERR(timings))
1513                 return PTR_ERR(timings);
1514
1515         pxa3xx_nand_set_sdr_timing(host, timings);
1516
1517         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1518         ret = chip->waitfunc(mtd, chip);
1519         if (ret & NAND_STATUS_FAIL)
1520                 return -ENODEV;
1521
1522         return 0;
1523 }
1524
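/*
 * Pick the controller chunk geometry and ECC layout for the requested
 * strength/step/page-size combination. The controller works on fixed-size
 * chunks (data + spare + ECC); nfullchunks/ntotalchunks and the *_size
 * fields describe how a page is split, including a possibly smaller last
 * chunk for the BCH-8 layouts. Unsupported combinations are rejected with
 * -ENODEV.
 */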
1525 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1526                         struct nand_ecc_ctrl *ecc,
1527                         int strength, int ecc_stepsize, int page_size)
1528 {
1529         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1530                 info->nfullchunks = 1;
1531                 info->ntotalchunks = 1;
1532                 info->chunk_size = 2048;
1533                 info->spare_size = 40;
1534                 info->ecc_size = 24;
1535                 ecc->mode = NAND_ECC_HW;
1536                 ecc->size = 512;
1537                 ecc->strength = 1;
1538
1539         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1540                 info->nfullchunks = 1;
1541                 info->ntotalchunks = 1;
1542                 info->chunk_size = 512;
1543                 info->spare_size = 8;
1544                 info->ecc_size = 8;
1545                 ecc->mode = NAND_ECC_HW;
1546                 ecc->size = 512;
1547                 ecc->strength = 1;
1548
1549         /*
1550          * Required ECC: 4-bit correction per 512 bytes
1551          * Select: 16-bit correction per 2048 bytes
1552          */
1553         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1554                 info->ecc_bch = 1;
1555                 info->nfullchunks = 1;
1556                 info->ntotalchunks = 1;
1557                 info->chunk_size = 2048;
1558                 info->spare_size = 32;
1559                 info->ecc_size = 32;
1560                 ecc->mode = NAND_ECC_HW;
1561                 ecc->size = info->chunk_size;
1562                 ecc->layout = &ecc_layout_2KB_bch4bit;
1563                 ecc->strength = 16;
1564
1565         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1566                 info->ecc_bch = 1;
1567                 info->nfullchunks = 2;
1568                 info->ntotalchunks = 2;
1569                 info->chunk_size = 2048;
1570                 info->spare_size = 32;
1571                 info->ecc_size = 32;
1572                 ecc->mode = NAND_ECC_HW;
1573                 ecc->size = info->chunk_size;
1574                 ecc->layout = &ecc_layout_4KB_bch4bit;
1575                 ecc->strength = 16;
1576
1577         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1578                 info->ecc_bch = 1;
1579                 info->nfullchunks = 4;
1580                 info->ntotalchunks = 4;
1581                 info->chunk_size = 2048;
1582                 info->spare_size = 32;
1583                 info->ecc_size = 32;
1584                 ecc->mode = NAND_ECC_HW;
1585                 ecc->size = info->chunk_size;
1586                 ecc->layout = &ecc_layout_8KB_bch4bit;
1587                 ecc->strength = 16;
1588
1589         /*
1590          * Required ECC: 8-bit correction per 512 bytes
1591          * Select: 16-bit correction per 1024 bytes
1592          */
1593         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1594                 info->ecc_bch = 1;
1595                 info->nfullchunks = 1;
1596                 info->ntotalchunks = 2;
1597                 info->chunk_size = 1024;
1598                 info->spare_size = 0;
1599                 info->last_chunk_size = 1024;
1600                 info->last_spare_size = 32;
1601                 info->ecc_size = 32;
1602                 ecc->mode = NAND_ECC_HW;
1603                 ecc->size = info->chunk_size;
1604                 ecc->layout = &ecc_layout_2KB_bch8bit;
1605                 ecc->strength = 16;
1606
1607         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1608                 info->ecc_bch = 1;
1609                 info->nfullchunks = 4;
1610                 info->ntotalchunks = 5;
1611                 info->chunk_size = 1024;
1612                 info->spare_size = 0;
1613                 info->last_chunk_size = 0;
1614                 info->last_spare_size = 64;
1615                 info->ecc_size = 32;
1616                 ecc->mode = NAND_ECC_HW;
1617                 ecc->size = info->chunk_size;
1618                 ecc->layout = &ecc_layout_4KB_bch8bit;
1619                 ecc->strength = 16;
1620
1621         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
1622                 info->ecc_bch = 1;
1623                 info->nfullchunks = 8;
1624                 info->ntotalchunks = 9;
1625                 info->chunk_size = 1024;
1626                 info->spare_size = 0;
1627                 info->last_chunk_size = 0;
1628                 info->last_spare_size = 160;
1629                 info->ecc_size = 32;
1630                 ecc->mode = NAND_ECC_HW;
1631                 ecc->size = info->chunk_size;
1632                 ecc->layout = &ecc_layout_8KB_bch8bit;
1633                 ecc->strength = 16;
1634
1635         } else {
1636                 dev_err(&info->pdev->dev,
1637                         "ECC strength %d at page size %d is not supported\n",
1638                         strength, page_size);
1639                 return -ENODEV;
1640         }
1641
1642         return 0;
1643 }
1644
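/*
 * Bring up the chip on the current chip select: either keep the existing
 * controller configuration or sense the chip with safe defaults, run
 * nand_scan_ident() (with the ECC engine disabled on the Armada 370
 * variant), program the detected timings unless keep-config is set, pick an
 * ECC scheme from platform data or the chip's reported requirements, switch
 * to the extended command handler when a page is larger than one chunk,
 * resize the data/OOB buffer and finish with nand_scan_tail().
 */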
1645 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1646 {
1647         struct nand_chip *chip = mtd_to_nand(mtd);
1648         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1649         struct pxa3xx_nand_info *info = host->info_data;
1650         struct pxa3xx_nand_platform_data *pdata = info->pdata;
1651         int ret;
1652         uint16_t ecc_strength, ecc_step;
1653
1654         if (pdata->keep_config) {
1655                 pxa3xx_nand_detect_config(info);
1656         } else {
1657                 ret = pxa3xx_nand_config_ident(info);
1658                 if (ret)
1659                         return ret;
1660                 ret = pxa3xx_nand_sensing(host);
1661                 if (ret) {
1662                         dev_info(&info->pdev->dev,
1663                                  "There is no chip on cs %d!\n",
1664                                  info->cs);
1665                         return ret;
1666                 }
1667         }
1668
1669         /* Device detection must be done with ECC disabled */
1670         if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1671                 nand_writel(info, NDECCCTRL, 0x0);
1672
1673         if (nand_scan_ident(mtd, 1, NULL))
1674                 return -ENODEV;
1675
1676         if (!pdata->keep_config) {
1677                 ret = pxa3xx_nand_init_timings(host);
1678                 if (ret) {
1679                         dev_err(&info->pdev->dev,
1680                                 "Failed to set timings: %d\n", ret);
1681                         return ret;
1682                 }
1683         }
1684
1685 #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1686         /*
1687          * We'll use a bad block table stored in-flash and don't
1688          * allow writing the bad block marker to the flash.
1689          */
1690         chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1691         chip->bbt_td = &bbt_main_descr;
1692         chip->bbt_md = &bbt_mirror_descr;
1693 #endif
1694
1695         if (pdata->ecc_strength && pdata->ecc_step_size) {
1696                 ecc_strength = pdata->ecc_strength;
1697                 ecc_step = pdata->ecc_step_size;
1698         } else {
1699                 ecc_strength = chip->ecc_strength_ds;
1700                 ecc_step = chip->ecc_step_ds;
1701         }
1702
1703         /* Set default ECC strength requirements on non-ONFI devices */
1704         if (ecc_strength < 1 && ecc_step < 1) {
1705                 ecc_strength = 1;
1706                 ecc_step = 512;
1707         }
1708
1709         ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1710                            ecc_step, mtd->writesize);
1711         if (ret)
1712                 return ret;
1713
1714         /*
1715          * If the page size is bigger than the FIFO size, check that we
1716          * are given the right variant and then switch to the extended
1717          * (aka split) command handling.
1718          */
1719         if (mtd->writesize > info->chunk_size) {
1720                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1721                         chip->cmdfunc = nand_cmdfunc_extended;
1722                 } else {
1723                         dev_err(&info->pdev->dev,
1724                                 "unsupported page size on this variant\n");
1725                         return -ENODEV;
1726                 }
1727         }
1728
1729         /* calculate addressing information */
1730         if (mtd->writesize >= 2048)
1731                 host->col_addr_cycles = 2;
1732         else
1733                 host->col_addr_cycles = 1;
1734
1735         /* release the initial buffer */
1736         kfree(info->data_buff);
1737
1738         /* allocate the real data + oob buffer */
1739         info->buf_size = mtd->writesize + mtd->oobsize;
1740         ret = pxa3xx_nand_init_buff(info);
1741         if (ret)
1742                 return ret;
1743         info->oob_buff = info->data_buff + mtd->writesize;
1744
1745         if ((mtd->size >> chip->page_shift) > 65536)
1746                 host->row_addr_cycles = 3;
1747         else
1748                 host->row_addr_cycles = 2;
1749
1750         if (!pdata->keep_config)
1751                 pxa3xx_nand_config_tail(info);
1752
1753         return nand_scan_tail(mtd);
1754 }
1755
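/*
 * Wire up the nand_chip callbacks for every chip select and allocate the
 * small INIT_BUFFER_SIZE buffer used for the identification commands. The
 * per-CS host structures are carved out of the memory allocated right after
 * struct pxa3xx_nand_info in board_nand_init(), which relies on the
 * nand_chip being the first member of struct pxa3xx_nand_host.
 */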
1756 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1757 {
1758         struct pxa3xx_nand_platform_data *pdata;
1759         struct pxa3xx_nand_host *host;
1760         struct nand_chip *chip = NULL;
1761         struct mtd_info *mtd;
1762         int ret, cs;
1763
1764         pdata = info->pdata;
1765         if (pdata->num_cs <= 0)
1766                 return -ENODEV;
1767
1768         info->variant = pxa3xx_nand_get_variant();
1769         for (cs = 0; cs < pdata->num_cs; cs++) {
1770                 chip = (struct nand_chip *)
1771                         ((u8 *)&info[1] + sizeof(*host) * cs);
1772                 mtd = nand_to_mtd(chip);
1773                 host = (struct pxa3xx_nand_host *)chip;
1774                 info->host[cs] = host;
1775                 host->cs = cs;
1776                 host->info_data = info;
1777                 mtd->owner = THIS_MODULE;
1778
1779                 nand_set_controller_data(chip, host);
1780                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1781                 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1782                 chip->ecc.read_oob_raw  = pxa3xx_nand_read_oob_raw;
1783                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1784                 chip->controller        = &info->controller;
1785                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1786                 chip->select_chip       = pxa3xx_nand_select_chip;
1787                 chip->read_word         = pxa3xx_nand_read_word;
1788                 chip->read_byte         = pxa3xx_nand_read_byte;
1789                 chip->read_buf          = pxa3xx_nand_read_buf;
1790                 chip->write_buf         = pxa3xx_nand_write_buf;
1791                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1792                 chip->cmdfunc           = nand_cmdfunc;
1793         }
1794
1795         /* Allocate a buffer to allow flash detection */
1796         info->buf_size = INIT_BUFFER_SIZE;
1797         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1798         if (info->data_buff == NULL) {
1799                 ret = -ENOMEM;
1800                 goto fail_disable_clk;
1801         }
1802
1803         /* initialize all interrupts to be disabled */
1804         disable_int(info, NDSR_MASK);
1805
1806         return 0;
1807
1809 fail_disable_clk:
1810         return ret;
1811 }
1812
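/*
 * Fill platform data from the device tree: use the first enabled
 * "marvell,mvebu-pxa3xx-nand" node for the register base, the number of
 * chip selects (only one is supported), the arbiter/keep-config flags and
 * the optional ECC strength/step properties.
 */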
1813 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1814 {
1815         struct pxa3xx_nand_platform_data *pdata;
1816         const void *blob = gd->fdt_blob;
1817         int node = -1;
1818
1819         pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1820         if (!pdata)
1821                 return -ENOMEM;
1822
1823         /* Get address decoding nodes from the FDT blob */
1824         do {
1825                 node = fdt_node_offset_by_compatible(blob, node,
1826                                                      "marvell,mvebu-pxa3xx-nand");
1827                 if (node < 0)
1828                         break;
1829
1830                 /* Skip disabled nodes */
1831                 if (!fdtdec_get_is_enabled(blob, node))
1832                         continue;
1833
1834                 /* Get the first enabled NAND controller base address */
1835                 info->mmio_base =
1836                         (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1837                                         blob, node, "reg", 0, NULL, true);
1838
1839                 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1840                 if (pdata->num_cs != 1) {
1841                         pr_err("pxa3xx driver supports single CS only\n");
1842                         break;
1843                 }
1844
1845                 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1846                         pdata->enable_arbiter = 1;
1847
1848                 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1849                         pdata->keep_config = 1;
1850
1851                 /*
1852                  * ECC parameters.
1853                  * If these are not set, they will be selected according
1854                  * to the detected flash type.
1855                  */
1856                 /* ECC strength */
1857                 pdata->ecc_strength = fdtdec_get_int(blob, node,
1858                                                      "nand-ecc-strength", 0);
1859
1860                 /* ECC step size */
1861                 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1862                                                       "nand-ecc-step-size", 0);
1863
1864                 info->pdata = pdata;
1865
1866                 /* Currently support only a single NAND controller */
1867                 return 0;
1868
1869         } while (node >= 0);
1870
1871         return -EINVAL;
1872 }
1873
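/*
 * Top-level probe: parse the device tree, allocate the controller
 * resources, then scan and register an mtd device for each configured chip
 * select. Probing succeeds if at least one chip select yields a working
 * device.
 */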
1874 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1875 {
1876         struct pxa3xx_nand_platform_data *pdata;
1877         int ret, cs, probe_success;
1878
1879         ret = pxa3xx_nand_probe_dt(info);
1880         if (ret)
1881                 return ret;
1882
1883         pdata = info->pdata;
1884
1885         ret = alloc_nand_resource(info);
1886         if (ret) {
1887                 dev_err(&info->pdev->dev, "alloc nand resource failed\n");
1888                 return ret;
1889         }
1890
1891         probe_success = 0;
1892         for (cs = 0; cs < pdata->num_cs; cs++) {
1893                 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1894
1895                 /*
1896                  * The mtd name matches the one used in the 'mtdparts'
1897                  * kernel parameter. This name cannot be changed, otherwise
1898                  * the user's mtd partition configuration would break.
1899                  */
1900                 mtd->name = "pxa3xx_nand-0";
1901                 info->cs = cs;
1902                 ret = pxa3xx_nand_scan(mtd);
1903                 if (ret) {
1904                         dev_info(&info->pdev->dev,
1905                                  "failed to scan nand at cs %d\n", cs);
1906                         continue;
1907                 }
1908
1909                 if (nand_register(cs, mtd))
1910                         continue;
1911
1912                 probe_success = 1;
1913         }
1914
1915         if (!probe_success)
1916                 return -ENODEV;
1917
1918         return 0;
1919 }
1920
1921 /*
1922  * Main initialization routine
1923  */
1924 void board_nand_init(void)
1925 {
1926         struct pxa3xx_nand_info *info;
1927         struct pxa3xx_nand_host *host;
1928         int ret;
1929
1930         info = kzalloc(sizeof(*info) +
1931                        sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1932                        GFP_KERNEL);
1933         if (!info)
1934                 return;
1935
1936         ret = pxa3xx_nand_probe(info);
1937         if (ret)
1938                 return;
1939 }