/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_mtd.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>

#define DRV_NAME "lpc32xx_mlc"

/**********************************************************************
* MLC NAND controller register offsets
**********************************************************************/

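/*
 * The controller decodes three windows off its base address: the data
 * buffer at +0x00000 (MLC_BUFF), the data port at +0x08000 (MLC_DATA) and
 * the control/status registers from +0x10000 upwards.
 */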
#define MLC_BUFF(x)                     (x + 0x00000)
#define MLC_DATA(x)                     (x + 0x08000)
#define MLC_CMD(x)                      (x + 0x10000)
#define MLC_ADDR(x)                     (x + 0x10004)
#define MLC_ECC_ENC_REG(x)              (x + 0x10008)
#define MLC_ECC_DEC_REG(x)              (x + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)         (x + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)         (x + 0x10014)
#define MLC_RPR(x)                      (x + 0x10018)
#define MLC_WPR(x)                      (x + 0x1001C)
#define MLC_RUBP(x)                     (x + 0x10020)
#define MLC_ROBP(x)                     (x + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)            (x + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)            (x + 0x1002C)
#define MLC_ICR(x)                      (x + 0x10030)
#define MLC_TIME_REG(x)                 (x + 0x10034)
#define MLC_IRQ_MR(x)                   (x + 0x10038)
#define MLC_IRQ_SR(x)                   (x + 0x1003C)
#define MLC_LOCK_PR(x)                  (x + 0x10044)
#define MLC_ISR(x)                      (x + 0x10048)
#define MLC_CEH(x)                      (x + 0x1004C)

/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET                    0xFF

/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT                    (1 << 3)
#define MLCICR_LARGEBLOCK               (1 << 2)
#define MLCICR_LONGADDR                 (1 << 1)
#define MLCICR_16BIT                    (1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)        (((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)        (((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)           (((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)           (((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)            (((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)           (((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)            (((n) & 0x0F) << 0)

/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY               (1 << 5)
#define MLCIRQ_CONTROLLER_READY         (1 << 4)
#define MLCIRQ_DECODE_FAILURE           (1 << 3)
#define MLCIRQ_DECODE_ERROR             (1 << 2)
#define MLCIRQ_ECC_READY                (1 << 1)
#define MLCIRQ_WRPROT_FAULT             (1 << 0)

/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC                 0xA25E

/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE          (1 << 6)
#define MLCISR_ERRORS                   ((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED          (1 << 3)
#define MLCISR_ECC_READY                (1 << 2)
#define MLCISR_CONTROLLER_READY         (1 << 1)
#define MLCISR_NAND_READY               (1 << 0)

/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL                   (1 << 0)

struct lpc32xx_nand_cfg_mlc {
        uint32_t tcea_delay;
        uint32_t busy_delay;
        uint32_t nand_ta;
        uint32_t rd_high;
        uint32_t rd_low;
        uint32_t wr_high;
        uint32_t wr_low;
        int wp_gpio;
        struct mtd_partition *parts;
        unsigned num_parts;
};

static struct nand_ecclayout lpc32xx_nand_oob = {
        .eccbytes = 40,
        .eccpos = { 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                   22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                   38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
                   54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
        .oobfree = {
                { .offset = 0,
                  .length = 6, },
                { .offset = 16,
                  .length = 6, },
                { .offset = 32,
                  .length = 6, },
                { .offset = 48,
                  .length = 6, },
                },
};
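
/*
 * The layout above handles the 64-byte OOB area of a 2048-byte page in
 * four 16-byte chunks, one per 512-byte subpage: 6 free bytes followed by
 * 10 bytes of hardware-generated ECC each.
 */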

static struct nand_bbt_descr lpc32xx_nand_bbt = {
        .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
                   NAND_BBT_WRITE,
        .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
        .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
                   NAND_BBT_WRITE,
        .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};
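
/*
 * NAND_BBT_ABSPAGE: the bad block table and its mirror above live at the
 * fixed absolute pages 524224 and 524160. These presumably address the
 * last two 64-page blocks of a 1 GiB part (524288 pages of 2048 bytes);
 * other chip geometries would need different values.
 */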

struct lpc32xx_nand_host {
        struct nand_chip        nand_chip;
        struct lpc32xx_mlc_platform_data *pdata;
        struct clk              *clk;
        struct mtd_info         mtd;
        void __iomem            *io_base;
        int                     irq;
        struct lpc32xx_nand_cfg_mlc     *ncfg;
        struct completion       comp_nand;
        struct completion       comp_controller;
        uint32_t llptr;
        /* Physical address of the OOB data buffer */
        dma_addr_t              oob_buf_phy;
        /* Virtual address of the OOB data buffer */
        uint8_t                 *oob_buf;
        /* Physical address of DMA base address */
        dma_addr_t              io_base_phy;

        struct completion       comp_dma;
        struct dma_chan         *dma_chan;
        struct dma_slave_config dma_slave_config;
        struct scatterlist      sgl;
        uint8_t                 *dma_buf;
        uint8_t                 *dummy_buf;
        int                     mlcsubpages; /* number of 512bytes-subpages */
};

/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512 byte subpages
 * instead of doing readl() / writel() in a loop slows it down significantly.
 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * These numbers cover the transfer itself; in the DMA case, only the
 * wait_for_completion() is measured (DMA setup is _not_ included).
 *
 * Note that the 512 byte subpage transfer goes directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for
 * a 2048 byte page) is spent waiting for the NAND IRQ anyway, while the
 * NAND controller moves data between its internal buffer and the NAND
 * chip.
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 *
 */
static int use_dma;

static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
        uint32_t clkrate, tmp;

        /* Reset MLC controller */
        writel(MLCCMD_RESET, MLC_CMD(host->io_base));
        udelay(1000);

        /* Get base clock for MLC block */
        clkrate = clk_get_rate(host->clk);
        if (clkrate == 0)
                clkrate = 104000000;

        /* Unlock MLC_ICR
         * (among others, will be locked again automatically) */
        writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

        /* Configure MLC Controller: Large Block, 5 Byte Address */
        tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
        writel(tmp, MLC_ICR(host->io_base));

        /* Unlock MLC_TIME_REG
         * (among others, will be locked again automatically) */
        writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

        /* Compute clock setup values, see LPC and NAND manual */
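        /*
         * Since the DT timing properties are divided into the MLC clock
         * rate below, they are effectively frequencies (Hz), and each
         * register field is a cycle count: clkrate / property, plus 1 where
         * rounding up is needed. For example (illustrative value, see the
         * lpc32xx-mlc binding), tcea_delay = 333333333 (~3 ns) at
         * clkrate = 104 MHz yields 104000000 / 333333333 + 1 = 1 cycle.
         */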
        tmp = 0;
        tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
        tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
        tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
        tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
        tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
        tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
        tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
        writel(tmp, MLC_TIME_REG(host->io_base));

        /* Enable IRQ for CONTROLLER_READY and NAND_READY */
        writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
                        MLC_IRQ_MR(host->io_base));

        /* Normal nCE operation: nCE controlled by controller */
        writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}

/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
                                  unsigned int ctrl)
{
        struct nand_chip *nand_chip = mtd->priv;
        struct lpc32xx_nand_host *host = nand_chip->priv;

        if (cmd != NAND_CMD_NONE) {
                if (ctrl & NAND_CLE)
                        writel(cmd, MLC_CMD(host->io_base));
                else
                        writel(cmd, MLC_ADDR(host->io_base));
        }
}

/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
{
        struct nand_chip *nand_chip = mtd->priv;
        struct lpc32xx_nand_host *host = nand_chip->priv;

        if ((readb(MLC_ISR(host->io_base)) &
             (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
            (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
                return 1;

        return 0;
}

static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
{
        uint8_t sr;

        /* Clear interrupt flag by reading status */
        sr = readb(MLC_IRQ_SR(host->io_base));
        if (sr & MLCIRQ_NAND_READY)
                complete(&host->comp_nand);
        if (sr & MLCIRQ_CONTROLLER_READY)
                complete(&host->comp_controller);

        return IRQ_HANDLED;
}

static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
        struct lpc32xx_nand_host *host = chip->priv;

        if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
                goto exit;

        wait_for_completion(&host->comp_nand);

        while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
                /* Seems to be delayed sometimes by controller */
                dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
                cpu_relax();
        }

exit:
        return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
                                       struct nand_chip *chip)
{
        struct lpc32xx_nand_host *host = chip->priv;

        if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
                goto exit;

        wait_for_completion(&host->comp_controller);

        while (!(readb(MLC_ISR(host->io_base)) &
                 MLCISR_CONTROLLER_READY)) {
                dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
                cpu_relax();
        }

exit:
        return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
        lpc32xx_waitfunc_nand(mtd, chip);
        lpc32xx_waitfunc_controller(mtd, chip);

        return NAND_STATUS_READY;
}

/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 1);
}

static void lpc32xx_dma_complete_func(void *completion)
{
        complete(completion);
}

static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
                            enum dma_transfer_direction dir)
{
        struct nand_chip *chip = mtd->priv;
        struct lpc32xx_nand_host *host = chip->priv;
        struct dma_async_tx_descriptor *desc;
        int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
        int res;

        sg_init_one(&host->sgl, mem, len);

        res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
                         DMA_BIDIRECTIONAL);
        if (res != 1) {
                dev_err(mtd->dev.parent, "Failed to map sg list\n");
                return -ENXIO;
        }
        desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
                                       flags);
        if (!desc) {
                dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
                goto out1;
        }

        init_completion(&host->comp_dma);
        desc->callback = lpc32xx_dma_complete_func;
        desc->callback_param = &host->comp_dma;

        dmaengine_submit(desc);
        dma_async_issue_pending(host->dma_chan);

        wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));

        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
                     DMA_BIDIRECTIONAL);
        return 0;
out1:
        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
                     DMA_BIDIRECTIONAL);
        return -ENXIO;
}

static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                             uint8_t *buf, int oob_required, int page)
{
        struct lpc32xx_nand_host *host = chip->priv;
        int i, j;
        uint8_t *oobbuf = chip->oob_poi;
        uint32_t mlc_isr;
        int res;
        uint8_t *dma_buf;
        bool dma_mapped;

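        /*
         * Buffers in lowmem are DMA-able directly; anything above
         * high_memory (e.g. vmalloc'ed) is bounced through host->dma_buf
         * and copied back after the transfer.
         */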
        if ((void *)buf <= high_memory) {
                dma_buf = buf;
                dma_mapped = true;
        } else {
                dma_buf = host->dma_buf;
                dma_mapped = false;
        }

        /* Writing Command and Address */
        chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

        /* For all sub-pages */
        for (i = 0; i < host->mlcsubpages; i++) {
                /* Start Auto Decode Command */
                writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

                /* Wait for Controller Ready */
                lpc32xx_waitfunc_controller(mtd, chip);

                /* Check ECC Error status */
                mlc_isr = readl(MLC_ISR(host->io_base));
                if (mlc_isr & MLCISR_DECODER_FAILURE) {
                        mtd->ecc_stats.failed++;
                        dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
                } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
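                        /* MLC_ISR bits 5:4 hold the symbol error count - 1 */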
                        mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
                }

                /* Read 512 + 16 Bytes */
                if (use_dma) {
                        res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
                                               DMA_DEV_TO_MEM);
                        if (res)
                                return res;
                } else {
                        for (j = 0; j < (512 >> 2); j++) {
                                *((uint32_t *)(buf)) =
                                        readl(MLC_BUFF(host->io_base));
                                buf += 4;
                        }
                }
                for (j = 0; j < (16 >> 2); j++) {
                        *((uint32_t *)(oobbuf)) =
                                readl(MLC_BUFF(host->io_base));
                        oobbuf += 4;
                }
        }

        if (use_dma && !dma_mapped)
                memcpy(buf, dma_buf, mtd->writesize);

        return 0;
}

static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
                                       struct nand_chip *chip,
                                       const uint8_t *buf, int oob_required)
{
        struct lpc32xx_nand_host *host = chip->priv;
        const uint8_t *oobbuf = chip->oob_poi;
        uint8_t *dma_buf = (uint8_t *)buf;
        int res;
        int i, j;

        if (use_dma && (void *)buf >= high_memory) {
                dma_buf = host->dma_buf;
                memcpy(dma_buf, buf, mtd->writesize);
        }

        for (i = 0; i < host->mlcsubpages; i++) {
                /* Start Encode */
                writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

                /* Write 512 + 6 Bytes to Buffer */
                if (use_dma) {
                        res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
                                               DMA_MEM_TO_DEV);
                        if (res)
                                return res;
                } else {
                        for (j = 0; j < (512 >> 2); j++) {
                                writel(*((uint32_t *)(buf)),
                                       MLC_BUFF(host->io_base));
                                buf += 4;
                        }
                }
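                /*
                 * Write the 6 free OOB bytes of this chunk (4 + 2); the 10
                 * ECC bytes completing the 16-byte chunk are generated by
                 * the hardware encoder, hence oobbuf advances by 16 in
                 * total.
                 */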
                writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
                oobbuf += 4;
                writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
                oobbuf += 12;

                /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
                writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

                /* Wait for Controller Ready */
                lpc32xx_waitfunc_controller(mtd, chip);
        }
        return 0;
}

static int lpc32xx_write_page(struct mtd_info *mtd, struct nand_chip *chip,
                        uint32_t offset, int data_len, const uint8_t *buf,
                        int oob_required, int page, int cached, int raw)
{
        int res;

        chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
        res = lpc32xx_write_page_lowlevel(mtd, chip, buf, oob_required);
        chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
        lpc32xx_waitfunc(mtd, chip);

        return res;
}

static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                            int page)
{
        struct lpc32xx_nand_host *host = chip->priv;

        /* Read whole page - necessary with MLC controller! */
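        /* (The page payload lands in dummy_buf and is discarded;
         * lpc32xx_read_page() fills chip->oob_poi as a side effect.) */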
        lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);

        return 0;
}

static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
                              int page)
{
        /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
        return 0;
}

/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
        /* Always enabled! */
}

static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
        struct mtd_info *mtd = &host->mtd;
        dma_cap_mask_t mask;

        if (!host->pdata || !host->pdata->dma_filter) {
                dev_err(mtd->dev.parent, "no DMA platform data\n");
                return -ENOENT;
        }

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
                                             "nand-mlc");
        if (!host->dma_chan) {
                dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
                return -EBUSY;
        }

        /*
         * Set direction to a sensible value even if the dmaengine driver
         * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
         * driver criticizes it as "alien transfer direction".
         */
        host->dma_slave_config.direction = DMA_DEV_TO_MEM;
        host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        host->dma_slave_config.src_maxburst = 128;
        host->dma_slave_config.dst_maxburst = 128;
        /* DMA controller does flow control: */
        host->dma_slave_config.device_fc = false;
        host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
        host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
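        /* Same FIFO address in both directions: reads and writes both
         * target MLC_BUFF. */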
        if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
                dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
                goto out1;
        }

        return 0;
out1:
        dma_release_channel(host->dma_chan);
        return -ENXIO;
}

static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
        struct lpc32xx_nand_cfg_mlc *ncfg;
        struct device_node *np = dev->of_node;

        ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
        if (!ncfg) {
                dev_err(dev, "could not allocate memory for platform data\n");
                return NULL;
        }

        of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
        of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
        of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
        of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
        of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
        of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
        of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);

        if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
            !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
            !ncfg->wr_low) {
                dev_err(dev, "chip parameters not specified correctly\n");
                return NULL;
        }

        ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

        return ncfg;
}
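
/*
 * A device tree node consumed by lpc32xx_parse_dt() might look like the
 * sketch below. All values are illustrative only (consult the lpc32xx-mlc
 * binding for real boards); the first "gpios" entry is the write protect
 * line:
 *
 *      flash@200a8000 {
 *              compatible = "nxp,lpc3220-mlc";
 *              reg = <0x200a8000 0x11000>;
 *              nxp,tcea-delay = <333333333>;
 *              nxp,busy-delay = <10000000>;
 *              nxp,nand-ta = <18181818>;
 *              nxp,rd-high = <31250000>;
 *              nxp,rd-low = <45454545>;
 *              nxp,wr-high = <40000000>;
 *              nxp,wr-low = <83333333>;
 *              gpios = <&gpio 5 19 1>;
 *      };
 */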

/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host;
        struct mtd_info *mtd;
        struct nand_chip *nand_chip;
        struct resource *rc;
        int res;
        struct mtd_part_parser_data ppdata = {};

        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                dev_err(&pdev->dev, "failed to allocate device structure.\n");
                return -ENOMEM;
        }

        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
        if (IS_ERR(host->io_base))
                return PTR_ERR(host->io_base);

        host->io_base_phy = rc->start;

        mtd = &host->mtd;
        nand_chip = &host->nand_chip;
        if (pdev->dev.of_node)
                host->ncfg = lpc32xx_parse_dt(&pdev->dev);
        if (!host->ncfg) {
                dev_err(&pdev->dev,
                        "Missing or bad NAND config from device tree\n");
                return -ENOENT;
        }
        if (host->ncfg->wp_gpio == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (gpio_is_valid(host->ncfg->wp_gpio) &&
                        gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
                dev_err(&pdev->dev, "GPIO not available\n");
                return -EBUSY;
        }
        lpc32xx_wp_disable(host);

        host->pdata = pdev->dev.platform_data;

        nand_chip->priv = host;         /* link the private data structures */
        mtd->priv = nand_chip;
        mtd->owner = THIS_MODULE;
        mtd->dev.parent = &pdev->dev;

        /* Get NAND clock */
        host->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                dev_err(&pdev->dev, "Clock initialization failure\n");
                res = -ENOENT;
                goto err_exit1;
        }
        clk_enable(host->clk);

        nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
        nand_chip->dev_ready = lpc32xx_nand_device_ready;
        nand_chip->chip_delay = 25; /* us */
        nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
        nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

        /* Init NAND controller */
        lpc32xx_nand_setup(host);

        platform_set_drvdata(pdev, host);

        /* Initialize function pointers */
        nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
        nand_chip->ecc.read_page_raw = lpc32xx_read_page;
        nand_chip->ecc.read_page = lpc32xx_read_page;
        nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
        nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
        nand_chip->ecc.write_oob = lpc32xx_write_oob;
        nand_chip->ecc.read_oob = lpc32xx_read_oob;
        nand_chip->ecc.strength = 4;
        nand_chip->write_page = lpc32xx_write_page;
        nand_chip->waitfunc = lpc32xx_waitfunc;

        nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
        nand_chip->bbt_td = &lpc32xx_nand_bbt;
        nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

        /*
         * bitflip_threshold defaults to ecc_strength anyway, but it is only
         * set later, at add_mtd_device(). While it is still 0, it causes bad
         * block table scanning errors in nand_scan_tail(), so set it here
         * already.
         */
        mtd->bitflip_threshold = nand_chip->ecc.strength;

        if (use_dma) {
                res = lpc32xx_dma_setup(host);
                if (res) {
                        res = -EIO;
                        goto err_exit2;
                }
        }

        /*
         * Scan to find the existence of the device and get the type of NAND
         * device: small block or large block.
         */
        if (nand_scan_ident(mtd, 1, NULL)) {
                res = -ENXIO;
                goto err_exit3;
        }

        host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dma_buf) {
                dev_err(&pdev->dev, "Error allocating dma_buf memory\n");
                res = -ENOMEM;
                goto err_exit3;
        }

        host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dummy_buf) {
                dev_err(&pdev->dev, "Error allocating dummy_buf memory\n");
                res = -ENOMEM;
                goto err_exit3;
        }

        nand_chip->ecc.mode = NAND_ECC_HW;
        nand_chip->ecc.size = mtd->writesize;
        nand_chip->ecc.layout = &lpc32xx_nand_oob;
        host->mlcsubpages = mtd->writesize / 512;

        /* initially clear interrupt status */
        readb(MLC_IRQ_SR(host->io_base));

        init_completion(&host->comp_nand);
        init_completion(&host->comp_controller);

        host->irq = platform_get_irq(pdev, 0);
        if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
                dev_err(&pdev->dev, "failed to get platform irq\n");
                res = -EINVAL;
                goto err_exit3;
        }

        if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
                        IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
                dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
                res = -ENXIO;
                goto err_exit3;
        }

        /*
         * Fill out all the uninitialized function pointers with the defaults
         * and scan for a bad block table if appropriate.
         */
        if (nand_scan_tail(mtd)) {
                res = -ENXIO;
                goto err_exit4;
        }

        mtd->name = DRV_NAME;

        ppdata.of_node = pdev->dev.of_node;
        res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
                                        host->ncfg->num_parts);
        if (!res)
                return res;

        nand_release(mtd);

err_exit4:
        free_irq(host->irq, host);
err_exit3:
        if (use_dma)
                dma_release_channel(host->dma_chan);
err_exit2:
        clk_disable(host->clk);
        clk_put(host->clk);
        platform_set_drvdata(pdev, NULL);
err_exit1:
        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);

        return res;
}

/*
 * Remove NAND device
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
        struct mtd_info *mtd = &host->mtd;

        nand_release(mtd);
        free_irq(host->irq, host);
        if (use_dma)
                dma_release_channel(host->dma_chan);

        clk_disable(host->clk);
        clk_put(host->clk);
        platform_set_drvdata(pdev, NULL);

        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);

        return 0;
}

#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

        /* Re-enable NAND clock */
        clk_enable(host->clk);

        /* Fresh init of NAND controller */
        lpc32xx_nand_setup(host);

        /* Disable write protect */
        lpc32xx_wp_disable(host);

        return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

        /* Enable write protect for safety */
        lpc32xx_wp_enable(host);

        /* Disable clock */
        clk_disable(host->clk);
        return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif

static const struct of_device_id lpc32xx_nand_match[] = {
        { .compatible = "nxp,lpc3220-mlc" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
        .probe          = lpc32xx_nand_probe,
        .remove         = lpc32xx_nand_remove,
        .resume         = lpc32xx_nand_resume,
        .suspend        = lpc32xx_nand_suspend,
        .driver         = {
                .name   = DRV_NAME,
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(lpc32xx_nand_match),
        },
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");