librecmc/librecmc.git: target/linux/mediatek/patches-4.4/0075-mtd-mediatek-driver-for-MTK-Smart-Device-Gen1-NAND.patch
1 From de18239fc971cfc17c53320c66ae64dd5ade032d Mon Sep 17 00:00:00 2001
2 From: Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
3 Date: Fri, 29 Apr 2016 12:17:22 -0400
4 Subject: [PATCH 075/102] mtd: mediatek: driver for MTK Smart Device Gen1 NAND
5
6 This patch adds support for MediaTek's SDG1 NFC NAND controller
7 embedded in the MT2701 SoC.
8
9 Signed-off-by: Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
10 ---
11  drivers/mtd/nand/Kconfig    |    7 +
12  drivers/mtd/nand/Makefile   |    1 +
13  drivers/mtd/nand/mtk_ecc.c  |  527 ++++++++++++++++
14  drivers/mtd/nand/mtk_ecc.h  |   53 ++
15  drivers/mtd/nand/mtk_nand.c | 1432 +++++++++++++++++++++++++++++++++++++++++++
16  5 files changed, 2020 insertions(+)
17  create mode 100644 drivers/mtd/nand/mtk_ecc.c
18  create mode 100644 drivers/mtd/nand/mtk_ecc.h
19  create mode 100644 drivers/mtd/nand/mtk_nand.c
20
21 --- a/drivers/mtd/nand/Kconfig
22 +++ b/drivers/mtd/nand/Kconfig
23 @@ -563,4 +563,11 @@ config MTD_NAND_QCOM
24           Enables support for NAND flash chips on SoCs containing the EBI2 NAND
25           controller. This controller is found on IPQ806x SoC.
26  
27 +config MTD_NAND_MTK
28 +       tristate "Support for NAND controller on MTK SoCs"
29 +       depends on HAS_DMA
30 +       help
31 +         Enables support for NAND controller on MTK SoCs.
32 +         This controller is found on mt27xx, mt81xx, mt65xx SoCs.
33 +
34  endif # MTD_NAND
35 --- a/drivers/mtd/nand/Makefile
36 +++ b/drivers/mtd/nand/Makefile
37 @@ -57,5 +57,6 @@ obj-$(CONFIG_MTD_NAND_SUNXI)          += sunxi_n
38  obj-$(CONFIG_MTD_NAND_HISI504)         += hisi504_nand.o
39  obj-$(CONFIG_MTD_NAND_BRCMNAND)                += brcmnand/
40  obj-$(CONFIG_MTD_NAND_QCOM)            += qcom_nandc.o
41 +obj-$(CONFIG_MTD_NAND_MTK)             += mtk_nand.o mtk_ecc.o
42  
43  nand-objs := nand_base.o nand_bbt.o nand_timings.o
44 --- /dev/null
45 +++ b/drivers/mtd/nand/mtk_ecc.c
46 @@ -0,0 +1,527 @@
47 +/*
48 + * MTK ECC controller driver.
49 + * Copyright (C) 2016  MediaTek Inc.
50 + * Authors:    Xiaolei Li              <xiaolei.li@mediatek.com>
51 + *             Jorge Ramirez-Ortiz     <jorge.ramirez-ortiz@linaro.org>
52 + *
53 + * This program is free software; you can redistribute it and/or modify
54 + * it under the terms of the GNU General Public License version 2 as
55 + * published by the Free Software Foundation.
56 + *
57 + * This program is distributed in the hope that it will be useful,
58 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
59 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
60 + * GNU General Public License for more details.
61 + */
62 +
63 +#include <linux/platform_device.h>
64 +#include <linux/dma-mapping.h>
65 +#include <linux/interrupt.h>
66 +#include <linux/clk.h>
67 +#include <linux/module.h>
68 +#include <linux/iopoll.h>
69 +#include <linux/of.h>
70 +#include <linux/of_platform.h>
71 +#include <linux/semaphore.h>
72 +
73 +#include "mtk_ecc.h"
74 +
75 +#define ECC_ENCCON             (0x00)
76 +#define                ENC_EN                  (1)
77 +#define                ENC_DE                  (0)
78 +#define ECC_ENCCNFG            (0x04)
79 +#define                ECC_CNFG_4BIT           (0)
80 +#define                ECC_CNFG_6BIT           (1)
81 +#define                ECC_CNFG_8BIT           (2)
82 +#define                ECC_CNFG_10BIT          (3)
83 +#define                ECC_CNFG_12BIT          (4)
84 +#define                ECC_CNFG_14BIT          (5)
85 +#define                ECC_CNFG_16BIT          (6)
86 +#define                ECC_CNFG_18BIT          (7)
87 +#define                ECC_CNFG_20BIT          (8)
88 +#define                ECC_CNFG_22BIT          (9)
89 +#define                ECC_CNFG_24BIT          (0xa)
90 +#define                ECC_CNFG_28BIT          (0xb)
91 +#define                ECC_CNFG_32BIT          (0xc)
92 +#define                ECC_CNFG_36BIT          (0xd)
93 +#define                ECC_CNFG_40BIT          (0xe)
94 +#define                ECC_CNFG_44BIT          (0xf)
95 +#define                ECC_CNFG_48BIT          (0x10)
96 +#define                ECC_CNFG_52BIT          (0x11)
97 +#define                ECC_CNFG_56BIT          (0x12)
98 +#define                ECC_CNFG_60BIT          (0x13)
99 +#define                ECC_MODE_SHIFT          (5)
100 +#define                ECC_MS_SHIFT            (16)
101 +#define ECC_ENCDIADDR          (0x08)
102 +#define ECC_ENCIDLE            (0x0C)
103 +#define                ENC_IDLE                BIT(0)
104 +#define ECC_ENCPAR(x)          (0x10 + (x) * sizeof(u32))
105 +#define ECC_ENCIRQ_EN          (0x80)
106 +#define                ENC_IRQEN               BIT(0)
107 +#define ECC_ENCIRQ_STA         (0x84)
108 +#define ECC_DECCON             (0x100)
109 +#define                DEC_EN                  (1)
110 +#define                DEC_DE                  (0)
111 +#define ECC_DECCNFG            (0x104)
112 +#define                DEC_EMPTY_EN            BIT(31)
113 +#define                DEC_CNFG_CORRECT        (0x3 << 12)
114 +#define ECC_DECIDLE            (0x10C)
115 +#define                DEC_IDLE                BIT(0)
116 +#define ECC_DECENUM0           (0x114)
117 +#define                ERR_MASK                (0x3f)
118 +#define ECC_DECDONE            (0x124)
119 +#define ECC_DECIRQ_EN          (0x200)
120 +#define                DEC_IRQEN               BIT(0)
121 +#define ECC_DECIRQ_STA         (0x204)
122 +
123 +#define ECC_TIMEOUT            (500000)
124 +
125 +#define ECC_IDLE_REG(x)                ((x) == ECC_ENC ? ECC_ENCIDLE : ECC_DECIDLE)
126 +#define ECC_IDLE_MASK(x)       ((x) == ECC_ENC ? ENC_IDLE : DEC_IDLE)
127 +#define ECC_IRQ_REG(x)         ((x) == ECC_ENC ? ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
128 +#define ECC_IRQ_EN(x)          ((x) == ECC_ENC ? ENC_IRQEN : DEC_IRQEN)
129 +#define ECC_CTL_REG(x)         ((x) == ECC_ENC ? ECC_ENCCON : ECC_DECCON)
130 +#define ECC_CODEC_ENABLE(x)    ((x) == ECC_ENC ? ENC_EN : DEC_EN)
131 +#define ECC_CODEC_DISABLE(x)   ((x) == ECC_ENC ? ENC_DE : DEC_DE)
132 +
133 +struct mtk_ecc {
134 +       struct device *dev;
135 +       void __iomem *regs;
136 +       struct clk *clk;
137 +
138 +       struct completion done;
139 +       struct semaphore sem;
140 +       u32 sec_mask;
141 +};
142 +
143 +static inline void mtk_ecc_codec_wait_idle(struct mtk_ecc *ecc,
144 +                                       enum mtk_ecc_codec codec)
145 +{
146 +       struct device *dev = ecc->dev;
147 +       u32 val;
148 +       int ret;
149 +
150 +       ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(codec), val,
151 +                                       val & ECC_IDLE_MASK(codec),
152 +                                       10, ECC_TIMEOUT);
153 +       if (ret)
154 +               dev_warn(dev, "%s NOT idle\n",
155 +                       codec == ECC_ENC ? "encoder" : "decoder");
156 +}
157 +
158 +static irqreturn_t mtk_ecc_irq(int irq, void *id)
159 +{
160 +       struct mtk_ecc *ecc = id;
161 +       enum mtk_ecc_codec codec;
162 +       u32 dec, enc;
163 +
164 +       dec = readw(ecc->regs + ECC_DECIRQ_STA) & DEC_IRQEN;
165 +       if (dec) {
166 +               codec = ECC_DEC;
167 +               dec = readw(ecc->regs + ECC_DECDONE);
168 +               if (dec & ecc->sec_mask) {
169 +                       ecc->sec_mask = 0;
170 +                       complete(&ecc->done);
171 +               } else
172 +                       return IRQ_HANDLED;
173 +       } else {
174 +               enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ENC_IRQEN;
175 +               if (enc) {
176 +                       codec = ECC_ENC;
177 +                       complete(&ecc->done);
178 +               } else
179 +                       return IRQ_NONE;
180 +       }
181 +
182 +       writel(0, ecc->regs + ECC_IRQ_REG(codec));
183 +
184 +       return IRQ_HANDLED;
185 +}
186 +
187 +static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
188 +{
189 +       u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
190 +       u32 reg;
191 +
192 +       switch (config->strength) {
193 +       case 4:
194 +               ecc_bit = ECC_CNFG_4BIT;
195 +               break;
196 +       case 6:
197 +               ecc_bit = ECC_CNFG_6BIT;
198 +               break;
199 +       case 8:
200 +               ecc_bit = ECC_CNFG_8BIT;
201 +               break;
202 +       case 10:
203 +               ecc_bit = ECC_CNFG_10BIT;
204 +               break;
205 +       case 12:
206 +               ecc_bit = ECC_CNFG_12BIT;
207 +               break;
208 +       case 14:
209 +               ecc_bit = ECC_CNFG_14BIT;
210 +               break;
211 +       case 16:
212 +               ecc_bit = ECC_CNFG_16BIT;
213 +               break;
214 +       case 18:
215 +               ecc_bit = ECC_CNFG_18BIT;
216 +               break;
217 +       case 20:
218 +               ecc_bit = ECC_CNFG_20BIT;
219 +               break;
220 +       case 22:
221 +               ecc_bit = ECC_CNFG_22BIT;
222 +               break;
223 +       case 24:
224 +               ecc_bit = ECC_CNFG_24BIT;
225 +               break;
226 +       case 28:
227 +               ecc_bit = ECC_CNFG_28BIT;
228 +               break;
229 +       case 32:
230 +               ecc_bit = ECC_CNFG_32BIT;
231 +               break;
232 +       case 36:
233 +               ecc_bit = ECC_CNFG_36BIT;
234 +               break;
235 +       case 40:
236 +               ecc_bit = ECC_CNFG_40BIT;
237 +               break;
238 +       case 44:
239 +               ecc_bit = ECC_CNFG_44BIT;
240 +               break;
241 +       case 48:
242 +               ecc_bit = ECC_CNFG_48BIT;
243 +               break;
244 +       case 52:
245 +               ecc_bit = ECC_CNFG_52BIT;
246 +               break;
247 +       case 56:
248 +               ecc_bit = ECC_CNFG_56BIT;
249 +               break;
250 +       case 60:
251 +               ecc_bit = ECC_CNFG_60BIT;
252 +               break;
253 +       default:
254 +               dev_err(ecc->dev, "invalid strength %d\n", config->strength);
255 +       }
256 +
257 +       if (config->codec == ECC_ENC) {
258 +               /* configure ECC encoder (in bits) */
259 +               enc_sz = config->enc_len << 3;
260 +
261 +               reg = ecc_bit | (config->ecc_mode << ECC_MODE_SHIFT);
262 +               reg |= (enc_sz << ECC_MS_SHIFT);
263 +               writel(reg, ecc->regs + ECC_ENCCNFG);
264 +
265 +               if (config->ecc_mode != ECC_NFI_MODE)
266 +                       writel(lower_32_bits(config->addr),
267 +                               ecc->regs + ECC_ENCDIADDR);
268 +
269 +       } else {
270 +               /* configure ECC decoder (in bits) */
271 +               dec_sz = config->dec_len;
272 +
273 +               reg = ecc_bit | (config->ecc_mode << ECC_MODE_SHIFT);
274 +               reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
275 +               reg |= DEC_EMPTY_EN;
276 +               writel(reg, ecc->regs + ECC_DECCNFG);
277 +
278 +               if (config->sec_mask)
279 +                       ecc->sec_mask = 1 << (config->sec_mask - 1);
280 +       }
281 +}
282 +
283 +void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
284 +                       int sectors)
285 +{
286 +       u32 offset, i, err;
287 +       u32 bitflips = 0;
288 +
289 +       stats->corrected = 0;
290 +       stats->failed = 0;
291 +
292 +       for (i = 0; i < sectors; i++) {
293 +               offset = (i >> 2) << 2;
294 +               err = readl(ecc->regs + ECC_DECENUM0 + offset);
295 +               err = err >> ((i % 4) * 8);
296 +               err &= ERR_MASK;
297 +               if (err == ERR_MASK) {
298 +                       /* uncorrectable errors */
299 +                       stats->failed++;
300 +                       continue;
301 +               }
302 +
303 +               stats->corrected += err;
304 +               bitflips = max_t(u32, bitflips, err);
305 +       }
306 +
307 +       stats->bitflips = bitflips;
308 +}
309 +EXPORT_SYMBOL(mtk_ecc_get_stats);
310 +
311 +void mtk_ecc_release(struct mtk_ecc *ecc)
312 +{
313 +       clk_disable_unprepare(ecc->clk);
314 +       put_device(ecc->dev);
315 +}
316 +EXPORT_SYMBOL(mtk_ecc_release);
317 +
318 +static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
319 +{
320 +       struct platform_device *pdev;
321 +       struct mtk_ecc *ecc;
322 +
323 +       pdev = of_find_device_by_node(np);
324 +       if (!pdev || !platform_get_drvdata(pdev))
325 +               return ERR_PTR(-EPROBE_DEFER);
326 +
327 +       get_device(&pdev->dev);
328 +       ecc = platform_get_drvdata(pdev);
329 +       clk_prepare_enable(ecc->clk);
330 +       mtk_ecc_hw_init(ecc);
331 +
332 +       return ecc;
333 +}
334 +
335 +struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
336 +{
337 +       struct mtk_ecc *ecc = NULL;
338 +       struct device_node *np;
339 +
340 +       np = of_parse_phandle(of_node, "ecc-engine", 0);
341 +       if (np) {
342 +               ecc = mtk_ecc_get(np);
343 +               of_node_put(np);
344 +       }
345 +
346 +       return ecc;
347 +}
348 +EXPORT_SYMBOL(of_mtk_ecc_get);
349 +
350 +int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
351 +{
352 +       enum mtk_ecc_codec codec = config->codec;
353 +       int ret;
354 +
355 +       ret = down_interruptible(&ecc->sem);
356 +       if (ret) {
357 +               dev_err(ecc->dev, "interrupted when attempting to lock\n");
358 +               return ret;
359 +       }
360 +
361 +       mtk_ecc_codec_wait_idle(ecc, codec);
362 +       mtk_ecc_config(ecc, config);
363 +       writew(ECC_CODEC_ENABLE(codec), ecc->regs + ECC_CTL_REG(codec));
364 +
365 +       init_completion(&ecc->done);
366 +       writew(ECC_IRQ_EN(codec), ecc->regs + ECC_IRQ_REG(codec));
367 +
368 +       return 0;
369 +}
370 +EXPORT_SYMBOL(mtk_ecc_enable);
371 +
372 +void mtk_ecc_disable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
373 +{
374 +       enum mtk_ecc_codec codec = config->codec;
375 +
376 +       mtk_ecc_codec_wait_idle(ecc, codec);
377 +       writew(0, ecc->regs + ECC_IRQ_REG(codec));
378 +       writew(ECC_CODEC_DISABLE(codec), ecc->regs + ECC_CTL_REG(codec));
379 +       up(&ecc->sem);
380 +}
381 +EXPORT_SYMBOL(mtk_ecc_disable);
382 +
383 +int mtk_ecc_wait_irq_done(struct mtk_ecc *ecc, enum mtk_ecc_codec codec)
384 +{
385 +       int ret;
386 +
387 +       ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
388 +       if (!ret) {
389 +               dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n",
390 +                               (codec == ECC_ENC) ? "encoder" : "decoder");
391 +               return -ETIMEDOUT;
392 +       }
393 +
394 +       return 0;
395 +}
396 +EXPORT_SYMBOL(mtk_ecc_wait_irq_done);
397 +
398 +int mtk_ecc_encode_non_nfi_mode(struct mtk_ecc *ecc,
399 +                       struct mtk_ecc_config *config, u8 *data, u32 bytes)
400 +{
401 +       dma_addr_t addr;
402 +       u32 *p, len, i;
403 +       int ret = 0;
404 +
405 +       addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
406 +       ret = dma_mapping_error(ecc->dev, addr);
407 +       if (ret) {
408 +               dev_err(ecc->dev, "dma mapping error\n");
409 +               return -EINVAL;
410 +       }
411 +
412 +       config->codec = ECC_ENC;
413 +       config->addr = addr;
414 +       ret = mtk_ecc_enable(ecc, config);
415 +       if (ret) {
416 +               dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
417 +               return ret;
418 +       }
419 +
420 +       ret = mtk_ecc_wait_irq_done(ecc, ECC_ENC);
421 +       if (ret)
422 +               goto timeout;
423 +
424 +       mtk_ecc_codec_wait_idle(ecc, ECC_ENC);
425 +
426 +       /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
427 +       len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
428 +       p = (u32 *) (data + bytes);
429 +
430 +       /* write the parity bytes generated by the ECC back to the OOB region */
431 +       for (i = 0; i < len; i++)
432 +               p[i] = readl(ecc->regs + ECC_ENCPAR(i));
433 +timeout:
434 +
435 +       dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
436 +       mtk_ecc_disable(ecc, config);
437 +
438 +       return ret;
439 +}
440 +EXPORT_SYMBOL(mtk_ecc_encode_non_nfi_mode);
441 +
442 +void mtk_ecc_hw_init(struct mtk_ecc *ecc)
443 +{
444 +       mtk_ecc_codec_wait_idle(ecc, ECC_ENC);
445 +       writew(ENC_DE, ecc->regs + ECC_ENCCON);
446 +
447 +       mtk_ecc_codec_wait_idle(ecc, ECC_DEC);
448 +       writel(DEC_DE, ecc->regs + ECC_DECCON);
449 +}
450 +
451 +void mtk_ecc_update_strength(u32 *p)
452 +{
453 +       u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
454 +                       40, 44, 48, 52, 56, 60};
455 +       int i;
456 +
457 +       for (i = 0; i < ARRAY_SIZE(ecc); i++) {
458 +               if (*p <= ecc[i]) {
459 +                       if (!i)
460 +                               *p = ecc[i];
461 +                       else if (*p != ecc[i])
462 +                               *p = ecc[i - 1];
463 +                       return;
464 +               }
465 +       }
466 +
467 +       *p = ecc[ARRAY_SIZE(ecc) - 1];
468 +}
469 +EXPORT_SYMBOL(mtk_ecc_update_strength);
470 +
471 +static int mtk_ecc_probe(struct platform_device *pdev)
472 +{
473 +       struct device *dev = &pdev->dev;
474 +       struct mtk_ecc *ecc;
475 +       struct resource *res;
476 +       int irq, ret;
477 +
478 +       ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
479 +       if (!ecc)
480 +               return -ENOMEM;
481 +
482 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
483 +       ecc->regs = devm_ioremap_resource(dev, res);
484 +       if (IS_ERR(ecc->regs)) {
485 +               dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
486 +               return PTR_ERR(ecc->regs);
487 +       }
488 +
489 +       ecc->clk = devm_clk_get(dev, NULL);
490 +       if (IS_ERR(ecc->clk)) {
491 +               dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
492 +               return PTR_ERR(ecc->clk);
493 +       }
494 +
495 +       irq = platform_get_irq(pdev, 0);
496 +       if (irq < 0) {
497 +               dev_err(dev, "failed to get irq\n");
498 +               return -EINVAL;
499 +       }
500 +
501 +       ret = dma_set_mask(dev, DMA_BIT_MASK(32));
502 +       if (ret) {
503 +               dev_err(dev, "failed to set DMA mask\n");
504 +               return ret;
505 +       }
506 +
507 +       ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
508 +       if (ret) {
509 +               dev_err(dev, "failed to request irq\n");
510 +               return -EINVAL;
511 +       }
512 +
513 +       ecc->dev = dev;
514 +       sema_init(&ecc->sem, 1);
515 +       platform_set_drvdata(pdev, ecc);
516 +       dev_info(dev, "probed\n");
517 +
518 +       return 0;
519 +}
520 +
521 +#ifdef CONFIG_PM_SLEEP
522 +static int mtk_ecc_suspend(struct device *dev)
523 +{
524 +       struct mtk_ecc *ecc = dev_get_drvdata(dev);
525 +
526 +       clk_disable_unprepare(ecc->clk);
527 +
528 +       return 0;
529 +}
530 +
531 +static int mtk_ecc_resume(struct device *dev)
532 +{
533 +       struct mtk_ecc *ecc = dev_get_drvdata(dev);
534 +       int ret;
535 +
536 +       ret = clk_prepare_enable(ecc->clk);
537 +       if (ret) {
538 +               dev_err(dev, "failed to enable clk\n");
539 +               return ret;
540 +       }
541 +
542 +       mtk_ecc_hw_init(ecc);
543 +
544 +       return 0;
545 +}
546 +
547 +static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
548 +#endif
549 +
550 +static const struct of_device_id mtk_ecc_dt_match[] = {
551 +       { .compatible = "mediatek,mt2701-ecc" },
552 +       {},
553 +};
554 +
555 +MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
556 +
557 +static struct platform_driver mtk_ecc_driver = {
558 +       .probe  = mtk_ecc_probe,
559 +       .driver = {
560 +               .name  = "mtk-ecc",
561 +               .of_match_table = of_match_ptr(mtk_ecc_dt_match),
562 +#ifdef CONFIG_PM_SLEEP
563 +               .pm = &mtk_ecc_pm_ops,
564 +#endif
565 +       },
566 +};
567 +
568 +module_platform_driver(mtk_ecc_driver);
569 +
570 +MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
571 +MODULE_AUTHOR("Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>");
572 +MODULE_DESCRIPTION("MTK NAND ECC Driver");
573 +MODULE_LICENSE("GPL");
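
Taken together, the exported calls above are the whole client contract: a NAND host driver looks the engine up through its "ecc-engine" phandle with of_mtk_ecc_get(), fills in a struct mtk_ecc_config per operation, brackets each transfer with mtk_ecc_enable()/mtk_ecc_disable(), and drops the reference with mtk_ecc_release() on teardown. The sketch below shows one plausible NFI-mode decode sequence; only the mtk_ecc_* calls, the config fields and ECC_PARITY_BITS come from this patch, while the helper name, its parameters and the surrounding error handling are illustrative assumptions.

#include <linux/kernel.h>
#include "mtk_ecc.h"

/* Hypothetical helper: decode 'sectors' sectors of 'sector_bytes'
 * (data + FDM) each, fed to the decoder by the NFI engine, and return
 * the worst per-sector bitflip count or a negative error.
 */
static int example_nfi_decode(struct mtk_ecc *ecc, u32 strength,
			      u32 sector_bytes, u32 sectors)
{
	struct mtk_ecc_config cfg = {
		.ecc_mode = ECC_NFI_MODE,	/* decoder fed by the NFI, not by DMA */
		.codec    = ECC_DEC,
		.sec_mask = sectors,		/* complete once the last sector is decoded */
	};
	struct mtk_ecc_stats stats = { 0 };
	int ret;

	mtk_ecc_update_strength(&strength);	/* clamp to a supported strength */
	cfg.strength = strength;
	/* decode length in bits: (data + FDM) plus parity, as computed in mtk_nand.c */
	cfg.dec_len = (sector_bytes << 3) + strength * ECC_PARITY_BITS;

	ret = mtk_ecc_enable(ecc, &cfg);
	if (ret)
		return ret;

	/* ... the NFI read that streams data through the decoder runs here ... */

	ret = mtk_ecc_wait_irq_done(ecc, ECC_DEC);
	if (!ret)
		mtk_ecc_get_stats(ecc, &stats, sectors);

	mtk_ecc_disable(ecc, &cfg);

	return ret ? ret : (int)stats.bitflips;
}
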
574 --- /dev/null
575 +++ b/drivers/mtd/nand/mtk_ecc.h
576 @@ -0,0 +1,53 @@
577 +/*
578 + * MTK SDG1 ECC controller
579 + *
580 + * Copyright (c) 2016 Mediatek
581 + * Authors:    Xiaolei Li              <xiaolei.li@mediatek.com>
582 + *             Jorge Ramirez-Ortiz     <jorge.ramirez-ortiz@linaro.org>
583 + * This program is free software; you can redistribute it and/or modify it
584 + * under the terms of the GNU General Public License version 2 as published
585 + * by the Free Software Foundation.
586 + */
587 +
588 +#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
589 +#define __DRIVERS_MTD_NAND_MTK_ECC_H__
590 +
591 +#include <linux/types.h>
592 +
593 +#define ECC_PARITY_BITS                (14)
594 +
595 +enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
596 +enum mtk_ecc_codec {ECC_ENC, ECC_DEC};
597 +
598 +struct device_node;
599 +struct mtk_ecc;
600 +
601 +struct mtk_ecc_stats {
602 +       u32 corrected;
603 +       u32 bitflips;
604 +       u32 failed;
605 +};
606 +
607 +struct mtk_ecc_config {
608 +       enum mtk_ecc_mode ecc_mode;
609 +       enum mtk_ecc_codec codec;
610 +       dma_addr_t addr;
611 +       u32 sec_mask;
612 +       u32 strength;
613 +       u32 enc_len;
614 +       u32 dec_len;
615 +};
616 +
617 +int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
618 +void mtk_ecc_disable(struct mtk_ecc *, struct mtk_ecc_config *);
619 +int mtk_ecc_encode_non_nfi_mode(struct mtk_ecc *, struct mtk_ecc_config *,
620 +                               u8 *, u32);
621 +void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
622 +int mtk_ecc_wait_irq_done(struct mtk_ecc *, enum mtk_ecc_codec);
623 +void mtk_ecc_hw_init(struct mtk_ecc *);
624 +void mtk_ecc_update_strength(u32 *);
625 +
626 +struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
627 +void mtk_ecc_release(struct mtk_ecc *);
628 +
629 +#endif
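
The geometry behind these declarations is worth spelling out: each bit of correction capability costs ECC_PARITY_BITS (14) parity bits, so a sector protected with strength N consumes DIV_ROUND_UP(N * 14, 8) bytes of spare area on top of the FDM bytes, which is exactly the (strength * ECC_PARITY_BITS + 7) >> 3 computation in mtk_ecc.c. A small worked example follows; the helper names are illustrative assumptions, and the 8-byte FDM figure is taken from NFI_FDM_MAX_SIZE in mtk_nand.c below.

#include <linux/kernel.h>
#include "mtk_ecc.h"

/* Parity bytes consumed per sector at a given (supported) ECC strength:
 * strength 4 -> 7 bytes, strength 12 -> 21 bytes, strength 24 -> 42 bytes.
 */
static inline u32 example_parity_bytes(u32 strength)
{
	return DIV_ROUND_UP(strength * ECC_PARITY_BITS, 8);
}

/* Spare bytes needed per sector: FDM area plus parity.  With an 8-byte FDM
 * and strength 12 this is 8 + 21 = 29, so a 32-byte-per-sector spare layout
 * is sufficient.
 */
static inline u32 example_spare_bytes(u32 fdm_size, u32 strength)
{
	return fdm_size + example_parity_bytes(strength);
}
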
630 --- /dev/null
631 +++ b/drivers/mtd/nand/mtk_nand.c
632 @@ -0,0 +1,1432 @@
633 +/*
634 + * MTK NAND Flash controller driver.
635 + * Copyright (C) 2016 MediaTek Inc.
636 + * Authors:    Xiaolei Li              <xiaolei.li@mediatek.com>
637 + *             Jorge Ramirez-Ortiz     <jorge.ramirez-ortiz@linaro.org>
638 + *
639 + * This program is free software; you can redistribute it and/or modify
640 + * it under the terms of the GNU General Public License version 2 as
641 + * published by the Free Software Foundation.
642 + *
643 + * This program is distributed in the hope that it will be useful,
644 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
645 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
646 + * GNU General Public License for more details.
647 + */
648 +
649 +#include <linux/platform_device.h>
650 +#include <linux/dma-mapping.h>
651 +#include <linux/interrupt.h>
652 +#include <linux/delay.h>
653 +#include <linux/clk.h>
654 +#include <linux/mtd/nand.h>
655 +#include <linux/mtd/mtd.h>
656 +#include <linux/module.h>
657 +#include <linux/iopoll.h>
658 +#include <linux/of.h>
659 +#include "mtk_ecc.h"
660 +
661 +/* NAND controller register definition */
662 +#define NFI_CNFG               (0x00)
663 +#define                CNFG_AHB                BIT(0)
664 +#define                CNFG_READ_EN            BIT(1)
665 +#define                CNFG_DMA_BURST_EN       BIT(2)
666 +#define                CNFG_BYTE_RW            BIT(6)
667 +#define                CNFG_HW_ECC_EN          BIT(8)
668 +#define                CNFG_AUTO_FMT_EN        BIT(9)
669 +#define                CNFG_OP_CUST            (6 << 12)
670 +#define NFI_PAGEFMT            (0x04)
671 +#define                PAGEFMT_FDM_ECC_SHIFT   (12)
672 +#define                PAGEFMT_FDM_SHIFT       (8)
673 +#define                PAGEFMT_SPARE_16        (0)
674 +#define                PAGEFMT_SPARE_26        (1)
675 +#define                PAGEFMT_SPARE_27        (2)
676 +#define                PAGEFMT_SPARE_28        (3)
677 +#define                PAGEFMT_SPARE_32        (4)
678 +#define                PAGEFMT_SPARE_36        (5)
679 +#define                PAGEFMT_SPARE_40        (6)
680 +#define                PAGEFMT_SPARE_44        (7)
681 +#define                PAGEFMT_SPARE_48        (8)
682 +#define                PAGEFMT_SPARE_49        (9)
683 +#define                PAGEFMT_SPARE_50        (0xa)
684 +#define                PAGEFMT_SPARE_51        (0xb)
685 +#define                PAGEFMT_SPARE_52        (0xc)
686 +#define                PAGEFMT_SPARE_62        (0xd)
687 +#define                PAGEFMT_SPARE_63        (0xe)
688 +#define                PAGEFMT_SPARE_64        (0xf)
689 +#define                PAGEFMT_SPARE_SHIFT     (4)
690 +#define                PAGEFMT_SEC_SEL_512     BIT(2)
691 +#define                PAGEFMT_512_2K          (0)
692 +#define                PAGEFMT_2K_4K           (1)
693 +#define                PAGEFMT_4K_8K           (2)
694 +#define                PAGEFMT_8K_16K          (3)
695 +/* NFI control */
696 +#define NFI_CON                        (0x08)
697 +#define                CON_FIFO_FLUSH          BIT(0)
698 +#define                CON_NFI_RST             BIT(1)
699 +#define                CON_BRD                 BIT(8)  /* burst  read */
700 +#define                CON_BWR                 BIT(9)  /* burst  write */
701 +#define                CON_SEC_SHIFT           (12)
702 +/* Timing control register */
703 +#define NFI_ACCCON             (0x0C)
704 +#define NFI_INTR_EN            (0x10)
705 +#define                INTR_AHB_DONE_EN        BIT(6)
706 +#define NFI_INTR_STA           (0x14)
707 +#define NFI_CMD                        (0x20)
708 +#define NFI_ADDRNOB            (0x30)
709 +#define NFI_COLADDR            (0x34)
710 +#define NFI_ROWADDR            (0x38)
711 +#define NFI_STRDATA            (0x40)
712 +#define                STAR_EN                 (1)
713 +#define                STAR_DE                 (0)
714 +#define NFI_CNRNB              (0x44)
715 +#define NFI_DATAW              (0x50)
716 +#define NFI_DATAR              (0x54)
717 +#define NFI_PIO_DIRDY          (0x58)
718 +#define                PIO_DI_RDY              (0x01)
719 +#define NFI_STA                        (0x60)
720 +#define                STA_CMD                 BIT(0)
721 +#define                STA_ADDR                BIT(1)
722 +#define                STA_BUSY                BIT(8)
723 +#define                STA_EMP_PAGE            BIT(12)
724 +#define                NFI_FSM_CUSTDATA        (0xe << 16)
725 +#define                NFI_FSM_MASK            (0xf << 16)
726 +#define NFI_ADDRCNTR           (0x70)
727 +#define                CNTR_MASK               GENMASK(16, 12)
728 +#define NFI_STRADDR            (0x80)
729 +#define NFI_BYTELEN            (0x84)
730 +#define NFI_CSEL               (0x90)
731 +#define NFI_FDML(x)            (0xA0 + (x) * sizeof(u32) * 2)
732 +#define NFI_FDMM(x)            (0xA4 + (x) * sizeof(u32) * 2)
733 +#define NFI_FDM_MAX_SIZE       (8)
734 +#define NFI_MASTER_STA         (0x224)
735 +#define                MASTER_STA_MASK         (0x0FFF)
736 +#define NFI_EMPTY_THRESH       (0x23C)
737 +
738 +#define MTK_NAME               "mtk-nand"
739 +#define KB(x)                  ((x) * 1024UL)
740 +#define MB(x)                  (KB(x) * 1024UL)
741 +
742 +#define MTK_TIMEOUT            (500000)
743 +#define MTK_RESET_TIMEOUT      (1000000)
744 +#define MTK_MAX_SECTOR         (16)
745 +#define MTK_NAND_MAX_NSELS     (2)
746 +
747 +typedef void (*bad_mark_swap)(struct mtd_info *, uint8_t *buf, int raw);
748 +struct mtk_nfc_bad_mark_ctl {
749 +       bad_mark_swap bm_swap;
750 +       u32 sec;
751 +       u32 pos;
752 +};
753 +
754 +/*
755 + * FDM: region used to store free OOB data
756 + */
757 +struct mtk_nfc_fdm {
758 +       u32 reg_size;
759 +       u32 ecc_size;
760 +};
761 +
762 +struct mtk_nfc_nand_chip {
763 +       struct list_head node;
764 +       struct nand_chip nand;
765 +
766 +       struct mtk_nfc_bad_mark_ctl bad_mark;
767 +       struct mtk_nfc_fdm fdm;
768 +       u32 spare_per_sector;
769 +
770 +       int nsels;
771 +       u8 sels[0];
772 +       /* nothing after this field */
773 +};
774 +
775 +struct mtk_nfc_clk {
776 +       struct clk *nfi_clk;
777 +       struct clk *pad_clk;
778 +};
779 +
780 +struct mtk_nfc {
781 +       struct nand_hw_control controller;
782 +       struct mtk_ecc_config ecc_cfg;
783 +       struct mtk_nfc_clk clk;
784 +       struct mtk_ecc *ecc;
785 +
786 +       struct device *dev;
787 +       void __iomem *regs;
788 +
789 +       struct completion done;
790 +       struct list_head chips;
791 +
792 +       u8 *buffer;
793 +};
794 +
795 +static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
796 +{
797 +       return container_of(nand, struct mtk_nfc_nand_chip, nand);
798 +}
799 +
800 +static inline uint8_t *data_ptr(struct nand_chip *chip, const uint8_t *p, int i)
801 +{
802 +       return (uint8_t *) p + i * chip->ecc.size;
803 +}
804 +
805 +static inline uint8_t *oob_ptr(struct nand_chip *chip, int i)
806 +{
807 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
808 +       uint8_t *poi;
809 +
810 +       if (i < mtk_nand->bad_mark.sec)
811 +               poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
812 +       else if (i == mtk_nand->bad_mark.sec)
813 +               poi = chip->oob_poi;
814 +       else
815 +               poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
816 +
817 +       return poi;
818 +}
819 +
820 +static inline int mtk_data_len(struct nand_chip *chip)
821 +{
822 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
823 +
824 +       return chip->ecc.size + mtk_nand->spare_per_sector;
825 +}
826 +
827 +static inline uint8_t *mtk_data_ptr(struct nand_chip *chip,  int i)
828 +{
829 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
830 +
831 +       return nfc->buffer + i * mtk_data_len(chip);
832 +}
833 +
834 +static inline uint8_t *mtk_oob_ptr(struct nand_chip *chip, int i)
835 +{
836 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
837 +
838 +       return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
839 +}
840 +
841 +static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
842 +{
843 +       writel(val, nfc->regs + reg);
844 +}
845 +
846 +static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
847 +{
848 +       writew(val, nfc->regs + reg);
849 +}
850 +
851 +static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
852 +{
853 +       writeb(val, nfc->regs + reg);
854 +}
855 +
856 +static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
857 +{
858 +       return readl_relaxed(nfc->regs + reg);
859 +}
860 +
861 +static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
862 +{
863 +       return readw_relaxed(nfc->regs + reg);
864 +}
865 +
866 +static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
867 +{
868 +       return readb_relaxed(nfc->regs + reg);
869 +}
870 +
871 +static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
872 +{
873 +       struct device *dev = nfc->dev;
874 +       u32 val;
875 +       int ret;
876 +
877 +       /* reset all registers and force the NFI master to terminate */
878 +       nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
879 +
880 +       /* wait for the master to finish the last transaction */
881 +       ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
882 +                       !(val & MASTER_STA_MASK), 50, MTK_RESET_TIMEOUT);
883 +       if (ret)
884 +               dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
885 +                       NFI_MASTER_STA, val);
886 +
887 +       /* ensure any status register affected by the NFI master is reset */
888 +       nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
889 +       nfi_writew(nfc, STAR_DE, NFI_STRDATA);
890 +}
891 +
892 +static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
893 +{
894 +       struct device *dev = nfc->dev;
895 +       u32 val;
896 +       int ret;
897 +
898 +       nfi_writel(nfc, command, NFI_CMD);
899 +
900 +       ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
901 +                                       !(val & STA_CMD), 10,  MTK_TIMEOUT);
902 +       if (ret) {
903 +               dev_warn(dev, "nfi core timed out entering command mode\n");
904 +               return -EIO;
905 +       }
906 +
907 +       return 0;
908 +}
909 +
910 +static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
911 +{
912 +       struct device *dev = nfc->dev;
913 +       u32 val;
914 +       int ret;
915 +
916 +       nfi_writel(nfc, addr, NFI_COLADDR);
917 +       nfi_writel(nfc, 0, NFI_ROWADDR);
918 +       nfi_writew(nfc, 1, NFI_ADDRNOB);
919 +
920 +       ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
921 +                                       !(val & STA_ADDR), 10, MTK_TIMEOUT);
922 +       if (ret) {
923 +               dev_warn(dev, "nfi core timed out entering address mode\n");
924 +               return -EIO;
925 +       }
926 +
927 +       return 0;
928 +}
929 +
930 +static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
931 +{
932 +       struct nand_chip *chip = mtd_to_nand(mtd);
933 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
934 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
935 +       u32 fmt, spare;
936 +
937 +       if (!mtd->writesize)
938 +               return 0;
939 +
940 +       spare = mtk_nand->spare_per_sector;
941 +
942 +       switch (mtd->writesize) {
943 +       case 512:
944 +               fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
945 +               break;
946 +       case KB(2):
947 +               if (chip->ecc.size == 512)
948 +                       fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
949 +               else
950 +                       fmt = PAGEFMT_512_2K;
951 +               break;
952 +       case KB(4):
953 +               if (chip->ecc.size == 512)
954 +                       fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
955 +               else
956 +                       fmt = PAGEFMT_2K_4K;
957 +               break;
958 +       case KB(8):
959 +               if (chip->ecc.size == 512)
960 +                       fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
961 +               else
962 +                       fmt = PAGEFMT_4K_8K;
963 +               break;
964 +       case KB(16):
965 +               fmt = PAGEFMT_8K_16K;
966 +               break;
967 +       default:
968 +               dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
969 +               return -EINVAL;
970 +       }
971 +
972 +       /* the hardware doubles the value for this eccsize so let's halve it */
973 +       if (chip->ecc.size == 1024)
974 +               spare >>= 1;
975 +
976 +       switch (spare) {
977 +       case 16:
978 +               fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
979 +               break;
980 +       case 26:
981 +               fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
982 +               break;
983 +       case 27:
984 +               fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
985 +               break;
986 +       case 28:
987 +               fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
988 +               break;
989 +       case 32:
990 +               fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
991 +               break;
992 +       case 36:
993 +               fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
994 +               break;
995 +       case 40:
996 +               fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
997 +               break;
998 +       case 44:
999 +               fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
1000 +               break;
1001 +       case 48:
1002 +               fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
1003 +               break;
1004 +       case 49:
1005 +               fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
1006 +               break;
1007 +       case 50:
1008 +               fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
1009 +               break;
1010 +       case 51:
1011 +               fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
1012 +               break;
1013 +       case 52:
1014 +               fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
1015 +               break;
1016 +       case 62:
1017 +               fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
1018 +               break;
1019 +       case 63:
1020 +               fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
1021 +               break;
1022 +       case 64:
1023 +               fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
1024 +               break;
1025 +       default:
1026 +               dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
1027 +               return -EINVAL;
1028 +       }
1029 +
1030 +       fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
1031 +       fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
1032 +       nfi_writew(nfc, fmt, NFI_PAGEFMT);
1033 +
1034 +       nfc->ecc_cfg.strength = chip->ecc.strength;
1035 +       nfc->ecc_cfg.enc_len = chip->ecc.size + mtk_nand->fdm.ecc_size;
1036 +       nfc->ecc_cfg.dec_len = (nfc->ecc_cfg.enc_len << 3)
1037 +                               + chip->ecc.strength * ECC_PARITY_BITS;
1038 +
1039 +       return 0;
1040 +}
1041 +
1042 +static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
1043 +{
1044 +       struct nand_chip *nand = mtd_to_nand(mtd);
1045 +       struct mtk_nfc *nfc = nand_get_controller_data(nand);
1046 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
1047 +
1048 +       if (chip < 0)
1049 +               return;
1050 +
1051 +       mtk_nfc_hw_runtime_config(mtd);
1052 +
1053 +       nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
1054 +}
1055 +
1056 +static int mtk_nfc_dev_ready(struct mtd_info *mtd)
1057 +{
1058 +       struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
1059 +
1060 +       if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
1061 +               return 0;
1062 +
1063 +       return 1;
1064 +}
1065 +
1066 +static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
1067 +{
1068 +       struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
1069 +
1070 +       if (ctrl & NAND_ALE)
1071 +               mtk_nfc_send_address(nfc, dat);
1072 +       else if (ctrl & NAND_CLE) {
1073 +               mtk_nfc_hw_reset(nfc);
1074 +
1075 +               nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
1076 +               mtk_nfc_send_command(nfc, dat);
1077 +       }
1078 +}
1079 +
1080 +static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
1081 +{
1082 +       int rc;
1083 +       u8 val;
1084 +
1085 +       rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
1086 +                                       val & PIO_DI_RDY, 10, MTK_TIMEOUT);
1087 +       if (rc < 0)
1088 +               dev_err(nfc->dev, "data not ready\n");
1089 +}
1090 +
1091 +static inline uint8_t mtk_nfc_read_byte(struct mtd_info *mtd)
1092 +{
1093 +       struct nand_chip *chip = mtd_to_nand(mtd);
1094 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1095 +       u32 reg;
1096 +
1097 +       reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
1098 +       if (reg != NFI_FSM_CUSTDATA) {
1099 +               reg = nfi_readw(nfc, NFI_CNFG);
1100 +               reg |= CNFG_BYTE_RW | CNFG_READ_EN;
1101 +               nfi_writew(nfc, reg, NFI_CNFG);
1102 +
1103 +               reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
1104 +               nfi_writel(nfc, reg, NFI_CON);
1105 +
1106 +               /* trigger to fetch data */
1107 +               nfi_writew(nfc, STAR_EN, NFI_STRDATA);
1108 +       }
1109 +
1110 +       mtk_nfc_wait_ioready(nfc);
1111 +
1112 +       return nfi_readb(nfc, NFI_DATAR);
1113 +}
1114 +
1115 +static void mtk_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1116 +{
1117 +       int i;
1118 +
1119 +       for (i = 0; i < len; i++)
1120 +               buf[i] = mtk_nfc_read_byte(mtd);
1121 +}
1122 +
1123 +static void mtk_nfc_write_byte(struct mtd_info *mtd, uint8_t byte)
1124 +{
1125 +       struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
1126 +       u32 reg;
1127 +
1128 +       reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
1129 +
1130 +       if (reg != NFI_FSM_CUSTDATA) {
1131 +               reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
1132 +               nfi_writew(nfc, reg, NFI_CNFG);
1133 +
1134 +               reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
1135 +               nfi_writel(nfc, reg, NFI_CON);
1136 +
1137 +               nfi_writew(nfc, STAR_EN, NFI_STRDATA);
1138 +       }
1139 +
1140 +       mtk_nfc_wait_ioready(nfc);
1141 +       nfi_writeb(nfc, byte, NFI_DATAW);
1142 +}
1143 +
1144 +static void mtk_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
1145 +{
1146 +       int i;
1147 +
1148 +       for (i = 0; i < len; i++)
1149 +               mtk_nfc_write_byte(mtd, buf[i]);
1150 +}
1151 +
1152 +static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
1153 +{
1154 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1155 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1156 +       int size = chip->ecc.size + mtk_nand->fdm.reg_size;
1157 +
1158 +       nfc->ecc_cfg.ecc_mode = ECC_DMA_MODE;
1159 +       nfc->ecc_cfg.codec = ECC_ENC;
1160 +       return mtk_ecc_encode_non_nfi_mode(nfc->ecc, &nfc->ecc_cfg, data, size);
1161 +}
1162 +
1163 +static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, uint8_t *b, int c)
1164 +{
1165 +       /* nope */
1166 +}
1167 +
1168 +static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, uint8_t *buf, int raw)
1169 +{
1170 +       struct nand_chip *chip = mtd_to_nand(mtd);
1171 +       struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
1172 +       u32 bad_pos = nand->bad_mark.pos;
1173 +
1174 +       if (raw)
1175 +               bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
1176 +       else
1177 +               bad_pos += nand->bad_mark.sec * chip->ecc.size;
1178 +
1179 +       swap(chip->oob_poi[0], buf[bad_pos]);
1180 +}
1181 +
1182 +static int mtk_nfc_format_subpage(struct mtd_info *mtd, uint32_t offset,
1183 +                       uint32_t len, const uint8_t *buf)
1184 +{
1185 +       struct nand_chip *chip = mtd_to_nand(mtd);
1186 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1187 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1188 +       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
1189 +       u32 start, end;
1190 +       int i, ret;
1191 +
1192 +       start = offset / chip->ecc.size;
1193 +       end = DIV_ROUND_UP(offset + len, chip->ecc.size);
1194 +
1195 +       memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
1196 +       for (i = 0; i < chip->ecc.steps; i++) {
1197 +
1198 +               memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
1199 +                       chip->ecc.size);
1200 +
1201 +               if (start > i || i >= end)
1202 +                       continue;
1203 +
1204 +               if (i == mtk_nand->bad_mark.sec)
1205 +                       mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
1206 +
1207 +               memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
1208 +
1209 +               /* program the ECC parity bytes back to the OOB */
1210 +               ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
1211 +               if (ret < 0)
1212 +                       return ret;
1213 +       }
1214 +
1215 +       return 0;
1216 +}
1217 +
1218 +static void mtk_nfc_format_page(struct mtd_info *mtd, const uint8_t *buf)
1219 +{
1220 +       struct nand_chip *chip = mtd_to_nand(mtd);
1221 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1222 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1223 +       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
1224 +       u32 i;
1225 +
1226 +       memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
1227 +       for (i = 0; i < chip->ecc.steps; i++) {
1228 +               if (buf)
1229 +                       memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
1230 +                               chip->ecc.size);
1231 +
1232 +               if (i == mtk_nand->bad_mark.sec)
1233 +                       mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
1234 +
1235 +               memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
1236 +       }
1237 +}
1238 +
1239 +static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
1240 +                                       u32 sectors)
1241 +{
1242 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1243 +       u32 *p;
1244 +       int i;
1245 +
1246 +       for (i = 0; i < sectors; i++) {
1247 +               p = (u32 *) oob_ptr(chip, start + i);
1248 +               p[0] = nfi_readl(nfc, NFI_FDML(i));
1249 +               p[1] = nfi_readl(nfc, NFI_FDMM(i));
1250 +       }
1251 +}
1252 +
1253 +static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
1254 +{
1255 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1256 +       u32 *p;
1257 +       int i;
1258 +
1259 +       for (i = 0; i < chip->ecc.steps ; i++) {
1260 +               p = (u32 *) oob_ptr(chip, i);
1261 +               nfi_writel(nfc, p[0], NFI_FDML(i));
1262 +               nfi_writel(nfc, p[1], NFI_FDMM(i));
1263 +       }
1264 +}
1265 +
1266 +static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1267 +                               const uint8_t *buf, int page, int len)
1268 +{
1269 +
1270 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1271 +       struct device *dev = nfc->dev;
1272 +       dma_addr_t addr;
1273 +       u32 reg;
1274 +       int ret;
1275 +
1276 +       addr = dma_map_single(dev, (void *) buf, len, DMA_TO_DEVICE);
1277 +       ret = dma_mapping_error(nfc->dev, addr);
1278 +       if (ret) {
1279 +               dev_err(nfc->dev, "dma mapping error\n");
1280 +               return -EINVAL;
1281 +       }
1282 +
1283 +       reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
1284 +       nfi_writew(nfc, reg, NFI_CNFG);
1285 +
1286 +       nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
1287 +       nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
1288 +       nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
1289 +
1290 +       init_completion(&nfc->done);
1291 +
1292 +       reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
1293 +       nfi_writel(nfc, reg, NFI_CON);
1294 +       nfi_writew(nfc, STAR_EN, NFI_STRDATA);
1295 +
1296 +       ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
1297 +       if (!ret) {
1298 +               dev_err(dev, "program ahb done timeout\n");
1299 +               nfi_writew(nfc, 0, NFI_INTR_EN);
1300 +               ret = -ETIMEDOUT;
1301 +               goto timeout;
1302 +       }
1303 +
1304 +       ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
1305 +                       (reg & CNTR_MASK) >= chip->ecc.steps, 10, MTK_TIMEOUT);
1306 +       if (ret)
1307 +               dev_err(dev, "hwecc write timeout\n");
1308 +
1309 +timeout:
1310 +
1311 +       dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
1312 +       nfi_writel(nfc, 0, NFI_CON);
1313 +
1314 +       return ret;
1315 +}
1316 +
1317 +static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1318 +                       const uint8_t *buf, int page, int raw)
1319 +{
1320 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1321 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1322 +       size_t len;
1323 +       const u8 *bufpoi;
1324 +       u32 reg;
1325 +       int ret;
1326 +
1327 +       if (!raw) {
1328 +               /* OOB => FDM: from register,  ECC: from HW */
1329 +               reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
1330 +               nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
1331 +
1332 +               nfc->ecc_cfg.codec = ECC_ENC;
1333 +               nfc->ecc_cfg.ecc_mode = ECC_NFI_MODE;
1334 +               ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
1335 +               if (ret) {
1336 +                       /* clear NFI config */
1337 +                       reg = nfi_readw(nfc, NFI_CNFG);
1338 +                       reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
1339 +                       nfi_writew(nfc, reg, NFI_CNFG);
1340 +
1341 +                       return ret;
1342 +               }
1343 +
1344 +               memcpy(nfc->buffer, buf, mtd->writesize);
1345 +               mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
1346 +               bufpoi = nfc->buffer;
1347 +
1348 +               /* write OOB into the FDM registers (OOB area in MTK NAND) */
1349 +               mtk_nfc_write_fdm(chip);
1350 +       } else
1351 +               bufpoi = buf;
1352 +
1353 +       len = mtd->writesize + (raw ? mtd->oobsize : 0);
1354 +       ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
1355 +
1356 +       if (!raw)
1357 +               mtk_ecc_disable(nfc->ecc, &nfc->ecc_cfg);
1358 +
1359 +       return ret;
1360 +}
1361 +
1362 +static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
1363 +       struct nand_chip *chip, const uint8_t *buf, int oob_on, int page)
1364 +{
1365 +       return mtk_nfc_write_page(mtd, chip, buf, page, 0);
1366 +}
1367 +
1368 +static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1369 +                               const uint8_t *buf, int oob_on, int pg)
1370 +{
1371 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1372 +
1373 +       mtk_nfc_format_page(mtd, buf);
1374 +       return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
1375 +}
1376 +
1377 +static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
1378 +               struct nand_chip *chip, uint32_t offset, uint32_t data_len,
1379 +               const uint8_t *buf, int oob_on, int page)
1380 +{
1381 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1382 +       int ret;
1383 +
1384 +       ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
1385 +       if (ret < 0)
1386 +               return ret;
1387 +
1388 +       /* use the data in the private buffer (now with FDM and ECC parity) */
1389 +       return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
1390 +}
1391 +
1392 +static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1393 +                                       int page)
1394 +{
1395 +       int ret;
1396 +
1397 +       chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
1398 +
1399 +       ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
1400 +       if (ret < 0)
1401 +               return -EIO;
1402 +
1403 +       chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1404 +       ret = chip->waitfunc(mtd, chip);
1405 +
1406 +       return ret & NAND_STATUS_FAIL ? -EIO : 0;
1407 +}
1408 +
1409 +static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
1410 +{
1411 +       struct nand_chip *chip = mtd_to_nand(mtd);
1412 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1413 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1414 +       struct mtk_ecc_stats stats;
1415 +       int rc, i;
1416 +
1417 +       rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
1418 +       if (rc) {
1419 +               memset(buf, 0xff, sectors * chip->ecc.size);
1420 +               for (i = 0; i < sectors; i++)
1421 +                       memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
1422 +               return 0;
1423 +       }
1424 +
1425 +       mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
1426 +       mtd->ecc_stats.corrected += stats.corrected;
1427 +       mtd->ecc_stats.failed += stats.failed;
1428 +
1429 +       return stats.bitflips;
1430 +}
1431 +
1432 +static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1433 +               uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1434 +               int page, int raw)
1435 +{
1436 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1437 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1438 +       u32 spare = mtk_nand->spare_per_sector;
1439 +       u32 column, sectors, start, end, reg;
1440 +       dma_addr_t addr;
1441 +       int bitflips;
1442 +       size_t len;
1443 +       u8 *buf;
1444 +       int rc;
1445 +
1446 +       start = data_offs / chip->ecc.size;
1447 +       end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
1448 +
1449 +       sectors = end - start;
1450 +       column = start * (chip->ecc.size + spare);
1451 +
1452 +       len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
1453 +       buf = bufpoi + start * chip->ecc.size;
1454 +
1455 +       if (column != 0)
1456 +               chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
1457 +
1458 +       addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
1459 +       rc = dma_mapping_error(nfc->dev, addr);
1460 +       if (rc) {
1461 +               dev_err(nfc->dev, "dma mapping error\n");
1462 +
1463 +               return -EINVAL;
1464 +       }
1465 +
1466 +       reg = nfi_readw(nfc, NFI_CNFG);
1467 +       reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
1468 +       if (!raw) {
1469 +               reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
1470 +               nfi_writew(nfc, reg, NFI_CNFG);
1471 +
1472 +               nfc->ecc_cfg.ecc_mode = ECC_NFI_MODE;
1473 +               nfc->ecc_cfg.sec_mask = sectors;
1474 +               nfc->ecc_cfg.codec = ECC_DEC;
1475 +               rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
1476 +               if (rc) {
1477 +                       dev_err(nfc->dev, "ecc enable\n");
1478 +                       /* clear NFI_CNFG */
1479 +                       reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
1480 +                               CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
1481 +                       nfi_writew(nfc, reg, NFI_CNFG);
1482 +                       dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
1483 +
1484 +                       return rc;
1485 +               }
1486 +       } else
1487 +               nfi_writew(nfc, reg, NFI_CNFG);
1488 +
1489 +       nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
1490 +       nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
1491 +       nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
1492 +
1493 +       init_completion(&nfc->done);
1494 +       reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
1495 +       nfi_writel(nfc, reg, NFI_CON);
1496 +       nfi_writew(nfc, STAR_EN, NFI_STRDATA);
1497 +
1498 +       rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
1499 +       if (!rc)
1500 +               dev_warn(nfc->dev, "read ahb/dma done timeout\n");
1501 +
1502 +       rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
1503 +                               (reg & CNTR_MASK) >= sectors, 10, MTK_TIMEOUT);
1504 +       if (rc < 0) {
1505 +               dev_err(nfc->dev, "subpage done timeout\n");
1506 +               bitflips = -EIO;
1507 +       } else {
1508 +               bitflips = 0;
1509 +               if (!raw) {
1510 +                       rc = mtk_ecc_wait_irq_done(nfc->ecc, ECC_DEC);
1511 +                       bitflips = rc < 0 ? -ETIMEDOUT :
1512 +                               mtk_nfc_update_ecc_stats(mtd, buf, sectors);
1513 +                       mtk_nfc_read_fdm(chip, start, sectors);
1514 +               }
1515 +       }
1516 +
1517 +       dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
1518 +
1519 +       if (raw)
1520 +               goto done;
1521 +
1522 +       mtk_ecc_disable(nfc->ecc, &nfc->ecc_cfg);
1523 +
1524 +       if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
1525 +               mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
1526 +done:
1527 +       nfi_writel(nfc, 0, NFI_CON);
1528 +
1529 +       return bitflips;
1530 +}
1531 +
1532 +static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
1533 +       struct nand_chip *chip, uint32_t off, uint32_t len, uint8_t *p, int pg)
1534 +{
1535 +       return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
1536 +}
1537 +
1538 +static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
1539 +       struct nand_chip *chip, uint8_t *p, int oob_on, int pg)
1540 +{
1541 +       return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
1542 +}
1543 +
1544 +static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1545 +                               uint8_t *buf, int oob_on, int page)
1546 +{
1547 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1548 +       struct mtk_nfc *nfc = nand_get_controller_data(chip);
1549 +       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
1550 +       int i, ret;
1551 +
1552 +       memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
1553 +       ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
1554 +                                       page, 1);
1555 +       if (ret < 0)
1556 +               return ret;
1557 +
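     +       /*
     +        * The controller stores each sector as data followed by its spare
     +        * bytes: copy the FDM part into the OOB buffer and the data part
     +        * into the caller's buffer.
     +        */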
1558 +       for (i = 0; i < chip->ecc.steps; i++) {
1559 +               memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
1560 +               if (i == mtk_nand->bad_mark.sec)
1561 +                       mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
1562 +
1563 +               if (buf)
1564 +                       memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
1565 +                               chip->ecc.size);
1566 +       }
1567 +
1568 +       return ret;
1569 +}
1570 +
1571 +static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1572 +                               int page)
1573 +{
1574 +       chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1575 +
1576 +       return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
1577 +}
1578 +
1579 +static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
1580 +{
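     +       /* fixed default access timing, not tuned to the attached flash */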
1581 +       nfi_writel(nfc, 0x10804211, NFI_ACCCON);
1582 +       nfi_writew(nfc, 0xf1, NFI_CNRNB);
1583 +       nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
1584 +
1585 +       mtk_nfc_hw_reset(nfc);
1586 +
1587 +       nfi_readl(nfc, NFI_INTR_STA);
1588 +       nfi_writel(nfc, 0, NFI_INTR_EN);
1589 +}
1590 +
1591 +static irqreturn_t mtk_nfc_irq(int irq, void *id)
1592 +{
1593 +       struct mtk_nfc *nfc = id;
1594 +       u16 sta, ien;
1595 +
1596 +       sta = nfi_readw(nfc, NFI_INTR_STA);
1597 +       ien = nfi_readw(nfc, NFI_INTR_EN);
1598 +
1599 +       if (!(sta & ien))
1600 +               return IRQ_NONE;
1601 +
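     +       /* mask the sources that fired; they are re-armed by the next operation */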
1602 +       nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
1603 +       complete(&nfc->done);
1604 +
1605 +       return IRQ_HANDLED;
1606 +}
1607 +
1608 +static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
1609 +{
1610 +       int ret;
1611 +
1612 +       ret = clk_prepare_enable(clk->nfi_clk);
1613 +       if (ret) {
1614 +               dev_err(dev, "failed to enable nfi clk\n");
1615 +               return ret;
1616 +       }
1617 +
1618 +       ret = clk_prepare_enable(clk->pad_clk);
1619 +       if (ret) {
1620 +               dev_err(dev, "failed to enable pad clk\n");
1621 +               clk_disable_unprepare(clk->nfi_clk);
1622 +               return ret;
1623 +       }
1624 +
1625 +       return 0;
1626 +}
1627 +
1628 +static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
1629 +{
1630 +       clk_disable_unprepare(clk->nfi_clk);
1631 +       clk_disable_unprepare(clk->pad_clk);
1632 +}
1633 +
1634 +static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
1635 +                               struct mtd_oob_region *oob_region)
1636 +{
1637 +       struct nand_chip *chip = mtd_to_nand(mtd);
1638 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1639 +       struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
1640 +       u32 eccsteps;
1641 +
1642 +       eccsteps = mtd->writesize / chip->ecc.size;
1643 +
1644 +       if (section >= eccsteps)
1645 +               return -ERANGE;
1646 +
1647 +       oob_region->length = fdm->reg_size - fdm->ecc_size;
1648 +       oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
1649 +
1650 +       return 0;
1651 +}
1652 +
1653 +static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
1654 +                               struct mtd_oob_region *oob_region)
1655 +{
1656 +       struct nand_chip *chip = mtd_to_nand(mtd);
1657 +       struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
1658 +       u32 eccsteps;
1659 +
1660 +       if (section)
1661 +               return -ERANGE;
1662 +
1663 +       eccsteps = mtd->writesize / chip->ecc.size;
1664 +       oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
1665 +       oob_region->length = mtd->oobsize - oob_region->offset;
1666 +
1667 +       return 0;
1668 +}
1669 +
1670 +static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
1671 +       .free = mtk_nfc_ooblayout_free,
1672 +       .ecc = mtk_nfc_ooblayout_ecc,
1673 +};
1674 +
1675 +static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
1676 +{
1677 +       struct nand_chip *nand = mtd_to_nand(mtd);
1678 +       struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
1679 +       u32 ecc_bytes;
1680 +
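     +       /* the FDM region gets whatever the ECC parity bytes leave free */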
1681 +       ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
1682 +
1683 +       fdm->reg_size = chip->spare_per_sector - ecc_bytes;
1684 +       if (fdm->reg_size > NFI_FDM_MAX_SIZE)
1685 +               fdm->reg_size = NFI_FDM_MAX_SIZE;
1686 +
1687 +       /* bad block mark storage */
1688 +       fdm->ecc_size = 1;
1689 +}
1690 +
1691 +static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
1692 +                               struct mtd_info *mtd)
1693 +{
1694 +       struct nand_chip *nand = mtd_to_nand(mtd);
1695 +
1696 +       if (mtd->writesize == 512) {
1697 +               bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
1698 +       } else {
1699 +               bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
1700 +               bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
1701 +               bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
1702 +       }
1703 +}
1704 +
1705 +static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
1706 +{
1707 +       struct nand_chip *nand = mtd_to_nand(mtd);
1708 +       u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
1709 +                       48, 49, 50, 51, 52, 62, 63, 64};
1710 +       u32 eccsteps, i;
1711 +
1712 +       eccsteps = mtd->writesize / nand->ecc.size;
1713 +       *sps = mtd->oobsize / eccsteps;
1714 +
1715 +       if (nand->ecc.size == 1024)
1716 +               *sps >>= 1;
1717 +
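     +       /*
     +        * Clamp to the closest spare size the controller supports:
     +        * round down, but never below the 16 byte minimum.
     +        */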
1718 +       for (i = 0; i < ARRAY_SIZE(spare); i++) {
1719 +               if (*sps <= spare[i]) {
1720 +                       if (!i)
1721 +                               *sps = spare[i];
1722 +                       else if (*sps != spare[i])
1723 +                               *sps = spare[i - 1];
1724 +                       break;
1725 +               }
1726 +       }
1727 +
1728 +       if (i >= ARRAY_SIZE(spare))
1729 +               *sps = spare[ARRAY_SIZE(spare) - 1];
1730 +
1731 +       if (nand->ecc.size == 1024)
1732 +               *sps <<= 1;
1733 +}
1734 +
1735 +static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
1736 +{
1737 +       struct nand_chip *nand = mtd_to_nand(mtd);
1738 +       u32 spare;
1739 +
1740 +       /* support only ecc hw mode */
1741 +       if (nand->ecc.mode != NAND_ECC_HW) {
1742 +               dev_err(dev, "ecc.mode not supported\n");
1743 +               return -EINVAL;
1744 +       }
1745 +
1746 +       /* if optional DT settings are not present */
1747 +       if (!nand->ecc.size || !nand->ecc.strength) {
1749 +               /* controller only supports sizes 512 and 1024 */
1750 +               nand->ecc.size = (mtd->writesize > 512) ? 1024 : 512;
1751 +
1752 +               /* pick a spare size the controller supports */
1753 +               mtk_nfc_set_spare_per_sector(&spare, mtd);
1754 +               spare = spare - NFI_FDM_MAX_SIZE;
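     +               /*
     +                * Use all spare bits left after the FDM for parity:
     +                * strength = (spare bytes * 8) / parity bits per corrected bit.
     +                */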
1755 +               nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
1756 +       }
1757 +
1758 +       mtk_ecc_update_strength(&nand->ecc.strength);
1759 +
1760 +       dev_info(dev, "eccsize %d eccstrength %d\n",
1761 +               nand->ecc.size, nand->ecc.strength);
1762 +
1763 +       return 0;
1764 +}
1765 +
1766 +static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
1767 +                               struct device_node *np)
1768 +{
1769 +       struct mtk_nfc_nand_chip *chip;
1770 +       struct nand_chip *nand;
1771 +       struct mtd_info *mtd;
1772 +       int nsels, len;
1773 +       u32 tmp;
1774 +       int ret;
1775 +       int i;
1776 +
1777 +       if (!of_get_property(np, "reg", &nsels))
1778 +               return -ENODEV;
1779 +
1780 +       nsels /= sizeof(u32);
1781 +       if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
1782 +               dev_err(dev, "invalid reg property size %d\n", nsels);
1783 +               return -EINVAL;
1784 +       }
1785 +
1786 +       chip = devm_kzalloc(dev,
1787 +                       sizeof(*chip) + nsels * sizeof(u8), GFP_KERNEL);
1788 +       if (!chip)
1789 +               return -ENOMEM;
1790 +
1791 +       chip->nsels = nsels;
1792 +       for (i = 0; i < nsels; i++) {
1793 +               ret = of_property_read_u32_index(np, "reg", i, &tmp);
1794 +               if (ret) {
1795 +                       dev_err(dev, "reg property failure : %d\n", ret);
1796 +                       return ret;
1797 +               }
1798 +               chip->sels[i] = tmp;
1799 +       }
1800 +
1801 +       nand = &chip->nand;
1802 +       nand->controller = &nfc->controller;
1803 +
1804 +       nand_set_flash_node(nand, np);
1805 +       nand_set_controller_data(nand, nfc);
1806 +
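     +       /*
     +        * Transfers are done by DMA from a driver buffer, so let the core
     +        * bounce buffers that are not DMA-able and allow subpage reads.
     +        */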
1807 +       nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
1808 +       nand->dev_ready = mtk_nfc_dev_ready;
1809 +       nand->select_chip = mtk_nfc_select_chip;
1810 +       nand->write_byte = mtk_nfc_write_byte;
1811 +       nand->write_buf = mtk_nfc_write_buf;
1812 +       nand->read_byte = mtk_nfc_read_byte;
1813 +       nand->read_buf = mtk_nfc_read_buf;
1814 +       nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
1815 +
1816 +       /* default to hardware ECC in case the DT entry is missing */
1817 +       nand->ecc.mode = NAND_ECC_HW;
1818 +
1819 +       nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
1820 +       nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
1821 +       nand->ecc.write_page = mtk_nfc_write_page_hwecc;
1822 +       nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
1823 +       nand->ecc.write_oob = mtk_nfc_write_oob_std;
1824 +
1825 +       nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
1826 +       nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
1827 +       nand->ecc.read_page = mtk_nfc_read_page_hwecc;
1828 +       nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
1829 +       nand->ecc.read_oob = mtk_nfc_read_oob_std;
1830 +
1831 +       mtd = nand_to_mtd(nand);
1832 +       mtd->owner = THIS_MODULE;
1833 +       mtd->dev.parent = dev;
1834 +       mtd->name = MTK_NAME;
1835 +       mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
1836 +
1837 +       mtk_nfc_hw_init(nfc);
1838 +
1839 +       ret = nand_scan_ident(mtd, nsels, NULL);
1840 +       if (ret)
1841 +               return -ENODEV;
1842 +
1843 +       /* store bbt magic in page, because the OOB area is not protected */
1844 +       if (nand->bbt_options & NAND_BBT_USE_FLASH)
1845 +               nand->bbt_options |= NAND_BBT_NO_OOB;
1846 +
1847 +       ret = mtk_nfc_ecc_init(dev, mtd);
1848 +       if (ret)
1849 +               return -EINVAL;
1850 +
1851 +       mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
1852 +       mtk_nfc_set_fdm(&chip->fdm, mtd);
1853 +       mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
1854 +
1855 +       len = mtd->writesize + mtd->oobsize;
1856 +       nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
1857 +       if (!nfc->buffer)
1858 +               return -ENOMEM;
1859 +
1860 +       ret = nand_scan_tail(mtd);
1861 +       if (ret)
1862 +               return -ENODEV;
1863 +
1864 +       ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
1865 +       if (ret) {
1866 +               dev_err(dev, "mtd parse partition error\n");
1867 +               nand_release(mtd);
1868 +               return ret;
1869 +       }
1870 +
1871 +       list_add_tail(&chip->node, &nfc->chips);
1872 +
1873 +       return 0;
1874 +}
1875 +
1876 +static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
1877 +{
1878 +       struct device_node *np = dev->of_node;
1879 +       struct device_node *nand_np;
1880 +       int ret;
1881 +
1882 +       for_each_child_of_node(np, nand_np) {
1883 +               ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
1884 +               if (ret) {
1885 +                       of_node_put(nand_np);
1886 +                       return ret;
1887 +               }
1888 +       }
1889 +
1890 +       return 0;
1891 +}
1892 +
1893 +static int mtk_nfc_probe(struct platform_device *pdev)
1894 +{
1895 +       struct device *dev = &pdev->dev;
1896 +       struct device_node *np = dev->of_node;
1897 +       struct mtk_nfc *nfc;
1898 +       struct resource *res;
1899 +       int ret, irq;
1900 +
1901 +       nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
1902 +       if (!nfc)
1903 +               return -ENOMEM;
1904 +
1905 +       spin_lock_init(&nfc->controller.lock);
1906 +       init_waitqueue_head(&nfc->controller.wq);
1907 +       INIT_LIST_HEAD(&nfc->chips);
1908 +
1909 +       /* defer probing if the ECC engine is not ready yet */
1910 +       nfc->ecc = of_mtk_ecc_get(np);
1911 +       if (IS_ERR(nfc->ecc))
1912 +               return PTR_ERR(nfc->ecc);
1913 +       else if (!nfc->ecc)
1914 +               return -ENODEV;
1915 +
1916 +       nfc->dev = dev;
1917 +
1918 +       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1919 +       nfc->regs = devm_ioremap_resource(dev, res);
1920 +       if (IS_ERR(nfc->regs)) {
1921 +               ret = PTR_ERR(nfc->regs);
1922 +               dev_err(dev, "no nfi base\n");
1923 +               goto release_ecc;
1924 +       }
1925 +
1926 +       nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
1927 +       if (IS_ERR(nfc->clk.nfi_clk)) {
1928 +               dev_err(dev, "no nfi clk\n");
1929 +               ret = PTR_ERR(nfc->clk.nfi_clk);
1930 +               goto release_ecc;
1931 +       }
1932 +
1933 +       nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
1934 +       if (IS_ERR(nfc->clk.pad_clk)) {
1935 +               dev_err(dev, "no pad clk\n");
1936 +               ret = PTR_ERR(nfc->clk.pad_clk);
1937 +               goto release_ecc;
1938 +       }
1939 +
1940 +       ret = mtk_nfc_enable_clk(dev, &nfc->clk);
1941 +       if (ret)
1942 +               goto release_ecc;
1943 +
1944 +       irq = platform_get_irq(pdev, 0);
1945 +       if (irq < 0) {
1946 +               dev_err(dev, "no nfi irq resource\n");
1947 +               ret = -EINVAL;
1948 +               goto clk_disable;
1949 +       }
1950 +
1951 +       ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
1952 +       if (ret) {
1953 +               dev_err(dev, "failed to request nfi irq\n");
1954 +               goto clk_disable;
1955 +       }
1956 +
1957 +       ret = dma_set_mask(dev, DMA_BIT_MASK(32));
1958 +       if (ret) {
1959 +               dev_err(dev, "failed to set dma mask\n");
1960 +               goto clk_disable;
1961 +       }
1962 +
1963 +       platform_set_drvdata(pdev, nfc);
1964 +
1965 +       ret = mtk_nfc_nand_chips_init(dev, nfc);
1966 +       if (ret) {
1967 +               dev_err(dev, "failed to init nand chips\n");
1968 +               goto clk_disable;
1969 +       }
1970 +
1971 +       return 0;
1972 +
1973 +clk_disable:
1974 +       mtk_nfc_disable_clk(&nfc->clk);
1975 +
1976 +release_ecc:
1977 +       mtk_ecc_release(nfc->ecc);
1978 +
1979 +       return ret;
1980 +}
1981 +
1982 +static int mtk_nfc_remove(struct platform_device *pdev)
1983 +{
1984 +       struct mtk_nfc *nfc = platform_get_drvdata(pdev);
1985 +       struct mtk_nfc_nand_chip *chip;
1986 +
1987 +       while (!list_empty(&nfc->chips)) {
1988 +               chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
1989 +                                       node);
1990 +               nand_release(nand_to_mtd(&chip->nand));
1991 +               list_del(&chip->node);
1992 +       }
1993 +
1994 +       mtk_ecc_release(nfc->ecc);
1995 +       mtk_nfc_disable_clk(&nfc->clk);
1996 +
1997 +       return 0;
1998 +}
1999 +
2000 +#ifdef CONFIG_PM_SLEEP
2001 +static int mtk_nfc_suspend(struct device *dev)
2002 +{
2003 +       struct mtk_nfc *nfc = dev_get_drvdata(dev);
2004 +
2005 +       mtk_nfc_disable_clk(&nfc->clk);
2006 +
2007 +       return 0;
2008 +}
2009 +
2010 +static int mtk_nfc_resume(struct device *dev)
2011 +{
2012 +       struct mtk_nfc *nfc = dev_get_drvdata(dev);
2013 +       struct mtk_nfc_nand_chip *chip;
2014 +       struct nand_chip *nand;
2015 +       struct mtd_info *mtd;
2016 +       int ret;
2017 +       u32 i;
2018 +
2019 +       udelay(200);
2020 +
2021 +       ret = mtk_nfc_enable_clk(dev, &nfc->clk);
2022 +       if (ret)
2023 +               return ret;
2024 +
2025 +       mtk_nfc_hw_init(nfc);
2026 +
2027 +       list_for_each_entry(chip, &nfc->chips, node) {
2028 +               nand = &chip->nand;
2029 +               mtd = nand_to_mtd(nand);
2030 +               for (i = 0; i < chip->nsels; i++) {
2031 +                       nand->select_chip(mtd, i);
2032 +                       nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2033 +               }
2034 +       }
2035 +
2036 +       return 0;
2037 +}
     +
2038 +static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
2039 +#endif
2040 +
2041 +static const struct of_device_id mtk_nfc_id_table[] = {
2042 +       { .compatible = "mediatek,mt2701-nfc" },
2043 +       {}
2044 +};
2045 +MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
2046 +
2047 +static struct platform_driver mtk_nfc_driver = {
2048 +       .probe  = mtk_nfc_probe,
2049 +       .remove = mtk_nfc_remove,
2050 +       .driver = {
2051 +               .name  = MTK_NAME,
2052 +               .of_match_table = mtk_nfc_id_table,
2053 +#ifdef CONFIG_PM_SLEEP
2054 +               .pm = &mtk_nfc_pm_ops,
2055 +#endif
2056 +       },
2057 +};
2058 +
2059 +module_platform_driver(mtk_nfc_driver);
2060 +
2061 +MODULE_LICENSE("GPL");
2062 +MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
2063 +MODULE_AUTHOR("Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>");
2064 +MODULE_DESCRIPTION("MTK NAND Flash Controller Driver");