97e434466546f5620e25bb6057d1928d2e84b6dd
[oweals/openwrt.git] / target / linux / ramips / patches-4.14 / 0039-mtd-add-mt7621-nand-support.patch
1 From 0e1c4e3c97b83b4e7da65b1c56f0a7d40736ac53 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Sun, 27 Jul 2014 11:05:17 +0100
4 Subject: [PATCH 39/53] mtd: add mt7621 nand support
5
6 Signed-off-by: John Crispin <blogic@openwrt.org>
7 ---
8  drivers/mtd/nand/Kconfig            |    6 +
9  drivers/mtd/nand/Makefile           |    1 +
10  drivers/mtd/nand/bmt.c              |  750 ++++++++++++
11  drivers/mtd/nand/bmt.h              |   80 ++
12  drivers/mtd/nand/dev-nand.c         |   63 +
13  drivers/mtd/nand/mt6575_typedefs.h  |  340 ++++++
14  drivers/mtd/nand/mtk_nand2.c        | 2304 +++++++++++++++++++++++++++++++++++
15  drivers/mtd/nand/mtk_nand2.h        |  452 +++++++
16  drivers/mtd/nand/nand_base.c        |    6 +-
17  drivers/mtd/nand/nand_bbt.c         |   19 +
18  drivers/mtd/nand/nand_def.h         |  123 ++
19  drivers/mtd/nand/nand_device_list.h |   55 +
20  drivers/mtd/nand/partition.h        |  115 ++
21  13 files changed, 4311 insertions(+), 3 deletions(-)
22  create mode 100644 drivers/mtd/nand/bmt.c
23  create mode 100644 drivers/mtd/nand/bmt.h
24  create mode 100644 drivers/mtd/nand/dev-nand.c
25  create mode 100644 drivers/mtd/nand/mt6575_typedefs.h
26  create mode 100644 drivers/mtd/nand/mtk_nand2.c
27  create mode 100644 drivers/mtd/nand/mtk_nand2.h
28  create mode 100644 drivers/mtd/nand/nand_def.h
29  create mode 100644 drivers/mtd/nand/nand_device_list.h
30  create mode 100644 drivers/mtd/nand/partition.h
31
32 --- a/drivers/mtd/nand/Kconfig
33 +++ b/drivers/mtd/nand/Kconfig
34 @@ -563,4 +563,10 @@ config MTD_NAND_MTK
35           Enables support for NAND controller on MTK SoCs.
36           This controller is found on mt27xx, mt81xx, mt65xx SoCs.
37  
38 +config MTK_MTD_NAND
39 +       tristate "Support for MTK SoC NAND controller"
40 +       depends on SOC_MT7621
41 +       select MTD_NAND_IDS
42 +       select MTD_NAND_ECC
43 +
44  endif # MTD_NAND
45 --- a/drivers/mtd/nand/Makefile
46 +++ b/drivers/mtd/nand/Makefile
47 @@ -60,6 +60,7 @@ obj-$(CONFIG_MTD_NAND_HISI504)                +
48  obj-$(CONFIG_MTD_NAND_BRCMNAND)                += brcmnand/
49  obj-$(CONFIG_MTD_NAND_QCOM)            += qcom_nandc.o
50  obj-$(CONFIG_MTD_NAND_MTK)             += mtk_nand.o mtk_ecc.o
51 +obj-$(CONFIG_MTK_MTD_NAND)             += mtk_nand2.o bmt.o
52  
53  nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
54  nand-objs += nand_amd.o
55 --- /dev/null
56 +++ b/drivers/mtd/nand/bmt.c
57 @@ -0,0 +1,750 @@
58 +#include "bmt.h"
59 +
60 +typedef struct
61 +{
62 +    char signature[3];
63 +    u8 version;
64 +    u8 bad_count;               // bad block count in pool
65 +    u8 mapped_count;            // mapped block count in pool
66 +    u8 checksum;
67 +    u8 reseverd[13];
68 +} phys_bmt_header;
69 +
70 +typedef struct
71 +{
72 +    phys_bmt_header header;
73 +    bmt_entry table[MAX_BMT_SIZE];
74 +} phys_bmt_struct;
75 +
76 +typedef struct
77 +{
78 +    char signature[3];
79 +} bmt_oob_data;
80 +
81 +static char MAIN_SIGNATURE[] = "BMT";
82 +static char OOB_SIGNATURE[] = "bmt";
83 +#define SIGNATURE_SIZE      (3)
84 +
85 +#define MAX_DAT_SIZE        0x1000
86 +#define MAX_OOB_SIZE        0x80
87 +
88 +static struct mtd_info *mtd_bmt;
89 +static struct nand_chip *nand_chip_bmt;
90 +#define BLOCK_SIZE_BMT          (1 << nand_chip_bmt->phys_erase_shift)
91 +#define PAGE_SIZE_BMT           (1 << nand_chip_bmt->page_shift)
92 +
93 +#define OFFSET(block)       ((block) * BLOCK_SIZE_BMT)  
94 +#define PAGE_ADDR(block)    ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT)
95 +
96 +/*********************************************************************
97 +* Flash is split into 2 parts, system part is for normal             *
98 +* system usage, size is system_block_count, another is replace pool  *
99 +*    +-------------------------------------------------+             *
100 +*    |     system_block_count     |   bmt_block_count  |             *
101 +*    +-------------------------------------------------+             *
102 +*********************************************************************/
103 +static u32 total_block_count;   // block number in flash
104 +static u32 system_block_count;
105 +static int bmt_block_count;     // bmt table size
106 +// static int bmt_count;               // block used in bmt
107 +static int page_per_block;      // pages per block
108 +
109 +static u32 bmt_block_index;     // bmt block index
110 +static bmt_struct bmt;          // dynamic created global bmt table
111 +
112 +static u8 dat_buf[MAX_DAT_SIZE];
113 +static u8 oob_buf[MAX_OOB_SIZE];
114 +static bool pool_erased;
115 +
116 +/***************************************************************
117 +*                                                              
118 +* Interface adaptor for preloader/uboot/kernel                 
119 +*    These interfaces operate on physical address, read/write
120 +*       physical data.
121 +*                                                              
122 +***************************************************************/
123 +int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob)
124 +{
125 +    return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
126 +}
127 +
128 +bool nand_block_bad_bmt(u32 offset)
129 +{
130 +    return mtk_nand_block_bad_hw(mtd_bmt, offset);
131 +}
132 +
133 +bool nand_erase_bmt(u32 offset)
134 +{
135 +    int status;
136 +    if (offset < 0x20000)
137 +    {
138 +        MSG(INIT, "erase offset: 0x%x\n", offset);
139 +    }
140 +
141 +    status = mtk_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as nand_chip structure doesn't have a erase function defined
142 +    if (status & NAND_STATUS_FAIL)
143 +        return false;
144 +    else
145 +        return true;
146 +}
147 +
148 +int mark_block_bad_bmt(u32 offset)
149 +{
150 +    return mtk_nand_block_markbad_hw(mtd_bmt, offset);   //mark_block_bad_hw(offset);
151 +}
152 +
153 +bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob)
154 +{
155 +    if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
156 +        return false;
157 +    else
158 +        return true;
159 +}
160 +
161 +/***************************************************************
162 +*                                                              *
163 +* static internal function                                     *
164 +*                                                              *
165 +***************************************************************/
166 +static void dump_bmt_info(bmt_struct * bmt)
167 +{
168 +    int i;
169 +
170 +    MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count);
171 +    for (i = 0; i < bmt->mapped_count; i++)
172 +    {
173 +        MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
174 +    }
175 +}
176 +
177 +static bool match_bmt_signature(u8 * dat, u8 * oob)
178 +{
179 +
180 +    if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
181 +    {
182 +        return false;
183 +    }
184 +
185 +    if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
186 +    {
187 +        MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n");
188 +    }
189 +    return true;
190 +}
191 +
192 +static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size)
193 +{
194 +    int i;
195 +    u8 checksum = 0;
196 +    u8 *dat = (u8 *) phys_table;
197 +
198 +    checksum += phys_table->header.version;
199 +    checksum += phys_table->header.mapped_count;
200 +
201 +    dat += sizeof(phys_bmt_header);
202 +    for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
203 +    {
204 +        checksum += dat[i];
205 +    }
206 +
207 +    return checksum;
208 +}
209 +
210 +
211 +static int is_block_mapped(int index)
212 +{
213 +    int i;
214 +    for (i = 0; i < bmt.mapped_count; i++)
215 +    {
216 +        if (index == bmt.table[i].mapped_index)
217 +            return i;
218 +    }
219 +    return -1;
220 +}
221 +
222 +static bool is_page_used(u8 * dat, u8 * oob)
223 +{
224 +    return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF));
225 +}
226 +
227 +static bool valid_bmt_data(phys_bmt_struct * phys_table)
228 +{
229 +    int i;
230 +    u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);
231 +
232 +    // checksum correct?
233 +    if (phys_table->header.checksum != checksum)
234 +    {
235 +        MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum);
236 +        return false;
237 +    }
238 +
239 +    MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum);
240 +
241 +    // block index correct?
242 +    for (i = 0; i < phys_table->header.mapped_count; i++)
243 +    {
244 +        if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count)
245 +        {
246 +            MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
247 +            return false;
248 +        }
249 +    }
250 +
251 +    // pass check, valid bmt.
252 +    MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version);
253 +    return true;
254 +}
255 +
256 +static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob)
257 +{
258 +    phys_bmt_struct phys_bmt;
259 +
260 +    dump_bmt_info(bmt);
261 +
262 +    // fill phys_bmt_struct structure with bmt_struct
263 +    memset(&phys_bmt, 0xFF, sizeof(phys_bmt));
264 +
265 +    memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
266 +    phys_bmt.header.version = BMT_VERSION;
267 +    // phys_bmt.header.bad_count = bmt->bad_count;
268 +    phys_bmt.header.mapped_count = bmt->mapped_count;
269 +    memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
270 +
271 +    phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count);
272 +
273 +    memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt));
274 +    memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
275 +}
276 +
277 +// return valid index if found BMT, else return 0
278 +static int load_bmt_data(int start, int pool_size)
279 +{
280 +    int bmt_index = start + pool_size - 1;  // find from the end
281 +    phys_bmt_struct phys_table;
282 +    int i;
283 +
284 +    MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index);
285 +
286 +    for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--)
287 +    {
288 +        if (nand_block_bad_bmt(OFFSET(bmt_index)))
289 +        {
290 +            MSG(INIT, "Skip bad block: %d\n", bmt_index);
291 +            continue;
292 +        }
293 +
294 +        if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf))
295 +        {
296 +            MSG(INIT, "Error found when read block %d\n", bmt_index);
297 +            continue;
298 +        }
299 +
300 +        if (!match_bmt_signature(dat_buf, oob_buf))
301 +        {
302 +            continue;
303 +        }
304 +
305 +        MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index);
306 +
307 +        memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table));
308 +
309 +        if (!valid_bmt_data(&phys_table))
310 +        {
311 +            MSG(INIT, "BMT data is not correct %d\n", bmt_index);
312 +            continue;
313 +        } else
314 +        {
315 +            bmt.mapped_count = phys_table.header.mapped_count;
316 +            bmt.version = phys_table.header.version;
317 +            // bmt.bad_count = phys_table.header.bad_count;
318 +            memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry));
319 +
320 +            MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count);
321 +
322 +            for (i = 0; i < bmt.mapped_count; i++)
323 +            {
324 +                if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index)))
325 +                {
326 +                    MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index);
327 +                    mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index));
328 +                }
329 +            }
330 +
331 +            return bmt_index;
332 +        }
333 +    }
334 +
335 +    MSG(INIT, "bmt block not found!\n");
336 +    return 0;
337 +}
338 +
339 +/*************************************************************************
340 +* Find an available block and erase.                                     *
341 +* start_from_end: if true, find available block from end of flash.       *
342 +*                 else, find from the beginning of the pool              *
343 +* need_erase: if true, all unmapped blocks in the pool will be erased    *
344 +*************************************************************************/
345 +static int find_available_block(bool start_from_end)
346 +{
347 +    int i;                      // , j;
348 +    int block = system_block_count;
349 +    int direction;
350 +    // int avail_index = 0;
351 +    MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased);
352 +
353 +    // erase all un-mapped blocks in pool when finding available block
354 +    if (!pool_erased)
355 +    {
356 +        MSG(INIT, "Erase all un-mapped blocks in pool\n");
357 +        for (i = 0; i < bmt_block_count; i++)
358 +        {
359 +            if (block == bmt_block_index)
360 +            {
361 +                MSG(INIT, "Skip bmt block 0x%x\n", block);
362 +                continue;
363 +            }
364 +
365 +            if (nand_block_bad_bmt(OFFSET(block + i)))
366 +            {
367 +                MSG(INIT, "Skip bad block 0x%x\n", block + i);
368 +                continue;
369 +            }
370 +//if(block==4095)
371 +//{
372 +//  continue;
373 +//}
374 +
375 +            if (is_block_mapped(block + i) >= 0)
376 +            {
377 +                MSG(INIT, "Skip mapped block 0x%x\n", block + i);
378 +                continue;
379 +            }
380 +
381 +            if (!nand_erase_bmt(OFFSET(block + i)))
382 +            {
383 +                MSG(INIT, "Erase block 0x%x failed\n", block + i);
384 +                mark_block_bad_bmt(OFFSET(block + i));
385 +            }
386 +        }
387 +
388 +        pool_erased = 1;
389 +    }
390 +
391 +    if (start_from_end)
392 +    {
393 +        block = total_block_count - 1;
394 +        direction = -1;
395 +    } else
396 +    {
397 +        block = system_block_count;
398 +        direction = 1;
399 +    }
400 +
401 +    for (i = 0; i < bmt_block_count; i++, block += direction)
402 +    {
403 +        if (block == bmt_block_index)
404 +        {
405 +            MSG(INIT, "Skip bmt block 0x%x\n", block);
406 +            continue;
407 +        }
408 +
409 +        if (nand_block_bad_bmt(OFFSET(block)))
410 +        {
411 +            MSG(INIT, "Skip bad block 0x%x\n", block);
412 +            continue;
413 +        }
414 +
415 +        if (is_block_mapped(block) >= 0)
416 +        {
417 +            MSG(INIT, "Skip mapped block 0x%x\n", block);
418 +            continue;
419 +        }
420 +
421 +        MSG(INIT, "Find block 0x%x available\n", block);
422 +        return block;
423 +    }
424 +
425 +    return 0;
426 +}
427 +
428 +static unsigned short get_bad_index_from_oob(u8 * oob_buf)
429 +{
430 +    unsigned short index;
431 +    memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
432 +
433 +    return index;
434 +}
435 +
436 +void set_bad_index_to_oob(u8 * oob, u16 index)
437 +{
438 +    memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
439 +}
440 +
441 +static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
442 +{
443 +    int page;
444 +    int error_block = offset / BLOCK_SIZE_BMT;
445 +    int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;
446 +    int to_index;
447 +
448 +    memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
449 +
450 +    to_index = find_available_block(false);
451 +
452 +    if (!to_index)
453 +    {
454 +        MSG(INIT, "Cannot find an available block for BMT\n");
455 +        return 0;
456 +    }
457 +
458 +    {                           // migrate error page first
459 +        MSG(INIT, "Write error page: 0x%x\n", error_page);
460 +        if (!write_dat)
461 +        {
462 +            nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
463 +            write_dat = dat_buf;
464 +        }
465 +        // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
466 +
467 +        if (error_block < system_block_count)
468 +            set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
469 +
470 +        if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
471 +        {
472 +            MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
473 +            mark_block_bad_bmt(to_index);
474 +            return migrate_from_bad(offset, write_dat, write_oob);
475 +        }
476 +    }
477 +
478 +    for (page = 0; page < page_per_block; page++)
479 +    {
480 +        if (page != error_page)
481 +        {
482 +            nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
483 +            if (is_page_used(dat_buf, oob_buf))
484 +            {
485 +                if (error_block < system_block_count)
486 +                {
487 +                    set_bad_index_to_oob(oob_buf, error_block);
488 +                }
489 +                MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
490 +                if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
491 +                {
492 +                    MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
493 +                    mark_block_bad_bmt(to_index);
494 +                    return migrate_from_bad(offset, write_dat, write_oob);
495 +                }
496 +            }
497 +        }
498 +    }
499 +
500 +    MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
501 +
502 +    return to_index;
503 +}
504 +
505 +static bool write_bmt_to_flash(u8 * dat, u8 * oob)
506 +{
507 +    bool need_erase = true;
508 +    MSG(INIT, "Try to write BMT\n");
509 +
510 +    if (bmt_block_index == 0)
511 +    {
512 +        // if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
513 +        need_erase = false;
514 +        if (!(bmt_block_index = find_available_block(true)))
515 +        {
516 +            MSG(INIT, "Cannot find an available block for BMT\n");
517 +            return false;
518 +        }
519 +    }
520 +
521 +    MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
522 +
523 +    // write bmt to flash
524 +    if (need_erase)
525 +    {
526 +        if (!nand_erase_bmt(OFFSET(bmt_block_index)))
527 +        {
528 +            MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
529 +            mark_block_bad_bmt(OFFSET(bmt_block_index));
530 +            // bmt.bad_count++;
531 +
532 +            bmt_block_index = 0;
533 +            return write_bmt_to_flash(dat, oob);    // recursive call 
534 +        }
535 +    }
536 +
537 +    if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
538 +    {
539 +        MSG(INIT, "Write BMT data fail, need to write again\n");
540 +        mark_block_bad_bmt(OFFSET(bmt_block_index));
541 +        // bmt.bad_count++;
542 +
543 +        bmt_block_index = 0;
544 +        return write_bmt_to_flash(dat, oob);    // recursive call 
545 +    }
546 +
547 +    MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
548 +    return true;
549 +}
550 +
551 +/*******************************************************************
552 +* Reconstruct bmt, called when found bmt info doesn't match bad 
553 +* block info in flash.
554 +* 
555 +* Return NULL for failure
556 +*******************************************************************/
557 +bmt_struct *reconstruct_bmt(bmt_struct * bmt)
558 +{
559 +    int i;
560 +    int index = system_block_count;
561 +    unsigned short bad_index;
562 +    int mapped;
563 +
564 +    // init everything in BMT struct 
565 +    bmt->version = BMT_VERSION;
566 +    bmt->bad_count = 0;
567 +    bmt->mapped_count = 0;
568 +
569 +    memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
570 +
571 +    for (i = 0; i < bmt_block_count; i++, index++)
572 +    {
573 +        if (nand_block_bad_bmt(OFFSET(index)))
574 +        {
575 +            MSG(INIT, "Skip bad block: 0x%x\n", index);
576 +            // bmt->bad_count++;
577 +            continue;
578 +        }
579 +
580 +        MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
581 +        nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
582 +        /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
583 +           {
584 +           MSG(INIT,  "Error when read block %d\n", bmt_block_index);
585 +           continue;
586 +           } */
587 +
588 +        if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count)
589 +        {
590 +            MSG(INIT, "get bad index: 0x%x\n", bad_index);
591 +            if (bad_index != 0xFFFF)
592 +                MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
593 +            continue;
594 +        }
595 +
596 +        MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
597 +
598 +        if (!nand_block_bad_bmt(OFFSET(bad_index)))
599 +        {
600 +            MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
601 +            continue;           // no need to erase here, it will be erased later when trying to write BMT
602 +        }
603 +
604 +        if ((mapped = is_block_mapped(bad_index)) >= 0)
605 +        {
606 +            MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
607 +            bmt->table[mapped].mapped_index = index;    // use new one instead.
608 +        } else
609 +        {
610 +            // add mapping to BMT
611 +            bmt->table[bmt->mapped_count].bad_index = bad_index;
612 +            bmt->table[bmt->mapped_count].mapped_index = index;
613 +            bmt->mapped_count++;
614 +        }
615 +
616 +        MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
617 +
618 +    }
619 +
620 +    MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
621 +    // dump_bmt_info(bmt);
622 +
623 +    // fill NAND BMT buffer
624 +    memset(oob_buf, 0xFF, sizeof(oob_buf));
625 +    fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
626 +
627 +    // write BMT back
628 +    if (!write_bmt_to_flash(dat_buf, oob_buf))
629 +    {
630 +        MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
631 +    }
632 +
633 +    return bmt;
634 +}
635 +
636 +/*******************************************************************
637 +* [BMT Interface]
638 +*
639 +* Description:
640 +*   Init bmt from nand. Reconstruct if not found or data error
641 +*
642 +* Parameter:
643 +*   size: size of bmt and replace pool
644 +* 
645 +* Return: 
646 +*   NULL for failure, and a bmt struct for success
647 +*******************************************************************/
648 +bmt_struct *init_bmt(struct nand_chip * chip, int size)
649 +{
650 +    struct mtk_nand_host *host;
651 +
652 +    if (size > 0 && size < MAX_BMT_SIZE)
653 +    {
654 +        MSG(INIT, "Init bmt table, size: %d\n", size);
655 +        bmt_block_count = size;
656 +    } else
657 +    {
658 +        MSG(INIT, "Invalid bmt table size: %d\n", size);
659 +        return NULL;
660 +    }
661 +    nand_chip_bmt = chip;
662 +    system_block_count = chip->chipsize >> chip->phys_erase_shift;
663 +    total_block_count = bmt_block_count + system_block_count;
664 +    page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
665 +    host = (struct mtk_nand_host *)chip->priv;
666 +    mtd_bmt = host->mtd;
667 +
668 +    MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt);
669 +    MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);
670 +
671 +    // set this flag, and unmapped block in pool will be erased.
672 +    pool_erased = 0;
673 +    memset(bmt.table, 0, size * sizeof(bmt_entry));
674 +    if ((bmt_block_index = load_bmt_data(system_block_count, size)))
675 +    {
676 +        MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index);
677 +        dump_bmt_info(&bmt);
678 +        return &bmt;
679 +    } else
680 +    {
681 +        MSG(INIT, "Load bmt data fail, need re-construct!\n");
682 +#ifndef __UBOOT_NAND__            // BMT is not re-constructed in UBOOT.
683 +        if (reconstruct_bmt(&bmt))
684 +            return &bmt;
685 +        else
686 +#endif
687 +            return NULL;
688 +    }
689 +}
690 +
691 +/*******************************************************************
692 +* [BMT Interface]
693 +*
694 +* Description:
695 +*   Update BMT.
696 +*
697 +* Parameter:
698 +*   offset: update block/page offset.
699 +*   reason: update reason, see update_reason_t for reason.
700 +*   dat/oob: data and oob buffer for write fail.
701 +* 
702 +* Return: 
703 +*   Return true for success, and false for failure.
704 +*******************************************************************/
705 +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob)
706 +{
707 +    int map_index;
708 +    int orig_bad_block = -1;
709 +    // int bmt_update_index;
710 +    int i;
711 +    int bad_index = offset / BLOCK_SIZE_BMT;
712 +
713 +#ifndef MTK_NAND_BMT
714 +       return false;
715 +#endif
716 +    if (reason == UPDATE_WRITE_FAIL)
717 +    {
718 +        MSG(INIT, "Write fail, need to migrate\n");
719 +        if (!(map_index = migrate_from_bad(offset, dat, oob)))
720 +        {
721 +            MSG(INIT, "migrate fail\n");
722 +            return false;
723 +        }
724 +    } else
725 +    {
726 +        if (!(map_index = find_available_block(false)))
727 +        {
728 +            MSG(INIT, "Cannot find block in pool\n");
729 +            return false;
730 +        }
731 +    }
732 +
733 +    // now let's update BMT
734 +    if (bad_index >= system_block_count)    // mapped block become bad, find original bad block
735 +    {
736 +        for (i = 0; i < bmt_block_count; i++)
737 +        {
738 +            if (bmt.table[i].mapped_index == bad_index)
739 +            {
740 +                orig_bad_block = bmt.table[i].bad_index;
741 +                break;
742 +            }
743 +        }
744 +        // bmt.bad_count++;
745 +        MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
746 +
747 +        bmt.table[i].mapped_index = map_index;
748 +    } else
749 +    {
750 +        bmt.table[bmt.mapped_count].mapped_index = map_index;
751 +        bmt.table[bmt.mapped_count].bad_index = bad_index;
752 +        bmt.mapped_count++;
753 +    }
754 +
755 +    memset(oob_buf, 0xFF, sizeof(oob_buf));
756 +    fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
757 +    if (!write_bmt_to_flash(dat_buf, oob_buf))
758 +        return false;
759 +
760 +    mark_block_bad_bmt(offset);
761 +
762 +    return true;
763 +}
764 +
765 +/*******************************************************************
766 +* [BMT Interface]
767 +*
768 +* Description:
769 +*   Given an block index, return mapped index if it's mapped, else 
770 +*   return given index.
771 +*
772 +* Parameter:
773 +*   index: given an block index. This value cannot exceed 
774 +*   system_block_count.
775 +*
776 +* Return NULL for failure
777 +*******************************************************************/
778 +u16 get_mapping_block_index(int index)
779 +{
780 +    int i;
781 +#ifndef MTK_NAND_BMT
782 +       return index;
783 +#endif
784 +    if (index > system_block_count)
785 +    {
786 +        return index;
787 +    }
788 +
789 +    for (i = 0; i < bmt.mapped_count; i++)
790 +    {
791 +        if (bmt.table[i].bad_index == index)
792 +        {
793 +            return bmt.table[i].mapped_index;
794 +        }
795 +    }
796 +
797 +    return index;
798 +}
799 +#ifdef __KERNEL_NAND__
800 +EXPORT_SYMBOL_GPL(init_bmt);
801 +EXPORT_SYMBOL_GPL(update_bmt);
802 +EXPORT_SYMBOL_GPL(get_mapping_block_index);
803 +
804 +MODULE_LICENSE("GPL");
805 +MODULE_AUTHOR("MediaTek");
806 +MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");
807 +#endif
808 --- /dev/null
809 +++ b/drivers/mtd/nand/bmt.h
810 @@ -0,0 +1,80 @@
811 +#ifndef __BMT_H__
812 +#define __BMT_H__
813 +
814 +#include "nand_def.h"
815 +
816 +#if defined(__PRELOADER_NAND__)
817 +
818 +#include "nand.h"
819 +
820 +#elif defined(__UBOOT_NAND__)
821 +
822 +#include <linux/mtd/nand.h>
823 +#include "mtk_nand2.h"
824 +
825 +#elif defined(__KERNEL_NAND__)
826 +
827 +#include <linux/mtd/mtd.h>
828 +#include <linux/mtd/rawnand.h>
829 +#include <linux/module.h>
830 +#include "mtk_nand2.h"
831 +
832 +#endif
833 +
834 +
835 +#define MAX_BMT_SIZE        (0x80)
836 +#define BMT_VERSION         (1) // initial version
837 +
838 +#define MAIN_SIGNATURE_OFFSET   (0)
839 +#define OOB_SIGNATURE_OFFSET    (1)
840 +#define OOB_INDEX_OFFSET        (29)
841 +#define OOB_INDEX_SIZE          (2)
842 +#define FAKE_INDEX              (0xAAAA)
843 +
844 +typedef struct _bmt_entry_
845 +{
846 +    u16 bad_index;              // bad block index
847 +    u16 mapped_index;           // mapping block index in the replace pool
848 +} bmt_entry;
849 +
850 +typedef enum
851 +{
852 +    UPDATE_ERASE_FAIL,
853 +    UPDATE_WRITE_FAIL,
854 +    UPDATE_UNMAPPED_BLOCK,
855 +    UPDATE_REASON_COUNT,
856 +} update_reason_t;
857 +
858 +typedef struct
859 +{
860 +    bmt_entry table[MAX_BMT_SIZE];
861 +    u8 version;
862 +    u8 mapped_count;            // mapped block count in pool
863 +    u8 bad_count;               // bad block count in pool. Not used in V1
864 +} bmt_struct;
865 +
866 +/***************************************************************
867 +*                                                              *
868 +* Interface BMT need to use                                    *
869 +*                                                              *
870 +***************************************************************/
871 +extern bool mtk_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
872 +extern int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs);
873 +extern int mtk_nand_erase_hw(struct mtd_info *mtd, int page);
874 +extern int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs);
875 +extern int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
876 +
877 +
878 +/***************************************************************
879 +*                                                              *
880 +* Different function interface for preloader/uboot/kernel      *
881 +*                                                              *
882 +***************************************************************/
883 +void set_bad_index_to_oob(u8 * oob, u16 index);
884 +
885 +
886 +bmt_struct *init_bmt(struct nand_chip *nand, int size);
887 +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob);
888 +unsigned short get_mapping_block_index(int index);
889 +
890 +#endif                          // #ifndef __BMT_H__
891 --- /dev/null
892 +++ b/drivers/mtd/nand/dev-nand.c
893 @@ -0,0 +1,63 @@
894 +#include <linux/init.h>
895 +#include <linux/kernel.h>
896 +#include <linux/platform_device.h>
897 +
898 +#include "mt6575_typedefs.h"
899 +
900 +#define RALINK_NAND_CTRL_BASE               0xBE003000
901 +#define NFI_base    RALINK_NAND_CTRL_BASE
902 +#define RALINK_NANDECC_CTRL_BASE    0xBE003800
903 +#define NFIECC_base RALINK_NANDECC_CTRL_BASE
904 +#define MT7621_NFI_IRQ_ID              SURFBOARDINT_NAND
905 +#define MT7621_NFIECC_IRQ_ID   SURFBOARDINT_NAND_ECC
906 +
907 +#define SURFBOARDINT_NAND 22
908 +#define SURFBOARDINT_NAND_ECC 23
909 +
910 +static struct resource MT7621_resource_nand[] = {
911 +        {
912 +                .start          = NFI_base,
913 +                .end            = NFI_base + 0x1A0,
914 +                .flags          = IORESOURCE_MEM,
915 +        },
916 +        {
917 +                .start          = NFIECC_base,
918 +                .end            = NFIECC_base + 0x150,
919 +                .flags          = IORESOURCE_MEM,
920 +        },
921 +        {
922 +                .start          = MT7621_NFI_IRQ_ID,
923 +                .flags          = IORESOURCE_IRQ,
924 +        },
925 +        {
926 +                .start          = MT7621_NFIECC_IRQ_ID,
927 +                .flags          = IORESOURCE_IRQ,
928 +        },
929 +};
930 +
931 +static struct platform_device MT7621_nand_dev = {
932 +    .name = "MT7621-NAND",
933 +    .id   = 0,
934 +        .num_resources  = ARRAY_SIZE(MT7621_resource_nand),
935 +        .resource               = MT7621_resource_nand,
936 +    .dev            = {
937 +        .platform_data = &mt7621_nand_hw,
938 +    },
939 +};
940 +
941 +
942 +int __init mtk_nand_register(void)
943 +{
944 +
945 +       int retval = 0;
946 +
947 +       retval = platform_device_register(&MT7621_nand_dev);
948 +       if (retval != 0) {
949 +               printk(KERN_ERR "register nand device fail\n");
950 +               return retval;
951 +       }
952 +
953 +
954 +       return retval;
955 +}
956 +arch_initcall(mtk_nand_register);
957 --- /dev/null
958 +++ b/drivers/mtd/nand/mt6575_typedefs.h
959 @@ -0,0 +1,340 @@
960 +/* Copyright Statement:
961 + *
962 + * This software/firmware and related documentation ("MediaTek Software") are
963 + * protected under relevant copyright laws. The information contained herein
964 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
965 + * Without the prior written permission of MediaTek inc. and/or its licensors,
966 + * any reproduction, modification, use or disclosure of MediaTek Software,
967 + * and information contained herein, in whole or in part, shall be strictly prohibited.
968 + */
969 +/* MediaTek Inc. (C) 2010. All rights reserved.
970 + *
971 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
972 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
973 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
974 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
975 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
976 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
977 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
978 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
979 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
980 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
981 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
982 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
983 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
984 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
985 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
986 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
987 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
988 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
989 + *
990 + * The following software/firmware and/or related documentation ("MediaTek Software")
991 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
992 + * applicable license agreements with MediaTek Inc.
993 + */
994 +
995 +/*****************************************************************************
996 +*  Copyright Statement:
997 +*  --------------------
998 +*  This software is protected by Copyright and the information contained
999 +*  herein is confidential. The software may not be copied and the information
1000 +*  contained herein may not be used or disclosed except with the written
1001 +*  permission of MediaTek Inc. (C) 2008
1002 +*
1003 +*  BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
1004 +*  THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
1005 +*  RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
1006 +*  AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
1007 +*  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
1008 +*  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
1009 +*  NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
1010 +*  SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
1011 +*  SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
1012 +*  THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
1013 +*  NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
1014 +*  SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
1015 +*
1016 +*  BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
1017 +*  LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
1018 +*  AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
1019 +*  OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
1020 +*  MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
1021 +*
1022 +*  THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
1023 +*  WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
1024 +*  LAWS PRINCIPLES.  ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
1025 +*  RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
1026 +*  THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
1027 +*
1028 +*****************************************************************************/
1029 +
1030 +#ifndef _MT6575_TYPEDEFS_H
1031 +#define _MT6575_TYPEDEFS_H
1032 +
1033 +#if defined (__KERNEL_NAND__)
1034 +#include <linux/bug.h>
1035 +#else
1036 +#define true           1 
1037 +#define false          0  
1038 +#define bool           u8
1039 +#endif
1040 +
1041 +// ---------------------------------------------------------------------------
1042 +//  Basic Type Definitions
1043 +// ---------------------------------------------------------------------------
1044 +
1045 +typedef volatile unsigned char  *P_kal_uint8;
1046 +typedef volatile unsigned short *P_kal_uint16;
1047 +typedef volatile unsigned int   *P_kal_uint32;
1048 +
1049 +typedef long            LONG;
1050 +typedef unsigned char   UBYTE;
1051 +typedef short           SHORT;
1052 +
1053 +typedef signed char     kal_int8;
1054 +typedef signed short    kal_int16;
1055 +typedef signed int      kal_int32;
1056 +typedef long long       kal_int64;
1057 +typedef unsigned char   kal_uint8;
1058 +typedef unsigned short  kal_uint16;
1059 +typedef unsigned int    kal_uint32;
1060 +typedef unsigned long long  kal_uint64;
1061 +typedef char            kal_char;
1062 +
1063 +typedef unsigned int            *UINT32P;
1064 +typedef volatile unsigned short *UINT16P;
1065 +typedef volatile unsigned char  *UINT8P;
1066 +typedef unsigned char           *U8P;
1067 +
1068 +typedef volatile unsigned char  *P_U8;
1069 +typedef volatile signed char    *P_S8;
1070 +typedef volatile unsigned short *P_U16;
1071 +typedef volatile signed short   *P_S16;
1072 +typedef volatile unsigned int   *P_U32;
1073 +typedef volatile signed int     *P_S32;
1074 +typedef unsigned long long      *P_U64;
1075 +typedef signed long long        *P_S64;
1076 +
1077 +typedef unsigned char       U8;
1078 +typedef signed char         S8;
1079 +typedef unsigned short      U16;
1080 +typedef signed short        S16;
1081 +typedef unsigned int        U32;
1082 +typedef signed int          S32;
1083 +typedef unsigned long long  U64;
1084 +typedef signed long long    S64;
1085 +//typedef unsigned char       bool;
1086 +
1087 +typedef unsigned char   UINT8;
1088 +typedef unsigned short  UINT16;
1089 +typedef unsigned int    UINT32;
1090 +typedef unsigned short  USHORT;
1091 +typedef signed char     INT8;
1092 +typedef signed short    INT16;
1093 +typedef signed int      INT32;
1094 +typedef unsigned int    DWORD;
1095 +typedef void            VOID;
1096 +typedef unsigned char   BYTE;
1097 +typedef float           FLOAT;
1098 +
1099 +typedef char           *LPCSTR;
1100 +typedef short          *LPWSTR;
1101 +
1102 +
1103 +// ---------------------------------------------------------------------------
1104 +//  Constants
1105 +// ---------------------------------------------------------------------------
1106 +
1107 +#define IMPORT  EXTERN
1108 +#ifndef __cplusplus
1109 +  #define EXTERN  extern
1110 +#else
1111 +  #define EXTERN  extern "C"
1112 +#endif
1113 +#define LOCAL     static
1114 +#define GLOBAL
1115 +#define EXPORT    GLOBAL
1116 +
1117 +#define EQ        ==
1118 +#define NEQ       !=
1119 +#define AND       &&
1120 +#define OR        ||
1121 +#define XOR(A,B)  ((!(A) AND (B)) OR ((A) AND !(B)))
1122 +
1123 +#ifndef FALSE
1124 +  #define FALSE (0)
1125 +#endif
1126 +
1127 +#ifndef TRUE
1128 +  #define TRUE  (1)
1129 +#endif
1130 +
1131 +#ifndef NULL
1132 +  #define NULL  (0)
1133 +#endif
1134 +
1135 +//enum boolean {false, true};
1136 +enum {RX, TX, NONE};
1137 +
1138 +#ifndef BOOL
1139 +typedef unsigned char  BOOL;
1140 +#endif
1141 +
1142 +typedef enum {
1143 +   KAL_FALSE = 0,
1144 +   KAL_TRUE  = 1,
1145 +} kal_bool;
1146 +
1147 +
1148 +// ---------------------------------------------------------------------------
1149 +//  Type Casting
1150 +// ---------------------------------------------------------------------------
1151 +
1152 +#define AS_INT32(x)     (*(INT32 *)((void*)x))
1153 +#define AS_INT16(x)     (*(INT16 *)((void*)x))
1154 +#define AS_INT8(x)      (*(INT8  *)((void*)x))
1155 +
1156 +#define AS_UINT32(x)    (*(UINT32 *)((void*)x))
1157 +#define AS_UINT16(x)    (*(UINT16 *)((void*)x))
1158 +#define AS_UINT8(x)     (*(UINT8  *)((void*)x))
1159 +
1160 +
1161 +// ---------------------------------------------------------------------------
1162 +//  Register Manipulations
1163 +// ---------------------------------------------------------------------------
1164 +
1165 +#define READ_REGISTER_UINT32(reg) \
1166 +    (*(volatile UINT32 * const)(reg))
1167 +
1168 +#define WRITE_REGISTER_UINT32(reg, val) \
1169 +    (*(volatile UINT32 * const)(reg)) = (val)
1170 +
1171 +#define READ_REGISTER_UINT16(reg) \
1172 +    (*(volatile UINT16 * const)(reg))
1173 +
1174 +#define WRITE_REGISTER_UINT16(reg, val) \
1175 +    (*(volatile UINT16 * const)(reg)) = (val)
1176 +
1177 +#define READ_REGISTER_UINT8(reg) \
1178 +    (*(volatile UINT8 * const)(reg))
1179 +
1180 +#define WRITE_REGISTER_UINT8(reg, val) \
1181 +    (*(volatile UINT8 * const)(reg)) = (val)
1182 +
1183 +#define INREG8(x)           READ_REGISTER_UINT8((UINT8*)((void*)(x)))
1184 +#define OUTREG8(x, y)       WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y))
1185 +#define SETREG8(x, y)       OUTREG8(x, INREG8(x)|(y))
1186 +#define CLRREG8(x, y)       OUTREG8(x, INREG8(x)&~(y))
1187 +#define MASKREG8(x, y, z)   OUTREG8(x, (INREG8(x)&~(y))|(z))
1188 +
1189 +#define INREG16(x)          READ_REGISTER_UINT16((UINT16*)((void*)(x)))
1190 +#define OUTREG16(x, y)      WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y))
1191 +#define SETREG16(x, y)      OUTREG16(x, INREG16(x)|(y))
1192 +#define CLRREG16(x, y)      OUTREG16(x, INREG16(x)&~(y))
1193 +#define MASKREG16(x, y, z)  OUTREG16(x, (INREG16(x)&~(y))|(z))
1194 +
1195 +#define INREG32(x)          READ_REGISTER_UINT32((UINT32*)((void*)(x)))
1196 +#define OUTREG32(x, y)      WRITE_REGISTER_UINT32((UINT32*)((void*)(x)), (UINT32)(y))
1197 +#define SETREG32(x, y)      OUTREG32(x, INREG32(x)|(y))
1198 +#define CLRREG32(x, y)      OUTREG32(x, INREG32(x)&~(y))
1199 +#define MASKREG32(x, y, z)  OUTREG32(x, (INREG32(x)&~(y))|(z))
1200 +
1201 +
1202 +#define DRV_Reg8(addr)              INREG8(addr)
1203 +#define DRV_WriteReg8(addr, data)   OUTREG8(addr, data)
1204 +#define DRV_SetReg8(addr, data)     SETREG8(addr, data)
1205 +#define DRV_ClrReg8(addr, data)     CLRREG8(addr, data)
1206 +
1207 +#define DRV_Reg16(addr)             INREG16(addr)
1208 +#define DRV_WriteReg16(addr, data)  OUTREG16(addr, data)
1209 +#define DRV_SetReg16(addr, data)    SETREG16(addr, data)
1210 +#define DRV_ClrReg16(addr, data)    CLRREG16(addr, data)
1211 +
1212 +#define DRV_Reg32(addr)             INREG32(addr)
1213 +#define DRV_WriteReg32(addr, data)  OUTREG32(addr, data)
1214 +#define DRV_SetReg32(addr, data)    SETREG32(addr, data)
1215 +#define DRV_ClrReg32(addr, data)    CLRREG32(addr, data)
1216 +
1217 +// !!! DEPRECATED, WILL BE REMOVED LATER !!!
1218 +#define DRV_Reg(addr)               DRV_Reg16(addr)
1219 +#define DRV_WriteReg(addr, data)    DRV_WriteReg16(addr, data)
1220 +#define DRV_SetReg(addr, data)      DRV_SetReg16(addr, data)
1221 +#define DRV_ClrReg(addr, data)      DRV_ClrReg16(addr, data)
1222 +
1223 +
1224 +// ---------------------------------------------------------------------------
1225 +//  Compiler Time Deduction Macros
1226 +// ---------------------------------------------------------------------------
1227 +
1228 +#define _MASK_OFFSET_1(x, n)  ((x) & 0x1) ? (n) :
1229 +#define _MASK_OFFSET_2(x, n)  _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1)
1230 +#define _MASK_OFFSET_4(x, n)  _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2)
1231 +#define _MASK_OFFSET_8(x, n)  _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4)
1232 +#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8)
1233 +#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16)
1234 +
1235 +#define MASK_OFFSET_ERROR (0xFFFFFFFF)
1236 +
1237 +#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR)
1238 +
1239 +
1240 +// ---------------------------------------------------------------------------
1241 +//  Assertions
1242 +// ---------------------------------------------------------------------------
1243 +
1244 +#ifndef ASSERT
1245 +    #define ASSERT(expr)        BUG_ON(!(expr))
1246 +#endif
1247 +
1248 +#ifndef NOT_IMPLEMENTED
1249 +    #define NOT_IMPLEMENTED()   BUG_ON(1)
1250 +#endif    
1251 +
1252 +#define STATIC_ASSERT(pred)         STATIC_ASSERT_X(pred, __LINE__)
1253 +#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line)
1254 +#define STATIC_ASSERT_XX(pred, line) \
1255 +    extern char assertion_failed_at_##line[(pred) ? 1 : -1]
1256 +
1257 +// ---------------------------------------------------------------------------
1258 +//  Resolve Compiler Warnings
1259 +// ---------------------------------------------------------------------------
1260 +
1261 +#define NOT_REFERENCED(x)   { (x) = (x); }
1262 +
1263 +
1264 +// ---------------------------------------------------------------------------
1265 +//  Utilities
1266 +// ---------------------------------------------------------------------------
1267 +
1268 +#define MAXIMUM(A,B)       (((A)>(B))?(A):(B))
1269 +#define MINIMUM(A,B)       (((A)<(B))?(A):(B))
1270 +
1271 +#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0])))
1272 +#define DVT_DELAYMACRO(u4Num)                                            \
1273 +{                                                                        \
1274 +    UINT32 u4Count = 0 ;                                                 \
1275 +    for (u4Count = 0; u4Count < u4Num; u4Count++ );                      \
1276 +}                                                                        \
1277 +
1278 +#define    A68351B      0
1279 +#define    B68351B      1
1280 +#define    B68351D      2
1281 +#define    B68351E      3
1282 +#define    UNKNOWN_IC_VERSION   0xFF
1283 +
1284 +/* NAND driver */
1285 +struct mtk_nand_host_hw {
1286 +    unsigned int nfi_bus_width;                    /* NFI_BUS_WIDTH */ 
1287 +       unsigned int nfi_access_timing;         /* NFI_ACCESS_TIMING */  
1288 +       unsigned int nfi_cs_num;                        /* NFI_CS_NUM */
1289 +       unsigned int nand_sec_size;                     /* NAND_SECTOR_SIZE */
1290 +       unsigned int nand_sec_shift;            /* NAND_SECTOR_SHIFT */
1291 +       unsigned int nand_ecc_size;
1292 +       unsigned int nand_ecc_bytes;
1293 +       unsigned int nand_ecc_mode;
1294 +};
1295 +extern struct mtk_nand_host_hw mt7621_nand_hw;
1296 +extern unsigned int    CFG_BLOCKSIZE;
1297 +
1298 +#endif  // _MT6575_TYPEDEFS_H
1299 +
1300 --- /dev/null
1301 +++ b/drivers/mtd/nand/mtk_nand2.c
1302 @@ -0,0 +1,2363 @@
1303 +/******************************************************************************
1304 +* mtk_nand2.c - MTK NAND Flash Device Driver
1305 + *
1306 +* Copyright 2009-2012 MediaTek Co.,Ltd.
1307 + *
1308 +* DESCRIPTION:
1309 +*      This file provides NAND-related functions for other drivers
1310 + *
1311 +* modification history
1312 +* ----------------------------------------
1313 +* v3.0, 11 Feb 2010, mtk
1314 +* ----------------------------------------
1315 +******************************************************************************/
1316 +#include "nand_def.h"
1317 +#include <linux/slab.h>
1318 +#include <linux/init.h>
1319 +#include <linux/module.h>
1320 +#include <linux/delay.h>
1321 +#include <linux/errno.h>
1322 +#include <linux/sched.h>
1323 +#include <linux/types.h>
1324 +#include <linux/wait.h>
1325 +#include <linux/spinlock.h>
1326 +#include <linux/interrupt.h>
1327 +#include <linux/mtd/mtd.h>
1328 +#include <linux/mtd/rawnand.h>
1329 +#include <linux/mtd/partitions.h>
1330 +#include <linux/mtd/nand_ecc.h>
1331 +#include <linux/dma-mapping.h>
1332 +#include <linux/jiffies.h>
1333 +#include <linux/platform_device.h>
1334 +#include <linux/proc_fs.h>
1335 +#include <linux/time.h>
1336 +#include <linux/mm.h>
1337 +#include <asm/io.h>
1338 +#include <asm/cacheflush.h>
1339 +#include <asm/uaccess.h>
1340 +#include <linux/miscdevice.h>
1341 +#include "mtk_nand2.h"
1342 +#include "nand_device_list.h"
1343 +
1344 +#include "bmt.h"
1345 +#include "partition.h"
1346 +
1347 +unsigned int CFG_BLOCKSIZE;
1348 +
1349 +static int shift_on_bbt = 0;
1350 +extern void nand_bbt_set(struct mtd_info *mtd, int page, int flag);
1351 +extern int nand_bbt_get(struct mtd_info *mtd, int page);
1352 +int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page);
1353 +
1354 +static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL };
1355 +
1356 +#define NAND_CMD_STATUS_MULTI  0x71
1357 +
1358 +void show_stack(struct task_struct *tsk, unsigned long *sp);
1359 +extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
1360 +extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity);
1361 +
1362 +struct mtk_nand_host   mtk_nand_host;  /* include mtd_info and nand_chip structs */
1363 +struct mtk_nand_host_hw mt7621_nand_hw = {
1364 +    .nfi_bus_width          = 8,
1365 +    .nfi_access_timing      = NFI_DEFAULT_ACCESS_TIMING,
1366 +    .nfi_cs_num             = NFI_CS_NUM,
1367 +    .nand_sec_size          = 512,
1368 +    .nand_sec_shift         = 9,
1369 +    .nand_ecc_size          = 2048,
1370 +    .nand_ecc_bytes         = 32,
1371 +    .nand_ecc_mode          = NAND_ECC_HW,
1372 +};
1373 +
1374 +
1375 +/*******************************************************************************
1376 + * Global Variable Definition
1377 + *******************************************************************************/
1378 +
1379 +#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
1380 +   do { \
1381 +      DRV_WriteReg(NFI_CMD_REG16,cmd);\
1382 +      while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\
1383 +      DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
1384 +      DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
1385 +      DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\
1386 +      while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\
1387 +   }while(0);
1388 +
1389 +//-------------------------------------------------------------------------------
1390 +static struct NAND_CMD g_kCMD;
1391 +static u32 g_u4ChipVer;
1392 +bool g_bInitDone;
1393 +static bool g_bcmdstatus;
1394 +static u32 g_value = 0;
1395 +static int g_page_size;
1396 +
1397 +BOOL g_bHwEcc = true;
1398 +
1399 +
1400 +static u8 *local_buffer_16_align;   // 16 byte aligned buffer, for HW issue
1401 +static u8 local_buffer[4096 + 512];
1402 +
1403 +extern void nand_release_device(struct mtd_info *mtd);
1404 +extern int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state);
1405 +
1406 +#if defined(MTK_NAND_BMT)
1407 +static bmt_struct *g_bmt;
1408 +#endif
1409 +struct mtk_nand_host *host;
1410 +extern struct mtd_partition g_pasStatic_Partition[];
1411 +int part_num = NUM_PARTITIONS;
1412 +int manu_id;
1413 +int dev_id;
1414 +
1415 +/* this constant was taken from linux/nand/nand.h v 3.14
1416 + * in later versions it seems it was removed in order to save a bit of space
1417 + */
1418 +#define NAND_MAX_OOBSIZE 774
1419 +static u8 local_oob_buf[NAND_MAX_OOBSIZE];
1420 +
1421 +static u8 nand_badblock_offset = 0;
1422 +
1423 +void nand_enable_clock(void)
1424 +{
1425 +    //enable_clock(MT65XX_PDN_PERI_NFI, "NAND");
1426 +}
1427 +
1428 +void nand_disable_clock(void)
1429 +{
1430 +    //disable_clock(MT65XX_PDN_PERI_NFI, "NAND");
1431 +}
1432 +
1433 +struct nand_ecclayout {
1434 +       __u32 eccbytes;
1435 +       __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
1436 +       __u32 oobavail;
1437 +       struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
1438 +};
1439 +
1440 +static struct nand_ecclayout *layout;
1441 +
1442 +static struct nand_ecclayout nand_oob_16 = {
1443 +       .eccbytes = 8,
1444 +       .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
1445 +       .oobfree = {{1, 6}, {0, 0}}
1446 +};
1447 +
1448 +struct nand_ecclayout nand_oob_64 = {
1449 +       .eccbytes = 32,
1450 +       .eccpos = {32, 33, 34, 35, 36, 37, 38, 39,
1451 +               40, 41, 42, 43, 44, 45, 46, 47,
1452 +               48, 49, 50, 51, 52, 53, 54, 55,
1453 +               56, 57, 58, 59, 60, 61, 62, 63},
1454 +       .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0}}
1455 +};
1456 +
1457 +struct nand_ecclayout nand_oob_128 = {
1458 +       .eccbytes = 64,
1459 +       .eccpos = {
1460 +               64, 65, 66, 67, 68, 69, 70, 71,
1461 +               72, 73, 74, 75, 76, 77, 78, 79,
1462 +               80, 81, 82, 83, 84, 85, 86, 87,
1463 +               88, 89, 90, 91, 92, 93, 94, 95,
1464 +               96, 97, 98, 99, 100, 101, 102, 103,
1465 +               104, 105, 106, 107, 108, 109, 110, 111,
1466 +               112, 113, 114, 115, 116, 117, 118, 119,
1467 +               120, 121, 122, 123, 124, 125, 126, 127},
1468 +       .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}}
1469 +};
1470 +
1471 +flashdev_info devinfo;
1472 +
1473 +void dump_nfi(void)
1474 +{
1475 +}
1476 +
1477 +void dump_ecc(void)
1478 +{
1479 +}
1480 +
1481 +u32
1482 +nand_virt_to_phys_add(u32 va)
1483 +{
1484 +       u32 pageOffset = (va & (PAGE_SIZE - 1));
1485 +       pgd_t *pgd;
1486 +       pmd_t *pmd;
1487 +       pte_t *pte;
1488 +       u32 pa;
1489 +
1490 +       if (virt_addr_valid(va))
1491 +               return __virt_to_phys(va);
1492 +
1493 +       if (NULL == current) {
1494 +               printk(KERN_ERR "[nand_virt_to_phys_add] ERROR ,current is NULL! \n");
1495 +               return 0;
1496 +       }
1497 +
1498 +       if (NULL == current->mm) {
1499 +               printk(KERN_ERR "[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
1500 +               return 0;
1501 +       }
1502 +
1503 +       pgd = pgd_offset(current->mm, va);  /* what is tsk->mm */
1504 +       if (pgd_none(*pgd) || pgd_bad(*pgd)) {
1505 +               printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va);
1506 +               return 0;
1507 +       }
1508 +
1509 +       pmd = pmd_offset((pud_t *)pgd, va);
1510 +       if (pmd_none(*pmd) || pmd_bad(*pmd)) {
1511 +               printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va);
1512 +               return 0;
1513 +       }
1514 +
1515 +       pte = pte_offset_map(pmd, va);
1516 +       if (pte_present(*pte)) {
1517 +               pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset;
1518 +               return pa;
1519 +       }
1520 +
1521 +       printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! \n", va);
1522 +       return 0;
1523 +}
1524 +EXPORT_SYMBOL(nand_virt_to_phys_add);
1525 +
1526 +bool
1527 +get_device_info(u16 id, u32 ext_id, flashdev_info * pdevinfo)
1528 +{
1529 +       u32 index;
1530 +       for (index = 0; gen_FlashTable[index].id != 0; index++) {
1531 +               if (id == gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id) {
1532 +                       pdevinfo->id = gen_FlashTable[index].id;
1533 +                       pdevinfo->ext_id = gen_FlashTable[index].ext_id;
1534 +                       pdevinfo->blocksize = gen_FlashTable[index].blocksize;
1535 +                       pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle;
1536 +                       pdevinfo->iowidth = gen_FlashTable[index].iowidth;
1537 +                       pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting;
1538 +                       pdevinfo->advancedmode = gen_FlashTable[index].advancedmode;
1539 +                       pdevinfo->pagesize = gen_FlashTable[index].pagesize;
1540 +                       pdevinfo->sparesize = gen_FlashTable[index].sparesize;
1541 +                       pdevinfo->totalsize = gen_FlashTable[index].totalsize;
1542 +                       memcpy(pdevinfo->devciename, gen_FlashTable[index].devciename, sizeof(pdevinfo->devciename));
1543 +                       printk(KERN_INFO "Device found in MTK table, ID: %x, EXT_ID: %x\n", id, ext_id);
1544 +
1545 +                       goto find;
1546 +               }
1547 +       }
1548 +
1549 +find:
1550 +       if (0 == pdevinfo->id) {
1551 +               printk(KERN_INFO "Device not found, ID: %x\n", id);
1552 +               return false;
1553 +       } else {
1554 +               return true;
1555 +       }
1556 +}
1557 +
1558 +static void
1559 +ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit)
1560 +{
1561 +       u32 u4ENCODESize;
1562 +       u32 u4DECODESize;
1563 +       u32 ecc_bit_cfg = ECC_CNFG_ECC4;
1564 +
1565 +       switch(ecc_bit){
1566 +       case 4:
1567 +               ecc_bit_cfg = ECC_CNFG_ECC4;
1568 +               break;
1569 +       case 8:
1570 +               ecc_bit_cfg = ECC_CNFG_ECC8;
1571 +               break;
1572 +       case 10:
1573 +               ecc_bit_cfg = ECC_CNFG_ECC10;
1574 +               break;
1575 +       case 12:
1576 +               ecc_bit_cfg = ECC_CNFG_ECC12;
1577 +               break;
1578 +       default:
1579 +               break;
1580 +       }
1581 +       DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1582 +       do {
1583 +       } while (!DRV_Reg16(ECC_DECIDLE_REG16));
1584 +
1585 +       DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1586 +       do {
1587 +       } while (!DRV_Reg32(ECC_ENCIDLE_REG32));
1588 +
1589 +       /* setup FDM register base */
1590 +       DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32);
1591 +
1592 +       /* Sector + FDM */
1593 +       u4ENCODESize = (hw->nand_sec_size + 8) << 3;
1594 +       /* Sector + FDM + YAFFS2 meta data bits */
1595 +       u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * 13;
1596 +
1597 +       /* configure ECC decoder && encoder */
1598 +       DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT));
1599 +
1600 +       DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT));
1601 +       NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL);
1602 +}
1603 +
1604 +static void
1605 +ECC_Decode_Start(void)
1606 +{
1607 +       while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
1608 +               ;
1609 +       DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN);
1610 +}
1611 +
1612 +static void
1613 +ECC_Decode_End(void)
1614 +{
1615 +       while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
1616 +               ;
1617 +       DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1618 +}
1619 +
1620 +static void
1621 +ECC_Encode_Start(void)
1622 +{
1623 +       while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE))
1624 +               ;
1625 +       mb();
1626 +       DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN);
1627 +}
1628 +
1629 +static void
1630 +ECC_Encode_End(void)
1631 +{
1632 +       /* wait for device returning idle */
1633 +       while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ;
1634 +       mb();
1635 +       DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1636 +}
1637 +
1638 +static bool
1639 +mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf, u32 u4SecIndex, u32 u4PageAddr)
1640 +{
1641 +       bool bRet = true;
1642 +       u16 u2SectorDoneMask = 1 << u4SecIndex;
1643 +       u32 u4ErrorNumDebug, i, u4ErrNum;
1644 +       u32 timeout = 0xFFFF;
1645 +       // int el;
1646 +       u32 au4ErrBitLoc[6];
1647 +       u32 u4ErrByteLoc, u4BitOffset;
1648 +       u32 u4ErrBitLoc1th, u4ErrBitLoc2nd;
1649 +
1650 +       //4 // Wait for Decode Done
1651 +       while (0 == (u2SectorDoneMask & DRV_Reg16(ECC_DECDONE_REG16))) {
1652 +               timeout--;
1653 +               if (0 == timeout)
1654 +                       return false;
1655 +       }
1656 +       /* We will manually correct the error bits in the last sector, not all the sectors of the page! */
1657 +       memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc));
1658 +       u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32);
1659 +       u4ErrNum = DRV_Reg32(ECC_DECENUM_REG32) >> (u4SecIndex << 2);
1660 +       u4ErrNum &= 0xF;
1661 +
1662 +       if (u4ErrNum) {
1663 +               if (0xF == u4ErrNum) {
1664 +                       mtd->ecc_stats.failed++;
1665 +                       bRet = false;
1666 +                       printk(KERN_ERR"mtk_nand: UnCorrectable at PageAddr=%d\n", u4PageAddr);
1667 +               } else {
1668 +                       for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) {
1669 +                               au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i);
1670 +                               u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF;
1671 +                               if (u4ErrBitLoc1th < 0x1000) {
1672 +                                       u4ErrByteLoc = u4ErrBitLoc1th / 8;
1673 +                                       u4BitOffset = u4ErrBitLoc1th % 8;
1674 +                                       pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1675 +                                       mtd->ecc_stats.corrected++;
1676 +                               } else {
1677 +                                       mtd->ecc_stats.failed++;
1678 +                               }
1679 +                               u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF;
1680 +                               if (0 != u4ErrBitLoc2nd) {
1681 +                                       if (u4ErrBitLoc2nd < 0x1000) {
1682 +                                               u4ErrByteLoc = u4ErrBitLoc2nd / 8;
1683 +                                               u4BitOffset = u4ErrBitLoc2nd % 8;
1684 +                                               pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1685 +                                               mtd->ecc_stats.corrected++;
1686 +                                       } else {
1687 +                                               mtd->ecc_stats.failed++;
1688 +                                               //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]);
1689 +                                       }
1690 +                               }
1691 +                       }
1692 +               }
1693 +               if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex)))
1694 +                       bRet = false;
1695 +       }
1696 +       return bRet;
1697 +}
1698 +
1699 +static bool
1700 +mtk_nand_RFIFOValidSize(u16 u2Size)
1701 +{
1702 +       u32 timeout = 0xFFFF;
1703 +       while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) {
1704 +               timeout--;
1705 +               if (0 == timeout)
1706 +                       return false;
1707 +       }
1708 +       return true;
1709 +}
1710 +
1711 +static bool
1712 +mtk_nand_WFIFOValidSize(u16 u2Size)
1713 +{
1714 +       u32 timeout = 0xFFFF;
1715 +
1716 +       while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) {
1717 +               timeout--;
1718 +               if (0 == timeout)
1719 +                       return false;
1720 +       }
1721 +       return true;
1722 +}
1723 +
1724 +static bool
1725 +mtk_nand_status_ready(u32 u4Status)
1726 +{
1727 +       u32 timeout = 0xFFFF;
1728 +
1729 +       while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) {
1730 +               timeout--;
1731 +               if (0 == timeout)
1732 +                       return false;
1733 +       }
1734 +       return true;
1735 +}
1736 +
1737 +static bool
1738 +mtk_nand_reset(void)
1739 +{
1740 +       int timeout = 0xFFFF;
1741 +       if (DRV_Reg16(NFI_MASTERSTA_REG16)) {
1742 +               mb();
1743 +               DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1744 +               while (DRV_Reg16(NFI_MASTERSTA_REG16)) {
1745 +                       timeout--;
1746 +                       if (!timeout)
1747 +                               MSG(INIT, "Wait for NFI_MASTERSTA timeout\n");
1748 +               }
1749 +       }
1750 +       /* issue reset operation */
1751 +       mb();
1752 +       DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1753 +
1754 +       return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0);
1755 +}
1756 +
1757 +static void
1758 +mtk_nand_set_mode(u16 u2OpMode)
1759 +{
1760 +       u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16);
1761 +       u2Mode &= ~CNFG_OP_MODE_MASK;
1762 +       u2Mode |= u2OpMode;
1763 +       DRV_WriteReg16(NFI_CNFG_REG16, u2Mode);
1764 +}
1765 +
1766 +static void
1767 +mtk_nand_set_autoformat(bool bEnable)
1768 +{
1769 +       if (bEnable)
1770 +               NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1771 +       else
1772 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1773 +}
1774 +
1775 +static void
1776 +mtk_nand_configure_fdm(u16 u2FDMSize)
1777 +{
1778 +       NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK);
1779 +       NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT);
1780 +       NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT);
1781 +}
1782 +
1783 +static void
1784 +mtk_nand_configure_lock(void)
1785 +{
1786 +       u32 u4WriteColNOB = 2;
1787 +       u32 u4WriteRowNOB = 3;
1788 +       u32 u4EraseColNOB = 0;
1789 +       u32 u4EraseRowNOB = 3;
1790 +       DRV_WriteReg16(NFI_LOCKANOB_REG16,
1791 +               (u4WriteColNOB << PROG_CADD_NOB_SHIFT) | (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) | (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) | (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT));
1792 +
1793 +       if (CHIPVER_ECO_1 == g_u4ChipVer) {
1794 +               int i;
1795 +               for (i = 0; i < 16; ++i) {
1796 +                       DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF);
1797 +                       DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF);
1798 +               }
1799 +               //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0);
1800 +               DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF);
1801 +               DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON);
1802 +       }
1803 +}
1804 +
1805 +static bool
1806 +mtk_nand_pio_ready(void)
1807 +{
1808 +       int count = 0;
1809 +       while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) {
1810 +               count++;
1811 +               if (count > 0xffff) {
1812 +                       printk("PIO_DIRDY timeout\n");
1813 +                       return false;
1814 +               }
1815 +       }
1816 +
1817 +       return true;
1818 +}
1819 +
1820 +static bool
1821 +mtk_nand_set_command(u16 command)
1822 +{
1823 +       mb();
1824 +       DRV_WriteReg16(NFI_CMD_REG16, command);
1825 +       return mtk_nand_status_ready(STA_CMD_STATE);
1826 +}
1827 +
1828 +static bool
1829 +mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB)
1830 +{
1831 +       mb();
1832 +       DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr);
1833 +       DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr);
1834 +       DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT));
1835 +       return mtk_nand_status_ready(STA_ADDR_STATE);
1836 +}
1837 +
1838 +static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
1839 +{
1840 +       if (ctrl & NAND_ALE) {
1841 +               mtk_nand_set_address(dat, 0, 1, 0);
1842 +       } else if (ctrl & NAND_CLE) {
1843 +               mtk_nand_reset();
1844 +                mtk_nand_set_mode(0x6000);
1845 +               mtk_nand_set_command(dat);
1846 +       }
1847 +}
1848 +
1849 +static bool
1850 +mtk_nand_check_RW_count(u16 u2WriteSize)
1851 +{
1852 +       u32 timeout = 0xFFFF;
1853 +       u16 u2SecNum = u2WriteSize >> 9;
1854 +
1855 +       while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum) {
1856 +               timeout--;
1857 +               if (0 == timeout) {
1858 +                       printk(KERN_INFO "[%s] timeout\n", __FUNCTION__);
1859 +                       return false;
1860 +               }
1861 +       }
1862 +       return true;
1863 +}
1864 +
1865 +static bool
1866 +mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 * buf)
1867 +{
1868 +       /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
1869 +       bool bRet = false;
1870 +       u16 sec_num = 1 << (nand->page_shift - 9);
1871 +       u32 col_addr = u4ColAddr;
1872 +       u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
1873 +       if (nand->options & NAND_BUSWIDTH_16)
1874 +               col_addr /= 2;
1875 +
1876 +       if (!mtk_nand_reset())
1877 +               goto cleanup;
1878 +       if (g_bHwEcc)   {
1879 +               NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1880 +       } else  {
1881 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1882 +       }
1883 +
1884 +       mtk_nand_set_mode(CNFG_OP_READ);
1885 +       NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
1886 +       DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
1887 +
1888 +       if (full) {
1889 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1890 +
1891 +               if (g_bHwEcc)
1892 +                       NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1893 +               else
1894 +                       NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1895 +       } else {
1896 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1897 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1898 +       }
1899 +
1900 +       mtk_nand_set_autoformat(full);
1901 +       if (full)
1902 +               if (g_bHwEcc)
1903 +                       ECC_Decode_Start();
1904 +       if (!mtk_nand_set_command(NAND_CMD_READ0))
1905 +               goto cleanup;
1906 +       if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
1907 +               goto cleanup;
1908 +       if (!mtk_nand_set_command(NAND_CMD_READSTART))
1909 +               goto cleanup;
1910 +       if (!mtk_nand_status_ready(STA_NAND_BUSY))
1911 +               goto cleanup;
1912 +
1913 +       bRet = true;
1914 +
1915 +cleanup:
1916 +       return bRet;
1917 +}
1918 +
1919 +static bool
1920 +mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf)
1921 +{
1922 +       bool bRet = false;
1923 +       u32 sec_num = 1 << (nand->page_shift - 9);
1924 +       u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
1925 +       if (nand->options & NAND_BUSWIDTH_16)
1926 +               col_addr /= 2;
1927 +
1928 +       /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
1929 +       if (!mtk_nand_reset())
1930 +               return false;
1931 +
1932 +       mtk_nand_set_mode(CNFG_OP_PRGM);
1933 +
1934 +       NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
1935 +
1936 +       DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
1937 +
1938 +       if (full) {
1939 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1940 +               if (g_bHwEcc)
1941 +                       NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1942 +               else
1943 +                       NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1944 +       } else {
1945 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1946 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1947 +       }
1948 +
1949 +       mtk_nand_set_autoformat(full);
1950 +
1951 +       if (full)
1952 +               if (g_bHwEcc)
1953 +                       ECC_Encode_Start();
1954 +
1955 +       if (!mtk_nand_set_command(NAND_CMD_SEQIN))
1956 +               goto cleanup;
1957 +       //1 FIXED ME: For Any Kind of AddrCycle
1958 +       if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
1959 +               goto cleanup;
1960 +
1961 +       if (!mtk_nand_status_ready(STA_NAND_BUSY))
1962 +               goto cleanup;
1963 +
1964 +       bRet = true;
1965 +
1966 +cleanup:
1967 +       return bRet;
1968 +}
1969 +
1970 +static bool
1971 +mtk_nand_check_dececc_done(u32 u4SecNum)
1972 +{
1973 +       u32 timeout, dec_mask;
1974 +
1975 +       timeout = 0xffff;
1976 +       dec_mask = (1 << u4SecNum) - 1;
1977 +       while ((dec_mask != DRV_Reg(ECC_DECDONE_REG16)) && timeout > 0)
1978 +               timeout--;
1979 +       if (timeout == 0) {
1980 +               MSG(VERIFY, "ECC_DECDONE: timeout\n");
1981 +               return false;
1982 +       }
1983 +       return true;
1984 +}
1985 +
1986 +static bool
1987 +mtk_nand_mcu_read_data(u8 * buf, u32 length)
1988 +{
1989 +       int timeout = 0xffff;
1990 +       u32 i;
1991 +       u32 *buf32 = (u32 *) buf;
1992 +       if ((u32) buf % 4 || length % 4)
1993 +               NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
1994 +       else
1995 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
1996 +
1997 +       //DRV_WriteReg32(NFI_STRADDR_REG32, 0);
1998 +       mb();
1999 +       NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD);
2000 +
2001 +       if ((u32) buf % 4 || length % 4) {
2002 +               for (i = 0; (i < (length)) && (timeout > 0);) {
2003 +                       if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2004 +                               *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32);
2005 +                               i++;
2006 +                       } else {
2007 +                               timeout--;
2008 +                       }
2009 +                       if (0 == timeout) {
2010 +                               printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2011 +                               dump_nfi();
2012 +                               return false;
2013 +                       }
2014 +               }
2015 +       } else {
2016 +               for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
2017 +                       if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2018 +                               *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
2019 +                               i++;
2020 +                       } else {
2021 +                               timeout--;
2022 +                       }
2023 +                       if (0 == timeout) {
2024 +                               printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2025 +                               dump_nfi();
2026 +                               return false;
2027 +                       }
2028 +               }
2029 +       }
2030 +       return true;
2031 +}
2032 +
2033 +static bool
2034 +mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size)
2035 +{
2036 +       return mtk_nand_mcu_read_data(pDataBuf, u4Size);
2037 +}
2038 +
2039 +static bool
2040 +mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length)
2041 +{
2042 +       u32 timeout = 0xFFFF;
2043 +       u32 i;
2044 +       u32 *pBuf32;
2045 +       NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2046 +       mb();
2047 +       NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR);
2048 +       pBuf32 = (u32 *) buf;
2049 +
2050 +       if ((u32) buf % 4 || length % 4)
2051 +               NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2052 +       else
2053 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2054 +
2055 +       if ((u32) buf % 4 || length % 4) {
2056 +               for (i = 0; (i < (length)) && (timeout > 0);) {
2057 +                       if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2058 +                               DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
2059 +                               i++;
2060 +                       } else {
2061 +                               timeout--;
2062 +                       }
2063 +                       if (0 == timeout) {
2064 +                               printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2065 +                               dump_nfi();
2066 +                               return false;
2067 +                       }
2068 +               }
2069 +       } else {
2070 +               for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
2071 +                       if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2072 +                               DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
2073 +                               i++;
2074 +                       } else {
2075 +                               timeout--;
2076 +                       }
2077 +                       if (0 == timeout) {
2078 +                               printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2079 +                               dump_nfi();
2080 +                               return false;
2081 +                       }
2082 +               }
2083 +       }
2084 +
2085 +       return true;
2086 +}
2087 +
2088 +static bool
2089 +mtk_nand_write_page_data(struct mtd_info *mtd, u8 * buf, u32 size)
2090 +{
2091 +       return mtk_nand_mcu_write_data(mtd, buf, size);
2092 +}
2093 +
2094 +static void
2095 +mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum)
2096 +{
2097 +       u32 i;
2098 +       u32 *pBuf32 = (u32 *) pDataBuf;
2099 +
2100 +       if (pBuf32) {
2101 +               for (i = 0; i < u4SecNum; ++i) {
2102 +                       *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
2103 +                       *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
2104 +               }
2105 +       }
2106 +}
2107 +
2108 +static u8 fdm_buf[64];
2109 +static void
2110 +mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum)
2111 +{
2112 +       u32 i, j;
2113 +       u8 checksum = 0;
2114 +       bool empty = true;
2115 +       struct nand_oobfree *free_entry;
2116 +       u32 *pBuf32;
2117 +
2118 +       memcpy(fdm_buf, pDataBuf, u4SecNum * 8);
2119 +
2120 +       free_entry = layout->oobfree;
2121 +       for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) {
2122 +               for (j = 0; j < free_entry[i].length; j++) {
2123 +                       if (pDataBuf[free_entry[i].offset + j] != 0xFF)
2124 +                               empty = false;
2125 +                       checksum ^= pDataBuf[free_entry[i].offset + j];
2126 +               }
2127 +       }
2128 +
2129 +       if (!empty) {
2130 +               fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum;
2131 +       }
2132 +
2133 +       pBuf32 = (u32 *) fdm_buf;
2134 +       for (i = 0; i < u4SecNum; ++i) {
2135 +               DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++);
2136 +               DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++);
2137 +       }
2138 +}
2139 +
2140 +static void
2141 +mtk_nand_stop_read(void)
2142 +{
2143 +       NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
2144 +       mtk_nand_reset();
2145 +       if (g_bHwEcc)
2146 +               ECC_Decode_End();
2147 +       DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2148 +}
2149 +
2150 +static void
2151 +mtk_nand_stop_write(void)
2152 +{
2153 +       NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
2154 +       if (g_bHwEcc)
2155 +               ECC_Encode_End();
2156 +       DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2157 +}
2158 +
2159 +bool
2160 +mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
2161 +{
2162 +       u8 *buf;
2163 +       bool bRet = true;
2164 +       struct nand_chip *nand = mtd->priv;
2165 +       u32 u4SecNum = u4PageSize >> 9;
2166 +
2167 +       if (((u32) pPageBuf % 16) && local_buffer_16_align)
2168 +               buf = local_buffer_16_align;
2169 +       else
2170 +               buf = pPageBuf;
2171 +       if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, true, buf)) {
2172 +               int j;
2173 +               for (j = 0 ; j < u4SecNum; j++) {
2174 +                       if (!mtk_nand_read_page_data(mtd, buf+j*512, 512))
2175 +                               bRet = false;
2176 +                       if(g_bHwEcc && !mtk_nand_check_dececc_done(j+1))
2177 +                               bRet = false;
2178 +                       if(g_bHwEcc && !mtk_nand_check_bch_error(mtd, buf+j*512, j, u4RowAddr))
2179 +                               bRet = false;
2180 +               }
2181 +               if (!mtk_nand_status_ready(STA_NAND_BUSY))
2182 +                       bRet = false;
2183 +
2184 +               mtk_nand_read_fdm_data(pFDMBuf, u4SecNum);
2185 +               mtk_nand_stop_read();
2186 +       }
2187 +
2188 +       if (buf == local_buffer_16_align)
2189 +               memcpy(pPageBuf, buf, u4PageSize);
2190 +
2191 +       return bRet;
2192 +}
2193 +
2194 +int
2195 +mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
2196 +{
2197 +       struct nand_chip *chip = mtd->priv;
2198 +       u32 u4SecNum = u4PageSize >> 9;
2199 +       u8 *buf;
2200 +       u8 status;
2201 +
2202 +       MSG(WRITE, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr);
2203 +
2204 +       if (((u32) pPageBuf % 16) && local_buffer_16_align) {
2205 +               printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf);
2206 +               memcpy(local_buffer_16_align, pPageBuf, mtd->writesize);
2207 +               buf = local_buffer_16_align;
2208 +       } else
2209 +               buf = pPageBuf;
2210 +
2211 +       if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf)) {
2212 +               mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum);
2213 +               (void)mtk_nand_write_page_data(mtd, buf, u4PageSize);
2214 +               (void)mtk_nand_check_RW_count(u4PageSize);
2215 +               mtk_nand_stop_write();
2216 +               (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
2217 +               while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
2218 +       }
2219 +
2220 +       status = chip->waitfunc(mtd, chip);
2221 +       if (status & NAND_STATUS_FAIL)
2222 +               return -EIO;
2223 +       return 0;
2224 +}
2225 +
2226 +static int
2227 +get_start_end_block(struct mtd_info *mtd, int block, int *start_blk, int *end_blk)
2228 +{
2229 +       struct nand_chip *chip = mtd->priv;
2230 +       int i;
2231 +
2232 +       *start_blk = 0;
2233 +        for (i = 0; i <= part_num; i++)
2234 +        {
2235 +               if (i == part_num)
2236 +               {
2237 +                       // try the last reset partition
2238 +                       *end_blk = (chip->chipsize >> chip->phys_erase_shift) - 1;
2239 +                       if (*start_blk <= *end_blk)
2240 +                       {
2241 +                               if ((block >= *start_blk) && (block <= *end_blk))
2242 +                                       break;
2243 +                       }
2244 +               }
2245 +               // skip All partition entry
2246 +               else if (g_pasStatic_Partition[i].size == MTDPART_SIZ_FULL)
2247 +               {
2248 +                       continue;
2249 +               }
2250 +                *end_blk = *start_blk + (g_pasStatic_Partition[i].size >> chip->phys_erase_shift) - 1;
2251 +                if ((block >= *start_blk) && (block <= *end_blk))
2252 +                        break;
2253 +                *start_blk = *end_blk + 1;
2254 +        }
2255 +        if (*start_blk > *end_blk)
2256 +       {
2257 +                return -1;
2258 +       }
2259 +       return 0;
2260 +}
2261 +
2262 +static int
2263 +block_remap(struct mtd_info *mtd, int block)
2264 +{
2265 +       struct nand_chip *chip = mtd->priv;
2266 +       int start_blk, end_blk;
2267 +       int j, block_offset;
2268 +       int bad_block = 0;
2269 +
2270 +       if (chip->bbt == NULL) {
2271 +               printk("ERROR!! no bbt table for block_remap\n");
2272 +               return -1;
2273 +       }
2274 +
2275 +       if (get_start_end_block(mtd, block, &start_blk, &end_blk) < 0) {
2276 +               printk("ERROR!! can not find start_blk and end_blk\n");
2277 +               return -1;
2278 +       }
2279 +
2280 +       block_offset = block - start_blk;
2281 +       for (j = start_blk; j <= end_blk;j++) {
2282 +               if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) == 0x0) {
2283 +                       if (!block_offset)
2284 +                               break;
2285 +                       block_offset--;
2286 +               } else {
2287 +                       bad_block++;
2288 +               }
2289 +       }
2290 +       if (j <= end_blk) {
2291 +               return j;
2292 +       } else {
2293 +               // remap to the bad block
2294 +               for (j = end_blk; bad_block > 0; j--)
2295 +               {
2296 +                       if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) != 0x0)
2297 +                       {
2298 +                               bad_block--;
2299 +                               if (bad_block <= block_offset)
2300 +                                       return j;
2301 +                       }
2302 +               }
2303 +       }
2304 +
2305 +       printk("Error!! block_remap error\n");
2306 +       return -1;
2307 +}
2308 +
2309 +int
2310 +check_block_remap(struct mtd_info *mtd, int block)
2311 +{
2312 +       if (shift_on_bbt)
2313 +               return  block_remap(mtd, block);
2314 +       else
2315 +               return block;
2316 +}
2317 +EXPORT_SYMBOL(check_block_remap);
2318 +
2319 +
2320 +static int
2321 +write_next_on_fail(struct mtd_info *mtd, char *write_buf, int page, int * to_blk)
2322 +{
2323 +       struct nand_chip *chip = mtd->priv;
2324 +       int i, j, to_page = 0, first_page;
2325 +       char *buf, *oob;
2326 +       int start_blk = 0, end_blk;
2327 +       int mapped_block;
2328 +       int page_per_block_bit = chip->phys_erase_shift - chip->page_shift;
2329 +       int block = page >> page_per_block_bit;
2330 +
2331 +       // find next available block in the same MTD partition 
2332 +       mapped_block = block_remap(mtd, block);
2333 +       if (mapped_block == -1)
2334 +               return NAND_STATUS_FAIL;
2335 +
2336 +       get_start_end_block(mtd, block, &start_blk, &end_blk);
2337 +
2338 +       buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL | GFP_DMA);
2339 +       if (buf == NULL)
2340 +               return -1;
2341 +
2342 +       oob = buf + mtd->writesize;
2343 +       for ((*to_blk) = block + 1; (*to_blk) <= end_blk ; (*to_blk)++) {
2344 +               if (nand_bbt_get(mtd, (*to_blk) << page_per_block_bit) == 0) {
2345 +                       int status;
2346 +                       status = mtk_nand_erase_hw(mtd, (*to_blk) << page_per_block_bit);
2347 +                       if (status & NAND_STATUS_FAIL)  {
2348 +                               mtk_nand_block_markbad_hw(mtd, (*to_blk) << chip->phys_erase_shift);
2349 +                               nand_bbt_set(mtd, (*to_blk) << page_per_block_bit, 0x3);
2350 +                       } else {
2351 +                               /* good block */
2352 +                               to_page = (*to_blk) << page_per_block_bit;
2353 +                               break;
2354 +                       }
2355 +               }
2356 +       }
2357 +
2358 +       if (!to_page) {
2359 +               kfree(buf);
2360 +               return -1;
2361 +       }
2362 +
2363 +       first_page = (page >> page_per_block_bit) << page_per_block_bit;
2364 +       for (i = 0; i < (1 << page_per_block_bit); i++) {
2365 +               if ((first_page + i) != page) {
2366 +                       mtk_nand_read_oob_hw(mtd, chip, (first_page+i));
2367 +                       for (j = 0; j < mtd->oobsize; j++)
2368 +                               if (chip->oob_poi[j] != (unsigned char)0xff)
2369 +                                       break;
2370 +                       if (j < mtd->oobsize)   {
2371 +                               mtk_nand_exec_read_page(mtd, (first_page+i), mtd->writesize, buf, oob);
2372 +                               memset(oob, 0xff, mtd->oobsize);
2373 +                               if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)buf, oob) != 0) {
2374 +                                       int ret, new_blk = 0;
2375 +                                       nand_bbt_set(mtd, to_page, 0x3);
2376 +                                       ret =  write_next_on_fail(mtd, buf, to_page + i, &new_blk);
2377 +                                       if (ret) {
2378 +                                               kfree(buf);
2379 +                                               mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2380 +                                               return ret;
2381 +                                       }
2382 +                                       mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2383 +                                       *to_blk = new_blk;
2384 +                                       to_page = ((*to_blk) <<  page_per_block_bit);
2385 +                               }
2386 +                       }
2387 +               } else {
2388 +                       memset(chip->oob_poi, 0xff, mtd->oobsize);
2389 +                       if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)write_buf, chip->oob_poi) != 0) {
2390 +                               int ret, new_blk = 0;
2391 +                               nand_bbt_set(mtd, to_page, 0x3);
2392 +                               ret =  write_next_on_fail(mtd, write_buf, to_page + i, &new_blk);
2393 +                               if (ret) {
2394 +                                       kfree(buf);
2395 +                                       mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2396 +                                       return ret;
2397 +                               }
2398 +                               mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2399 +                               *to_blk = new_blk;
2400 +                               to_page = ((*to_blk) <<  page_per_block_bit);
2401 +                       }
2402 +               }
2403 +       }
2404 +
2405 +       kfree(buf);
2406 +
2407 +       return 0;
2408 +}
2409 +
2410 +static int
2411 +mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset,
2412 +               int data_len, const u8 * buf, int oob_required, int page, int raw)
2413 +{
2414 +       int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2415 +       int block = page / page_per_block;
2416 +       u16 page_in_block = page % page_per_block;
2417 +       int mapped_block = block;
2418 +
2419 +#if defined(MTK_NAND_BMT)
2420 +       mapped_block = get_mapping_block_index(block);
2421 +       // write bad index into oob
2422 +       if (mapped_block != block)
2423 +               set_bad_index_to_oob(chip->oob_poi, block);
2424 +       else
2425 +               set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
2426 +#else
2427 +       if (shift_on_bbt) {
2428 +               mapped_block = block_remap(mtd, block);
2429 +               if (mapped_block == -1)
2430 +                       return NAND_STATUS_FAIL;
2431 +               if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2432 +                       return NAND_STATUS_FAIL;
2433 +       }
2434 +#endif
2435 +       do {
2436 +               if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi)) {
2437 +                       MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
2438 +#if defined(MTK_NAND_BMT)
2439 +                       if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) {
2440 +                               MSG(INIT, "Update BMT success\n");
2441 +                               return 0;
2442 +                       } else {
2443 +                               MSG(INIT, "Update BMT fail\n");
2444 +                               return -EIO;
2445 +                       }
2446 +#else
2447 +                       {
2448 +                               int new_blk;
2449 +                               nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
2450 +                               if (write_next_on_fail(mtd, (char *)buf, page_in_block + mapped_block * page_per_block, &new_blk) != 0)
2451 +                               {
2452 +                               mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2453 +                               return NAND_STATUS_FAIL;
2454 +                               }
2455 +                               mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2456 +                               break;
2457 +                       }
2458 +#endif
2459 +               } else
2460 +                       break;
2461 +       } while(1);
2462 +
2463 +       return 0;
2464 +}
2465 +
2466 +static void
2467 +mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
2468 +{
2469 +       struct nand_chip *nand = mtd->priv;
2470 +
2471 +       switch (command) {
2472 +       case NAND_CMD_SEQIN:
2473 +               memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB));
2474 +               g_kCMD.pDataBuf = NULL;
2475 +               g_kCMD.u4RowAddr = page_addr;
2476 +               g_kCMD.u4ColAddr = column;
2477 +               break;
2478 +
2479 +       case NAND_CMD_PAGEPROG:
2480 +               if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[nand_badblock_offset])) {
2481 +                       u8 *pDataBuf = g_kCMD.pDataBuf ? g_kCMD.pDataBuf : nand->buffers->databuf;
2482 +                       mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB);
2483 +                       g_kCMD.u4RowAddr = (u32) - 1;
2484 +                       g_kCMD.u4OOBRowAddr = (u32) - 1;
2485 +               }
2486 +               break;
2487 +
2488 +       case NAND_CMD_READOOB:
2489 +               g_kCMD.u4RowAddr = page_addr;
2490 +               g_kCMD.u4ColAddr = column + mtd->writesize;
2491 +               break;
2492 +
2493 +       case NAND_CMD_READ0:
2494 +               g_kCMD.u4RowAddr = page_addr;
2495 +               g_kCMD.u4ColAddr = column;
2496 +               break;
2497 +
2498 +       case NAND_CMD_ERASE1:
2499 +               nand->state=FL_ERASING;
2500 +               (void)mtk_nand_reset();
2501 +               mtk_nand_set_mode(CNFG_OP_ERASE);
2502 +               (void)mtk_nand_set_command(NAND_CMD_ERASE1);
2503 +               (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2);
2504 +               break;
2505 +
2506 +       case NAND_CMD_ERASE2:
2507 +               (void)mtk_nand_set_command(NAND_CMD_ERASE2);
2508 +               while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
2509 +                       ;
2510 +               break;
2511 +
2512 +       case NAND_CMD_STATUS:
2513 +               (void)mtk_nand_reset();
2514 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2515 +               mtk_nand_set_mode(CNFG_OP_SRD);
2516 +               mtk_nand_set_mode(CNFG_READ_EN);
2517 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2518 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2519 +               (void)mtk_nand_set_command(NAND_CMD_STATUS);
2520 +               NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
2521 +               mb();
2522 +               DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
2523 +               g_bcmdstatus = true;
2524 +               break;
2525 +
2526 +       case NAND_CMD_RESET:
2527 +               (void)mtk_nand_reset();
2528 +               DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_RST_DONE_EN);
2529 +               (void)mtk_nand_set_command(NAND_CMD_RESET);
2530 +               DRV_WriteReg16(NFI_BASE+0x44, 0xF1);
2531 +               while(!(DRV_Reg16(NFI_INTR_REG16)&INTR_RST_DONE_EN))
2532 +                       ;
2533 +               break;
2534 +
2535 +       case NAND_CMD_READID:
2536 +               mtk_nand_reset();
2537 +               /* Disable HW ECC */
2538 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2539 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2540 +               NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW);
2541 +               (void)mtk_nand_reset();
2542 +               mb();
2543 +               mtk_nand_set_mode(CNFG_OP_SRD);
2544 +               (void)mtk_nand_set_command(NAND_CMD_READID);
2545 +               (void)mtk_nand_set_address(0, 0, 1, 0);
2546 +               DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD);
2547 +               while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE)
2548 +                       ;
2549 +               break;
2550 +
2551 +       default:
2552 +               BUG();
2553 +               break;
2554 +       }
2555 +}
2556 +
2557 +static void
2558 +mtk_nand_select_chip(struct mtd_info *mtd, int chip)
2559 +{
2560 +       if ((chip == -1) && (false == g_bInitDone)) {
2561 +               struct nand_chip *nand = mtd->priv;
2562 +               struct mtk_nand_host *host = nand->priv;
2563 +               struct mtk_nand_host_hw *hw = host->hw;
2564 +               u32 spare_per_sector = mtd->oobsize / (mtd->writesize / 512);
2565 +               u32 ecc_bit = 4;
2566 +               u32 spare_bit = PAGEFMT_SPARE_16;
2567 +
2568 +               if (spare_per_sector >= 28) {
2569 +                       spare_bit = PAGEFMT_SPARE_28;
2570 +                       ecc_bit = 12;
2571 +                       spare_per_sector = 28;
2572 +               } else if (spare_per_sector >= 27) {
2573 +                       spare_bit = PAGEFMT_SPARE_27;
2574 +                       ecc_bit = 8;
2575 +                       spare_per_sector = 27;
2576 +               } else if (spare_per_sector >= 26) {
2577 +                       spare_bit = PAGEFMT_SPARE_26;
2578 +                       ecc_bit = 8;
2579 +                       spare_per_sector = 26;
2580 +               } else if (spare_per_sector >= 16) {
2581 +                       spare_bit = PAGEFMT_SPARE_16;
2582 +                       ecc_bit = 4;
2583 +                       spare_per_sector = 16;
2584 +               } else {
2585 +                       MSG(INIT, "[NAND]: NFI not support oobsize: %x\n", spare_per_sector);
2586 +                       ASSERT(0);
2587 +               }
2588 +               mtd->oobsize = spare_per_sector*(mtd->writesize/512);
2589 +               MSG(INIT, "[NAND]select ecc bit:%d, sparesize :%d spare_per_sector=%d\n",ecc_bit,mtd->oobsize,spare_per_sector);
2590 +               /* Setup PageFormat */
2591 +               if (4096 == mtd->writesize) {
2592 +                       NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K);
2593 +                       nand->cmdfunc = mtk_nand_command_bp;
2594 +               } else if (2048 == mtd->writesize) {
2595 +                       NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K);
2596 +                       nand->cmdfunc = mtk_nand_command_bp;
2597 +               }
2598 +               ECC_Config(hw,ecc_bit);
2599 +               g_bInitDone = true;
2600 +       }
2601 +       switch (chip) {
2602 +       case -1:
2603 +               break;
2604 +       case 0:
2605 +       case 1:
2606 +               /*  Jun Shen, 2011.04.13  */
2607 +               /* Note: MT6577 EVB NAND  is mounted on CS0, but FPGA is CS1  */
2608 +               DRV_WriteReg16(NFI_CSEL_REG16, chip);
2609 +               /*  Jun Shen, 2011.04.13 */
2610 +               break;
2611 +       }
2612 +}
2613 +
2614 +static uint8_t
2615 +mtk_nand_read_byte(struct mtd_info *mtd)
2616 +{
2617 +       uint8_t retval = 0;
2618 +
2619 +       if (!mtk_nand_pio_ready()) {
2620 +               printk("pio ready timeout\n");
2621 +               retval = false;
2622 +       }
2623 +
2624 +       if (g_bcmdstatus) {
2625 +               retval = DRV_Reg8(NFI_DATAR_REG32);
2626 +               NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
2627 +               mtk_nand_reset();
2628 +               if (g_bHwEcc) {
2629 +                       NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2630 +               } else {
2631 +                       NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2632 +               }
2633 +               g_bcmdstatus = false;
2634 +       } else
2635 +               retval = DRV_Reg8(NFI_DATAR_REG32);
2636 +
2637 +       return retval;
2638 +}
2639 +
2640 +static void
2641 +mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len)
2642 +{
2643 +       struct nand_chip *nand = (struct nand_chip *)mtd->priv;
2644 +       struct NAND_CMD *pkCMD = &g_kCMD;
2645 +       u32 u4ColAddr = pkCMD->u4ColAddr;
2646 +       u32 u4PageSize = mtd->writesize;
2647 +
2648 +       if (u4ColAddr < u4PageSize) {
2649 +               if ((u4ColAddr == 0) && (len >= u4PageSize)) {
2650 +                       mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB);
2651 +                       if (len > u4PageSize) {
2652 +                               u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB));
2653 +                               memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size);
2654 +                       }
2655 +               } else {
2656 +                       mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
2657 +                       memcpy(buf, nand->buffers->databuf + u4ColAddr, len);
2658 +               }
2659 +               pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
2660 +       } else {
2661 +               u32 u4Offset = u4ColAddr - u4PageSize;
2662 +               u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB));
2663 +               if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) {
2664 +                       mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
2665 +                       pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
2666 +               }
2667 +               memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size);
2668 +       }
2669 +       pkCMD->u4ColAddr += len;
2670 +}
2671 +
2672 +static void
2673 +mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
2674 +{
2675 +       struct NAND_CMD *pkCMD = &g_kCMD;
2676 +       u32 u4ColAddr = pkCMD->u4ColAddr;
2677 +       u32 u4PageSize = mtd->writesize;
2678 +       int i4Size, i;
2679 +
2680 +       if (u4ColAddr >= u4PageSize) {
2681 +               u32 u4Offset = u4ColAddr - u4PageSize;
2682 +               u8 *pOOB = pkCMD->au1OOB + u4Offset;
2683 +               i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset));
2684 +               for (i = 0; i < i4Size; i++) {
2685 +                       pOOB[i] &= buf[i];
2686 +               }
2687 +       } else {
2688 +               pkCMD->pDataBuf = (u8 *) buf;
2689 +       }
2690 +
2691 +       pkCMD->u4ColAddr += len;
2692 +}
2693 +
2694 +static int
2695 +mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t * buf, int oob_required, int page)
2696 +{
2697 +       mtk_nand_write_buf(mtd, buf, mtd->writesize);
2698 +       mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
2699 +       return 0;
2700 +}
2701 +
2702 +static int
2703 +mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, int oob_required, int page)
2704 +{
2705 +       struct NAND_CMD *pkCMD = &g_kCMD;
2706 +       u32 u4ColAddr = pkCMD->u4ColAddr;
2707 +       u32 u4PageSize = mtd->writesize;
2708 +
2709 +       if (u4ColAddr == 0) {
2710 +               mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi);
2711 +               pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
2712 +       }
2713 +
2714 +       return 0;
2715 +}
2716 +
2717 +static int
2718 +mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page)
2719 +{
2720 +       int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2721 +       int block = page / page_per_block;
2722 +       u16 page_in_block = page % page_per_block;
2723 +       int mapped_block = block;
2724 +
2725 +#if defined (MTK_NAND_BMT)
2726 +       mapped_block = get_mapping_block_index(block);
2727 +       if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block,
2728 +                       mtd->writesize, buf, chip->oob_poi))
2729 +               return 0;
2730 +#else
2731 +       if (shift_on_bbt) {
2732 +               mapped_block = block_remap(mtd, block);
2733 +               if (mapped_block == -1)
2734 +                       return NAND_STATUS_FAIL;
2735 +               if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2736 +                       return NAND_STATUS_FAIL;
2737 +       }
2738 +
2739 +       if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi))
2740 +               return 0;
2741 +       else
2742 +               return -EIO;
2743 +#endif
2744 +}
2745 +
2746 +int
2747 +mtk_nand_erase_hw(struct mtd_info *mtd, int page)
2748 +{
2749 +       struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2750 +
2751 +       chip->erase(mtd, page);
2752 +
2753 +       return chip->waitfunc(mtd, chip);
2754 +}
2755 +
2756 +static int
2757 +mtk_nand_erase(struct mtd_info *mtd, int page)
2758 +{
2759 +       // look up the physical block this logical block is mapped to
2760 +       struct nand_chip *chip = mtd->priv;
2761 +       int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2762 +       int page_in_block = page % page_per_block;
2763 +       int block = page / page_per_block;
2764 +       int mapped_block = block;
2765 +
2766 +#if defined(MTK_NAND_BMT)    
2767 +       mapped_block = get_mapping_block_index(block);
2768 +#else
2769 +       if (shift_on_bbt) {
2770 +               mapped_block = block_remap(mtd, block);
2771 +               if (mapped_block == -1)
2772 +                       return NAND_STATUS_FAIL;
2773 +               if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2774 +                       return NAND_STATUS_FAIL;
2775 +       }
2776 +#endif
2777 +
2778 +       do {
2779 +               int status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block);
2780 +
2781 +               if (status & NAND_STATUS_FAIL) {
2782 +#if defined (MTK_NAND_BMT)     
2783 +                       if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift,
2784 +                                       UPDATE_ERASE_FAIL, NULL, NULL))
2785 +                       {
2786 +                               MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block);
2787 +                               return 0;
2788 +                       } else {
2789 +                               MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block);
2790 +                               return NAND_STATUS_FAIL;
2791 +                       }
2792 +#else
2793 +                       mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2794 +                       nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
2795 +                       if (shift_on_bbt) {
2796 +                               mapped_block = block_remap(mtd, block);
2797 +                               if (mapped_block == -1)
2798 +                                       return NAND_STATUS_FAIL;
2799 +                               if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2800 +                                       return NAND_STATUS_FAIL;
2801 +                       } else
2802 +                               return NAND_STATUS_FAIL;
2803 +#endif
2804 +               } else
2805 +                       break;
2806 +       } while(1);
2807 +
2808 +       return 0;
2809 +}
2810 +
2811 +static int
2812 +mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len)
2813 +{
2814 +       struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2815 +       u32 col_addr = 0;
2816 +       u32 sector = 0;
2817 +       int res = 0;
2818 +       u32 colnob = 2, rawnob = devinfo.addr_cycle - 2;
2819 +       int randomread = 0;
2820 +       int read_len = 0;
2821 +       int sec_num = 1<<(chip->page_shift-9);
2822 +       int spare_per_sector = mtd->oobsize/sec_num;
2823 +
2824 +       if (len >  NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
2825 +               printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
2826 +               return -EINVAL;
2827 +       }
2828 +       if (len > spare_per_sector)
2829 +               randomread = 1;
2830 +       if (!randomread || !(devinfo.advancedmode & RAMDOM_READ)) {
2831 +               while (len > 0) {
2832 +                       read_len = min(len, spare_per_sector);
2833 +                       col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); // offset of this sector's spare area
2834 +                       if (!mtk_nand_ready_for_read(chip, page_addr, col_addr, false, NULL)) {
2835 +                               printk(KERN_WARNING "mtk_nand_ready_for_read return failed\n");
2836 +                               res = -EIO;
2837 +                               goto error;
2838 +                       }
2839 +                       if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2840 +                               printk(KERN_WARNING "mtk_nand_mcu_read_data return failed\n");
2841 +                               res = -EIO;
2842 +                               goto error;
2843 +                       }
2844 +                       mtk_nand_check_RW_count(read_len);
2845 +                       mtk_nand_stop_read();
2846 +                       sector++;
2847 +                       len -= read_len;
2848 +               }
2849 +       } else {
2850 +               col_addr = NAND_SECTOR_SIZE;
2851 +               if (chip->options & NAND_BUSWIDTH_16)
2852 +                       col_addr /= 2;
2853 +               if (!mtk_nand_reset())
2854 +                       goto error;
2855 +               mtk_nand_set_mode(0x6000);
2856 +               NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
2857 +               DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
2858 +
2859 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2860 +               NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2861 +
2862 +               mtk_nand_set_autoformat(false);
2863 +
2864 +               if (!mtk_nand_set_command(NAND_CMD_READ0))
2865 +                       goto error;
2866 +               // FIXME: support any kind of address cycle
2867 +               if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob))
2868 +                       goto error;
2869 +               if (!mtk_nand_set_command(NAND_CMD_READSTART))
2870 +                       goto error;
2871 +               if (!mtk_nand_status_ready(STA_NAND_BUSY))
2872 +                       goto error;
2873 +               read_len = min(len, spare_per_sector);
2874 +               if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2875 +                       printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
2876 +                       res = -EIO;
2877 +                       goto error;
2878 +               }
2879 +               sector++;
2880 +               len -= read_len;
2881 +               mtk_nand_stop_read();
2882 +               while (len > 0) {
2883 +                       read_len = min(len,  spare_per_sector);
2884 +                       if (!mtk_nand_set_command(0x05))
2885 +                               goto error;
2886 +                       col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector);
2887 +                       if (chip->options & NAND_BUSWIDTH_16)
2888 +                               col_addr /= 2;
2889 +                       DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);
2890 +                       DRV_WriteReg16(NFI_ADDRNOB_REG16, 2);
2891 +                       DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
2892 +                       if (!mtk_nand_status_ready(STA_ADDR_STATE))
2893 +                               goto error;
2894 +                       if (!mtk_nand_set_command(0xE0))
2895 +                               goto error;
2896 +                       if (!mtk_nand_status_ready(STA_NAND_BUSY))
2897 +                               goto error;
2898 +                       if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2899 +                               printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
2900 +                               res = -EIO;
2901 +                               goto error;
2902 +                       }
2903 +                       mtk_nand_stop_read();
2904 +                       sector++;
2905 +                       len -= read_len;
2906 +               }
2907 +       }
2908 +error:
2909 +       NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
2910 +       return res;
2911 +}
2912 +
2913 +static int
2914 +mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len)
2915 +{
2916 +       struct nand_chip *chip = mtd->priv;
2917 +       u32 col_addr = 0;
2918 +       u32 sector = 0;
2919 +       int write_len = 0;
2920 +       int status;
2921 +       int sec_num = 1<<(chip->page_shift-9);
2922 +       int spare_per_sector = mtd->oobsize/sec_num;
2923 +
2924 +       if (len >  NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
2925 +               printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
2926 +               return -EINVAL;
2927 +       }
2928 +
2929 +       while (len > 0) {
2930 +               write_len = min(len,  spare_per_sector);
2931 +               col_addr = sector * (NAND_SECTOR_SIZE +  spare_per_sector) + NAND_SECTOR_SIZE;
2932 +               if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
2933 +                       return -EIO;
2934 +               if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len))
2935 +                       return -EIO;
2936 +               (void)mtk_nand_check_RW_count(write_len);
2937 +               NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
2938 +               (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
2939 +               while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
2940 +                       ;
2941 +               status = chip->waitfunc(mtd, chip);
2942 +               if (status & NAND_STATUS_FAIL) {
2943 +                       printk(KERN_INFO "status: %d\n", status);
2944 +                       return -EIO;
2945 +               }
2946 +               len -= write_len;
2947 +               sector++;
2948 +       }
2949 +
2950 +       return 0;
2951 +}
2952 +
2953 +static int
2954 +mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
2955 +{
2956 +       int i, iter;
2957 +       int sec_num = 1<<(chip->page_shift-9);
2958 +       int spare_per_sector = mtd->oobsize/sec_num;
2959 +
2960 +       memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
2961 +
2962 +       // copy ecc data
2963 +       for (i = 0; i < layout->eccbytes; i++) {
2964 +               iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) *  spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
2965 +               local_oob_buf[iter] = chip->oob_poi[layout->eccpos[i]];
2966 +       }
2967 +
2968 +       // copy FDM data
2969 +       for (i = 0; i < sec_num; i++)
2970 +               memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
2971 +
2972 +       return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize);
2973 +}
2974 +
2975 +static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
2976 +{
2977 +       int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2978 +       int block = page / page_per_block;
2979 +       u16 page_in_block = page % page_per_block;
2980 +       int mapped_block = block;
2981 +
2982 +#if defined(MTK_NAND_BMT)
2983 +       mapped_block = get_mapping_block_index(block);
2984 +       // write bad index into oob
2985 +       if (mapped_block != block)
2986 +               set_bad_index_to_oob(chip->oob_poi, block);
2987 +       else
2988 +               set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
2989 +#else
2990 +       if (shift_on_bbt)
2991 +       {
2992 +               mapped_block = block_remap(mtd, block);
2993 +               if (mapped_block == -1)
2994 +                       return NAND_STATUS_FAIL;
2995 +               if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2996 +                       return NAND_STATUS_FAIL;
2997 +       }
2998 +#endif
2999 +       do {
3000 +               if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) {
3001 +                       MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
3002 +#if defined(MTK_NAND_BMT)      
3003 +                       if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift,
3004 +                                       UPDATE_WRITE_FAIL, NULL, chip->oob_poi))
3005 +                       {
3006 +                               MSG(INIT, "Update BMT success\n");
3007 +                               return 0;
3008 +                       } else {
3009 +                               MSG(INIT, "Update BMT fail\n");
3010 +                               return -EIO;
3011 +                       }
3012 +#else
3013 +                       mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
3014 +                       nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
3015 +                       if (shift_on_bbt) {
3016 +                               mapped_block = block_remap(mtd, mapped_block);
3017 +                               if (mapped_block == -1)
3018 +                                       return NAND_STATUS_FAIL;
3019 +                               if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
3020 +                                       return NAND_STATUS_FAIL;
3021 +                       } else {
3022 +                               return NAND_STATUS_FAIL;
3023 +                       }
3024 +#endif
3025 +               } else
3026 +                       break;
3027 +       } while (1);
3028 +
3029 +       return 0;
3030 +}
3031 +
3032 +int
3033 +mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset)
3034 +{
3035 +       struct nand_chip *chip = mtd->priv;
3036 +       int block = (int)offset >> chip->phys_erase_shift;
3037 +       int page = block * (1 << (chip->phys_erase_shift - chip->page_shift));
3038 +       u8 buf[8];
3039 +
3040 +       memset(buf, 0xFF, 8);
3041 +       buf[0] = 0;
3042 +       return  mtk_nand_write_oob_raw(mtd, buf, page, 8);
3043 +}
3044 +
3045 +static int
3046 +mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset)
3047 +{
3048 +       struct nand_chip *chip = mtd->priv;
3049 +       int block = (int)offset >> chip->phys_erase_shift;
3050 +       int ret;
3051 +       int mapped_block = block;
3052 +
3053 +       nand_get_device(chip, mtd, FL_WRITING);
3054 +
3055 +#if defined(MTK_NAND_BMT)    
3056 +       mapped_block = get_mapping_block_index(block);
3057 +       ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
3058 +#else
3059 +       if (shift_on_bbt) {
3060 +               mapped_block = block_remap(mtd, block);
3061 +               if (mapped_block == -1) {
3062 +                       printk("NAND mark bad failed\n");
3063 +                       nand_release_device(mtd);
3064 +                       return NAND_STATUS_FAIL;
3065 +               }
3066 +       }
3067 +       ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
3068 +#endif
3069 +       nand_release_device(mtd);
3070 +
3071 +       return ret;
3072 +}
3073 +
3074 +int
3075 +mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
3076 +{
3077 +       int i;
3078 +       u8 iter = 0;
3079 +
3080 +       int sec_num = 1<<(chip->page_shift-9);
3081 +       int spare_per_sector = mtd->oobsize/sec_num;
3082 +
3083 +       if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) {
3084 +               printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__);
3085 +               return -EIO;
3086 +       }
3087 +
3088 +       // adjust ECC data from physical layout to memory layout
3089 +       /*********************************************************/
3090 +       /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
3091 +       /*  8B  |  8B  |  8B  |  8B  |  8B  |  8B  |  8B  |  8B  */
3092 +       /*********************************************************/
3093 +
3094 +       memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
3095 +       // copy ecc data
3096 +       for (i = 0; i < layout->eccbytes; i++) {
3097 +               iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) *  spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
3098 +               chip->oob_poi[layout->eccpos[i]] = local_oob_buf[iter];
3099 +       }
3100 +
3101 +       // copy FDM data
3102 +       for (i = 0; i < sec_num; i++) {
3103 +               memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i *  spare_per_sector], OOB_AVAI_PER_SECTOR);
3104 +       }
3105 +
3106 +       return 0;
3107 +}
3108 +
3109 +static int
3110 +mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
3111 +{
3112 +       int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3113 +       int block = page / page_per_block;
3114 +       u16 page_in_block = page % page_per_block;
3115 +       int mapped_block = block;
3116 +
3117 +#if defined (MTK_NAND_BMT)
3118 +       mapped_block = get_mapping_block_index(block);
3119 +       mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block);
3120 +#else
3121 +       if (shift_on_bbt) {
3122 +               mapped_block = block_remap(mtd, block);
3123 +               if (mapped_block == -1)
3124 +                       return NAND_STATUS_FAIL;
3125 +               // allow to read oob even if the block is bad
3126 +       }
3127 +       if (mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block)!=0)
3128 +               return -1;
3129 +#endif
3130 +       return 0;
3131 +}
3132 +
3133 +int
3134 +mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs)
3135 +{
3136 +       struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3137 +       int page_addr = (int)(ofs >> chip->page_shift);
3138 +       unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3139 +       unsigned char oob_buf[8];
3140 +
3141 +       page_addr &= ~(page_per_block - 1);
3142 +       if (mtk_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf))) {
3143 +               printk(KERN_WARNING "mtk_nand_read_oob_raw return error\n");
3144 +               return 1;
3145 +       }
3146 +
3147 +       if (oob_buf[0] != 0xff) {
3148 +               printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", page_addr, oob_buf[0]);
3149 +               // dump_nfi();
3150 +               return 1;
3151 +       }
3152 +
3153 +       return 0;
3154 +}
3155 +
+static int
+mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+{       /* BMT/BBT-aware bad-block check; remaps the block before asking the hardware. */
+       struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+       int block = (int)(ofs >> chip->phys_erase_shift);       /* shift the 64-bit offset BEFORE truncating */
+       int mapped_block = block;
+       int ret;
+
+#if defined(MTK_NAND_BMT)    
+       mapped_block = get_mapping_block_index(block);
+#else
+       if (shift_on_bbt) {
+               mapped_block = block_remap(mtd, block);
+       }
+#endif
+
+       ret = mtk_nand_block_bad_hw(mtd, (loff_t)mapped_block << chip->phys_erase_shift);       /* widen before shifting to avoid int overflow */
+#if defined (MTK_NAND_BMT)     
+       if (ret) {
+               MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block);
+               if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL)) {
+                       MSG(INIT, "Update BMT success\n");
+                       ret = 0;
+               } else {
+                       MSG(INIT, "Update BMT fail\n");
+                       ret = 1;
+               }
+       }
+#endif
+
+       return ret;
+}
3188 +
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+char gacBuf[4096 + 288];       /* readback scratch: worst-case 4KiB page plus spare area */
+
+static int
+mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
+{       /* Read back the last programmed page and compare data + first 24 OOB bytes; 0 = match, -1 = mismatch. */
+       struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+       struct NAND_CMD *pkCMD = &g_kCMD;       /* holds the row address of the page just written */
+       u32 u4PageSize = mtd->writesize;
+       u32 *pSrc, *pDst;
+       int i;
+
+       mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize);
+
+       pSrc = (u32 *) buf;
+       pDst = (u32 *) gacBuf;
+       len = len / sizeof(u32);        /* compare word-wise; assumes len is a multiple of 4 -- TODO confirm */
+       for (i = 0; i < len; ++i) {
+               if (*pSrc != *pDst) {
+                       MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr);
+                       return -1;
+               }
+               pSrc++;
+               pDst++;
+       }
+
+       pSrc = (u32 *) chip->oob_poi;
+       pDst = (u32 *) (gacBuf + u4PageSize);
+
+       if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) {
+       // TODO: Ask Designer Why?
+       //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7])) 
+               MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr);
+               MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]);
+               MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]);
+               return -1;
+       }
+       return 0;
+}
+#endif
3229 +
+static void
+mtk_nand_init_hw(struct mtk_nand_host *host) {  /* one-time controller bring-up: pads, clock, timing, ECC engine */
+       struct mtk_nand_host_hw *hw = host->hw;
+       u32 data;
+
+       data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
+       data &= ~((0x3<<18)|(0x3<<16));         /* NOTE(review): bits 16-19 of SYSCTL+0x60 forced to 0x2/0x2 -- confirm field meaning against datasheet */
+       data |= ((0x2<<18) |(0x2<<16));
+       DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
+
+       MSG(INIT, "Enable NFI Clock\n");
+       nand_enable_clock();
+
+       g_bInitDone = false;
+       g_kCMD.u4OOBRowAddr = (u32) - 1;        /* invalidate the cached OOB row address */
+
+       /* Set default NFI access timing control */
+       DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing);
+       DRV_WriteReg16(NFI_CNFG_REG16, 0);
+       DRV_WriteReg16(NFI_PAGEFMT_REG16, 0);
+
+       /* Reset the state machine and data FIFO, because flushing FIFO */
+       (void)mtk_nand_reset();
+
+       /* Set the ECC engine */
+       if (hw->nand_ecc_mode == NAND_ECC_HW) {
+               MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME);
+               if (g_bHwEcc)
+                       NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
+               ECC_Config(host->hw,4);         /* presumably 4-bit ECC strength -- confirm against ECC_Config() */
+               mtk_nand_configure_fdm(8);      /* 8 FDM (spare) bytes per sector */
+               mtk_nand_configure_lock();
+       }
+
+       NFI_SET_REG16(NFI_IOCON_REG16, 0x47);   /* NOTE(review): magic I/O control value -- confirm meaning */
+}
3266 +
+static int mtk_nand_dev_ready(struct mtd_info *mtd)
+{
+       return (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ? 0 : 1;      /* ready when the busy flag is clear */
+}
3271 +
+#define FACT_BBT_BLOCK_NUM  32 // use the latest 32 BLOCK for factory bbt table
+#define FACT_BBT_OOB_SIGNATURE  1       // OOB byte offset where the signature starts
+#define FACT_BBT_SIGNATURE_LEN  7       // strlen("mtknand")
+const u8 oob_signature[] = "mtknand";   // factory-BBT marker stored in the OOB area
+static u8 *fact_bbt = 0;        // lazily allocated in-RAM copy of the factory bad-block table
+static u32 bbt_size = 0;        // size of fact_bbt in bytes (total_block / 4, i.e. 2 bits per block)
3278 +
+static int
+read_fact_bbt(struct mtd_info *mtd, unsigned int page)
+{       /* Try to load the factory BBT stored at @page into fact_bbt; 0 on success, -1 otherwise. */
+       struct nand_chip *chip = mtd->priv;
+
+       // read oob
+       if (mtk_nand_read_oob_hw(mtd, chip, page)==0)
+       {
+               if (chip->oob_poi[nand_badblock_offset] != 0xFF)
+               {
+                       printk("Bad Block on Page %x\n", page);
+                       return -1;
+               }
+               if (memcmp(&chip->oob_poi[FACT_BBT_OOB_SIGNATURE], oob_signature, FACT_BBT_SIGNATURE_LEN) != 0)
+               {
+                       printk("compare signature failed %x\n", page);
+                       return -1;
+               }
+               if (mtk_nand_exec_read_page(mtd, page, mtd->writesize, chip->buffers->databuf, chip->oob_poi))
+               {
+                       printk("Signature matched and data read!\n");
+                       memcpy(fact_bbt, chip->buffers->databuf, (bbt_size <= mtd->writesize)? bbt_size:mtd->writesize);        /* copy at most one page worth of table */
+                       return 0;
+               }
+
+       }
+       printk("failed at page %x\n", page);
+       return -1;
+}
3308 +
+static int
+load_fact_bbt(struct mtd_info *mtd)
+{       /* Scan the last FACT_BBT_BLOCK_NUM blocks (newest first) for a factory BBT; 0 on success, -1 on failure. */
+       struct nand_chip *chip = mtd->priv;
+       int i;
+       u32 total_block;
+
+       total_block = 1 << (chip->chip_shift - chip->phys_erase_shift);
+       bbt_size = total_block >> 2;    /* table uses 2 bits of state per block */
+
+       if ((!fact_bbt) && (bbt_size))
+               fact_bbt = kmalloc(bbt_size, GFP_KERNEL);       /* no cast needed: kmalloc returns void * */
+       if (!fact_bbt)
+               return -1;
+
+       for (i = total_block - 1; i >= (total_block - FACT_BBT_BLOCK_NUM); i--) /* newest block first */
+       {
+               if (read_fact_bbt(mtd, i << (chip->phys_erase_shift - chip->page_shift)) == 0)
+               {
+                       printk("load_fact_bbt success %d\n", i);
+                       return 0;
+               }
+
+       }
+       printk("load_fact_bbt failed\n");
+       return -1;
+}
3336 +
+static int oob_mtk_ooblayout_ecc(struct mtd_info *mtd, int section,
+                               struct mtd_oob_region *oobregion)
+{       /* Report the ECC OOB region for @section; MTD core iterates sections until -ERANGE. */
+       if (section >= (layout->eccbytes / 8)) return -ERANGE;  /* bounds check, as in oob_mtk_ooblayout_free() */
+       oobregion->length = 8;
+       oobregion->offset = layout->eccpos[section * 8];
+       return 0;
+}
3345 +
+static int oob_mtk_ooblayout_free(struct mtd_info *mtd, int section,
+                                struct mtd_oob_region *oobregion)
+{       /* Report the free (client-usable) OOB region for @section; -ERANGE past the last section. */
+       if (section >= (layout->eccbytes / 8)) {
+               return -ERANGE;
+       }
+       oobregion->offset = layout->oobfree[section].offset;
+       oobregion->length = layout->oobfree[section].length;
+
+       return 0;
+}
3357 +
3358 +
+static const struct mtd_ooblayout_ops oob_mtk_ops = {  /* OOB layout callbacks backed by the global layout pointer */
+       .ecc = oob_mtk_ooblayout_ecc,
+       .free = oob_mtk_ooblayout_free,
+};
3363 +
+static int
+mtk_nand_probe(struct platform_device *pdev)
+{       /* Platform probe: bring up the NFI controller, identify the chip, register the MTD device. */
+       struct mtd_part_parser_data ppdata = {0};       /* zero-init: passed by pointer to the partition parsers */
+       struct mtk_nand_host_hw *hw;
+       struct nand_chip *nand_chip;
+       struct mtd_info *mtd;
+       u8 ext_id1, ext_id2, ext_id3;
+       int err = 0;
+       int id;
+       u32 ext_id;
+       int i;
+       u32 data;
+
+       data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
+       data &= ~((0x3<<18)|(0x3<<16));         /* same SYSCTL+0x60 setup as mtk_nand_init_hw() */
+       data |= ((0x2<<18) |(0x2<<16));
+       DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
+
+       hw = &mt7621_nand_hw;
+       BUG_ON(!hw);    /* hw points at a static table, so this can never fire */
+       /* Allocate memory for the device structure (and zero it) */
+       host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL);
+       if (!host) {
+               MSG(INIT, "mtk_nand: failed to allocate device structure.\n");
+               return -ENOMEM;
+       }
+
+       /* Allocate memory for 16 byte aligned buffer */
+       local_buffer_16_align = local_buffer + 16 - ((u32) local_buffer % 16);
+       printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align);
+       host->hw = hw;
+
+       /* init mtd data structure */
+       nand_chip = &host->nand_chip;
+       nand_chip->priv = host;     /* link the private data structures */
+
+       mtd = host->mtd = &nand_chip->mtd;
+       mtd->priv = nand_chip;
+       mtd->owner = THIS_MODULE;
+       mtd->name  = "MT7621-NAND";
+
+       hw->nand_ecc_mode = NAND_ECC_HW;
+
+       /* Set address of NAND IO lines */
+       nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32;
+       nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32;
+       nand_chip->chip_delay = 20; /* 20us command delay time */
+       nand_chip->ecc.mode = hw->nand_ecc_mode;    /* enable ECC */
+       nand_chip->ecc.strength = 1;
+       nand_chip->read_byte = mtk_nand_read_byte;
+       nand_chip->read_buf = mtk_nand_read_buf;
+       nand_chip->write_buf = mtk_nand_write_buf;
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+       nand_chip->verify_buf = mtk_nand_verify_buf;
+#endif
+       nand_chip->select_chip = mtk_nand_select_chip;
+       nand_chip->dev_ready = mtk_nand_dev_ready;
+       nand_chip->cmdfunc = mtk_nand_command_bp;
+       nand_chip->ecc.read_page = mtk_nand_read_page_hwecc;
+       nand_chip->ecc.write_page = mtk_nand_write_page_hwecc;
+
+       mtd_set_ooblayout(mtd, &oob_mtk_ops);
+       nand_chip->ecc.size = hw->nand_ecc_size;    //2048
+       nand_chip->ecc.bytes = hw->nand_ecc_bytes;  //32
+
+       // For BMT, we need to revise driver architecture
+       nand_chip->write_page = mtk_nand_write_page;
+       nand_chip->ecc.write_oob = mtk_nand_write_oob;
+       nand_chip->block_markbad = mtk_nand_block_markbad;   // need to add nand_get_device()/nand_release_device().
+       nand_chip->erase_mtk = mtk_nand_erase;
+       nand_chip->read_page = mtk_nand_read_page;
+       nand_chip->ecc.read_oob = mtk_nand_read_oob;
+       nand_chip->block_bad = mtk_nand_block_bad;
+       nand_chip->cmd_ctrl = mtk_nfc_cmd_ctrl;
+
+       //Qwert:Add for Uboot
+       mtk_nand_init_hw(host);
+       /* Select the device */
+       nand_chip->select_chip(mtd, NFI_DEFAULT_CS);
+
+       /*
+       * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+       * after power-up
+       */
+       nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+       memset(&devinfo, 0 , sizeof(flashdev_info));
+
+       /* Send the command for reading device ID */
+
+       nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+       /* Read manufacturer and device IDs */
+       manu_id = nand_chip->read_byte(mtd);
+       dev_id = nand_chip->read_byte(mtd);
+       id = dev_id | (manu_id << 8);
+       ext_id1 = nand_chip->read_byte(mtd);
+       ext_id2 = nand_chip->read_byte(mtd);
+       ext_id3 = nand_chip->read_byte(mtd);
+       ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3;
+       if (!get_device_info(id, ext_id, &devinfo)) {
+               u32 chip_mode = RALINK_REG(RALINK_SYSCTL_BASE+0x010)&0x0F;
+               MSG(INIT, "Not Support this Device! \r\n");
+               memset(&devinfo, 0 , sizeof(flashdev_info));
+               MSG(INIT, "chip_mode=%08X\n",chip_mode);
+
+               /* apply bootstrap first */
+               devinfo.addr_cycle = 5;
+               devinfo.iowidth = 8;
+
+               switch (chip_mode) {
+               case 10:
+                       devinfo.pagesize = 2048;
+                       devinfo.sparesize = 128;
+                       devinfo.totalsize = 128;
+                       devinfo.blocksize = 128;
+                       break;
+               case 11:
+                       devinfo.pagesize = 4096;
+                       devinfo.sparesize = 128;
+                       devinfo.totalsize = 1024;
+                       devinfo.blocksize = 256;
+                       break;
+               case 12:
+                       devinfo.pagesize = 4096;
+                       devinfo.sparesize = 224;
+                       devinfo.totalsize = 2048;
+                       devinfo.blocksize = 512;
+                       break;
+               default:
+               case 1:
+                       devinfo.pagesize = 2048;
+                       devinfo.sparesize = 64;
+                       devinfo.totalsize = 128;
+                       devinfo.blocksize = 128;
+                       break;
+               }
+
+               devinfo.timmingsetting = NFI_DEFAULT_ACCESS_TIMING;
+               devinfo.devciename[0] = 'U';
+               devinfo.advancedmode = 0;
+       }
+       mtd->writesize = devinfo.pagesize;
+       mtd->erasesize = (devinfo.blocksize<<10);       /* blocksize is stored in KiB */
+       mtd->oobsize = devinfo.sparesize;
+
+       nand_chip->chipsize = (devinfo.totalsize<<20);  /* totalsize is stored in MiB */
+       nand_chip->page_shift = ffs(mtd->writesize) - 1;
+       nand_chip->pagemask = (nand_chip->chipsize >> nand_chip->page_shift) - 1;
+       nand_chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
+       nand_chip->chip_shift = ffs(nand_chip->chipsize) - 1;
+
+
+       /* allocate buffers or call select_chip here or a bit earlier*/
+       {
+               struct nand_buffers *nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize + mtd->oobsize * 3, GFP_KERNEL);
+               if (!nbuf) {
+                       kfree(host);    /* don't leak the host struct on OOM */
+                       return -ENOMEM;
+               }
+               nbuf->ecccalc = (uint8_t *)(nbuf + 1);
+               nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
+               nbuf->databuf = nbuf->ecccode + mtd->oobsize;
+               nand_chip->buffers = nbuf;
+               nand_chip->options |= NAND_OWN_BUFFERS;
+       }
+
+       nand_chip->oob_poi = nand_chip->buffers->databuf + mtd->writesize;
+       nand_chip->badblockpos = 0;
+
+       if (devinfo.pagesize == 4096)
+               layout = &nand_oob_128;
+       else if (devinfo.pagesize == 2048)
+               layout = &nand_oob_64;
+       else if (devinfo.pagesize == 512)
+               layout = &nand_oob_16;
+
+       layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE);
+       for (i = 0; i < layout->eccbytes; i++)
+               layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE)+i;
+
+       MSG(INIT, "Support this Device in MTK table! %x \r\n", id);
+       hw->nfi_bus_width = devinfo.iowidth;
+       DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting);
+
+       /* 16-bit bus width */
+       if (hw->nfi_bus_width == 16) {
+               MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME);
+               nand_chip->options |= NAND_BUSWIDTH_16;
+       }
+
+       hw->nfi_cs_num = 1;
+
+       /* Scan to find existance of the device */
+       if (nand_scan(mtd, hw->nfi_cs_num)) {
+               MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME);
+               err = -ENXIO;
+               goto out;
+       }
+
+       g_page_size = mtd->writesize;
+       platform_set_drvdata(pdev, host);
+       if (hw->nfi_bus_width == 16) {
+               NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
+       }
+
+       nand_chip->select_chip(mtd, 0);
+#if defined(MTK_NAND_BMT)  
+       nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift;  /* reserve the BMT pool at the end */
+#endif
+       mtd->size = nand_chip->chipsize;
+
+       CFG_BLOCKSIZE = mtd->erasesize;
+
+#if defined(MTK_NAND_BMT)
+       if (!g_bmt) {
+               if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE))) {
+                       MSG(INIT, "Error: init bmt failed\n");
+                       return 0;       /* NOTE(review): returns success with a half-initialised device -- confirm intent */
+               }
+       }
+#endif
+
+       nand_set_flash_node(nand_chip, pdev->dev.of_node);
+       err = mtd_device_parse_register(mtd, probe_types, &ppdata,
+                                       NULL, 0);
+       if (!err) {
+               MSG(INIT, "[mtk_nand] probe successfully!\n");
+               nand_disable_clock();
+               shift_on_bbt = 1;
+               if (load_fact_bbt(mtd) == 0) {
+                       int i;
+                       for (i = 0; i < 0x100; i++)     /* NOTE(review): fixed 0x100-entry merge -- assumes bbt/fact_bbt cover >= 0x100 bytes */
+                               nand_chip->bbt[i] |= fact_bbt[i];
+               }
+
+               return err;
+       }
+
+out:
+       MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err);
+       nand_release(mtd);
+       platform_set_drvdata(pdev, NULL);
+       /* kfree(NULL) is a no-op, so buffers needs no guard */
+       kfree(nand_chip->buffers);
+
+       kfree(host);
+       nand_disable_clock();
+       return err;
+}
3615 +
+static int
+mtk_nand_remove(struct platform_device *pdev)
+{       /* Platform remove: unregister the MTD device and release everything probe allocated. */
+       struct mtk_nand_host *host = platform_get_drvdata(pdev);
+       struct mtd_info *mtd = host->mtd;
+       struct nand_chip *nand_chip = &host->nand_chip;
+
+       nand_release(mtd);
+
+       /* kfree(NULL) is a no-op, so the buffers pointer needs no guard */
+       kfree(nand_chip->buffers);
+       kfree(host);
+       nand_disable_clock();
+
+       return 0;
+}
3632 +
+static const struct of_device_id mt7621_nand_match[] = {       /* device-tree binding for this driver */
+       { .compatible = "mtk,mt7621-nand" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mt7621_nand_match);
3638 +
+static struct platform_driver mtk_nand_driver = {      /* platform glue: probe/remove plus OF match table */
+       .probe = mtk_nand_probe,
+       .remove = mtk_nand_remove,
+       .driver = {
+               .name = "MT7621-NAND",
+               .owner = THIS_MODULE,
+               .of_match_table = mt7621_nand_match,
+       },
+};
3648 +
+static int __init
+mtk_nand_init(void)
+{       /* Module entry point: announce the driver and register it with the platform bus. */
+       printk(KERN_INFO "MediaTek Nand driver init, version %s\n", VERSION);   /* explicit log level instead of the default */
+
+       return platform_driver_register(&mtk_nand_driver);
+}
3656 +
+static void __exit
+mtk_nand_exit(void)
+{       /* Module teardown: unregister the platform driver. */
+       platform_driver_unregister(&mtk_nand_driver);
+}
3662 +
3663 +module_init(mtk_nand_init);
3664 +module_exit(mtk_nand_exit);
3665 +MODULE_LICENSE("GPL");
3666 --- /dev/null
3667 +++ b/drivers/mtd/nand/mtk_nand2.h
3668 @@ -0,0 +1,452 @@
3669 +#ifndef __MTK_NAND_H
3670 +#define __MTK_NAND_H
3671 +
3672 +#define RALINK_NAND_CTRL_BASE         0xBE003000
3673 +#define RALINK_SYSCTL_BASE            0xBE000000
3674 +#define RALINK_NANDECC_CTRL_BASE      0xBE003800
3675 +/*******************************************************************************
3676 + * NFI Register Definition 
3677 + *******************************************************************************/
3678 +
3679 +#define NFI_CNFG_REG16         ((volatile P_U16)(NFI_BASE+0x0000))
3680 +#define NFI_PAGEFMT_REG16   ((volatile P_U16)(NFI_BASE+0x0004))
3681 +#define NFI_CON_REG16          ((volatile P_U16)(NFI_BASE+0x0008))
3682 +#define NFI_ACCCON_REG32       ((volatile P_U32)(NFI_BASE+0x000C))
3683 +#define NFI_INTR_EN_REG16   ((volatile P_U16)(NFI_BASE+0x0010))
3684 +#define NFI_INTR_REG16      ((volatile P_U16)(NFI_BASE+0x0014))
3685 +
3686 +#define NFI_CMD_REG16          ((volatile P_U16)(NFI_BASE+0x0020))
3687 +
3688 +#define NFI_ADDRNOB_REG16   ((volatile P_U16)(NFI_BASE+0x0030))
3689 +#define NFI_COLADDR_REG32      ((volatile P_U32)(NFI_BASE+0x0034))
3690 +#define NFI_ROWADDR_REG32      ((volatile P_U32)(NFI_BASE+0x0038))
3691 +
3692 +#define NFI_STRDATA_REG16   ((volatile P_U16)(NFI_BASE+0x0040))
3693 +
3694 +#define NFI_DATAW_REG32        ((volatile P_U32)(NFI_BASE+0x0050))
3695 +#define NFI_DATAR_REG32        ((volatile P_U32)(NFI_BASE+0x0054))
3696 +#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058))
3697 +
3698 +#define NFI_STA_REG32          ((volatile P_U32)(NFI_BASE+0x0060))
3699 +#define NFI_FIFOSTA_REG16   ((volatile P_U16)(NFI_BASE+0x0064))
3700 +#define NFI_LOCKSTA_REG16   ((volatile P_U16)(NFI_BASE+0x0068))
3701 +
3702 +#define NFI_ADDRCNTR_REG16  ((volatile P_U16)(NFI_BASE+0x0070))
3703 +
3704 +#define NFI_STRADDR_REG32      ((volatile P_U32)(NFI_BASE+0x0080))
3705 +#define NFI_BYTELEN_REG16   ((volatile P_U16)(NFI_BASE+0x0084))
3706 +
3707 +#define NFI_CSEL_REG16      ((volatile P_U16)(NFI_BASE+0x0090))
3708 +#define NFI_IOCON_REG16     ((volatile P_U16)(NFI_BASE+0x0094))
3709 +
3710 +#define NFI_FDM0L_REG32        ((volatile P_U32)(NFI_BASE+0x00A0))
3711 +#define NFI_FDM0M_REG32        ((volatile P_U32)(NFI_BASE+0x00A4))
3712 +
3713 +#define NFI_LOCK_REG16         ((volatile P_U16)(NFI_BASE+0x0100))
3714 +#define NFI_LOCKCON_REG32      ((volatile P_U32)(NFI_BASE+0x0104))
3715 +#define NFI_LOCKANOB_REG16  ((volatile P_U16)(NFI_BASE+0x0108))
3716 +#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110))
3717 +#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114))
3718 +#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118))
3719 +#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C))
3720 +#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120))
3721 +#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124))
3722 +#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128))
3723 +#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C))
3724 +#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130))
3725 +#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134))
3726 +#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138))
3727 +#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C))
3728 +#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140))
3729 +#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144))
3730 +#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148))
3731 +#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C))
3732 +#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150))
3733 +#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154))
3734 +#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158))
3735 +#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C))
3736 +#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160))
3737 +#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164))
3738 +#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168))
3739 +#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C))
3740 +#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170))
3741 +#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174))
3742 +#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178))
3743 +#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C))
3744 +#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180))
3745 +#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184))
3746 +#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188))
3747 +#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C))
3748 +
3749 +#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190))
3750 +#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194))
3751 +#define NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198))
3752 +#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C))
3753 +#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210))
3754 +
3755 +
3756 +/*******************************************************************************
3757 + * NFI Register Field Definition 
3758 + *******************************************************************************/
3759 +
3760 +/* NFI_CNFG */
3761 +#define CNFG_AHB             (0x0001)
3762 +#define CNFG_READ_EN         (0x0002)
3763 +#define CNFG_DMA_BURST_EN    (0x0004)
3764 +#define CNFG_BYTE_RW         (0x0040)
3765 +#define CNFG_HW_ECC_EN       (0x0100)
3766 +#define CNFG_AUTO_FMT_EN     (0x0200)
3767 +#define CNFG_OP_IDLE         (0x0000)
3768 +#define CNFG_OP_READ         (0x1000)
3769 +#define CNFG_OP_SRD          (0x2000)
3770 +#define CNFG_OP_PRGM         (0x3000)
3771 +#define CNFG_OP_ERASE        (0x4000)
3772 +#define CNFG_OP_RESET        (0x5000)
3773 +#define CNFG_OP_CUST         (0x6000)
3774 +#define CNFG_OP_MODE_MASK    (0x7000)
3775 +#define CNFG_OP_MODE_SHIFT   (12)
3776 +
3777 +/* NFI_PAGEFMT */
3778 +#define PAGEFMT_512          (0x0000)
3779 +#define PAGEFMT_2K           (0x0001)
3780 +#define PAGEFMT_4K           (0x0002)
3781 +
3782 +#define PAGEFMT_PAGE_MASK    (0x0003)
3783 +
3784 +#define PAGEFMT_DBYTE_EN     (0x0008)
3785 +
3786 +#define PAGEFMT_SPARE_16     (0x0000)
3787 +#define PAGEFMT_SPARE_26     (0x0001)
3788 +#define PAGEFMT_SPARE_27     (0x0002)
3789 +#define PAGEFMT_SPARE_28     (0x0003)
3790 +#define PAGEFMT_SPARE_MASK   (0x0030)
3791 +#define PAGEFMT_SPARE_SHIFT  (4)
3792 +
3793 +#define PAGEFMT_FDM_MASK     (0x0F00)
3794 +#define PAGEFMT_FDM_SHIFT    (8)
3795 +
3796 +#define PAGEFMT_FDM_ECC_MASK  (0xF000)
3797 +#define PAGEFMT_FDM_ECC_SHIFT (12)
3798 +
3799 +/* NFI_CON */
3800 +#define CON_FIFO_FLUSH       (0x0001)
3801 +#define CON_NFI_RST          (0x0002)
3802 +#define CON_NFI_SRD          (0x0010)
3803 +
3804 +#define CON_NFI_NOB_MASK     (0x0060)
3805 +#define CON_NFI_NOB_SHIFT    (5)
3806 +
3807 +#define CON_NFI_BRD          (0x0100)
3808 +#define CON_NFI_BWR          (0x0200)
3809 +
3810 +#define CON_NFI_SEC_MASK     (0xF000)
3811 +#define CON_NFI_SEC_SHIFT    (12)
3812 +
3813 +/* NFI_ACCCON */
3814 +#define ACCCON_SETTING       ()
3815 +
3816 +/* NFI_INTR_EN */
3817 +#define INTR_RD_DONE_EN      (0x0001)
3818 +#define INTR_WR_DONE_EN      (0x0002)
3819 +#define INTR_RST_DONE_EN     (0x0004)
3820 +#define INTR_ERASE_DONE_EN   (0x0008)
3821 +#define INTR_BSY_RTN_EN      (0x0010)
3822 +#define INTR_ACC_LOCK_EN     (0x0020)
3823 +#define INTR_AHB_DONE_EN     (0x0040)
3824 +#define INTR_ALL_INTR_DE     (0x0000)
3825 +#define INTR_ALL_INTR_EN     (0x007F)
3826 +
3827 +/* NFI_INTR */
3828 +#define INTR_RD_DONE         (0x0001)
3829 +#define INTR_WR_DONE         (0x0002)
3830 +#define INTR_RST_DONE        (0x0004)
3831 +#define INTR_ERASE_DONE      (0x0008)
3832 +#define INTR_BSY_RTN         (0x0010)
3833 +#define INTR_ACC_LOCK        (0x0020)
3834 +#define INTR_AHB_DONE        (0x0040)
3835 +
3836 +/* NFI_ADDRNOB */
3837 +#define ADDR_COL_NOB_MASK    (0x0003)
3838 +#define ADDR_COL_NOB_SHIFT   (0)
3839 +#define ADDR_ROW_NOB_MASK    (0x0030)
3840 +#define ADDR_ROW_NOB_SHIFT   (4)
3841 +
3842 +/* NFI_STA */
3843 +#define STA_READ_EMPTY       (0x00001000)
3844 +#define STA_ACC_LOCK         (0x00000010)
3845 +#define STA_CMD_STATE        (0x00000001)
3846 +#define STA_ADDR_STATE       (0x00000002)
3847 +#define STA_DATAR_STATE      (0x00000004)
3848 +#define STA_DATAW_STATE      (0x00000008)
3849 +
3850 +#define STA_NAND_FSM_MASK    (0x1F000000)
3851 +#define STA_NAND_BUSY        (0x00000100)
3852 +#define STA_NAND_BUSY_RETURN (0x00000200)
3853 +#define STA_NFI_FSM_MASK     (0x000F0000)
3854 +#define STA_NFI_OP_MASK      (0x0000000F)
3855 +
3856 +/* NFI_FIFOSTA */
3857 +#define FIFO_RD_EMPTY        (0x0040)
3858 +#define FIFO_RD_FULL         (0x0080)
3859 +#define FIFO_WR_FULL         (0x8000)
3860 +#define FIFO_WR_EMPTY        (0x4000)
3861 +#define FIFO_RD_REMAIN(x)    (0x1F&(x))
3862 +#define FIFO_WR_REMAIN(x)    ((0x1F00&(x))>>8)
3863 +
3864 +/* NFI_ADDRCNTR */
3865 +#define ADDRCNTR_CNTR(x)     ((0xF000&(x))>>12)
3866 +#define ADDRCNTR_OFFSET(x)   (0x03FF&(x))
3867 +
3868 +/* NFI_LOCK */
3869 +#define NFI_LOCK_ON          (0x0001)
3870 +
3871 +/* NFI_LOCKANOB */
3872 +#define PROG_RADD_NOB_MASK   (0x7000)
3873 +#define PROG_RADD_NOB_SHIFT  (12)
3874 +#define PROG_CADD_NOB_MASK   (0x0300)
3875 +#define PROG_CADD_NOB_SHIFT  (8)
3876 +#define ERASE_RADD_NOB_MASK   (0x0070)
3877 +#define ERASE_RADD_NOB_SHIFT  (4)
3878 +#define ERASE_CADD_NOB_MASK   (0x0007)
3879 +#define ERASE_CADD_NOB_SHIFT  (0)
3880 +
3881 +/*******************************************************************************
3882 + * ECC Register Definition 
3883 + *******************************************************************************/
3884 +
3885 +#define ECC_ENCCON_REG16       ((volatile P_U16)(NFIECC_BASE+0x0000))
3886 +#define ECC_ENCCNFG_REG32      ((volatile P_U32)(NFIECC_BASE+0x0004))
3887 +#define ECC_ENCDIADDR_REG32    ((volatile P_U32)(NFIECC_BASE+0x0008))
3888 +#define ECC_ENCIDLE_REG32      ((volatile P_U32)(NFIECC_BASE+0x000C))
3889 +#define ECC_ENCPAR0_REG32   ((volatile P_U32)(NFIECC_BASE+0x0010))
3890 +#define ECC_ENCPAR1_REG32   ((volatile P_U32)(NFIECC_BASE+0x0014))
3891 +#define ECC_ENCPAR2_REG32   ((volatile P_U32)(NFIECC_BASE+0x0018))
3892 +#define ECC_ENCPAR3_REG32   ((volatile P_U32)(NFIECC_BASE+0x001C))
3893 +#define ECC_ENCPAR4_REG32   ((volatile P_U32)(NFIECC_BASE+0x0020))
3894 +#define ECC_ENCSTA_REG32    ((volatile P_U32)(NFIECC_BASE+0x0024))
3895 +#define ECC_ENCIRQEN_REG16  ((volatile P_U16)(NFIECC_BASE+0x0028))
3896 +#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C))
3897 +
3898 +#define ECC_DECCON_REG16    ((volatile P_U16)(NFIECC_BASE+0x0100))
3899 +#define ECC_DECCNFG_REG32   ((volatile P_U32)(NFIECC_BASE+0x0104))
3900 +#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108))
3901 +#define ECC_DECIDLE_REG16   ((volatile P_U16)(NFIECC_BASE+0x010C))
3902 +#define ECC_DECFER_REG16    ((volatile P_U16)(NFIECC_BASE+0x0110))
3903 +#define ECC_DECENUM_REG32   ((volatile P_U32)(NFIECC_BASE+0x0114))
3904 +#define ECC_DECDONE_REG16   ((volatile P_U16)(NFIECC_BASE+0x0118))
3905 +#define ECC_DECEL0_REG32    ((volatile P_U32)(NFIECC_BASE+0x011C))
3906 +#define ECC_DECEL1_REG32    ((volatile P_U32)(NFIECC_BASE+0x0120))
3907 +#define ECC_DECEL2_REG32    ((volatile P_U32)(NFIECC_BASE+0x0124))
3908 +#define ECC_DECEL3_REG32    ((volatile P_U32)(NFIECC_BASE+0x0128))
3909 +#define ECC_DECEL4_REG32    ((volatile P_U32)(NFIECC_BASE+0x012C))
3910 +#define ECC_DECEL5_REG32    ((volatile P_U32)(NFIECC_BASE+0x0130))
3911 +#define ECC_DECIRQEN_REG16  ((volatile P_U16)(NFIECC_BASE+0x0134))
3912 +#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0138))
3913 +#define ECC_FDMADDR_REG32   ((volatile P_U32)(NFIECC_BASE+0x013C))
3914 +#define ECC_DECFSM_REG32    ((volatile P_U32)(NFIECC_BASE+0x0140))
3915 +#define ECC_SYNSTA_REG32    ((volatile P_U32)(NFIECC_BASE+0x0144))
3916 +#define ECC_DECNFIDI_REG32  ((volatile P_U32)(NFIECC_BASE+0x0148))
3917 +#define ECC_SYN0_REG32      ((volatile P_U32)(NFIECC_BASE+0x014C))
3918 +
3919 +/*******************************************************************************
3920 + * ECC register definition
3921 + *******************************************************************************/
3922 +/* ECC_ENCON */
3923 +#define ENC_EN                         (0x0001)
3924 +#define ENC_DE                         (0x0000)
3925 +
3926 +/* ECC_ENCCNFG */
3927 +#define ECC_CNFG_ECC4                  (0x0000)
3928 +#define ECC_CNFG_ECC6                  (0x0001)
3929 +#define ECC_CNFG_ECC8                  (0x0002)
3930 +#define ECC_CNFG_ECC10                 (0x0003)
3931 +#define ECC_CNFG_ECC12                 (0x0004)
3932 +#define ECC_CNFG_ECC_MASK              (0x00000007)
3933 +
3934 +#define ENC_CNFG_NFI                   (0x0010)
3935 +#define ENC_CNFG_MODE_MASK             (0x0010)
3936 +
3937 +#define ENC_CNFG_META6                 (0x10300000)
3938 +#define ENC_CNFG_META8                 (0x10400000)
3939 +
3940 +#define ENC_CNFG_MSG_MASK              (0x1FFF0000)
3941 +#define ENC_CNFG_MSG_SHIFT             (0x10)
3942 +
3943 +/* ECC_ENCIDLE */
3944 +#define ENC_IDLE                       (0x0001)
3945 +
3946 +/* ECC_ENCSTA */
3947 +#define STA_FSM                        (0x001F)
3948 +#define STA_COUNT_PS                   (0xFF10)
3949 +#define STA_COUNT_MS                   (0x3FFF0000)
3950 +
3951 +/* ECC_ENCIRQEN */
3952 +#define ENC_IRQEN                      (0x0001)
3953 +
3954 +/* ECC_ENCIRQSTA */
3955 +#define ENC_IRQSTA                     (0x0001)
3956 +
3957 +/* ECC_DECCON */
3958 +#define DEC_EN                         (0x0001)
3959 +#define DEC_DE                         (0x0000)
3960 +
3961 +/* ECC_ENCCNFG */
3962 +#define DEC_CNFG_ECC4          (0x0000)
3963 +//#define DEC_CNFG_ECC6          (0x0001)
3964 +//#define DEC_CNFG_ECC12         (0x0002)
3965 +#define DEC_CNFG_NFI           (0x0010)
3966 +//#define DEC_CNFG_META6         (0x10300000)
3967 +//#define DEC_CNFG_META8         (0x10400000)
3968 +
3969 +#define DEC_CNFG_FER           (0x01000)
3970 +#define DEC_CNFG_EL            (0x02000)
3971 +#define DEC_CNFG_CORRECT       (0x03000)
3972 +#define DEC_CNFG_TYPE_MASK     (0x03000)
3973 +
3974 +#define DEC_CNFG_EMPTY_EN      (0x80000000)
3975 +
3976 +#define DEC_CNFG_CODE_MASK     (0x1FFF0000)
3977 +#define DEC_CNFG_CODE_SHIFT    (0x10)
3978 +
3979 +/* ECC_DECIDLE */
3980 +#define DEC_IDLE                       (0x0001)
3981 +
3982 +/* ECC_DECFER */
3983 +#define DEC_FER0               (0x0001)
3984 +#define DEC_FER1               (0x0002)
3985 +#define DEC_FER2               (0x0004)
3986 +#define DEC_FER3               (0x0008)
3987 +#define DEC_FER4               (0x0010)
3988 +#define DEC_FER5               (0x0020)
3989 +#define DEC_FER6               (0x0040)
3990 +#define DEC_FER7               (0x0080)
3991 +
3992 +/* ECC_DECENUM */
3993 +#define ERR_NUM0               (0x0000000F)
3994 +#define ERR_NUM1               (0x000000F0)
3995 +#define ERR_NUM2               (0x00000F00)
3996 +#define ERR_NUM3               (0x0000F000)
3997 +#define ERR_NUM4               (0x000F0000)
3998 +#define ERR_NUM5               (0x00F00000)
3999 +#define ERR_NUM6               (0x0F000000)
4000 +#define ERR_NUM7               (0xF0000000)
4001 +
4002 +/* ECC_DECDONE */
4003 +#define DEC_DONE0               (0x0001)
4004 +#define DEC_DONE1               (0x0002)
4005 +#define DEC_DONE2               (0x0004)
4006 +#define DEC_DONE3               (0x0008)
4007 +#define DEC_DONE4               (0x0010)
4008 +#define DEC_DONE5               (0x0020)
4009 +#define DEC_DONE6               (0x0040)
4010 +#define DEC_DONE7               (0x0080)
4011 +
4012 +/* ECC_DECIRQEN */
4013 +#define DEC_IRQEN                      (0x0001)
4014 +
4015 +/* ECC_DECIRQSTA */
4016 +#define DEC_IRQSTA                     (0x0001)
4017 +
4018 +#define CHIPVER_ECO_1           (0x8a00)
4019 +#define CHIPVER_ECO_2           (0x8a01)
4020 +
4021 +//#define NAND_PFM
4022 +
4023 +/*******************************************************************************
4024 + * Data Structure Definition
4025 + *******************************************************************************/
4026 +struct mtk_nand_host 
4027 +{
4028 +       struct nand_chip                nand_chip;
4029 +       struct mtd_info                 *mtd;
4030 +       struct mtk_nand_host_hw *hw;
4031 +};
4032 +
4033 +struct NAND_CMD
4034 +{
4035 +       u32     u4ColAddr;
4036 +       u32 u4RowAddr;
4037 +       u32 u4OOBRowAddr;
4038 +       u8      au1OOB[288];
4039 +       u8*     pDataBuf;
4040 +#ifdef NAND_PFM        
4041 +       u32 pureReadOOB;
4042 +       u32 pureReadOOBNum;
4043 +#endif
4044 +};
4045 +
4046 +/*
4047 + *     ECC layout control structure. Exported to userspace for
4048 + *  diagnosis and to allow creation of raw images
4049 +struct nand_ecclayout {
4050 +       uint32_t eccbytes;
4051 +       uint32_t eccpos[64];
4052 +       uint32_t oobavail;
4053 +       struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES];
4054 +};
4055 +*/
4056 +#define __DEBUG_NAND           1                       /* Debug information on/off */
4057 +
4058 +/* Debug message event */
4059 +#define DBG_EVT_NONE           0x00000000      /* No event */
4060 +#define DBG_EVT_INIT           0x00000001      /* Initial related event */
4061 +#define DBG_EVT_VERIFY         0x00000002      /* Verify buffer related event */
4062 +#define DBG_EVT_PERFORMANCE    0x00000004      /* Performance related event */
4063 +#define DBG_EVT_READ           0x00000008      /* Read related event */
4064 +#define DBG_EVT_WRITE          0x00000010      /* Write related event */
4065 +#define DBG_EVT_ERASE          0x00000020      /* Erase related event */
4066 +#define DBG_EVT_BADBLOCK       0x00000040      /* Badblock related event */
4067 +#define DBG_EVT_POWERCTL       0x00000080      /* Suspend/Resume related event */
4068 +
4069 +#define DBG_EVT_ALL                    0xffffffff
4070 +
4071 +#define DBG_EVT_MASK           (DBG_EVT_INIT)
4072 +
4073 +#if __DEBUG_NAND
4074 +#define MSG(evt, fmt, args...) \
4075 +do {   \
4076 +       if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \
4077 +               printk(fmt, ##args); \
4078 +       } \
4079 +} while(0)
4080 +
4081 +#define MSG_FUNC_ENTRY(f)      MSG(FUC, "<FUN_ENT>: %s\n", __FUNCTION__)
4082 +#else
4083 +#define MSG(evt, fmt, args...) do{}while(0)
4084 +#define MSG_FUNC_ENTRY(f)         do{}while(0)
4085 +#endif
4086 +
4087 +#define RAMDOM_READ 1<<0
4088 +#define CACHE_READ  1<<1
4089 +
4090 +typedef struct
4091 +{
4092 +   u16 id;          //device id + manufacturer id
4093 +   u32 ext_id; 
4094 +   u8  addr_cycle;
4095 +   u8  iowidth;
4096 +   u16 totalsize;   
4097 +   u16 blocksize;
4098 +   u16 pagesize;
4099 +   u16 sparesize;
4100 +   u32 timmingsetting;
4101 +   char devciename[14];
4102 +   u32 advancedmode;   //
4103 +}flashdev_info,*pflashdev_info;
4104 +
4105 +/* NAND driver */
4106 +#if 0
4107 +struct mtk_nand_host_hw {
4108 +    unsigned int nfi_bus_width;                    /* NFI_BUS_WIDTH */ 
4109 +       unsigned int nfi_access_timing;         /* NFI_ACCESS_TIMING */  
4110 +       unsigned int nfi_cs_num;                        /* NFI_CS_NUM */
4111 +       unsigned int nand_sec_size;                     /* NAND_SECTOR_SIZE */
4112 +       unsigned int nand_sec_shift;            /* NAND_SECTOR_SHIFT */
4113 +       unsigned int nand_ecc_size;
4114 +       unsigned int nand_ecc_bytes;
4115 +       unsigned int nand_ecc_mode;
4116 +};
4117 +extern struct mtk_nand_host_hw mt7621_nand_hw;
4118 +extern u32     CFG_BLOCKSIZE;
4119 +#endif
4120 +#endif
4121 --- a/drivers/mtd/nand/nand_base.c
4122 +++ b/drivers/mtd/nand/nand_base.c
4123 @@ -48,7 +48,7 @@
4124  #include <linux/mtd/partitions.h>
4125  #include <linux/of.h>
4126  
4127 -static int nand_get_device(struct mtd_info *mtd, int new_state);
4128 +int nand_get_device(struct mtd_info *mtd, int new_state);
4129  
4130  static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
4131                              struct mtd_oob_ops *ops);
4132 @@ -240,7 +240,7 @@ static int check_offs_len(struct mtd_inf
4133   *
4134   * Release chip lock and wake up anyone waiting on the device.
4135   */
4136 -static void nand_release_device(struct mtd_info *mtd)
4137 +void nand_release_device(struct mtd_info *mtd)
4138  {
4139         struct nand_chip *chip = mtd_to_nand(mtd);
4140  
4141 @@ -968,7 +968,7 @@ static void panic_nand_get_device(struct
4142   *
4143   * Get the device and lock it for exclusive access
4144   */
4145 -static int
4146 +int
4147  nand_get_device(struct mtd_info *mtd, int new_state)
4148  {
4149         struct nand_chip *chip = mtd_to_nand(mtd);
4150 --- a/drivers/mtd/nand/nand_bbt.c
4151 +++ b/drivers/mtd/nand/nand_bbt.c
4152 @@ -1215,6 +1215,25 @@ err:
4153         return res;
4154  }
4155  
4156 +void nand_bbt_set(struct mtd_info *mtd, int page, int flag)
4157 +{
4158 +       struct nand_chip *this = mtd->priv;
4159 +       int block;
4160 +
4161 +       block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
4162 +       this->bbt[block >> 3] &= ~(0x03 << (block & 0x6));
4163 +       this->bbt[block >> 3] |= (flag & 0x3) << (block & 0x6);
4164 +}
4165 +
4166 +int nand_bbt_get(struct mtd_info *mtd, int page)
4167 +{
4168 +       struct nand_chip *this = mtd->priv;
4169 +       int block;
4170 +
4171 +       block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
4172 +       return (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
4173 +}
4174 +
4175  /**
4176   * nand_update_bbt - update bad block table(s)
4177   * @mtd: MTD device structure
4178 --- /dev/null
4179 +++ b/drivers/mtd/nand/nand_def.h
4180 @@ -0,0 +1,123 @@
4181 +#ifndef __NAND_DEF_H__
4182 +#define __NAND_DEF_H__
4183 +
4184 +#define VERSION        "v2.1 Fix AHB virt2phys error"
4185 +#define MODULE_NAME    "# MTK NAND #"
4186 +#define PROCNAME    "driver/nand"
4187 +
4188 +#undef TESTTIME
4189 +//#define __UBOOT_NAND__                       1
4190 +#define __KERNEL_NAND__                1
4191 +//#define __PRELOADER_NAND__   1
4192 +//#define PMT 1
4193 +//#define _MTK_NAND_DUMMY_DRIVER
4194 +//#define CONFIG_BADBLOCK_CHECK        1
4195 +//#ifdef CONFIG_BADBLOCK_CHECK
4196 +//#define MTK_NAND_BMT 1
4197 +//#endif
4198 +#define ECC_ENABLE             1
4199 +#define MANUAL_CORRECT 1
4200 +//#define __INTERNAL_USE_AHB_MODE__    (0)
4201 +#define SKIP_BAD_BLOCK
4202 +#define FACT_BBT
4203 +
4204 +#ifndef NAND_OTP_SUPPORT
4205 +#define NAND_OTP_SUPPORT 0
4206 +#endif
4207 +
4208 +/*******************************************************************************
4209 + * Macro definition 
4210 + *******************************************************************************/
4211 +//#define NFI_SET_REG32(reg, value)   (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value))) 
4212 +//#define NFI_SET_REG16(reg, value)   (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value)))
4213 +//#define NFI_CLN_REG32(reg, value)   (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value))))
4214 +//#define NFI_CLN_REG16(reg, value)   (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value))))
4215 +
4216 +#if defined (__KERNEL_NAND__)
4217 +#define NFI_SET_REG32(reg, value) \
4218 +do {   \
4219 +       g_value = (DRV_Reg32(reg) | (value));\
4220 +       DRV_WriteReg32(reg, g_value); \
4221 +} while(0)
4222 +
4223 +#define NFI_SET_REG16(reg, value) \
4224 +do {   \
4225 +       g_value = (DRV_Reg16(reg) | (value));\
4226 +       DRV_WriteReg16(reg, g_value); \
4227 +} while(0)
4228 +
4229 +#define NFI_CLN_REG32(reg, value) \
4230 +do {   \
4231 +       g_value = (DRV_Reg32(reg) & (~(value)));\
4232 +       DRV_WriteReg32(reg, g_value); \
4233 +} while(0)
4234 +
4235 +#define NFI_CLN_REG16(reg, value) \
4236 +do {   \
4237 +       g_value = (DRV_Reg16(reg) & (~(value)));\
4238 +       DRV_WriteReg16(reg, g_value); \
4239 +} while(0)
4240 +#endif
4241 +
4242 +#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state)
4243 +#define NFI_WAIT_TO_READY()  do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
4244 +
4245 +
4246 +#define NAND_SECTOR_SIZE (512)
4247 +#define OOB_PER_SECTOR      (16)
4248 +#define OOB_AVAI_PER_SECTOR (8)
4249 +
4250 +#ifndef PART_SIZE_BMTPOOL
4251 +#define BMT_POOL_SIZE       (80)
4252 +#else
4253 +#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
4254 +#endif
4255 +
4256 +#define PMT_POOL_SIZE  (2)
4257 +
4258 +#define TIMEOUT_1   0x1fff
4259 +#define TIMEOUT_2   0x8ff
4260 +#define TIMEOUT_3   0xffff
4261 +#define TIMEOUT_4   0xffff//5000   //PIO
4262 +
4263 +
4264 +/* temporary definitions */
4265 +#if !defined (__KERNEL_NAND__) 
4266 +#define KERN_INFO
4267 +#define KERN_WARNING
4268 +#define KERN_ERR
4269 +#define PAGE_SIZE      (4096)
4270 +#endif
4271 +#define AddStorageTrace                                //AddStorageTrace
4272 +#define STORAGE_LOGGER_MSG_NAND                0
4273 +#define NFI_BASE                                       RALINK_NAND_CTRL_BASE
4274 +#define NFIECC_BASE                            RALINK_NANDECC_CTRL_BASE
4275 +
4276 +#ifdef __INTERNAL_USE_AHB_MODE__
4277 +#define MT65xx_POLARITY_LOW   0
4278 +#define MT65XX_PDN_PERI_NFI   0
4279 +#define MT65xx_EDGE_SENSITIVE 0
4280 +#define MT6575_NFI_IRQ_ID                    (58)
4281 +#endif
4282 +
4283 +#if defined (__KERNEL_NAND__)
4284 +#define RALINK_REG(x)          (*((volatile u32 *)(x)))        
4285 +#define __virt_to_phys(x)      virt_to_phys((volatile void*)x)
4286 +#else
4287 +#define CONFIG_MTD_NAND_VERIFY_WRITE   (1)
4288 +#define printk printf
4289 +#define ra_dbg printf
4290 +#define BUG()                                                  //BUG()
4291 +#define BUG_ON(x)                                              //BUG_ON()
4292 +#define NUM_PARTITIONS                                 1
4293 +#endif
4294 +
4295 +#define NFI_DEFAULT_ACCESS_TIMING        (0x30C77fff)  //(0x44333)
4296 +
4297 +//U-Boot only supports 1 chip select (CS)
4298 +#define NFI_CS_NUM                  (1)
4299 +#define NFI_DEFAULT_CS              (0)
4300 +
4301 +#include "mt6575_typedefs.h"
4302 +
4303 +#endif /* __NAND_DEF_H__ */
4304 --- /dev/null
4305 +++ b/drivers/mtd/nand/nand_device_list.h
4306 @@ -0,0 +1,56 @@
4307 +/* Copyright Statement:
4308 + *
4309 + * This software/firmware and related documentation ("MediaTek Software") are
4310 + * protected under relevant copyright laws. The information contained herein
4311 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
4312 + * Without the prior written permission of MediaTek inc. and/or its licensors,
4313 + * any reproduction, modification, use or disclosure of MediaTek Software,
4314 + * and information contained herein, in whole or in part, shall be strictly prohibited.
4315 + */
4316 +/* MediaTek Inc. (C) 2010. All rights reserved.
4317 + *
4318 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
4319 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
4320 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
4321 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
4322 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
4323 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
4324 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
4325 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
4326 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
4327 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
4328 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
4329 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
4330 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
4331 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
4332 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
4333 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
4334 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
4335 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
4336 + *
4337 + * The following software/firmware and/or related documentation ("MediaTek Software")
4338 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
4339 + * applicable license agreements with MediaTek Inc.
4340 + */
4341 +
4342 +#ifndef __NAND_DEVICE_LIST_H__
4343 +#define __NAND_DEVICE_LIST_H__
4344 +
4345 +static const flashdev_info gen_FlashTable[]={
4346 +       {0x20BC, 0x105554, 5, 16, 512, 128, 2048, 64, 0x1123, "EHD013151MA_5", 0},
4347 +       {0xECBC, 0x005554, 5, 16, 512, 128, 2048, 64, 0x1123, "K524G2GACB_A0", 0},
4348 +       {0x2CBC, 0x905556, 5, 16, 512, 128, 2048, 64, 0x21044333, "MT29C4G96MAZA", 0},
4349 +       {0x2CDA, 0x909506, 5, 8,  256, 128, 2048, 64, 0x30C77fff, "MT29F2G08ABAE", 0},
4350 +       {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0},
4351 +    {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0},
4352 +    {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0},
4353 +       {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0},
4354 +    {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0},
4355 +    {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0},
4356 +    {0x01DA, 0x909546, 5, 8, 256, 128, 2048, 128, 0x30C77fff, "S34ML02G200TF", 0},
4357 +    {0x01DC, 0x909556, 5, 8, 512, 128, 2048, 128, 0x30C77fff, "S34ML04G200TF", 0},
4358 +       {0x0000, 0x000000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxx", 0},
4359 +};
4360 +
4361 +
4362 +#endif
4363 --- /dev/null
4364 +++ b/drivers/mtd/nand/partition.h
4365 @@ -0,0 +1,115 @@
4366 +/* Copyright Statement:
4367 + *
4368 + * This software/firmware and related documentation ("MediaTek Software") are
4369 + * protected under relevant copyright laws. The information contained herein
4370 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
4371 + * Without the prior written permission of MediaTek inc. and/or its licensors,
4372 + * any reproduction, modification, use or disclosure of MediaTek Software,
4373 + * and information contained herein, in whole or in part, shall be strictly prohibited.
4374 + */
4375 +/* MediaTek Inc. (C) 2010. All rights reserved.
4376 + *
4377 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
4378 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
4379 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
4380 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
4381 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
4382 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
4383 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
4384 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
4385 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
4386 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
4387 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
4388 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
4389 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
4390 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
4391 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
4392 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
4393 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
4394 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
4395 + *
4396 + * The following software/firmware and/or related documentation ("MediaTek Software")
4397 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
4398 + * applicable license agreements with MediaTek Inc.
4399 + */
4400 +
4401 +#include <linux/mtd/mtd.h>
4402 +#include <linux/mtd/rawnand.h>
4403 +#include <linux/mtd/partitions.h>
4404 +
4405 +#define RECONFIG_PARTITION_SIZE 1
4406 +
4407 +#define MTD_BOOT_PART_SIZE  0x80000
4408 +#define MTD_CONFIG_PART_SIZE    0x20000
4409 +#define MTD_FACTORY_PART_SIZE   0x20000
4410 +
4411 +extern unsigned int  CFG_BLOCKSIZE;
4412 +#define LARGE_MTD_BOOT_PART_SIZE       (CFG_BLOCKSIZE<<2)
4413 +#define LARGE_MTD_CONFIG_PART_SIZE     (CFG_BLOCKSIZE<<2)
4414 +#define LARGE_MTD_FACTORY_PART_SIZE    (CFG_BLOCKSIZE<<1)
4415 +
4416 +/*=======================================================================*/
4417 +/* NAND PARTITION Mapping                                                  */
4418 +/*=======================================================================*/
4419 +//#ifdef CONFIG_MTD_PARTITIONS
4420 +static struct mtd_partition g_pasStatic_Partition[] = {
4421 +       {
4422 +                name:           "ALL",
4423 +                size:           MTDPART_SIZ_FULL,
4424 +                offset:         0,
4425 +        },
4426 +        /* Put your own partition definitions here */
4427 +        {
4428 +                name:           "Bootloader",
4429 +                size:           MTD_BOOT_PART_SIZE,
4430 +                offset:         0,
4431 +        }, {
4432 +                name:           "Config",
4433 +                size:           MTD_CONFIG_PART_SIZE,
4434 +                offset:         MTDPART_OFS_APPEND
4435 +        }, {
4436 +                name:           "Factory",
4437 +                size:           MTD_FACTORY_PART_SIZE,
4438 +                offset:         MTDPART_OFS_APPEND
4439 +#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
4440 +        }, {
4441 +                name:           "Kernel",
4442 +                size:           MTD_KERN_PART_SIZE,
4443 +                offset:         MTDPART_OFS_APPEND,
4444 +        }, {
4445 +                name:           "RootFS",
4446 +                size:           MTD_ROOTFS_PART_SIZE,
4447 +                offset:         MTDPART_OFS_APPEND,
4448 +#ifdef CONFIG_ROOTFS_IN_FLASH_NO_PADDING
4449 +        }, {
4450 +                name:           "Kernel_RootFS",
4451 +                size:           MTD_KERN_PART_SIZE + MTD_ROOTFS_PART_SIZE,
4452 +                offset:         MTD_BOOT_PART_SIZE + MTD_CONFIG_PART_SIZE + MTD_FACTORY_PART_SIZE,
4453 +#endif
4454 +#else //CONFIG_RT2880_ROOTFS_IN_RAM
4455 +        }, {
4456 +                name:           "Kernel",
4457 +                size:           0x10000,
4458 +                offset:         MTDPART_OFS_APPEND,
4459 +#endif
4460 +#ifdef CONFIG_DUAL_IMAGE
4461 +        }, {
4462 +                name:           "Kernel2",
4463 +                size:           MTD_KERN2_PART_SIZE,
4464 +                offset:         MTD_KERN2_PART_OFFSET,
4465 +#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
4466 +        }, {
4467 +                name:           "RootFS2",
4468 +                size:           MTD_ROOTFS2_PART_SIZE,
4469 +                offset:         MTD_ROOTFS2_PART_OFFSET,
4470 +#endif
4471 +#endif
4472 +        }
4473 +
4474 +};
4475 +
4476 +#define NUM_PARTITIONS ARRAY_SIZE(g_pasStatic_Partition)
4477 +extern int part_num;   // = NUM_PARTITIONS;
4478 +//#endif
4479 +#undef RECONFIG_PARTITION_SIZE
4480 +