/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>
#include <asm/div64.h>
#else
#include <div64.h>
#include <linux/compat.h>
#endif

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <ubi_uboot.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects,
 * which is allocated along with this structure.
 */
struct mtd_concat {
        struct mtd_info mtd;
        int num_subdev;
        struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)    \
        ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))

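/*
 * For example (illustrative only): with num_subdev == 2 the allocation is
 * one contiguous block,
 *
 *   [ struct mtd_concat | subdev[0] ptr | subdev[1] ptr ]
 *
 * and mtd_concat_create() below points concat->subdev just past the
 * structure itself.
 */
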
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 * (The cast is valid because the mtd field is the first member of
 * struct mtd_concat.)
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

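/*
 * For example (illustrative numbers): with two 1 MiB subdevices, a read at
 * offset 0x180000 is resolved by subtracting subdev[0]'s size, leaving
 * offset 0x80000 within subdev[1]; requests that span a subdevice boundary
 * are split across the affected subdevices.
 */
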
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
            size_t *retlen, u_char *buf)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int ret = 0, err;
        int i;

#ifdef __UBOOT__
        *retlen = 0;
#endif

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size, retsize;

                if (from >= subdev->size) {
                        /* Not destined for this subdev */
                        size = 0;
                        from -= subdev->size;
                        continue;
                }
                if (from + len > subdev->size)
                        /* First part goes into this subdev */
                        size = subdev->size - from;
                else
                        /* Entire transaction goes into this subdev */
                        size = len;

                err = mtd_read(subdev, from, size, &retsize, buf);

                /* Save information about bitflips! */
                if (unlikely(err)) {
                        if (mtd_is_eccerr(err)) {
                                mtd->ecc_stats.failed++;
                                ret = err;
                        } else if (mtd_is_bitflip(err)) {
                                mtd->ecc_stats.corrected++;
                                /* Do not overwrite -EBADMSG !! */
                                if (!ret)
                                        ret = err;
                        } else
                                return err;
                }

                *retlen += retsize;
                len -= size;
                if (len == 0)
                        return ret;

                buf += size;
                from = 0;
        }
        return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
             size_t *retlen, const u_char *buf)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int err = -EINVAL;
        int i;

#ifdef __UBOOT__
        *retlen = 0;
#endif

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size, retsize;

                if (to >= subdev->size) {
                        size = 0;
                        to -= subdev->size;
                        continue;
                }
                if (to + len > subdev->size)
                        size = subdev->size - to;
                else
                        size = len;

                err = mtd_write(subdev, to, size, &retsize, buf);
                if (err)
                        break;

                *retlen += retsize;
                len -= size;
                if (len == 0)
                        break;

                err = -EINVAL;
                buf += size;
                to = 0;
        }
        return err;
}

#ifndef __UBOOT__
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
              unsigned long count, loff_t to, size_t *retlen)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct kvec *vecs_copy;
        unsigned long entry_low, entry_high;
        size_t total_len = 0;
        int i;
        int err = -EINVAL;

        /* Calculate total length of data */
        for (i = 0; i < count; i++)
                total_len += vecs[i].iov_len;

        /* Check alignment */
        if (mtd->writesize > 1) {
                uint64_t __to = to;
                if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
                        return -EINVAL;
        }

        /* make a copy of vecs */
        vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
        if (!vecs_copy)
                return -ENOMEM;

        entry_low = 0;
        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                size_t size, wsize, retsize, old_iov_len;

                if (to >= subdev->size) {
                        to -= subdev->size;
                        continue;
                }

                size = min_t(uint64_t, total_len, subdev->size - to);
                wsize = size; /* store for future use */

                entry_high = entry_low;
                while (entry_high < count) {
                        if (size <= vecs_copy[entry_high].iov_len)
                                break;
                        size -= vecs_copy[entry_high++].iov_len;
                }

                old_iov_len = vecs_copy[entry_high].iov_len;
                vecs_copy[entry_high].iov_len = size;

                err = mtd_writev(subdev, &vecs_copy[entry_low],
                                 entry_high - entry_low + 1, to, &retsize);

                vecs_copy[entry_high].iov_len = old_iov_len - size;
                vecs_copy[entry_high].iov_base += size;

                entry_low = entry_high;

                if (err)
                        break;

                *retlen += retsize;
                total_len -= wsize;

                if (total_len == 0)
                        break;

                err = -EINVAL;
                to = 0;
        }

        kfree(vecs_copy);
        return err;
}
#endif

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_oob_ops devops = *ops;
        int i, err, ret = 0;

        ops->retlen = ops->oobretlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (from >= subdev->size) {
                        from -= subdev->size;
                        continue;
                }

                /* partial read ? */
                if (from + devops.len > subdev->size)
                        devops.len = subdev->size - from;

                err = mtd_read_oob(subdev, from, &devops);
                ops->retlen += devops.retlen;
                ops->oobretlen += devops.oobretlen;

                /* Save information about bitflips! */
                if (unlikely(err)) {
                        if (mtd_is_eccerr(err)) {
                                mtd->ecc_stats.failed++;
                                ret = err;
                        } else if (mtd_is_bitflip(err)) {
                                mtd->ecc_stats.corrected++;
                                /* Do not overwrite -EBADMSG !! */
                                if (!ret)
                                        ret = err;
                        } else
                                return err;
                }

                if (devops.datbuf) {
                        devops.len = ops->len - ops->retlen;
                        if (!devops.len)
                                return ret;
                        devops.datbuf += devops.retlen;
                }
                if (devops.oobbuf) {
                        devops.ooblen = ops->ooblen - ops->oobretlen;
                        if (!devops.ooblen)
                                return ret;
                        devops.oobbuf += ops->oobretlen;
                }

                from = 0;
        }
        return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_oob_ops devops = *ops;
        int i, err;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        ops->retlen = ops->oobretlen = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (to >= subdev->size) {
                        to -= subdev->size;
                        continue;
                }

                /* partial write ? */
                if (to + devops.len > subdev->size)
                        devops.len = subdev->size - to;

                err = mtd_write_oob(subdev, to, &devops);
                /*
                 * Account data and OOB separately; both counts are used
                 * below to decide whether the request is complete.
                 */
                ops->retlen += devops.retlen;
                ops->oobretlen += devops.oobretlen;
                if (err)
                        return err;

                if (devops.datbuf) {
                        devops.len = ops->len - ops->retlen;
                        if (!devops.len)
                                return 0;
                        devops.datbuf += devops.retlen;
                }
                if (devops.oobbuf) {
                        devops.ooblen = ops->ooblen - ops->oobretlen;
                        if (!devops.ooblen)
                                return 0;
                        devops.oobbuf += devops.oobretlen;
                }
                to = 0;
        }
        return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
        /* Nothing to do here in U-Boot */
#ifndef __UBOOT__
        wake_up((wait_queue_head_t *) instr->priv);
#endif
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
        int err;
        wait_queue_head_t waitq;
        DECLARE_WAITQUEUE(wait, current);

        /*
         * This code was stol^H^H^H^Hinspired by mtdchar.c
         */
        init_waitqueue_head(&waitq);

        erase->mtd = mtd;
        erase->callback = concat_erase_callback;
        erase->priv = (unsigned long) &waitq;

        /*
         * FIXME: Allow INTERRUPTIBLE. Which means
         * not having the wait_queue head on the stack.
         */
        err = mtd_erase(mtd, erase);
        if (!err) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&waitq, &wait);
                if (erase->state != MTD_ERASE_DONE
                    && erase->state != MTD_ERASE_FAILED)
                        schedule();
                remove_wait_queue(&waitq, &wait);
                set_current_state(TASK_RUNNING);

                err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
        }
        return err;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtd_concat *concat = CONCAT(mtd);
        struct mtd_info *subdev;
        int i, err;
        uint64_t length, offset = 0;
        struct erase_info *erase;

        /*
         * Check for proper erase block alignment of the to-be-erased area.
         * It is easier to do this based on the super device's erase
         * region info rather than looking at each particular sub-device
         * in turn.
         */
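        /*
         * E.g. (illustrative): with a uniform 128 KiB (2^17) erase size,
         * 'addr & (erasesize - 1)' must be zero, i.e. the low 17 bits of
         * both the start address and the length must be clear.
         */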
        if (!concat->mtd.numeraseregions) {
                /* the easy case: device has uniform erase block size */
                if (instr->addr & (concat->mtd.erasesize - 1))
                        return -EINVAL;
                if (instr->len & (concat->mtd.erasesize - 1))
                        return -EINVAL;
        } else {
                /* device has variable erase size */
                struct mtd_erase_region_info *erase_regions =
                    concat->mtd.eraseregions;

                /*
                 * Find the erase region where the to-be-erased area begins:
                 */
                for (i = 0; i < concat->mtd.numeraseregions &&
                     instr->addr >= erase_regions[i].offset; i++) ;
                --i;

                /*
                 * Now erase_regions[i] is the region in which the
                 * to-be-erased area begins. Verify that the starting
                 * offset is aligned to this region's erase size:
                 */
                if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
                        return -EINVAL;

                /*
                 * now find the erase region where the to-be-erased area ends:
                 */
                for (; i < concat->mtd.numeraseregions &&
                     (instr->addr + instr->len) >= erase_regions[i].offset;
                     ++i) ;
                --i;
                /*
                 * check if the ending offset is aligned to this region's
                 * erase size
                 */
                if (i < 0 || ((instr->addr + instr->len) &
                              (erase_regions[i].erasesize - 1)))
                        return -EINVAL;
        }

        /* make a local copy of instr to avoid modifying the caller's struct */
        erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
        if (!erase)
                return -ENOMEM;

        *erase = *instr;
        length = instr->len;

        /*
         * find the subdevice where the to-be-erased area begins, adjust
         * starting offset to be relative to the subdevice start
         */
        for (i = 0; i < concat->num_subdev; i++) {
                subdev = concat->subdev[i];
                if (subdev->size <= erase->addr) {
                        erase->addr -= subdev->size;
                        offset += subdev->size;
                } else {
                        break;
                }
        }

        /* must never happen since size limit has been verified above */
        BUG_ON(i >= concat->num_subdev);

        /* now do the erase: */
        err = 0;
        for (; length > 0; i++) {
                /* loop for all subdevices affected by this request */
                subdev = concat->subdev[i];     /* get current subdevice */

                /* limit length to subdevice's size: */
                if (erase->addr + length > subdev->size)
                        erase->len = subdev->size - erase->addr;
                else
                        erase->len = length;

                length -= erase->len;
                if ((err = concat_dev_erase(subdev, erase))) {
                        /* sanity check: should never happen since
                         * block alignment has been checked above */
                        BUG_ON(err == -EINVAL);
                        if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
                                instr->fail_addr = erase->fail_addr + offset;
                        break;
                }
                /*
                 * erase->addr specifies the offset of the area to be
                 * erased *within the current subdevice*. It can be
                 * non-zero only the first time through this loop, i.e.
                 * for the first subdevice where blocks need to be erased.
                 * All the following erases must begin at the start of the
                 * current subdevice, i.e. at offset zero.
                 */
                erase->addr = 0;
                offset += subdev->size;
        }
        instr->state = erase->state;
        kfree(erase);
        if (err)
                return err;

        if (instr->callback)
                instr->callback(instr);
        return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                uint64_t size;

                if (ofs >= subdev->size) {
                        size = 0;
                        ofs -= subdev->size;
                        continue;
                }
                if (ofs + len > subdev->size)
                        size = subdev->size - ofs;
                else
                        size = len;

                err = mtd_lock(subdev, ofs, size);
                if (err)
                        break;

                len -= size;
                if (len == 0)
                        break;

                err = -EINVAL;
                ofs = 0;
        }

        return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                uint64_t size;

                if (ofs >= subdev->size) {
                        size = 0;
                        ofs -= subdev->size;
                        continue;
                }
                if (ofs + len > subdev->size)
                        size = subdev->size - ofs;
                else
                        size = len;

                err = mtd_unlock(subdev, ofs, size);
                if (err)
                        break;

                len -= size;
                if (len == 0)
                        break;

                err = -EINVAL;
                ofs = 0;
        }

        return err;
}

static void concat_sync(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                mtd_sync(subdev);
        }
}

#ifndef __UBOOT__
static int concat_suspend(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, rc = 0;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                if ((rc = mtd_suspend(subdev)) < 0)
                        return rc;
        }
        return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];
                mtd_resume(subdev);
        }
}
#endif

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, res = 0;

        if (!mtd_can_have_bb(concat->subdev[0]))
                return res;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (ofs >= subdev->size) {
                        ofs -= subdev->size;
                        continue;
                }

                res = mtd_block_isbad(subdev, ofs);
                break;
        }

        return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i, err = -EINVAL;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (ofs >= subdev->size) {
                        ofs -= subdev->size;
                        continue;
                }

                err = mtd_block_markbad(subdev, ofs);
                if (!err)
                        mtd->ecc_stats.badblocks++;
                break;
        }

        return err;
}

/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
                                              unsigned long len,
                                              unsigned long offset,
                                              unsigned long flags)
{
        struct mtd_concat *concat = CONCAT(mtd);
        int i;

        for (i = 0; i < concat->num_subdev; i++) {
                struct mtd_info *subdev = concat->subdev[i];

                if (offset >= subdev->size) {
                        offset -= subdev->size;
                        continue;
                }

                return mtd_get_unmapped_area(subdev, len, offset, flags);
        }

        return (unsigned long) -ENOSYS;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success. This function does _not_ register any
 * devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],   /* subdevices to concatenate */
                                   int num_devs,        /* number of subdevices      */
#ifndef __UBOOT__
                                   const char *name)
#else
                                   char *name)
#endif
{                               /* name for the new device   */
        int i;
        size_t size;
        struct mtd_concat *concat;
        uint32_t max_erasesize, curr_erasesize;
        int num_erase_region;
        int max_writebufsize = 0;

        debug("Concatenating MTD devices:\n");
        for (i = 0; i < num_devs; i++)
                printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
        debug("into device \"%s\"\n", name);

        /* allocate the device structure */
        size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
        concat = kzalloc(size, GFP_KERNEL);
        if (!concat) {
                printk("memory allocation error while creating concatenated device \"%s\"\n",
                       name);
                return NULL;
        }
        concat->subdev = (struct mtd_info **) (concat + 1);

        /*
         * Set up the new "super" device's MTD object structure, check for
         * incompatibilities between the subdevices.
         */
        concat->mtd.type = subdev[0]->type;
        concat->mtd.flags = subdev[0]->flags;
        concat->mtd.size = subdev[0]->size;
        concat->mtd.erasesize = subdev[0]->erasesize;
        concat->mtd.writesize = subdev[0]->writesize;

        for (i = 0; i < num_devs; i++)
                if (max_writebufsize < subdev[i]->writebufsize)
                        max_writebufsize = subdev[i]->writebufsize;
        concat->mtd.writebufsize = max_writebufsize;

        concat->mtd.subpage_sft = subdev[0]->subpage_sft;
        concat->mtd.oobsize = subdev[0]->oobsize;
        concat->mtd.oobavail = subdev[0]->oobavail;
#ifndef __UBOOT__
        if (subdev[0]->_writev)
                concat->mtd._writev = concat_writev;
#endif
        if (subdev[0]->_read_oob)
                concat->mtd._read_oob = concat_read_oob;
        if (subdev[0]->_write_oob)
                concat->mtd._write_oob = concat_write_oob;
        if (subdev[0]->_block_isbad)
                concat->mtd._block_isbad = concat_block_isbad;
        if (subdev[0]->_block_markbad)
                concat->mtd._block_markbad = concat_block_markbad;

        concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

#ifndef __UBOOT__
        concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
#endif

        concat->subdev[0] = subdev[0];

        for (i = 1; i < num_devs; i++) {
                if (concat->mtd.type != subdev[i]->type) {
                        kfree(concat);
                        printk("Incompatible device type on \"%s\"\n",
                               subdev[i]->name);
                        return NULL;
                }
                if (concat->mtd.flags != subdev[i]->flags) {
                        /*
                         * Expect all flags except MTD_WRITEABLE to be
                         * equal on all subdevices.
                         */
                        if ((concat->mtd.flags ^ subdev[i]->flags) &
                            ~MTD_WRITEABLE) {
                                kfree(concat);
                                printk("Incompatible device flags on \"%s\"\n",
                                       subdev[i]->name);
                                return NULL;
                        } else
                                /* if writeable attribute differs,
                                   make super device writeable */
                                concat->mtd.flags |=
                                    subdev[i]->flags & MTD_WRITEABLE;
                }

#ifndef __UBOOT__
                /* only permit direct mapping if the BDIs are all the same
                 * - copy-mapping is still permitted
                 */
                if (concat->mtd.backing_dev_info !=
                    subdev[i]->backing_dev_info)
                        concat->mtd.backing_dev_info =
                                &default_backing_dev_info;
#endif

                concat->mtd.size += subdev[i]->size;
                concat->mtd.ecc_stats.badblocks +=
                        subdev[i]->ecc_stats.badblocks;
                if (concat->mtd.writesize != subdev[i]->writesize ||
                    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
                    concat->mtd.oobsize != subdev[i]->oobsize ||
                    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
                    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
                        kfree(concat);
                        printk("Incompatible OOB or ECC data on \"%s\"\n",
                               subdev[i]->name);
                        return NULL;
                }
                concat->subdev[i] = subdev[i];
        }

        concat->mtd.ecclayout = subdev[0]->ecclayout;

        concat->num_subdev = num_devs;
        concat->mtd.name = name;

        concat->mtd._erase = concat_erase;
        concat->mtd._read = concat_read;
        concat->mtd._write = concat_write;
        concat->mtd._sync = concat_sync;
        concat->mtd._lock = concat_lock;
        concat->mtd._unlock = concat_unlock;
#ifndef __UBOOT__
        concat->mtd._suspend = concat_suspend;
        concat->mtd._resume = concat_resume;
#endif
        concat->mtd._get_unmapped_area = concat_get_unmapped_area;

        /*
         * Combine the erase block size info of the subdevices:
         *
         * first, walk the map of the new device and see how
         * many changes in erase size we have
         */
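        /*
         * E.g. (illustrative): concatenating a uniform 64 KiB chip with a
         * uniform 128 KiB chip yields num_erase_region == 2; erasesize is
         * then set to the 128 KiB maximum and eraseregions[] describes the
         * two spans.
         */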
        max_erasesize = curr_erasesize = subdev[0]->erasesize;
        num_erase_region = 1;
        for (i = 0; i < num_devs; i++) {
                if (subdev[i]->numeraseregions == 0) {
                        /* current subdevice has uniform erase size */
                        if (subdev[i]->erasesize != curr_erasesize) {
                                /* if it differs from the last subdevice's erase size, count it */
                                ++num_erase_region;
                                curr_erasesize = subdev[i]->erasesize;
                                if (curr_erasesize > max_erasesize)
                                        max_erasesize = curr_erasesize;
                        }
                } else {
                        /* current subdevice has variable erase size */
                        int j;
                        for (j = 0; j < subdev[i]->numeraseregions; j++) {
                                /* walk the list of erase regions, count any changes */
                                if (subdev[i]->eraseregions[j].erasesize !=
                                    curr_erasesize) {
                                        ++num_erase_region;
                                        curr_erasesize =
                                            subdev[i]->eraseregions[j].erasesize;
                                        if (curr_erasesize > max_erasesize)
                                                max_erasesize = curr_erasesize;
                                }
                        }
                }
        }

        if (num_erase_region == 1) {
                /*
                 * All subdevices have the same uniform erase size.
                 * This is easy:
                 */
                concat->mtd.erasesize = curr_erasesize;
                concat->mtd.numeraseregions = 0;
        } else {
                uint64_t tmp64;

                /*
                 * erase block size varies across the subdevices: allocate
                 * space to store the data describing the variable erase regions
                 */
                struct mtd_erase_region_info *erase_region_p;
                uint64_t begin, position;

                concat->mtd.erasesize = max_erasesize;
                concat->mtd.numeraseregions = num_erase_region;
                concat->mtd.eraseregions = erase_region_p =
                    kmalloc(num_erase_region *
                            sizeof(struct mtd_erase_region_info), GFP_KERNEL);
                if (!erase_region_p) {
                        kfree(concat);
                        printk("memory allocation error while creating erase region list"
                               " for device \"%s\"\n", name);
                        return NULL;
                }

                /*
                 * walk the map of the new device once more and fill in
                 * the erase region info:
                 */
                curr_erasesize = subdev[0]->erasesize;
                begin = position = 0;
                for (i = 0; i < num_devs; i++) {
                        if (subdev[i]->numeraseregions == 0) {
                                /* current subdevice has uniform erase size */
                                if (subdev[i]->erasesize != curr_erasesize) {
                                        /*
                                         * fill in an mtd_erase_region_info structure for the area
                                         * we have walked so far:
                                         */
                                        erase_region_p->offset = begin;
                                        erase_region_p->erasesize =
                                            curr_erasesize;
                                        tmp64 = position - begin;
                                        do_div(tmp64, curr_erasesize);
                                        erase_region_p->numblocks = tmp64;
                                        begin = position;

                                        curr_erasesize = subdev[i]->erasesize;
                                        ++erase_region_p;
                                }
                                position += subdev[i]->size;
                        } else {
                                /* current subdevice has variable erase size */
                                int j;
                                for (j = 0; j < subdev[i]->numeraseregions; j++) {
                                        /* walk the list of erase regions, fill in any changes */
                                        if (subdev[i]->eraseregions[j].erasesize !=
                                            curr_erasesize) {
                                                erase_region_p->offset = begin;
                                                erase_region_p->erasesize =
                                                    curr_erasesize;
                                                tmp64 = position - begin;
                                                do_div(tmp64, curr_erasesize);
                                                erase_region_p->numblocks = tmp64;
                                                begin = position;

                                                curr_erasesize =
                                                    subdev[i]->eraseregions[j].erasesize;
                                                ++erase_region_p;
                                        }
                                        position += subdev[i]->eraseregions[j].numblocks *
                                                    (uint64_t)curr_erasesize;
                                }
                        }
                }
                /* Now write the final entry */
                erase_region_p->offset = begin;
                erase_region_p->erasesize = curr_erasesize;
                tmp64 = position - begin;
                do_div(tmp64, curr_erasesize);
                erase_region_p->numblocks = tmp64;
        }

        return &concat->mtd;
}
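
/*
 * Example usage (an illustrative sketch, not code from this tree): glue
 * two already-probed chips into one logical device and register it. The
 * names below ("combine_flash", "flash-concat") are hypothetical.
 */
#if 0	/* illustrative only */
static struct mtd_info *combine_flash(struct mtd_info *a, struct mtd_info *b)
{
        struct mtd_info *parts[2] = { a, b };
        struct mtd_info *merged;

        merged = mtd_concat_create(parts, 2, "flash-concat");
        if (!merged)
                return NULL;	/* incompatible subdevices or out of memory */

        /*
         * mtd_concat_create() does not register the device; the caller
         * must do so itself, e.g. via add_mtd_device().
         */
        if (add_mtd_device(merged)) {
                mtd_concat_destroy(merged);
                return NULL;
        }
        return merged;
}
#endif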

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
        struct mtd_concat *concat = CONCAT(mtd);
        if (concat->mtd.numeraseregions)
                kfree(concat->mtd.eraseregions);
        kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");