Linux-libre 5.4.47-gnu (librecmc/linux-libre.git)
drivers/mtd/mtdconcat.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * MTD device concatenation layer
4  *
5  * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
6  * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
7  *
8  * NAND support by Christian Gan <cgan@iders.ca>
9  */
10
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/sched.h>
15 #include <linux/types.h>
16 #include <linux/backing-dev.h>
17
18 #include <linux/mtd/mtd.h>
19 #include <linux/mtd/concat.h>
20
21 #include <asm/div64.h>
22
23 /*
24  * Our storage structure:
25  * subdev points to an array of pointers to struct mtd_info objects;
26  * the array is allocated along with this structure.
27  *
28  */
29 struct mtd_concat {
30         struct mtd_info mtd;
31         int num_subdev;
32         struct mtd_info **subdev;
33 };
34
35 /*
36  * how to calculate the size required for the above structure,
37  * including the pointer array subdev points to:
38  */
39 #define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)    \
40         ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
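
/*
 * Illustrative example (not part of the driver logic): on a 64-bit kernel,
 * concatenating two subdevices yields a single allocation of
 * sizeof(struct mtd_concat) + 2 * sizeof(struct mtd_info *) bytes, laid out
 * as [struct mtd_concat][subdev pointer 0][subdev pointer 1], with
 * concat->subdev pointing just past the struct (see mtd_concat_create()).
 */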
41
42 /*
43  * Given a pointer to the MTD object in the mtd_concat structure,
44  * we can retrieve the pointer to that structure with this macro.
45  */
46 #define CONCAT(x)  ((struct mtd_concat *)(x))
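
/*
 * The cast works because the mtd member is the first field of
 * struct mtd_concat, so this is equivalent to
 * container_of(x, struct mtd_concat, mtd).
 */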
47
48 /*
49  * MTD methods which look up the relevant subdevice, translate the
50  * effective address and pass through to the subdevice.
51  */
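
/*
 * Worked example (illustrative sizes only): with two 8 MiB subdevices, a
 * read at offset 10 MiB skips subdev[0] (subtracting its 8 MiB size from
 * the offset) and is issued to subdev[1] at offset 2 MiB; a request that
 * straddles a subdevice boundary is split, continuing at offset 0 of the
 * next subdevice.
 */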
52
53 static int
54 concat_read(struct mtd_info *mtd, loff_t from, size_t len,
55             size_t * retlen, u_char * buf)
56 {
57         struct mtd_concat *concat = CONCAT(mtd);
58         int ret = 0, err;
59         int i;
60
61         for (i = 0; i < concat->num_subdev; i++) {
62                 struct mtd_info *subdev = concat->subdev[i];
63                 size_t size, retsize;
64
65                 if (from >= subdev->size) {
66                         /* Not destined for this subdev */
67                         size = 0;
68                         from -= subdev->size;
69                         continue;
70                 }
71                 if (from + len > subdev->size)
72                         /* First part goes into this subdev */
73                         size = subdev->size - from;
74                 else
75                         /* Entire transaction goes into this subdev */
76                         size = len;
77
78                 err = mtd_read(subdev, from, size, &retsize, buf);
79
80                 /* Save information about bitflips! */
81                 if (unlikely(err)) {
82                         if (mtd_is_eccerr(err)) {
83                                 mtd->ecc_stats.failed++;
84                                 ret = err;
85                         } else if (mtd_is_bitflip(err)) {
86                                 mtd->ecc_stats.corrected++;
87                                 /* Do not overwrite -EBADMSG !! */
88                                 if (!ret)
89                                         ret = err;
90                         } else
91                                 return err;
92                 }
93
94                 *retlen += retsize;
95                 len -= size;
96                 if (len == 0)
97                         return ret;
98
99                 buf += size;
100                 from = 0;
101         }
102         return -EINVAL;
103 }
104
105 static int
106 concat_write(struct mtd_info *mtd, loff_t to, size_t len,
107              size_t * retlen, const u_char * buf)
108 {
109         struct mtd_concat *concat = CONCAT(mtd);
110         int err = -EINVAL;
111         int i;
112
113         for (i = 0; i < concat->num_subdev; i++) {
114                 struct mtd_info *subdev = concat->subdev[i];
115                 size_t size, retsize;
116
117                 if (to >= subdev->size) {
118                         size = 0;
119                         to -= subdev->size;
120                         continue;
121                 }
122                 if (to + len > subdev->size)
123                         size = subdev->size - to;
124                 else
125                         size = len;
126
127                 err = mtd_write(subdev, to, size, &retsize, buf);
128                 if (err)
129                         break;
130
131                 *retlen += retsize;
132                 len -= size;
133                 if (len == 0)
134                         break;
135
136                 err = -EINVAL;
137                 buf += size;
138                 to = 0;
139         }
140         return err;
141 }
142
143 static int
144 concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
145                 unsigned long count, loff_t to, size_t * retlen)
146 {
147         struct mtd_concat *concat = CONCAT(mtd);
148         struct kvec *vecs_copy;
149         unsigned long entry_low, entry_high;
150         size_t total_len = 0;
151         int i;
152         int err = -EINVAL;
153
154         /* Calculate total length of data */
155         for (i = 0; i < count; i++)
156                 total_len += vecs[i].iov_len;
157
158         /* Check alignment */
159         if (mtd->writesize > 1) {
160                 uint64_t __to = to;
161                 if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
162                         return -EINVAL;
163         }
164
165         /* make a copy of vecs */
166         vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
167         if (!vecs_copy)
168                 return -ENOMEM;
169
170         entry_low = 0;
171         for (i = 0; i < concat->num_subdev; i++) {
172                 struct mtd_info *subdev = concat->subdev[i];
173                 size_t size, wsize, retsize, old_iov_len;
174
175                 if (to >= subdev->size) {
176                         to -= subdev->size;
177                         continue;
178                 }
179
180                 size = min_t(uint64_t, total_len, subdev->size - to);
181                 wsize = size; /* store for future use */
182
183                 entry_high = entry_low;
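                /*
                 * Advance entry_high past every kvec entry that is fully
                 * consumed by this subdevice's share, leaving 'size' as the
                 * number of bytes still needed from vecs_copy[entry_high].
                 * That entry is temporarily truncated so that the entries
                 * [entry_low..entry_high] cover exactly 'wsize' bytes; its
                 * unconsumed remainder is restored after the write and
                 * becomes the first entry for the next subdevice.
                 */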
184                 while (entry_high < count) {
185                         if (size <= vecs_copy[entry_high].iov_len)
186                                 break;
187                         size -= vecs_copy[entry_high++].iov_len;
188                 }
189
190                 old_iov_len = vecs_copy[entry_high].iov_len;
191                 vecs_copy[entry_high].iov_len = size;
192
193                 err = mtd_writev(subdev, &vecs_copy[entry_low],
194                                  entry_high - entry_low + 1, to, &retsize);
195
196                 vecs_copy[entry_high].iov_len = old_iov_len - size;
197                 vecs_copy[entry_high].iov_base += size;
198
199                 entry_low = entry_high;
200
201                 if (err)
202                         break;
203
204                 *retlen += retsize;
205                 total_len -= wsize;
206
207                 if (total_len == 0)
208                         break;
209
210                 err = -EINVAL;
211                 to = 0;
212         }
213
214         kfree(vecs_copy);
215         return err;
216 }
217
218 static int
219 concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
220 {
221         struct mtd_concat *concat = CONCAT(mtd);
222         struct mtd_oob_ops devops = *ops;
223         int i, err, ret = 0;
224
225         ops->retlen = ops->oobretlen = 0;
226
227         for (i = 0; i < concat->num_subdev; i++) {
228                 struct mtd_info *subdev = concat->subdev[i];
229
230                 if (from >= subdev->size) {
231                         from -= subdev->size;
232                         continue;
233                 }
234
235                 /* partial read ? */
236                 if (from + devops.len > subdev->size)
237                         devops.len = subdev->size - from;
238
239                 err = mtd_read_oob(subdev, from, &devops);
240                 ops->retlen += devops.retlen;
241                 ops->oobretlen += devops.oobretlen;
242
243                 /* Save information about bitflips! */
244                 if (unlikely(err)) {
245                         if (mtd_is_eccerr(err)) {
246                                 mtd->ecc_stats.failed++;
247                                 ret = err;
248                         } else if (mtd_is_bitflip(err)) {
249                                 mtd->ecc_stats.corrected++;
250                                 /* Do not overwrite -EBADMSG !! */
251                                 if (!ret)
252                                         ret = err;
253                         } else
254                                 return err;
255                 }
256
257                 if (devops.datbuf) {
258                         devops.len = ops->len - ops->retlen;
259                         if (!devops.len)
260                                 return ret;
261                         devops.datbuf += devops.retlen;
262                 }
263                 if (devops.oobbuf) {
264                         devops.ooblen = ops->ooblen - ops->oobretlen;
265                         if (!devops.ooblen)
266                                 return ret;
267                         devops.oobbuf += ops->oobretlen;
268                 }
269
270                 from = 0;
271         }
272         return -EINVAL;
273 }
274
275 static int
276 concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
277 {
278         struct mtd_concat *concat = CONCAT(mtd);
279         struct mtd_oob_ops devops = *ops;
280         int i, err;
281
282         if (!(mtd->flags & MTD_WRITEABLE))
283                 return -EROFS;
284
285         ops->retlen = ops->oobretlen = 0;
286
287         for (i = 0; i < concat->num_subdev; i++) {
288                 struct mtd_info *subdev = concat->subdev[i];
289
290                 if (to >= subdev->size) {
291                         to -= subdev->size;
292                         continue;
293                 }
294
295                 /* partial write ? */
296                 if (to + devops.len > subdev->size)
297                         devops.len = subdev->size - to;
298
299                 err = mtd_write_oob(subdev, to, &devops);
300                 ops->retlen += devops.retlen;
301                 ops->oobretlen += devops.oobretlen;
302                 if (err)
303                         return err;
304
305                 if (devops.datbuf) {
306                         devops.len = ops->len - ops->retlen;
307                         if (!devops.len)
308                                 return 0;
309                         devops.datbuf += devops.retlen;
310                 }
311                 if (devops.oobbuf) {
312                         devops.ooblen = ops->ooblen - ops->oobretlen;
313                         if (!devops.ooblen)
314                                 return 0;
315                         devops.oobbuf += devops.oobretlen;
316                 }
317                 to = 0;
318         }
319         return -EINVAL;
320 }
321
322 static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
323 {
324         struct mtd_concat *concat = CONCAT(mtd);
325         struct mtd_info *subdev;
326         int i, err;
327         uint64_t length, offset = 0;
328         struct erase_info *erase;
329
330         /*
331          * Check for proper erase block alignment of the to-be-erased area.
332          * It is easier to do this based on the super device's erase
333          * region info rather than looking at each particular sub-device
334          * in turn.
335          */
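        /*
         * Note: the mask tests below assume power-of-two erase sizes, e.g.
         * with 128 KiB blocks both addr and len must be multiples of
         * 0x20000.
         */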
336         if (!concat->mtd.numeraseregions) {
337                 /* the easy case: device has uniform erase block size */
338                 if (instr->addr & (concat->mtd.erasesize - 1))
339                         return -EINVAL;
340                 if (instr->len & (concat->mtd.erasesize - 1))
341                         return -EINVAL;
342         } else {
343                 /* device has variable erase size */
344                 struct mtd_erase_region_info *erase_regions =
345                     concat->mtd.eraseregions;
346
347                 /*
348                  * Find the erase region where the to-be-erased area begins:
349                  */
350                 for (i = 0; i < concat->mtd.numeraseregions &&
351                      instr->addr >= erase_regions[i].offset; i++) ;
352                 --i;
353
354                 /*
355                  * Now erase_regions[i] is the region in which the
356                  * to-be-erased area begins. Verify that the starting
357                  * offset is aligned to this region's erase size:
358                  */
359                 if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
360                         return -EINVAL;
361
362                 /*
363                  * now find the erase region where the to-be-erased area ends:
364                  */
365                 for (; i < concat->mtd.numeraseregions &&
366                      (instr->addr + instr->len) >= erase_regions[i].offset;
367                      ++i) ;
368                 --i;
369                 /*
370                  * check if the ending offset is aligned to this region's erase size
371                  */
372                 if (i < 0 || ((instr->addr + instr->len) &
373                                         (erase_regions[i].erasesize - 1)))
374                         return -EINVAL;
375         }
376
377         /* make a local copy of instr to avoid modifying the caller's struct */
378         erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
379
380         if (!erase)
381                 return -ENOMEM;
382
383         *erase = *instr;
384         length = instr->len;
385
386         /*
387          * find the subdevice where the to-be-erased area begins, adjust
388          * starting offset to be relative to the subdevice start
389          */
390         for (i = 0; i < concat->num_subdev; i++) {
391                 subdev = concat->subdev[i];
392                 if (subdev->size <= erase->addr) {
393                         erase->addr -= subdev->size;
394                         offset += subdev->size;
395                 } else {
396                         break;
397                 }
398         }
399
400         /* must never happen: the MTD core checks the range against mtd->size */
401         BUG_ON(i >= concat->num_subdev);
402
403         /* now do the erase: */
404         err = 0;
405         for (; length > 0; i++) {
406                 /* loop for all subdevices affected by this request */
407                 subdev = concat->subdev[i];     /* get current subdevice */
408
409                 /* limit length to subdevice's size: */
410                 if (erase->addr + length > subdev->size)
411                         erase->len = subdev->size - erase->addr;
412                 else
413                         erase->len = length;
414
415                 length -= erase->len;
416                 if ((err = mtd_erase(subdev, erase))) {
417                         /* sanity check: should never happen since
418                          * block alignment has been checked above */
419                         BUG_ON(err == -EINVAL);
420                         if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
421                                 instr->fail_addr = erase->fail_addr + offset;
422                         break;
423                 }
424                 /*
425                  * erase->addr specifies the offset of the area to be
426                  * erased *within the current subdevice*. It can be
427                  * non-zero only the first time through this loop, i.e.
428                  * for the first subdevice where blocks need to be erased.
429                  * All the following erases must begin at the start of the
430                  * current subdevice, i.e. at offset zero.
431                  */
432                 erase->addr = 0;
433                 offset += subdev->size;
434         }
435         kfree(erase);
436
437         return err;
438 }
439
440 static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len,
441                          bool is_lock)
442 {
443         struct mtd_concat *concat = CONCAT(mtd);
444         int i, err = -EINVAL;
445
446         for (i = 0; i < concat->num_subdev; i++) {
447                 struct mtd_info *subdev = concat->subdev[i];
448                 uint64_t size;
449
450                 if (ofs >= subdev->size) {
451                         size = 0;
452                         ofs -= subdev->size;
453                         continue;
454                 }
455                 if (ofs + len > subdev->size)
456                         size = subdev->size - ofs;
457                 else
458                         size = len;
459
460                 if (is_lock)
461                         err = mtd_lock(subdev, ofs, size);
462                 else
463                         err = mtd_unlock(subdev, ofs, size);
464                 if (err)
465                         break;
466
467                 len -= size;
468                 if (len == 0)
469                         break;
470
471                 err = -EINVAL;
472                 ofs = 0;
473         }
474
475         return err;
476 }
477
478 static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
479 {
480         return concat_xxlock(mtd, ofs, len, true);
481 }
482
483 static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
484 {
485         return concat_xxlock(mtd, ofs, len, false);
486 }
487
488 static int concat_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
489 {
490         struct mtd_concat *concat = CONCAT(mtd);
491         int i, err = -EINVAL;
492
493         for (i = 0; i < concat->num_subdev; i++) {
494                 struct mtd_info *subdev = concat->subdev[i];
495
496                 if (ofs >= subdev->size) {
497                         ofs -= subdev->size;
498                         continue;
499                 }
500
501                 if (ofs + len > subdev->size)
502                         break;
503
504                 return mtd_is_locked(subdev, ofs, len);
505         }
506
507         return err;
508 }
509
510 static void concat_sync(struct mtd_info *mtd)
511 {
512         struct mtd_concat *concat = CONCAT(mtd);
513         int i;
514
515         for (i = 0; i < concat->num_subdev; i++) {
516                 struct mtd_info *subdev = concat->subdev[i];
517                 mtd_sync(subdev);
518         }
519 }
520
521 static int concat_suspend(struct mtd_info *mtd)
522 {
523         struct mtd_concat *concat = CONCAT(mtd);
524         int i, rc = 0;
525
526         for (i = 0; i < concat->num_subdev; i++) {
527                 struct mtd_info *subdev = concat->subdev[i];
528                 if ((rc = mtd_suspend(subdev)) < 0)
529                         return rc;
530         }
531         return rc;
532 }
533
534 static void concat_resume(struct mtd_info *mtd)
535 {
536         struct mtd_concat *concat = CONCAT(mtd);
537         int i;
538
539         for (i = 0; i < concat->num_subdev; i++) {
540                 struct mtd_info *subdev = concat->subdev[i];
541                 mtd_resume(subdev);
542         }
543 }
544
545 static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
546 {
547         struct mtd_concat *concat = CONCAT(mtd);
548         int i, res = 0;
549
550         if (!mtd_can_have_bb(concat->subdev[0]))
551                 return res;
552
553         for (i = 0; i < concat->num_subdev; i++) {
554                 struct mtd_info *subdev = concat->subdev[i];
555
556                 if (ofs >= subdev->size) {
557                         ofs -= subdev->size;
558                         continue;
559                 }
560
561                 res = mtd_block_isbad(subdev, ofs);
562                 break;
563         }
564
565         return res;
566 }
567
568 static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
569 {
570         struct mtd_concat *concat = CONCAT(mtd);
571         int i, err = -EINVAL;
572
573         for (i = 0; i < concat->num_subdev; i++) {
574                 struct mtd_info *subdev = concat->subdev[i];
575
576                 if (ofs >= subdev->size) {
577                         ofs -= subdev->size;
578                         continue;
579                 }
580
581                 err = mtd_block_markbad(subdev, ofs);
582                 if (!err)
583                         mtd->ecc_stats.badblocks++;
584                 break;
585         }
586
587         return err;
588 }
589
590 /*
591  * This function constructs a virtual MTD device by concatenating
592  * num_devs MTD devices and returns a pointer to the new device
593  * object, or NULL on failure. This function does _not_
594  * register any devices: this is the caller's responsibility.
595  */
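/*
 * Minimal usage sketch (hypothetical caller, e.g. a board or map driver;
 * variable names are illustrative only):
 *
 *	struct mtd_info *parts[2] = { flash0_mtd, flash1_mtd };
 *	struct mtd_info *merged;
 *
 *	merged = mtd_concat_create(parts, ARRAY_SIZE(parts), "merged-flash");
 *	if (merged)
 *		mtd_device_register(merged, NULL, 0);
 *	...
 *	mtd_device_unregister(merged);
 *	mtd_concat_destroy(merged);
 */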
596 struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],   /* subdevices to concatenate */
597                                    int num_devs,        /* number of subdevices      */
598                                    const char *name)    /* name for the new device   */
599 {
600         int i;
601         size_t size;
602         struct mtd_concat *concat;
603         uint32_t max_erasesize, curr_erasesize;
604         int num_erase_region;
605         int max_writebufsize = 0;
606
607         printk(KERN_NOTICE "Concatenating MTD devices:\n");
608         for (i = 0; i < num_devs; i++)
609                 printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
610         printk(KERN_NOTICE "into device \"%s\"\n", name);
611
612         /* allocate the device structure */
613         size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
614         concat = kzalloc(size, GFP_KERNEL);
615         if (!concat) {
616                 printk(KERN_ERR
617                        "memory allocation error while creating concatenated device \"%s\"\n",
618                        name);
619                 return NULL;
620         }
621         concat->subdev = (struct mtd_info **) (concat + 1);
622
623         /*
624          * Set up the new "super" device's MTD object structure, check for
625          * incompatibilities between the subdevices.
626          */
627         concat->mtd.type = subdev[0]->type;
628         concat->mtd.flags = subdev[0]->flags;
629         concat->mtd.size = subdev[0]->size;
630         concat->mtd.erasesize = subdev[0]->erasesize;
631         concat->mtd.writesize = subdev[0]->writesize;
632
633         for (i = 0; i < num_devs; i++)
634                 if (max_writebufsize < subdev[i]->writebufsize)
635                         max_writebufsize = subdev[i]->writebufsize;
636         concat->mtd.writebufsize = max_writebufsize;
637
638         concat->mtd.subpage_sft = subdev[0]->subpage_sft;
639         concat->mtd.oobsize = subdev[0]->oobsize;
640         concat->mtd.oobavail = subdev[0]->oobavail;
641         if (subdev[0]->_writev)
642                 concat->mtd._writev = concat_writev;
643         if (subdev[0]->_read_oob)
644                 concat->mtd._read_oob = concat_read_oob;
645         if (subdev[0]->_write_oob)
646                 concat->mtd._write_oob = concat_write_oob;
647         if (subdev[0]->_block_isbad)
648                 concat->mtd._block_isbad = concat_block_isbad;
649         if (subdev[0]->_block_markbad)
650                 concat->mtd._block_markbad = concat_block_markbad;
651
652         concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
653
654         concat->subdev[0] = subdev[0];
655
656         for (i = 1; i < num_devs; i++) {
657                 if (concat->mtd.type != subdev[i]->type) {
658                         kfree(concat);
659                         printk(KERN_ERR "Incompatible device type on \"%s\"\n",
660                                subdev[i]->name);
661                         return NULL;
662                 }
663                 if (concat->mtd.flags != subdev[i]->flags) {
664                         /*
665                          * Expect all flags except MTD_WRITEABLE to be
666                          * equal on all subdevices.
667                          */
668                         if ((concat->mtd.flags ^ subdev[i]->
669                              flags) & ~MTD_WRITEABLE) {
670                                 kfree(concat);
671                                 printk(KERN_ERR "Incompatible device flags on \"%s\"\n",
672                                        subdev[i]->name);
673                                 return NULL;
674                         } else
675                                 /* if writeable attribute differs,
676                                    make super device writeable */
677                                 concat->mtd.flags |=
678                                     subdev[i]->flags & MTD_WRITEABLE;
679                 }
680
681                 concat->mtd.size += subdev[i]->size;
682                 concat->mtd.ecc_stats.badblocks +=
683                         subdev[i]->ecc_stats.badblocks;
684                 if (concat->mtd.writesize != subdev[i]->writesize ||
685                     concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
686                     concat->mtd.oobsize != subdev[i]->oobsize ||
687                     !concat->mtd._read_oob  != !subdev[i]->_read_oob ||
688                     !concat->mtd._write_oob != !subdev[i]->_write_oob) {
689                         kfree(concat);
690                         printk(KERN_ERR "Incompatible OOB or ECC data on \"%s\"\n",
691                                subdev[i]->name);
692                         return NULL;
693                 }
694                 concat->subdev[i] = subdev[i];
695
696         }
697
698         mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);
699
700         concat->num_subdev = num_devs;
701         concat->mtd.name = name;
702
703         concat->mtd._erase = concat_erase;
704         concat->mtd._read = concat_read;
705         concat->mtd._write = concat_write;
706         concat->mtd._sync = concat_sync;
707         concat->mtd._lock = concat_lock;
708         concat->mtd._unlock = concat_unlock;
709         concat->mtd._is_locked = concat_is_locked;
710         concat->mtd._suspend = concat_suspend;
711         concat->mtd._resume = concat_resume;
712
713         /*
714          * Combine the erase block size info of the subdevices:
715          *
716          * first, walk the map of the new device and see how
717          * many changes in erase size we have
718          */
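        /*
         * Worked example (illustrative only): concatenating a chip with
         * uniform 64 KiB blocks and a chip with uniform 128 KiB blocks
         * gives num_erase_region == 2 and max_erasesize == 128 KiB, so a
         * two-entry eraseregions array is built for the super device below.
         */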
719         max_erasesize = curr_erasesize = subdev[0]->erasesize;
720         num_erase_region = 1;
721         for (i = 0; i < num_devs; i++) {
722                 if (subdev[i]->numeraseregions == 0) {
723                         /* current subdevice has uniform erase size */
724                         if (subdev[i]->erasesize != curr_erasesize) {
725                                 /* if it differs from the last subdevice's erase size, count it */
726                                 ++num_erase_region;
727                                 curr_erasesize = subdev[i]->erasesize;
728                                 if (curr_erasesize > max_erasesize)
729                                         max_erasesize = curr_erasesize;
730                         }
731                 } else {
732                         /* current subdevice has variable erase size */
733                         int j;
734                         for (j = 0; j < subdev[i]->numeraseregions; j++) {
735
736                                 /* walk the list of erase regions, count any changes */
737                                 if (subdev[i]->eraseregions[j].erasesize !=
738                                     curr_erasesize) {
739                                         ++num_erase_region;
740                                         curr_erasesize =
741                                             subdev[i]->eraseregions[j].
742                                             erasesize;
743                                         if (curr_erasesize > max_erasesize)
744                                                 max_erasesize = curr_erasesize;
745                                 }
746                         }
747                 }
748         }
749
750         if (num_erase_region == 1) {
751                 /*
752                  * All subdevices have the same uniform erase size.
753                  * This is easy:
754                  */
755                 concat->mtd.erasesize = curr_erasesize;
756                 concat->mtd.numeraseregions = 0;
757         } else {
758                 uint64_t tmp64;
759
760                 /*
761                  * erase block size varies across the subdevices: allocate
762                  * space to store the data describing the variable erase regions
763                  */
764                 struct mtd_erase_region_info *erase_region_p;
765                 uint64_t begin, position;
766
767                 concat->mtd.erasesize = max_erasesize;
768                 concat->mtd.numeraseregions = num_erase_region;
769                 concat->mtd.eraseregions = erase_region_p =
770                     kmalloc_array(num_erase_region,
771                                   sizeof(struct mtd_erase_region_info),
772                                   GFP_KERNEL);
773                 if (!erase_region_p) {
774                         kfree(concat);
775                         printk(KERN_ERR
776                                "memory allocation error while creating erase region list"
777                                " for device \"%s\"\n", name);
778                         return NULL;
779                 }
780
781                 /*
782                  * walk the map of the new device once more and fill in
783                  * the erase region info:
784                  */
785                 curr_erasesize = subdev[0]->erasesize;
786                 begin = position = 0;
787                 for (i = 0; i < num_devs; i++) {
788                         if (subdev[i]->numeraseregions == 0) {
789                                 /* current subdevice has uniform erase size */
790                                 if (subdev[i]->erasesize != curr_erasesize) {
791                                         /*
792                                          *  fill in an mtd_erase_region_info structure for the area
793                                          *  we have walked so far:
794                                          */
795                                         erase_region_p->offset = begin;
796                                         erase_region_p->erasesize =
797                                             curr_erasesize;
798                                         tmp64 = position - begin;
799                                         do_div(tmp64, curr_erasesize);
800                                         erase_region_p->numblocks = tmp64;
801                                         begin = position;
802
803                                         curr_erasesize = subdev[i]->erasesize;
804                                         ++erase_region_p;
805                                 }
806                                 position += subdev[i]->size;
807                         } else {
808                                 /* current subdevice has variable erase size */
809                                 int j;
810                                 for (j = 0; j < subdev[i]->numeraseregions; j++) {
811                                         /* walk the list of erase regions, start a new region at each change */
812                                         if (subdev[i]->eraseregions[j].
813                                             erasesize != curr_erasesize) {
814                                                 erase_region_p->offset = begin;
815                                                 erase_region_p->erasesize =
816                                                     curr_erasesize;
817                                                 tmp64 = position - begin;
818                                                 do_div(tmp64, curr_erasesize);
819                                                 erase_region_p->numblocks = tmp64;
820                                                 begin = position;
821
822                                                 curr_erasesize =
823                                                     subdev[i]->eraseregions[j].
824                                                     erasesize;
825                                                 ++erase_region_p;
826                                         }
827                                         position +=
828                                             subdev[i]->eraseregions[j].
829                                             numblocks * (uint64_t)curr_erasesize;
830                                 }
831                         }
832                 }
833                 /* Now write the final entry */
834                 erase_region_p->offset = begin;
835                 erase_region_p->erasesize = curr_erasesize;
836                 tmp64 = position - begin;
837                 do_div(tmp64, curr_erasesize);
838                 erase_region_p->numblocks = tmp64;
839         }
840
841         return &concat->mtd;
842 }
843
844 /*
845  * This function destroys an MTD object obtained from mtd_concat_create()
846  */
847
848 void mtd_concat_destroy(struct mtd_info *mtd)
849 {
850         struct mtd_concat *concat = CONCAT(mtd);
851         if (concat->mtd.numeraseregions)
852                 kfree(concat->mtd.eraseregions);
853         kfree(concat);
854 }
855
856 EXPORT_SYMBOL(mtd_concat_create);
857 EXPORT_SYMBOL(mtd_concat_destroy);
858
859 MODULE_LICENSE("GPL");
860 MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
861 MODULE_DESCRIPTION("Generic support for concatenation of MTD devices");