// SPDX-License-Identifier: GPL-2.0+
/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 */

#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#endif

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))
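
/*
 * Illustration (informal, not compiled): the cast in PART() is only valid
 * because the embedded mtd_info is the *first* member of struct mtd_part,
 * so the two share a starting address:
 *
 *	struct mtd_part *part = ...;
 *	struct mtd_info *mtd = &part->mtd;   -- same address as 'part'
 *	PART(mtd) == part;                   -- always true
 */
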
#ifdef __UBOOT__
/* from mm/util.c */

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}
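
/*
 * Worked example (illustrative numbers only): with part->offset == 0x100000,
 * a caller doing
 *
 *	mtd_read(part_mtd, 0x1000, len, &retlen, buf);
 *
 * ends up reading the master device at 0x100000 + 0x1000 = 0x101000, and any
 * change in the master's ecc_stats during the call is credited back to the
 * partition's own ecc_stats above.
 */
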
#ifndef __UBOOT__
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_point(part->master, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_unpoint(part->master, from + part->offset, len);
}
#endif

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return part->master->_get_unmapped_area(part->master, len, offset,
						flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (res) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_user_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_user_prot_info(part->master, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_fact_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_fact_prot_info(part->master, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write(part->master, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_panic_write(part->master, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write_user_prot_reg(part->master, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock_user_prot_reg(part->master, from, len);
}

#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_writev(part->master, vecs, count,
				     to + part->offset, retlen);
}
#endif

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_sync(part->master);
}

#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_resume(part->master);
}
#endif

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);

	ofs += part->offset;
	return part->master->_block_isreserved(part->master, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);

	ofs += part->offset;
	return part->master->_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = part->master->_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	debug("Deleting MTD partitions on \"%s\":\n", master->name);

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
#ifndef __UBOOT__
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;
#endif

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
#ifndef __UBOOT__
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
#endif
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			      "0x%012llx -> 0x%012llx\n", partno,
			      (unsigned long long)cur_offset,
			      (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
			      part->name, master->size - slave->offset,
			      slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
	      (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
		       part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
		       part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize)
				slave->mtd.erasesize = regions[i].erasesize;
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
		       part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
		       part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	slave->mtd.ecc_step_size = master->ecc_step_size;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;

			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
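
/*
 * Example usage (a hedged sketch; the device name and numbers are made up):
 *
 *	struct mtd_info *master = get_mtd_device_nm("nand0");
 *
 *	if (!IS_ERR(master))
 *		mtd_add_partition(master, "scratch", 0x200000, 0x100000);
 *
 * Passing MTDPART_SIZ_FULL as the length claims everything from 'offset' to
 * the end of the master device; offsets must be absolute here, since
 * MTDPART_OFS_APPEND and MTDPART_OFS_NXTBLK are rejected above.
 */
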
int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

#ifdef __UBOOT__
	/*
	 * Need to init the list here, since LIST_INIT() does not
	 * work on platforms where relocation has problems (like MIPS
	 * & PPC).
	 */
	if (mtd_partitions.next == NULL)
		INIT_LIST_HEAD(&mtd_partitions);
#endif

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
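
/*
 * Example (an illustrative sketch; names and sizes are invented): a board
 * file might describe a fixed layout and register it in one call:
 *
 *	static const struct mtd_partition board_parts[] = {
 *		{ .name = "u-boot", .offset = 0, .size = 0x80000 },
 *		{ .name = "env", .offset = MTDPART_OFS_APPEND, .size = 0x20000 },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_NXTBLK,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 *
 * MTDPART_OFS_APPEND places a partition directly after the previous one;
 * MTDPART_OFS_NXTBLK additionally rounds up to the next erase block.
 */
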
#ifndef __UBOOT__
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
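
/*
 * Example (a hedged sketch; "myfmt" and its parse_fn are hypothetical): a
 * parser fills in a name and a parse_fn, then registers itself:
 *
 *	static int myfmt_parse_fn(struct mtd_info *master,
 *				  struct mtd_partition **pparts,
 *				  struct mtd_part_parser_data *data);
 *
 *	static struct mtd_part_parser myfmt_parser = {
 *		.owner = THIS_MODULE,
 *		.name = "myfmt",
 *		.parse_fn = myfmt_parse_fn,
 *	};
 *
 *	register_mtd_parser(&myfmt_parser);
 *
 * parse_mtd_partitions() below can then find it by the name "myfmt".
 */
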
/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
#endif
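
/*
 * Example (illustrative): trying the default parsers and registering
 * whatever the first successful one found:
 *
 *	struct mtd_partition *parts;
 *	int nr = parse_mtd_partitions(master, NULL, &parts, NULL);
 *
 *	if (nr > 0)
 *		add_mtd_partitions(master, parts, nr);
 */
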
int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return PART(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
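
/*
 * Example (illustrative numbers): on a 256 MiB chip split into partitions,
 * a partition's own mtd->size covers just its span, while
 * mtd_get_device_size(mtd) still reports the full 256 MiB of the master.
 */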