1 From 0a9d72b69da6d8dae1abd7990c6c4c749846ef3e Mon Sep 17 00:00:00 2001
2 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
3 Date: Wed, 21 Jun 2017 08:26:44 +0200
4 Subject: [PATCH] mtd: partitions: rename "master" to the "parent" where
7 Content-Type: text/plain; charset=UTF-8
8 Content-Transfer-Encoding: 8bit
10 This prepares mtd subsystem for the new feature: subpartitions. In some
11 cases flash device partition can be a container with extra subpartitions
14 So far there was a flat structure implemented. One master (flash device)
15 could be partitioned into a few partitions. Every partition got its master
16 and it was enough to get things running.
18 To support subpartitions we need to store a pointer to the parent for each
19 partition. This is required to implement more natural tree structure and
20 handle all recursion and offset calculations.
22 To make the code consistent this patch renames "master" to "parent" in
23 places where we can be dealing with subpartitions.
25 Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
26 Signed-off-by: Brian Norris <computersforpeace@gmail.com>
28 drivers/mtd/mtdpart.c | 204 ++++++++++++++++++++++++++------------------------
29 1 file changed, 105 insertions(+), 99 deletions(-)
31 --- a/drivers/mtd/mtdpart.c
32 +++ b/drivers/mtd/mtdpart.c
34 static LIST_HEAD(mtd_partitions);
35 static DEFINE_MUTEX(mtd_partitions_mutex);
37 -/* Our partition node structure */
39 + * struct mtd_part - our partition node structure
41 + * @mtd: struct holding partition details
42 + * @parent: parent mtd - flash device or another partition
43 + * @offset: partition offset relative to the *flash device*
47 - struct mtd_info *master;
48 + struct mtd_info *parent;
50 struct list_head list;
52 @@ -67,15 +73,15 @@ static int part_read(struct mtd_info *mt
53 struct mtd_ecc_stats stats;
56 - stats = part->master->ecc_stats;
57 - res = part->master->_read(part->master, from + part->offset, len,
58 + stats = part->parent->ecc_stats;
59 + res = part->parent->_read(part->parent, from + part->offset, len,
61 if (unlikely(mtd_is_eccerr(res)))
62 mtd->ecc_stats.failed +=
63 - part->master->ecc_stats.failed - stats.failed;
64 + part->parent->ecc_stats.failed - stats.failed;
66 mtd->ecc_stats.corrected +=
67 - part->master->ecc_stats.corrected - stats.corrected;
68 + part->parent->ecc_stats.corrected - stats.corrected;
72 @@ -84,7 +90,7 @@ static int part_point(struct mtd_info *m
74 struct mtd_part *part = mtd_to_part(mtd);
76 - return part->master->_point(part->master, from + part->offset, len,
77 + return part->parent->_point(part->parent, from + part->offset, len,
81 @@ -92,7 +98,7 @@ static int part_unpoint(struct mtd_info
83 struct mtd_part *part = mtd_to_part(mtd);
85 - return part->master->_unpoint(part->master, from + part->offset, len);
86 + return part->parent->_unpoint(part->parent, from + part->offset, len);
89 static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
90 @@ -103,7 +109,7 @@ static unsigned long part_get_unmapped_a
91 struct mtd_part *part = mtd_to_part(mtd);
93 offset += part->offset;
94 - return part->master->_get_unmapped_area(part->master, len, offset,
95 + return part->parent->_get_unmapped_area(part->parent, len, offset,
99 @@ -132,7 +138,7 @@ static int part_read_oob(struct mtd_info
103 - res = part->master->_read_oob(part->master, from + part->offset, ops);
104 + res = part->parent->_read_oob(part->parent, from + part->offset, ops);
106 if (mtd_is_bitflip(res))
107 mtd->ecc_stats.corrected++;
108 @@ -146,7 +152,7 @@ static int part_read_user_prot_reg(struc
109 size_t len, size_t *retlen, u_char *buf)
111 struct mtd_part *part = mtd_to_part(mtd);
112 - return part->master->_read_user_prot_reg(part->master, from, len,
113 + return part->parent->_read_user_prot_reg(part->parent, from, len,
117 @@ -154,7 +160,7 @@ static int part_get_user_prot_info(struc
118 size_t *retlen, struct otp_info *buf)
120 struct mtd_part *part = mtd_to_part(mtd);
121 - return part->master->_get_user_prot_info(part->master, len, retlen,
122 + return part->parent->_get_user_prot_info(part->parent, len, retlen,
126 @@ -162,7 +168,7 @@ static int part_read_fact_prot_reg(struc
127 size_t len, size_t *retlen, u_char *buf)
129 struct mtd_part *part = mtd_to_part(mtd);
130 - return part->master->_read_fact_prot_reg(part->master, from, len,
131 + return part->parent->_read_fact_prot_reg(part->parent, from, len,
135 @@ -170,7 +176,7 @@ static int part_get_fact_prot_info(struc
136 size_t *retlen, struct otp_info *buf)
138 struct mtd_part *part = mtd_to_part(mtd);
139 - return part->master->_get_fact_prot_info(part->master, len, retlen,
140 + return part->parent->_get_fact_prot_info(part->parent, len, retlen,
144 @@ -178,7 +184,7 @@ static int part_write(struct mtd_info *m
145 size_t *retlen, const u_char *buf)
147 struct mtd_part *part = mtd_to_part(mtd);
148 - return part->master->_write(part->master, to + part->offset, len,
149 + return part->parent->_write(part->parent, to + part->offset, len,
153 @@ -186,7 +192,7 @@ static int part_panic_write(struct mtd_i
154 size_t *retlen, const u_char *buf)
156 struct mtd_part *part = mtd_to_part(mtd);
157 - return part->master->_panic_write(part->master, to + part->offset, len,
158 + return part->parent->_panic_write(part->parent, to + part->offset, len,
162 @@ -199,14 +205,14 @@ static int part_write_oob(struct mtd_inf
164 if (ops->datbuf && to + ops->len > mtd->size)
166 - return part->master->_write_oob(part->master, to + part->offset, ops);
167 + return part->parent->_write_oob(part->parent, to + part->offset, ops);
170 static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
171 size_t len, size_t *retlen, u_char *buf)
173 struct mtd_part *part = mtd_to_part(mtd);
174 - return part->master->_write_user_prot_reg(part->master, from, len,
175 + return part->parent->_write_user_prot_reg(part->parent, from, len,
179 @@ -214,14 +220,14 @@ static int part_lock_user_prot_reg(struc
182 struct mtd_part *part = mtd_to_part(mtd);
183 - return part->master->_lock_user_prot_reg(part->master, from, len);
184 + return part->parent->_lock_user_prot_reg(part->parent, from, len);
187 static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
188 unsigned long count, loff_t to, size_t *retlen)
190 struct mtd_part *part = mtd_to_part(mtd);
191 - return part->master->_writev(part->master, vecs, count,
192 + return part->parent->_writev(part->parent, vecs, count,
193 to + part->offset, retlen);
196 @@ -231,7 +237,7 @@ static int part_erase(struct mtd_info *m
199 instr->addr += part->offset;
200 - ret = part->master->_erase(part->master, instr);
201 + ret = part->parent->_erase(part->parent, instr);
203 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
204 instr->fail_addr -= part->offset;
205 @@ -257,51 +263,51 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
206 static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
208 struct mtd_part *part = mtd_to_part(mtd);
209 - return part->master->_lock(part->master, ofs + part->offset, len);
210 + return part->parent->_lock(part->parent, ofs + part->offset, len);
213 static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
215 struct mtd_part *part = mtd_to_part(mtd);
216 - return part->master->_unlock(part->master, ofs + part->offset, len);
217 + return part->parent->_unlock(part->parent, ofs + part->offset, len);
220 static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
222 struct mtd_part *part = mtd_to_part(mtd);
223 - return part->master->_is_locked(part->master, ofs + part->offset, len);
224 + return part->parent->_is_locked(part->parent, ofs + part->offset, len);
227 static void part_sync(struct mtd_info *mtd)
229 struct mtd_part *part = mtd_to_part(mtd);
230 - part->master->_sync(part->master);
231 + part->parent->_sync(part->parent);
234 static int part_suspend(struct mtd_info *mtd)
236 struct mtd_part *part = mtd_to_part(mtd);
237 - return part->master->_suspend(part->master);
238 + return part->parent->_suspend(part->parent);
241 static void part_resume(struct mtd_info *mtd)
243 struct mtd_part *part = mtd_to_part(mtd);
244 - part->master->_resume(part->master);
245 + part->parent->_resume(part->parent);
248 static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
250 struct mtd_part *part = mtd_to_part(mtd);
252 - return part->master->_block_isreserved(part->master, ofs);
253 + return part->parent->_block_isreserved(part->parent, ofs);
256 static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
258 struct mtd_part *part = mtd_to_part(mtd);
260 - return part->master->_block_isbad(part->master, ofs);
261 + return part->parent->_block_isbad(part->parent, ofs);
264 static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
265 @@ -310,7 +316,7 @@ static int part_block_markbad(struct mtd
269 - res = part->master->_block_markbad(part->master, ofs);
270 + res = part->parent->_block_markbad(part->parent, ofs);
272 mtd->ecc_stats.badblocks++;
274 @@ -319,13 +325,13 @@ static int part_block_markbad(struct mtd
275 static int part_get_device(struct mtd_info *mtd)
277 struct mtd_part *part = mtd_to_part(mtd);
278 - return part->master->_get_device(part->master);
279 + return part->parent->_get_device(part->parent);
282 static void part_put_device(struct mtd_info *mtd)
284 struct mtd_part *part = mtd_to_part(mtd);
285 - part->master->_put_device(part->master);
286 + part->parent->_put_device(part->parent);
289 static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
290 @@ -333,7 +339,7 @@ static int part_ooblayout_ecc(struct mtd
292 struct mtd_part *part = mtd_to_part(mtd);
294 - return mtd_ooblayout_ecc(part->master, section, oobregion);
295 + return mtd_ooblayout_ecc(part->parent, section, oobregion);
298 static int part_ooblayout_free(struct mtd_info *mtd, int section,
299 @@ -341,7 +347,7 @@ static int part_ooblayout_free(struct mt
301 struct mtd_part *part = mtd_to_part(mtd);
303 - return mtd_ooblayout_free(part->master, section, oobregion);
304 + return mtd_ooblayout_free(part->parent, section, oobregion);
307 static const struct mtd_ooblayout_ops part_ooblayout_ops = {
308 @@ -353,7 +359,7 @@ static int part_max_bad_blocks(struct mt
310 struct mtd_part *part = mtd_to_part(mtd);
312 - return part->master->_max_bad_blocks(part->master,
313 + return part->parent->_max_bad_blocks(part->parent,
314 ofs + part->offset, len);
317 @@ -363,12 +369,12 @@ static inline void free_partition(struct
321 -static struct mtd_part *allocate_partition(struct mtd_info *master,
322 +static struct mtd_part *allocate_partition(struct mtd_info *parent,
323 const struct mtd_partition *part, int partno,
326 - int wr_alignment = (master->flags & MTD_NO_ERASE) ? master->writesize:
328 + int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize:
330 struct mtd_part *slave;
333 @@ -379,25 +385,25 @@ static struct mtd_part *allocate_partiti
334 name = kstrdup(part->name, GFP_KERNEL);
335 if (!name || !slave) {
336 printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
341 return ERR_PTR(-ENOMEM);
344 /* set up the MTD object for this partition */
345 - slave->mtd.type = master->type;
346 - slave->mtd.flags = master->flags & ~part->mask_flags;
347 + slave->mtd.type = parent->type;
348 + slave->mtd.flags = parent->flags & ~part->mask_flags;
349 slave->mtd.size = part->size;
350 - slave->mtd.writesize = master->writesize;
351 - slave->mtd.writebufsize = master->writebufsize;
352 - slave->mtd.oobsize = master->oobsize;
353 - slave->mtd.oobavail = master->oobavail;
354 - slave->mtd.subpage_sft = master->subpage_sft;
355 - slave->mtd.pairing = master->pairing;
356 + slave->mtd.writesize = parent->writesize;
357 + slave->mtd.writebufsize = parent->writebufsize;
358 + slave->mtd.oobsize = parent->oobsize;
359 + slave->mtd.oobavail = parent->oobavail;
360 + slave->mtd.subpage_sft = parent->subpage_sft;
361 + slave->mtd.pairing = parent->pairing;
363 slave->mtd.name = name;
364 - slave->mtd.owner = master->owner;
365 + slave->mtd.owner = parent->owner;
367 /* NOTE: Historically, we didn't arrange MTDs as a tree out of
368 * concern for showing the same data in multiple partitions.
369 @@ -408,70 +414,70 @@ static struct mtd_part *allocate_partiti
370 * distinguish between the master and the partition in sysfs.
372 slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
374 - master->dev.parent;
376 + parent->dev.parent;
377 slave->mtd.dev.of_node = part->of_node;
379 slave->mtd._read = part_read;
380 slave->mtd._write = part_write;
382 - if (master->_panic_write)
383 + if (parent->_panic_write)
384 slave->mtd._panic_write = part_panic_write;
386 - if (master->_point && master->_unpoint) {
387 + if (parent->_point && parent->_unpoint) {
388 slave->mtd._point = part_point;
389 slave->mtd._unpoint = part_unpoint;
392 - if (master->_get_unmapped_area)
393 + if (parent->_get_unmapped_area)
394 slave->mtd._get_unmapped_area = part_get_unmapped_area;
395 - if (master->_read_oob)
396 + if (parent->_read_oob)
397 slave->mtd._read_oob = part_read_oob;
398 - if (master->_write_oob)
399 + if (parent->_write_oob)
400 slave->mtd._write_oob = part_write_oob;
401 - if (master->_read_user_prot_reg)
402 + if (parent->_read_user_prot_reg)
403 slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
404 - if (master->_read_fact_prot_reg)
405 + if (parent->_read_fact_prot_reg)
406 slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
407 - if (master->_write_user_prot_reg)
408 + if (parent->_write_user_prot_reg)
409 slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
410 - if (master->_lock_user_prot_reg)
411 + if (parent->_lock_user_prot_reg)
412 slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
413 - if (master->_get_user_prot_info)
414 + if (parent->_get_user_prot_info)
415 slave->mtd._get_user_prot_info = part_get_user_prot_info;
416 - if (master->_get_fact_prot_info)
417 + if (parent->_get_fact_prot_info)
418 slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
421 slave->mtd._sync = part_sync;
422 - if (!partno && !master->dev.class && master->_suspend &&
424 + if (!partno && !parent->dev.class && parent->_suspend &&
426 slave->mtd._suspend = part_suspend;
427 slave->mtd._resume = part_resume;
429 - if (master->_writev)
430 + if (parent->_writev)
431 slave->mtd._writev = part_writev;
434 slave->mtd._lock = part_lock;
435 - if (master->_unlock)
436 + if (parent->_unlock)
437 slave->mtd._unlock = part_unlock;
438 - if (master->_is_locked)
439 + if (parent->_is_locked)
440 slave->mtd._is_locked = part_is_locked;
441 - if (master->_block_isreserved)
442 + if (parent->_block_isreserved)
443 slave->mtd._block_isreserved = part_block_isreserved;
444 - if (master->_block_isbad)
445 + if (parent->_block_isbad)
446 slave->mtd._block_isbad = part_block_isbad;
447 - if (master->_block_markbad)
448 + if (parent->_block_markbad)
449 slave->mtd._block_markbad = part_block_markbad;
450 - if (master->_max_bad_blocks)
451 + if (parent->_max_bad_blocks)
452 slave->mtd._max_bad_blocks = part_max_bad_blocks;
454 - if (master->_get_device)
455 + if (parent->_get_device)
456 slave->mtd._get_device = part_get_device;
457 - if (master->_put_device)
458 + if (parent->_put_device)
459 slave->mtd._put_device = part_put_device;
461 slave->mtd._erase = part_erase;
462 - slave->master = master;
463 + slave->parent = parent;
464 slave->offset = part->offset;
466 if (slave->offset == MTDPART_OFS_APPEND)
467 @@ -489,25 +495,25 @@ static struct mtd_part *allocate_partiti
469 if (slave->offset == MTDPART_OFS_RETAIN) {
470 slave->offset = cur_offset;
471 - if (master->size - slave->offset >= slave->mtd.size) {
472 - slave->mtd.size = master->size - slave->offset
473 + if (parent->size - slave->offset >= slave->mtd.size) {
474 + slave->mtd.size = parent->size - slave->offset
477 printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
478 - part->name, master->size - slave->offset,
479 + part->name, parent->size - slave->offset,
481 /* register to preserve ordering */
485 if (slave->mtd.size == MTDPART_SIZ_FULL)
486 - slave->mtd.size = master->size - slave->offset;
487 + slave->mtd.size = parent->size - slave->offset;
489 printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
490 (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
492 /* let's do some sanity checks */
493 - if (slave->offset >= master->size) {
494 + if (slave->offset >= parent->size) {
495 /* let's register it anyway to preserve ordering */
498 @@ -515,16 +521,16 @@ static struct mtd_part *allocate_partiti
502 - if (slave->offset + slave->mtd.size > master->size) {
503 - slave->mtd.size = master->size - slave->offset;
504 + if (slave->offset + slave->mtd.size > parent->size) {
505 + slave->mtd.size = parent->size - slave->offset;
506 printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
507 - part->name, master->name, (unsigned long long)slave->mtd.size);
508 + part->name, parent->name, (unsigned long long)slave->mtd.size);
510 - if (master->numeraseregions > 1) {
511 + if (parent->numeraseregions > 1) {
512 /* Deal with variable erase size stuff */
513 - int i, max = master->numeraseregions;
514 + int i, max = parent->numeraseregions;
515 u64 end = slave->offset + slave->mtd.size;
516 - struct mtd_erase_region_info *regions = master->eraseregions;
517 + struct mtd_erase_region_info *regions = parent->eraseregions;
519 /* Find the first erase regions which is part of this
521 @@ -543,7 +549,7 @@ static struct mtd_part *allocate_partiti
522 BUG_ON(slave->mtd.erasesize == 0);
524 /* Single erase size */
525 - slave->mtd.erasesize = master->erasesize;
526 + slave->mtd.erasesize = parent->erasesize;
530 @@ -566,17 +572,17 @@ static struct mtd_part *allocate_partiti
533 mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
534 - slave->mtd.ecc_step_size = master->ecc_step_size;
535 - slave->mtd.ecc_strength = master->ecc_strength;
536 - slave->mtd.bitflip_threshold = master->bitflip_threshold;
537 + slave->mtd.ecc_step_size = parent->ecc_step_size;
538 + slave->mtd.ecc_strength = parent->ecc_strength;
539 + slave->mtd.bitflip_threshold = parent->bitflip_threshold;
541 - if (master->_block_isbad) {
542 + if (parent->_block_isbad) {
545 while (offs < slave->mtd.size) {
546 - if (mtd_block_isreserved(master, offs + slave->offset))
547 + if (mtd_block_isreserved(parent, offs + slave->offset))
548 slave->mtd.ecc_stats.bbtblocks++;
549 - else if (mtd_block_isbad(master, offs + slave->offset))
550 + else if (mtd_block_isbad(parent, offs + slave->offset))
551 slave->mtd.ecc_stats.badblocks++;
552 offs += slave->mtd.erasesize;
554 @@ -610,7 +616,7 @@ static int mtd_add_partition_attrs(struc
558 -int mtd_add_partition(struct mtd_info *master, const char *name,
559 +int mtd_add_partition(struct mtd_info *parent, const char *name,
560 long long offset, long long length)
562 struct mtd_partition part;
563 @@ -623,7 +629,7 @@ int mtd_add_partition(struct mtd_info *m
566 if (length == MTDPART_SIZ_FULL)
567 - length = master->size - offset;
568 + length = parent->size - offset;
572 @@ -633,7 +639,7 @@ int mtd_add_partition(struct mtd_info *m
574 part.offset = offset;
576 - new = allocate_partition(master, &part, -1, offset);
577 + new = allocate_partition(parent, &part, -1, offset);
581 @@ -683,7 +689,7 @@ int del_mtd_partitions(struct mtd_info *
583 mutex_lock(&mtd_partitions_mutex);
584 list_for_each_entry_safe(slave, next, &mtd_partitions, list)
585 - if (slave->master == master) {
586 + if (slave->parent == master) {
587 ret = __mtd_del_partition(slave);
590 @@ -700,7 +706,7 @@ int mtd_del_partition(struct mtd_info *m
592 mutex_lock(&mtd_partitions_mutex);
593 list_for_each_entry_safe(slave, next, &mtd_partitions, list)
594 - if ((slave->master == master) &&
595 + if ((slave->parent == master) &&
596 (slave->mtd.index == partno)) {
597 ret = __mtd_del_partition(slave);
599 @@ -933,6 +939,6 @@ uint64_t mtd_get_device_size(const struc
600 if (!mtd_is_partition(mtd))
603 - return mtd_to_part(mtd)->master->size;
604 + return mtd_to_part(mtd)->parent->size;
606 EXPORT_SYMBOL_GPL(mtd_get_device_size);