target/linux/generic/backport-4.9/065-v4.13-0005-mtd-partitions-rename-master-to-the-parent-where-app.patch
1 From 0a9d72b69da6d8dae1abd7990c6c4c749846ef3e Mon Sep 17 00:00:00 2001
2 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
3 Date: Wed, 21 Jun 2017 08:26:44 +0200
4 Subject: [PATCH] mtd: partitions: rename "master" to the "parent" where
5  appropriate
6 MIME-Version: 1.0
7 Content-Type: text/plain; charset=UTF-8
8 Content-Transfer-Encoding: 8bit
9
10 This prepares the mtd subsystem for a new feature: subpartitions. In some
11 cases a flash device partition can be a container with extra subpartitions
12 (volumes).
13
14 So far only a flat structure has been implemented: one master (flash device)
15 could be partitioned into a few partitions. Every partition kept a pointer to
16 its master, and that was enough to get things running.
17
18 To support subpartitions we need to store a pointer to the parent for each
19 partition. This is required to implement a more natural tree structure and
20 to handle all the recursion and offset calculations.
21
22 To make the code consistent this patch renames "master" to "parent" in the
23 places where we can be dealing with subpartitions.
24
25 Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
26 Signed-off-by: Brian Norris <computersforpeace@gmail.com>
27 ---
28  drivers/mtd/mtdpart.c | 204 ++++++++++++++++++++++++++------------------------
29  1 file changed, 105 insertions(+), 99 deletions(-)
30
31 --- a/drivers/mtd/mtdpart.c
32 +++ b/drivers/mtd/mtdpart.c
33 @@ -37,10 +37,16 @@
34  static LIST_HEAD(mtd_partitions);
35  static DEFINE_MUTEX(mtd_partitions_mutex);
36  
37 -/* Our partition node structure */
38 +/**
39 + * struct mtd_part - our partition node structure
40 + *
41 + * @mtd: struct holding partition details
42 + * @parent: parent mtd - flash device or another partition
43 + * @offset: partition offset relative to the *flash device*
44 + */
45  struct mtd_part {
46         struct mtd_info mtd;
47 -       struct mtd_info *master;
48 +       struct mtd_info *parent;
49         uint64_t offset;
50         struct list_head list;
51  };
52 @@ -67,15 +73,15 @@ static int part_read(struct mtd_info *mt
53         struct mtd_ecc_stats stats;
54         int res;
55  
56 -       stats = part->master->ecc_stats;
57 -       res = part->master->_read(part->master, from + part->offset, len,
58 +       stats = part->parent->ecc_stats;
59 +       res = part->parent->_read(part->parent, from + part->offset, len,
60                                   retlen, buf);
61         if (unlikely(mtd_is_eccerr(res)))
62                 mtd->ecc_stats.failed +=
63 -                       part->master->ecc_stats.failed - stats.failed;
64 +                       part->parent->ecc_stats.failed - stats.failed;
65         else
66                 mtd->ecc_stats.corrected +=
67 -                       part->master->ecc_stats.corrected - stats.corrected;
68 +                       part->parent->ecc_stats.corrected - stats.corrected;
69         return res;
70  }
71  
72 @@ -84,7 +90,7 @@ static int part_point(struct mtd_info *m
73  {
74         struct mtd_part *part = mtd_to_part(mtd);
75  
76 -       return part->master->_point(part->master, from + part->offset, len,
77 +       return part->parent->_point(part->parent, from + part->offset, len,
78                                     retlen, virt, phys);
79  }
80  
81 @@ -92,7 +98,7 @@ static int part_unpoint(struct mtd_info
82  {
83         struct mtd_part *part = mtd_to_part(mtd);
84  
85 -       return part->master->_unpoint(part->master, from + part->offset, len);
86 +       return part->parent->_unpoint(part->parent, from + part->offset, len);
87  }
88  
89  static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
90 @@ -103,7 +109,7 @@ static unsigned long part_get_unmapped_a
91         struct mtd_part *part = mtd_to_part(mtd);
92  
93         offset += part->offset;
94 -       return part->master->_get_unmapped_area(part->master, len, offset,
95 +       return part->parent->_get_unmapped_area(part->parent, len, offset,
96                                                 flags);
97  }
98  
99 @@ -132,7 +138,7 @@ static int part_read_oob(struct mtd_info
100                         return -EINVAL;
101         }
102  
103 -       res = part->master->_read_oob(part->master, from + part->offset, ops);
104 +       res = part->parent->_read_oob(part->parent, from + part->offset, ops);
105         if (unlikely(res)) {
106                 if (mtd_is_bitflip(res))
107                         mtd->ecc_stats.corrected++;
108 @@ -146,7 +152,7 @@ static int part_read_user_prot_reg(struc
109                 size_t len, size_t *retlen, u_char *buf)
110  {
111         struct mtd_part *part = mtd_to_part(mtd);
112 -       return part->master->_read_user_prot_reg(part->master, from, len,
113 +       return part->parent->_read_user_prot_reg(part->parent, from, len,
114                                                  retlen, buf);
115  }
116  
117 @@ -154,7 +160,7 @@ static int part_get_user_prot_info(struc
118                                    size_t *retlen, struct otp_info *buf)
119  {
120         struct mtd_part *part = mtd_to_part(mtd);
121 -       return part->master->_get_user_prot_info(part->master, len, retlen,
122 +       return part->parent->_get_user_prot_info(part->parent, len, retlen,
123                                                  buf);
124  }
125  
126 @@ -162,7 +168,7 @@ static int part_read_fact_prot_reg(struc
127                 size_t len, size_t *retlen, u_char *buf)
128  {
129         struct mtd_part *part = mtd_to_part(mtd);
130 -       return part->master->_read_fact_prot_reg(part->master, from, len,
131 +       return part->parent->_read_fact_prot_reg(part->parent, from, len,
132                                                  retlen, buf);
133  }
134  
135 @@ -170,7 +176,7 @@ static int part_get_fact_prot_info(struc
136                                    size_t *retlen, struct otp_info *buf)
137  {
138         struct mtd_part *part = mtd_to_part(mtd);
139 -       return part->master->_get_fact_prot_info(part->master, len, retlen,
140 +       return part->parent->_get_fact_prot_info(part->parent, len, retlen,
141                                                  buf);
142  }
143  
144 @@ -178,7 +184,7 @@ static int part_write(struct mtd_info *m
145                 size_t *retlen, const u_char *buf)
146  {
147         struct mtd_part *part = mtd_to_part(mtd);
148 -       return part->master->_write(part->master, to + part->offset, len,
149 +       return part->parent->_write(part->parent, to + part->offset, len,
150                                     retlen, buf);
151  }
152  
153 @@ -186,7 +192,7 @@ static int part_panic_write(struct mtd_i
154                 size_t *retlen, const u_char *buf)
155  {
156         struct mtd_part *part = mtd_to_part(mtd);
157 -       return part->master->_panic_write(part->master, to + part->offset, len,
158 +       return part->parent->_panic_write(part->parent, to + part->offset, len,
159                                           retlen, buf);
160  }
161  
162 @@ -199,14 +205,14 @@ static int part_write_oob(struct mtd_inf
163                 return -EINVAL;
164         if (ops->datbuf && to + ops->len > mtd->size)
165                 return -EINVAL;
166 -       return part->master->_write_oob(part->master, to + part->offset, ops);
167 +       return part->parent->_write_oob(part->parent, to + part->offset, ops);
168  }
169  
170  static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
171                 size_t len, size_t *retlen, u_char *buf)
172  {
173         struct mtd_part *part = mtd_to_part(mtd);
174 -       return part->master->_write_user_prot_reg(part->master, from, len,
175 +       return part->parent->_write_user_prot_reg(part->parent, from, len,
176                                                   retlen, buf);
177  }
178  
179 @@ -214,14 +220,14 @@ static int part_lock_user_prot_reg(struc
180                 size_t len)
181  {
182         struct mtd_part *part = mtd_to_part(mtd);
183 -       return part->master->_lock_user_prot_reg(part->master, from, len);
184 +       return part->parent->_lock_user_prot_reg(part->parent, from, len);
185  }
186  
187  static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
188                 unsigned long count, loff_t to, size_t *retlen)
189  {
190         struct mtd_part *part = mtd_to_part(mtd);
191 -       return part->master->_writev(part->master, vecs, count,
192 +       return part->parent->_writev(part->parent, vecs, count,
193                                      to + part->offset, retlen);
194  }
195  
196 @@ -231,7 +237,7 @@ static int part_erase(struct mtd_info *m
197         int ret;
198  
199         instr->addr += part->offset;
200 -       ret = part->master->_erase(part->master, instr);
201 +       ret = part->parent->_erase(part->parent, instr);
202         if (ret) {
203                 if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
204                         instr->fail_addr -= part->offset;
205 @@ -257,51 +263,51 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
206  static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
207  {
208         struct mtd_part *part = mtd_to_part(mtd);
209 -       return part->master->_lock(part->master, ofs + part->offset, len);
210 +       return part->parent->_lock(part->parent, ofs + part->offset, len);
211  }
212  
213  static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
214  {
215         struct mtd_part *part = mtd_to_part(mtd);
216 -       return part->master->_unlock(part->master, ofs + part->offset, len);
217 +       return part->parent->_unlock(part->parent, ofs + part->offset, len);
218  }
219  
220  static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
221  {
222         struct mtd_part *part = mtd_to_part(mtd);
223 -       return part->master->_is_locked(part->master, ofs + part->offset, len);
224 +       return part->parent->_is_locked(part->parent, ofs + part->offset, len);
225  }
226  
227  static void part_sync(struct mtd_info *mtd)
228  {
229         struct mtd_part *part = mtd_to_part(mtd);
230 -       part->master->_sync(part->master);
231 +       part->parent->_sync(part->parent);
232  }
233  
234  static int part_suspend(struct mtd_info *mtd)
235  {
236         struct mtd_part *part = mtd_to_part(mtd);
237 -       return part->master->_suspend(part->master);
238 +       return part->parent->_suspend(part->parent);
239  }
240  
241  static void part_resume(struct mtd_info *mtd)
242  {
243         struct mtd_part *part = mtd_to_part(mtd);
244 -       part->master->_resume(part->master);
245 +       part->parent->_resume(part->parent);
246  }
247  
248  static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
249  {
250         struct mtd_part *part = mtd_to_part(mtd);
251         ofs += part->offset;
252 -       return part->master->_block_isreserved(part->master, ofs);
253 +       return part->parent->_block_isreserved(part->parent, ofs);
254  }
255  
256  static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
257  {
258         struct mtd_part *part = mtd_to_part(mtd);
259         ofs += part->offset;
260 -       return part->master->_block_isbad(part->master, ofs);
261 +       return part->parent->_block_isbad(part->parent, ofs);
262  }
263  
264  static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
265 @@ -310,7 +316,7 @@ static int part_block_markbad(struct mtd
266         int res;
267  
268         ofs += part->offset;
269 -       res = part->master->_block_markbad(part->master, ofs);
270 +       res = part->parent->_block_markbad(part->parent, ofs);
271         if (!res)
272                 mtd->ecc_stats.badblocks++;
273         return res;
274 @@ -319,13 +325,13 @@ static int part_block_markbad(struct mtd
275  static int part_get_device(struct mtd_info *mtd)
276  {
277         struct mtd_part *part = mtd_to_part(mtd);
278 -       return part->master->_get_device(part->master);
279 +       return part->parent->_get_device(part->parent);
280  }
281  
282  static void part_put_device(struct mtd_info *mtd)
283  {
284         struct mtd_part *part = mtd_to_part(mtd);
285 -       part->master->_put_device(part->master);
286 +       part->parent->_put_device(part->parent);
287  }
288  
289  static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
290 @@ -333,7 +339,7 @@ static int part_ooblayout_ecc(struct mtd
291  {
292         struct mtd_part *part = mtd_to_part(mtd);
293  
294 -       return mtd_ooblayout_ecc(part->master, section, oobregion);
295 +       return mtd_ooblayout_ecc(part->parent, section, oobregion);
296  }
297  
298  static int part_ooblayout_free(struct mtd_info *mtd, int section,
299 @@ -341,7 +347,7 @@ static int part_ooblayout_free(struct mt
300  {
301         struct mtd_part *part = mtd_to_part(mtd);
302  
303 -       return mtd_ooblayout_free(part->master, section, oobregion);
304 +       return mtd_ooblayout_free(part->parent, section, oobregion);
305  }
306  
307  static const struct mtd_ooblayout_ops part_ooblayout_ops = {
308 @@ -353,7 +359,7 @@ static int part_max_bad_blocks(struct mt
309  {
310         struct mtd_part *part = mtd_to_part(mtd);
311  
312 -       return part->master->_max_bad_blocks(part->master,
313 +       return part->parent->_max_bad_blocks(part->parent,
314                                              ofs + part->offset, len);
315  }
316  
317 @@ -363,12 +369,12 @@ static inline void free_partition(struct
318         kfree(p);
319  }
320  
321 -static struct mtd_part *allocate_partition(struct mtd_info *master,
322 +static struct mtd_part *allocate_partition(struct mtd_info *parent,
323                         const struct mtd_partition *part, int partno,
324                         uint64_t cur_offset)
325  {
326 -       int wr_alignment = (master->flags & MTD_NO_ERASE) ? master->writesize:
327 -                                                           master->erasesize;
328 +       int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize:
329 +                                                           parent->erasesize;
330         struct mtd_part *slave;
331         u32 remainder;
332         char *name;
333 @@ -379,25 +385,25 @@ static struct mtd_part *allocate_partiti
334         name = kstrdup(part->name, GFP_KERNEL);
335         if (!name || !slave) {
336                 printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
337 -                      master->name);
338 +                      parent->name);
339                 kfree(name);
340                 kfree(slave);
341                 return ERR_PTR(-ENOMEM);
342         }
343  
344         /* set up the MTD object for this partition */
345 -       slave->mtd.type = master->type;
346 -       slave->mtd.flags = master->flags & ~part->mask_flags;
347 +       slave->mtd.type = parent->type;
348 +       slave->mtd.flags = parent->flags & ~part->mask_flags;
349         slave->mtd.size = part->size;
350 -       slave->mtd.writesize = master->writesize;
351 -       slave->mtd.writebufsize = master->writebufsize;
352 -       slave->mtd.oobsize = master->oobsize;
353 -       slave->mtd.oobavail = master->oobavail;
354 -       slave->mtd.subpage_sft = master->subpage_sft;
355 -       slave->mtd.pairing = master->pairing;
356 +       slave->mtd.writesize = parent->writesize;
357 +       slave->mtd.writebufsize = parent->writebufsize;
358 +       slave->mtd.oobsize = parent->oobsize;
359 +       slave->mtd.oobavail = parent->oobavail;
360 +       slave->mtd.subpage_sft = parent->subpage_sft;
361 +       slave->mtd.pairing = parent->pairing;
362  
363         slave->mtd.name = name;
364 -       slave->mtd.owner = master->owner;
365 +       slave->mtd.owner = parent->owner;
366  
367         /* NOTE: Historically, we didn't arrange MTDs as a tree out of
368          * concern for showing the same data in multiple partitions.
369 @@ -408,70 +414,70 @@ static struct mtd_part *allocate_partiti
370          * distinguish between the master and the partition in sysfs.
371          */
372         slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
373 -                               &master->dev :
374 -                               master->dev.parent;
375 +                               &parent->dev :
376 +                               parent->dev.parent;
377         slave->mtd.dev.of_node = part->of_node;
378  
379         slave->mtd._read = part_read;
380         slave->mtd._write = part_write;
381  
382 -       if (master->_panic_write)
383 +       if (parent->_panic_write)
384                 slave->mtd._panic_write = part_panic_write;
385  
386 -       if (master->_point && master->_unpoint) {
387 +       if (parent->_point && parent->_unpoint) {
388                 slave->mtd._point = part_point;
389                 slave->mtd._unpoint = part_unpoint;
390         }
391  
392 -       if (master->_get_unmapped_area)
393 +       if (parent->_get_unmapped_area)
394                 slave->mtd._get_unmapped_area = part_get_unmapped_area;
395 -       if (master->_read_oob)
396 +       if (parent->_read_oob)
397                 slave->mtd._read_oob = part_read_oob;
398 -       if (master->_write_oob)
399 +       if (parent->_write_oob)
400                 slave->mtd._write_oob = part_write_oob;
401 -       if (master->_read_user_prot_reg)
402 +       if (parent->_read_user_prot_reg)
403                 slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
404 -       if (master->_read_fact_prot_reg)
405 +       if (parent->_read_fact_prot_reg)
406                 slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
407 -       if (master->_write_user_prot_reg)
408 +       if (parent->_write_user_prot_reg)
409                 slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
410 -       if (master->_lock_user_prot_reg)
411 +       if (parent->_lock_user_prot_reg)
412                 slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
413 -       if (master->_get_user_prot_info)
414 +       if (parent->_get_user_prot_info)
415                 slave->mtd._get_user_prot_info = part_get_user_prot_info;
416 -       if (master->_get_fact_prot_info)
417 +       if (parent->_get_fact_prot_info)
418                 slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
419 -       if (master->_sync)
420 +       if (parent->_sync)
421                 slave->mtd._sync = part_sync;
422 -       if (!partno && !master->dev.class && master->_suspend &&
423 -           master->_resume) {
424 +       if (!partno && !parent->dev.class && parent->_suspend &&
425 +           parent->_resume) {
426                         slave->mtd._suspend = part_suspend;
427                         slave->mtd._resume = part_resume;
428         }
429 -       if (master->_writev)
430 +       if (parent->_writev)
431                 slave->mtd._writev = part_writev;
432 -       if (master->_lock)
433 +       if (parent->_lock)
434                 slave->mtd._lock = part_lock;
435 -       if (master->_unlock)
436 +       if (parent->_unlock)
437                 slave->mtd._unlock = part_unlock;
438 -       if (master->_is_locked)
439 +       if (parent->_is_locked)
440                 slave->mtd._is_locked = part_is_locked;
441 -       if (master->_block_isreserved)
442 +       if (parent->_block_isreserved)
443                 slave->mtd._block_isreserved = part_block_isreserved;
444 -       if (master->_block_isbad)
445 +       if (parent->_block_isbad)
446                 slave->mtd._block_isbad = part_block_isbad;
447 -       if (master->_block_markbad)
448 +       if (parent->_block_markbad)
449                 slave->mtd._block_markbad = part_block_markbad;
450 -       if (master->_max_bad_blocks)
451 +       if (parent->_max_bad_blocks)
452                 slave->mtd._max_bad_blocks = part_max_bad_blocks;
453  
454 -       if (master->_get_device)
455 +       if (parent->_get_device)
456                 slave->mtd._get_device = part_get_device;
457 -       if (master->_put_device)
458 +       if (parent->_put_device)
459                 slave->mtd._put_device = part_put_device;
460  
461         slave->mtd._erase = part_erase;
462 -       slave->master = master;
463 +       slave->parent = parent;
464         slave->offset = part->offset;
465  
466         if (slave->offset == MTDPART_OFS_APPEND)
467 @@ -489,25 +495,25 @@ static struct mtd_part *allocate_partiti
468         }
469         if (slave->offset == MTDPART_OFS_RETAIN) {
470                 slave->offset = cur_offset;
471 -               if (master->size - slave->offset >= slave->mtd.size) {
472 -                       slave->mtd.size = master->size - slave->offset
473 +               if (parent->size - slave->offset >= slave->mtd.size) {
474 +                       slave->mtd.size = parent->size - slave->offset
475                                                         - slave->mtd.size;
476                 } else {
477                         printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
478 -                               part->name, master->size - slave->offset,
479 +                               part->name, parent->size - slave->offset,
480                                 slave->mtd.size);
481                         /* register to preserve ordering */
482                         goto out_register;
483                 }
484         }
485         if (slave->mtd.size == MTDPART_SIZ_FULL)
486 -               slave->mtd.size = master->size - slave->offset;
487 +               slave->mtd.size = parent->size - slave->offset;
488  
489         printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
490                 (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
491  
492         /* let's do some sanity checks */
493 -       if (slave->offset >= master->size) {
494 +       if (slave->offset >= parent->size) {
495                 /* let's register it anyway to preserve ordering */
496                 slave->offset = 0;
497                 slave->mtd.size = 0;
498 @@ -515,16 +521,16 @@ static struct mtd_part *allocate_partiti
499                         part->name);
500                 goto out_register;
501         }
502 -       if (slave->offset + slave->mtd.size > master->size) {
503 -               slave->mtd.size = master->size - slave->offset;
504 +       if (slave->offset + slave->mtd.size > parent->size) {
505 +               slave->mtd.size = parent->size - slave->offset;
506                 printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
507 -                       part->name, master->name, (unsigned long long)slave->mtd.size);
508 +                       part->name, parent->name, (unsigned long long)slave->mtd.size);
509         }
510 -       if (master->numeraseregions > 1) {
511 +       if (parent->numeraseregions > 1) {
512                 /* Deal with variable erase size stuff */
513 -               int i, max = master->numeraseregions;
514 +               int i, max = parent->numeraseregions;
515                 u64 end = slave->offset + slave->mtd.size;
516 -               struct mtd_erase_region_info *regions = master->eraseregions;
517 +               struct mtd_erase_region_info *regions = parent->eraseregions;
518  
519                 /* Find the first erase regions which is part of this
520                  * partition. */
521 @@ -543,7 +549,7 @@ static struct mtd_part *allocate_partiti
522                 BUG_ON(slave->mtd.erasesize == 0);
523         } else {
524                 /* Single erase size */
525 -               slave->mtd.erasesize = master->erasesize;
526 +               slave->mtd.erasesize = parent->erasesize;
527         }
528  
529         tmp = slave->offset;
530 @@ -566,17 +572,17 @@ static struct mtd_part *allocate_partiti
531         }
532  
533         mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
534 -       slave->mtd.ecc_step_size = master->ecc_step_size;
535 -       slave->mtd.ecc_strength = master->ecc_strength;
536 -       slave->mtd.bitflip_threshold = master->bitflip_threshold;
537 +       slave->mtd.ecc_step_size = parent->ecc_step_size;
538 +       slave->mtd.ecc_strength = parent->ecc_strength;
539 +       slave->mtd.bitflip_threshold = parent->bitflip_threshold;
540  
541 -       if (master->_block_isbad) {
542 +       if (parent->_block_isbad) {
543                 uint64_t offs = 0;
544  
545                 while (offs < slave->mtd.size) {
546 -                       if (mtd_block_isreserved(master, offs + slave->offset))
547 +                       if (mtd_block_isreserved(parent, offs + slave->offset))
548                                 slave->mtd.ecc_stats.bbtblocks++;
549 -                       else if (mtd_block_isbad(master, offs + slave->offset))
550 +                       else if (mtd_block_isbad(parent, offs + slave->offset))
551                                 slave->mtd.ecc_stats.badblocks++;
552                         offs += slave->mtd.erasesize;
553                 }
554 @@ -610,7 +616,7 @@ static int mtd_add_partition_attrs(struc
555         return ret;
556  }
557  
558 -int mtd_add_partition(struct mtd_info *master, const char *name,
559 +int mtd_add_partition(struct mtd_info *parent, const char *name,
560                       long long offset, long long length)
561  {
562         struct mtd_partition part;
563 @@ -623,7 +629,7 @@ int mtd_add_partition(struct mtd_info *m
564                 return -EINVAL;
565  
566         if (length == MTDPART_SIZ_FULL)
567 -               length = master->size - offset;
568 +               length = parent->size - offset;
569  
570         if (length <= 0)
571                 return -EINVAL;
572 @@ -633,7 +639,7 @@ int mtd_add_partition(struct mtd_info *m
573         part.size = length;
574         part.offset = offset;
575  
576 -       new = allocate_partition(master, &part, -1, offset);
577 +       new = allocate_partition(parent, &part, -1, offset);
578         if (IS_ERR(new))
579                 return PTR_ERR(new);
580  
581 @@ -694,7 +700,7 @@ int del_mtd_partitions(struct mtd_info *
582  
583         mutex_lock(&mtd_partitions_mutex);
584         list_for_each_entry_safe(slave, next, &mtd_partitions, list)
585 -               if (slave->master == master) {
586 +               if (slave->parent == master) {
587                         ret = __mtd_del_partition(slave);
588                         if (ret < 0)
589                                 err = ret;
590 @@ -711,7 +717,7 @@ int mtd_del_partition(struct mtd_info *m
591  
592         mutex_lock(&mtd_partitions_mutex);
593         list_for_each_entry_safe(slave, next, &mtd_partitions, list)
594 -               if ((slave->master == master) &&
595 +               if ((slave->parent == master) &&
596                     (slave->mtd.index == partno)) {
597                         ret = __mtd_del_partition(slave);
598                         break;
599 @@ -958,6 +964,6 @@ uint64_t mtd_get_device_size(const struc
600         if (!mtd_is_partition(mtd))
601                 return mtd->size;
602  
603 -       return mtd_to_part(mtd)->master->size;
604 +       return mtd_to_part(mtd)->parent->size;
605  }
606  EXPORT_SYMBOL_GPL(mtd_get_device_size);