/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling unit.
 *
 * This unit is responsible for wear-leveling. It works in terms of physical
 * eraseblocks and erase counters and knows nothing about logical eraseblocks,
 * volumes, etc. From this unit's perspective all physical eraseblocks are of
 * two types - used and free. Used physical eraseblocks are those that were
 * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
 * those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only 0xFF bytes.
 *
 * When physical eraseblocks are returned to the WL unit by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI-device background thread,
 * which is also managed by the WL unit.
 *
 * Wear-leveling is ensured by moving the contents of used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL unit may pick a free physical eraseblock with a low erase counter,
 * and so forth.
 *
 * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
 *
 * This unit is also responsible for scrubbing. If a bit-flip is detected in a
 * physical eraseblock, it has to be moved. Technically this is the same as
 * moving it for wear-leveling reasons.
 *
 * As noted above, for the WL unit all physical eraseblocks are either "free"
 * or "used". Free eraseblocks are kept in the @wl->free RB-tree, while used
 * eraseblocks are kept in a set of different RB-trees: @wl->used,
 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
 *
 * Note, in this implementation we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution, but it appears to be
 * good enough for moderately large flashes, and it is simple. In the future,
 * one may re-work this unit and make it more scalable.
 *
 * At the moment this unit does not utilize the sequence number, which was
 * introduced relatively recently. But it would be wise to do so because the
 * sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old", and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-work of the WL unit.
 *
 * FIXME: looks too complex, should be simplified (later).
 */
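/*
 * A minimal usage sketch (illustrative only, assuming an attached
 * struct ubi_device *ubi; error handling shortened): the life cycle of a
 * PEB as seen from this unit.
 *
 *	int pnum, err;
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);   // PEB becomes "used"
 *	if (pnum < 0)
 *		return pnum;
 *	// ... write data to PEB pnum via the I/O unit ...
 *	err = ubi_wl_put_peb(ubi, pnum, 0);        // back to WL; erased
 *	                                           // asynchronously, then "free"
 */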
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <ubi_uboot.h>
#include "ubi.h"
/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * The number of erase cycles for which short-term, unknown, and long-term
 * physical eraseblocks stay protected.
 */
#define ST_PROTECTION 16
#define U_PROTECTION  10
#define LT_PROTECTION 4

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL unit starts moving data from used physical eraseblocks with
 * low erase counters to free physical eraseblocks with high erase counters.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL unit has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL unit does
 * not pick eraseblocks with an erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
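/*
 * Example: if CONFIG_MTD_UBI_WL_THRESHOLD is 4096 (a common default),
 * WL_FREE_MAX_DIFF is 8192. So when the least worn free PEB has an EC of
 * 1000, free PEBs with an EC above 9192 are never picked as move targets.
 */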
/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32
/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @wl->prot.pnum RB-tree
 * @rb_aec: link in the @wl->prot.aec RB-tree
 * @abs_ec: the absolute erase counter value when the protection ends
 * @e: the wear-leveling entry of the physical eraseblock under protection
 *
 * When the WL unit returns a physical eraseblock, the physical eraseblock is
 * protected from being moved for some "time". For this reason, the physical
 * eraseblock is not directly moved from the @wl->free tree to the @wl->used
 * tree. There is one more tree in between where this physical eraseblock is
 * temporarily stored (@wl->prot).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but to wait; this is
 *   especially important in the case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the absolute erase counter (@wl->abs_ec). When it reaches a certain
 * value, the physical eraseblocks are moved from the protection trees
 * (@wl->prot.*) to the @wl->used tree.
 *
 * Protected physical eraseblocks are searched by physical eraseblock number
 * (when they are put) and by the absolute erase counter (to check if it is
 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
 * storing the protected physical eraseblocks: @wl->prot.pnum and
 * @wl->prot.aec. They are referred to as the "protection" trees. The
 * first one is indexed by the physical eraseblock number. The second one is
 * indexed by the absolute erase counter. Both trees store
 * &struct ubi_wl_prot_entry objects.
 *
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split into several
 * sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 *   @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those trees.
 */
struct ubi_wl_prot_entry {
	struct rb_node rb_pnum;
	struct rb_node rb_aec;
	unsigned long long abs_ec;
	struct ubi_wl_entry *e;
};
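/*
 * Worked example (sketch): protection lengths are deltas of the absolute
 * erase counter. If a short-term PEB is handed out while @ubi->abs_ec is
 * 1000, its protection entry gets abs_ec = 1000 + ST_PROTECTION = 1016;
 * once the device-wide erase counter passes 1016, check_protection_over()
 * moves the PEB from the protection trees to the @wl->used tree.
 */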
/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @priv: private data of the worker function
 *
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif
/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
	struct rb_node **p, *parent = NULL;

		struct ubi_wl_entry *e1;

		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

		else if (e->ec > e1->ec)
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
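/*
 * Note (illustrative): because entries are keyed by (EC, PEB number), the
 * least worn entry of a tree is simply its leftmost node:
 *
 *	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
 *
 * This pattern is used below whenever the lowest-EC used or free PEB is
 * needed.
 */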
/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
	struct ubi_work *wrk;

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the semaphore in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");

		spin_lock(&ubi->wl_lock);
	spin_unlock(&ubi->wl_lock);
/**
 * in_wl_tree - check if a wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);

		if (e->pnum == e1->pnum) {
		else if (e->ec > e1->ec)
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
/**
 * prot_tree_add - add a physical eraseblock to the protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: protection entry object to use
 * @abs_ec: absolute erase counter value when this physical eraseblock has
 * to be removed from the protection trees.
 *
 * @wl->lock has to be locked.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  struct ubi_wl_prot_entry *pe, int abs_ec)
	struct rb_node **p, *parent = NULL;
	struct ubi_wl_prot_entry *pe1;

	pe->abs_ec = ubi->abs_ec + abs_ec;

	p = &ubi->prot.pnum.rb_node;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

		if (e->pnum < pe1->e->pnum)

	rb_link_node(&pe->rb_pnum, parent, p);
	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

	p = &ubi->prot.aec.rb_node;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec < pe1->abs_ec)

	rb_link_node(&pe->rb_aec, parent, p);
	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
/**
 * find_wl_entry - find the wear-leveling entry closest to a certain erase counter.
 * @root: the RB-tree where to look
 * @max: highest possible erase counter
 *
 * This function looks for a wear-leveling entry with an erase counter closest
 * to @max and less than @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);

		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
	int err, protect, medium_ec;
	struct ubi_wl_entry *e, *first, *last;
	struct ubi_wl_prot_entry *pe;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);

	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);

		/*
		 * For long term data we pick a physical eraseblock
		 * with a high erase counter. But the highest erase
		 * counter we can pick is bounded by the lowest
		 * erase counter plus %WL_FREE_MAX_DIFF.
		 */
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		protect = LT_PROTECTION;

		/*
		 * For unknown data we pick a physical eraseblock with
		 * a medium erase counter. But we by no means can pick a
		 * physical eraseblock with an erase counter greater than
		 * or equal to the lowest erase counter plus
		 * %WL_FREE_MAX_DIFF.
		 */
		first = rb_entry(rb_first(&ubi->free),
				 struct ubi_wl_entry, rb);
		last = rb_entry(rb_last(&ubi->free),
				struct ubi_wl_entry, rb);

		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
			e = rb_entry(ubi->free.rb_node,
				     struct ubi_wl_entry, rb);
			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
			e = find_wl_entry(&ubi->free, medium_ec);
		protect = U_PROTECTION;

		/*
		 * For short term data we pick a physical eraseblock
		 * with the lowest erase counter as we expect it will
		 * be erased soon.
		 */
		e = rb_entry(rb_first(&ubi->free),
			     struct ubi_wl_entry, rb);
		protect = ST_PROTECTION;

	/*
	 * Move the physical eraseblock to the protection trees where it will
	 * be protected from being moved for some time.
	 */
	paranoid_check_in_wl_tree(e, &ubi->free);
	rb_erase(&e->rb, &ubi->free);
	prot_tree_add(ubi, e, pe, protect);

	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
	spin_unlock(&ubi->wl_lock);
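/*
 * Usage sketch (illustrative only): a caller such as the EBA unit mapping
 * a LEB would do something like:
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_LONGTERM);
 *	if (pnum < 0)
 *		return pnum;	// no free PEBs, -ENOMEM, ...
 *	// pnum now carries a valid EC header and is protected from WL
 *	// moves for LT_PROTECTION erase cycles
 */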
/**
 * prot_tree_del - remove a physical eraseblock from the protection trees.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function removes PEB @pnum from the protection trees and returns zero
 * in case of success and %-ENODEV if the PEB was not found in the protection
 * trees.
 */
static int prot_tree_del(struct ubi_device *ubi, int pnum)
	struct ubi_wl_prot_entry *pe = NULL;

	p = ubi->prot.pnum.rb_node;
		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

		if (pnum == pe->e->pnum)

		if (pnum < pe->e->pnum)

	ubi_assert(pe->e->pnum == pnum);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);

	err = ubi_io_sync_erase(ubi, e->pnum, torture);

	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);

	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
	spin_unlock(&ubi->wl_lock);
/**
 * check_protection_over - check if it is time to stop protecting some
 * physical eraseblocks.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check if some physical eraseblocks no longer have
 * to be protected. These physical eraseblocks are moved from the protection
 * trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
	struct ubi_wl_prot_entry *pe;

	/*
	 * There may be several protected physical eraseblocks to remove;
	 * process them all.
	 */
	spin_lock(&ubi->wl_lock);
	if (!ubi->prot.aec.rb_node) {
		spin_unlock(&ubi->wl_lock);

	pe = rb_entry(rb_first(&ubi->prot.aec),
		      struct ubi_wl_prot_entry, rb_aec);

	if (pe->abs_ec > ubi->abs_ec) {
		spin_unlock(&ubi->wl_lock);

	dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
	       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	wl_tree_add(pe->e, &ubi->used);
	spin_unlock(&ubi->wl_lock);
/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;

	/*
	 * U-Boot special: we have no bgt_thread in U-Boot!
	 * So just call do_work() here directly.
	 */

	spin_unlock(&ubi->wl_lock);
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);
/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);

	wl_wrk->func = &erase_worker;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
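/*
 * Sketch of the work-item pattern used throughout this unit (fields as
 * documented for &struct ubi_work above; error handling shortened):
 *
 *	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
 *	if (!wrk)
 *		return -ENOMEM;
 *	wrk->func = &erase_worker;	// worker callback
 *	wrk->e = e;			// PEB the work operates on
 *	wrk->torture = torture;		// torture-test after erasure?
 *	schedule_ubi_work(ubi, wrk);	// queued; in U-Boot, executed
 *					// synchronously via do_work()
 */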
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn-out physical eraseblock to a less worn-out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
	int err, put = 0, scrubbing = 0, protect = 0;
	struct ubi_wl_prot_entry *uninitialized_var(pe);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
		/* Perform scrubbing */
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->rb, &ubi->free);

	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */
	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given out, so we have a situation where it did
			 * not have a chance to write it down because it was
			 * preempted. Just re-schedule the work, so that next
			 * time it will likely have the VID header in place.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);

		ubi_err("error %d while reading VID header from PEB %d",

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);

		/*
		 * For some reason the LEB was not moved - it might be because
		 * the volume is being deleted. We should prevent this PEB from
		 * being selected for wear-leveling movement for some "time",
		 * so we put it into the protection tree.
		 */
		dbg_wl("cancelled moving PEB %d", e1->pnum);
		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);

		ubi_free_vid_hdr(ubi, vid_hdr);
		spin_lock(&ubi->wl_lock);
		prot_tree_add(ubi, e1, pe, protect);
		if (!ubi->move_to_put)
			wl_tree_add(e2, &ubi->used);
		ubi->move_from = ubi->move_to = NULL;
		ubi->move_to_put = ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);

		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);

	err = schedule_erase(ubi, e1, 0);

	mutex_unlock(&ubi->move_mutex);

	/*
	 * For some reason the LEB was not moved; it might be an error or
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, so schedule it for erasure.
	 */
	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
		wl_tree_add(e1, &ubi->used);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e2, 0);

	mutex_unlock(&ubi->move_mutex);

	ubi_err("error %d while moving PEB %d to PEB %d",
		err, e1->pnum, e2->pnum);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);

	mutex_unlock(&ubi->move_mutex);

	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
		dbg_wl("schedule wear-leveling");
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);

	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
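/*
 * Example: with UBI_WL_THRESHOLD == 4096, a used PEB with EC 100 and a
 * free PEB with EC 4196 trigger wear-leveling (4196 - 100 >= 4096), while
 * a free EC of 4195 does not.
 */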
/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kmem_cache_free(ubi_wl_entry_slab, e);

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
		/* Fine, we've erased it successfully */

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened; take care of the
		 * protected physical eraseblocks.
		 */
		check_protection_over(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi);

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kmem_cache_free(ubi_wl_entry_slab, e);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, 0);
	} else if (err != -EIO) {
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to RO mode.
		 */

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		ubi_msg("reserve more %d PEBs", need);

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");

	spin_unlock(&ubi->volumes_lock);
	ubi_msg("mark PEB %d as bad", pnum);

	err = ubi_io_mark_bad(ubi, pnum);

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs == 0)
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);
/**
 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred on this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target to which data is moved. It may happen if the
		 * EBA unit already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL unit has not put the PEB to the "used" tree yet,
		 * but it is about to do this. So we just set a flag which will
		 * tell the WL worker that the PEB is not needed anymore and
		 * should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->rb, &ubi->used);
	} else if (in_wl_tree(e, &ubi->scrub)) {
		paranoid_check_in_wl_tree(e, &ubi->scrub);
		rb_erase(&e->rb, &ubi->scrub);
		err = prot_tree_del(ubi, e->pnum);
			ubi_err("PEB %d not found", pnum);
			spin_unlock(&ubi->wl_lock);

	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
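/*
 * Usage sketch (illustrative only): after the EBA unit unmaps a LEB, its
 * PEB is returned here; @torture is set when the put follows an I/O error:
 *
 *	err = ubi_wl_put_peb(ubi, pnum, 0);	// normal unmap path
 *	err = ubi_wl_put_peb(ubi, pnum, 1);	// a write to pnum failed,
 *						// torture-test before reuse
 */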
/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing, which is done in the background. It returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
	struct ubi_wl_entry *e;

	ubi_msg("schedule PEB %d for scrubbing", pnum);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted into the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->rb, &ubi->used);

		err = prot_tree_del(ubi, e->pnum);
			ubi_err("PEB %d not found", pnum);
			spin_unlock(&ubi->wl_lock);

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
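/*
 * Usage sketch (illustrative only): a read path which saw correctable
 * bit-flips would typically do:
 *
 *	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
 *	if (err == UBI_IO_BITFLIPS)
 *		ubi_wl_scrub_peb(ubi, pnum);	// data is fine, but move it
 *						// off the deteriorating PEB
 */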
/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * And in case the last one was the WL worker and it cancelled the LEB
	 * movement, flush again.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
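/*
 * Usage sketch (illustrative only): callers which need all queued
 * erasures finished, e.g. before detaching the device, simply do:
 *
 *	err = ubi_wl_flush(ubi);
 *	if (err)
 *		return err;	// a queued work failed
 */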
/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
	struct ubi_wl_entry *e;

		else if (rb->rb_right)
			e = rb_entry(rb, struct ubi_wl_entry, rb);

				if (rb->rb_left == &e->rb)
					rb->rb_right = NULL;

			kmem_cache_free(ubi_wl_entry_slab, e);
/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

		if (kthread_should_stop())

		if (try_to_freeze())

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);

		spin_unlock(&ubi->wl_lock);

			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
/**
 * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
 * information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)

	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);

		e->pnum = seb->pnum;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);

	list_for_each_entry(seb, &si->free, u.list) {
		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);

		e->pnum = seb->pnum;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;

	list_for_each_entry(seb, &si->corr, u.list) {
		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);

		e->pnum = seb->pnum;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);

	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);

			e->pnum = seb->pnum;
			ubi->lookuptbl[e->pnum] = e;
				dbg_wl("add PEB %d EC %d to the used tree",
				wl_tree_add(e, &ubi->used);
				dbg_wl("add PEB %d EC %d to the scrub tree",
				wl_tree_add(e, &ubi->scrub);

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);

	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);

	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
/**
 * protection_trees_destroy - destroy the protection RB-trees.
 * @ubi: UBI device description object
 */
static void protection_trees_destroy(struct ubi_device *ubi)
	struct ubi_wl_prot_entry *pe;

	rb = ubi->prot.aec.rb_node;
		else if (rb->rb_right)
			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

				if (rb->rb_left == &pe->rb_aec)
					rb->rb_right = NULL;

			kmem_cache_free(ubi_wl_entry_slab, pe->e);
/**
 * ubi_wl_close - close the wear-leveling unit.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
	dbg_wl("close the UBI wear-leveling unit");

	cancel_pending(ubi);
	protection_trees_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a physical
 * eraseblock is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * is equivalent to @ec, %1 if not, and a negative error code if an error
 * occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();

/**
 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
 * in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it
 * is not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
	if (in_wl_tree(e, root))

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */