// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */
10 #define pr_fmt(fmt) "nand: " fmt
14 #include <linux/compat.h>
15 #include <linux/module.h>
17 #include <linux/bitops.h>
18 #include <linux/mtd/nand.h>
21 * nanddev_isbad() - Check if a block is bad
23 * @pos: position pointing to the block we want to check
25 * Return: true if the block is bad, false otherwise.
27 bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
29 if (nanddev_bbt_is_initialized(nand)) {
33 entry = nanddev_bbt_pos_to_entry(nand, pos);
34 status = nanddev_bbt_get_block_status(nand, entry);
35 /* Lazy block status retrieval */
36 if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
37 if (nand->ops->isbad(nand, pos))
38 status = NAND_BBT_BLOCK_FACTORY_BAD;
40 status = NAND_BBT_BLOCK_GOOD;
42 nanddev_bbt_set_block_status(nand, entry, status);
45 if (status == NAND_BBT_BLOCK_WORN ||
46 status == NAND_BBT_BLOCK_FACTORY_BAD)
52 return nand->ops->isbad(nand, pos);
54 EXPORT_SYMBOL_GPL(nanddev_isbad);
57 * nanddev_markbad() - Mark a block as bad
59 * @pos: position of the block to mark bad
61 * Mark a block bad. This function is updating the BBT if available and
62 * calls the low-level markbad hook (nand->ops->markbad()).
64 * Return: 0 in case of success, a negative error code otherwise.
66 int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
68 struct mtd_info *mtd = nanddev_to_mtd(nand);
72 if (nanddev_isbad(nand, pos))
75 ret = nand->ops->markbad(nand, pos);
77 pr_warn("failed to write BBM to block @%llx (err = %d)\n",
78 nanddev_pos_to_offs(nand, pos), ret);
80 if (!nanddev_bbt_is_initialized(nand))
83 entry = nanddev_bbt_pos_to_entry(nand, pos);
84 ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
88 ret = nanddev_bbt_update(nand);
92 mtd->ecc_stats.badblocks++;
96 EXPORT_SYMBOL_GPL(nanddev_markbad);
99 * nanddev_isreserved() - Check whether an eraseblock is reserved or not
101 * @pos: NAND position to test
103 * Checks whether the eraseblock pointed by @pos is reserved or not.
105 * Return: true if the eraseblock is reserved, false otherwise.
107 bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
112 if (!nanddev_bbt_is_initialized(nand))
115 /* Return info from the table */
116 entry = nanddev_bbt_pos_to_entry(nand, pos);
117 status = nanddev_bbt_get_block_status(nand, entry);
118 return status == NAND_BBT_BLOCK_RESERVED;
120 EXPORT_SYMBOL_GPL(nanddev_isreserved);
123 * nanddev_erase() - Erase a NAND portion
125 * @pos: position of the block to erase
127 * Erases the block if it's not bad.
129 * Return: 0 in case of success, a negative error code otherwise.
131 int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
133 if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
134 pr_warn("attempt to erase a bad/reserved block @%llx\n",
135 nanddev_pos_to_offs(nand, pos));
139 return nand->ops->erase(nand, pos);
141 EXPORT_SYMBOL_GPL(nanddev_erase);
144 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
146 * @einfo: erase request
148 * This is a simple mtd->_erase() implementation iterating over all blocks
149 * concerned by @einfo and calling nand->ops->erase() on each of them.
151 * Note that mtd->_erase should not be directly assigned to this helper,
152 * because there's no locking here. NAND specialized layers should instead
153 * implement there own wrapper around nanddev_mtd_erase() taking the
154 * appropriate lock before calling nanddev_mtd_erase().
156 * Return: 0 in case of success, a negative error code otherwise.
158 int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
160 struct nand_device *nand = mtd_to_nanddev(mtd);
161 struct nand_pos pos, last;
164 nanddev_offs_to_pos(nand, einfo->addr, &pos);
165 nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
166 while (nanddev_pos_cmp(&pos, &last) <= 0) {
167 ret = nanddev_erase(nand, &pos);
169 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
174 nanddev_pos_next_eraseblock(nand, &pos);
179 EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
182 * nanddev_init() - Initialize a NAND device
184 * @ops: NAND device operations
185 * @owner: NAND device owner
187 * Initializes a NAND device object. Consistency checks are done on @ops and
188 * @nand->memorg. Also takes care of initializing the BBT.
190 * Return: 0 in case of success, a negative error code otherwise.
192 int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
193 struct module *owner)
195 struct mtd_info *mtd = nanddev_to_mtd(nand);
196 struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
201 if (!ops->erase || !ops->markbad || !ops->isbad)
204 if (!memorg->bits_per_cell || !memorg->pagesize ||
205 !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
206 !memorg->planes_per_lun || !memorg->luns_per_target ||
210 nand->rowconv.eraseblock_addr_shift =
211 fls(memorg->pages_per_eraseblock - 1);
212 nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
213 nand->rowconv.eraseblock_addr_shift;
217 mtd->type = memorg->bits_per_cell == 1 ?
218 MTD_NANDFLASH : MTD_MLCNANDFLASH;
219 mtd->flags = MTD_CAP_NANDFLASH;
220 mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
221 mtd->writesize = memorg->pagesize;
222 mtd->writebufsize = memorg->pagesize;
223 mtd->oobsize = memorg->oobsize;
224 mtd->size = nanddev_size(nand);
227 return nanddev_bbt_init(nand);
229 EXPORT_SYMBOL_GPL(nanddev_init);
/**
 * nanddev_cleanup() - Release resources allocated in nanddev_init()
 * @nand: NAND device
 *
 * Basically undoes what has been done in nanddev_init().
 */
void nanddev_cleanup(struct nand_device *nand)
{
	if (nanddev_bbt_is_initialized(nand))
		nanddev_bbt_cleanup(nand);
}
EXPORT_SYMBOL_GPL(nanddev_cleanup);
244 MODULE_DESCRIPTION("Generic NAND framework");
245 MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
246 MODULE_LICENSE("GPL v2");