Linux-libre 4.10.3-gnu
librecmc/linux-libre.git: drivers/lightnvm/sysblk.c
/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/lightnvm.h>

#define MAX_SYSBLKS 3   /* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
                              * enables ~1.5M updates per sysblk unit
                              */

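/*
 * Overview of the scheme implemented below: system blocks are kept in up to
 * MAX_SYSBLKS "rows", one row per selected channel, with MAX_BLKS_PR_SYSBLK
 * flash blocks reserved per row.  Updates are appended page by page to the
 * active block of each row, and the copy with the highest sequence number is
 * the current one.
 */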
struct sysblk_scan {
        /* A row is a collection of flash blocks for a system block. */
        int nr_rows;
        int row;
        int act_blk[MAX_SYSBLKS];

        int nr_ppas;
        struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};

static inline int scan_ppa_idx(int row, int blkid)
{
        return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}
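
/*
 * Example mapping into sysblk_scan.ppas[] with MAX_BLKS_PR_SYSBLK == 2:
 *
 *   scan_ppa_idx(0, 0) == 0,  scan_ppa_idx(0, 1) == 1
 *   scan_ppa_idx(1, 0) == 2,  scan_ppa_idx(1, 1) == 3
 *   scan_ppa_idx(2, 0) == 4,  scan_ppa_idx(2, 1) == 5
 */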

static void nvm_sysblk_to_cpu(struct nvm_sb_info *info,
                              struct nvm_system_block *sb)
{
        info->seqnr = be32_to_cpu(sb->seqnr);
        info->erase_cnt = be32_to_cpu(sb->erase_cnt);
        info->version = be16_to_cpu(sb->version);
        strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
        info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}

static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
                              struct nvm_sb_info *info)
{
        sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
        sb->seqnr = cpu_to_be32(info->seqnr);
        sb->erase_cnt = cpu_to_be32(info->erase_cnt);
        sb->version = cpu_to_be16(info->version);
        strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
        sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
}

static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
        struct nvm_geo *geo = &dev->geo;
        int nr_rows = min_t(int, MAX_SYSBLKS, geo->nr_chnls);
        int i;

        for (i = 0; i < nr_rows; i++)
                sysblk_ppas[i].ppa = 0;

        /* if possible, place sysblk at first channel, middle channel and last
         * channel of the device. If not, create only one or two sys blocks
         */
        switch (geo->nr_chnls) {
        case 2:
                sysblk_ppas[1].g.ch = 1;
                /* fall-through */
        case 1:
                sysblk_ppas[0].g.ch = 0;
                break;
        default:
                sysblk_ppas[0].g.ch = 0;
                sysblk_ppas[1].g.ch = geo->nr_chnls / 2;
                sysblk_ppas[2].g.ch = geo->nr_chnls - 1;
                break;
        }

        return nr_rows;
}
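
/*
 * For example, a device with 16 channels places its sysblk rows on
 * channels 0, 8 and 15; a 2-channel device uses channels 0 and 1; a
 * single-channel device uses only channel 0.
 */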

static void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
                                                struct ppa_addr *sysblk_ppas)
{
        memset(s, 0, sizeof(struct sysblk_scan));
        s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}

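/*
 * Pick up to MAX_BLKS_PR_SYSBLK free blocks from the folded bad block table
 * of one LUN and record them in the current scan row.  Returns -EEXIST if a
 * host-reserved block is already present (a system block area appears to be
 * in place already) and -EINVAL if not enough free blocks could be found.
 */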
static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                                        u8 *blks, int nr_blks,
                                        struct sysblk_scan *s)
{
        struct ppa_addr *sppa;
        int i, blkid = 0;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        for (i = 0; i < nr_blks; i++) {
                if (blks[i] == NVM_BLK_T_HOST)
                        return -EEXIST;

                if (blks[i] != NVM_BLK_T_FREE)
                        continue;

                sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
                sppa->g.ch = ppa.g.ch;
                sppa->g.lun = ppa.g.lun;
                sppa->g.blk = i;
                s->nr_ppas++;
                blkid++;

                pr_debug("nvm: use (%u %u %u) as sysblk\n",
                                        sppa->g.ch, sppa->g.lun, sppa->g.blk);
                if (blkid >= MAX_BLKS_PR_SYSBLK)
                        return 0;
        }

        pr_err("nvm: sysblk: not enough free blocks\n");
        return -EINVAL;
}

static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                                        u8 *blks, int nr_blks,
                                        struct sysblk_scan *s)
{
        int i, nr_sysblk = 0;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        for (i = 0; i < nr_blks; i++) {
                if (blks[i] != NVM_BLK_T_HOST)
                        continue;

                if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
                        pr_err("nvm: too many host blks\n");
                        return -EINVAL;
                }

                ppa.g.blk = i;

                s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
                s->nr_ppas++;
                nr_sysblk++;
        }

        return 0;
}

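/*
 * Read the bad block table for LUN 0 of each selected channel and collect
 * either the free blocks (get_free set, used when initializing) or the
 * blocks already marked as host-reserved (used when scanning for existing
 * system blocks).
 */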
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
                                struct ppa_addr *ppas, int get_free)
{
        struct nvm_geo *geo = &dev->geo;
        int i, nr_blks, ret = 0;
        u8 *blks;

        s->nr_ppas = 0;
        nr_blks = geo->blks_per_lun * geo->plane_mode;

        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        for (i = 0; i < s->nr_rows; i++) {
                s->row = i;

                ret = nvm_get_bb_tbl(dev, ppas[i], blks);
                if (ret) {
                        pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
                                                        ppas[i].g.ch,
                                                        ppas[i].g.blk);
                        goto err_get;
                }

                if (get_free)
                        ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks,
                                                                        s);
                else
                        ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks,
                                                                        s);

                if (ret)
                        goto err_get;
        }

err_get:
        kfree(blks);
        return ret;
}

/*
 * Scans a block for the latest sysblk.
 * Returns:
 *      0 - newer sysblk not found. PPA is updated to latest page.
 *      1 - newer sysblk found and stored in *sblk. PPA is updated to
 *          next valid page.
 *      <0 - error.
 */
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
                                                struct nvm_system_block *sblk)
{
        struct nvm_geo *geo = &dev->geo;
        struct nvm_system_block *cur;
        int pg, ret, found = 0;

        /* the full flash page buffer is allocated. Only the first part of it
         * contains the system block information
         */
        cur = kmalloc(geo->pfpg_size, GFP_KERNEL);
        if (!cur)
                return -ENOMEM;

        /* perform linear scan through the block */
        for (pg = 0; pg < dev->lps_per_blk; pg++) {
                ppa->g.pg = ppa_to_slc(dev, pg);

                ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
                                                        cur, geo->pfpg_size);
                if (ret) {
                        if (ret == NVM_RSP_ERR_EMPTYPAGE) {
                                pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
                                                        ppa->g.ch,
                                                        ppa->g.lun,
                                                        ppa->g.blk,
                                                        ppa->g.pg);
                                break;
                        }
                        pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)\n",
                                                        ret,
                                                        ppa->g.ch,
                                                        ppa->g.lun,
                                                        ppa->g.blk,
                                                        ppa->g.pg);
                        break; /* if we can't read a page, move on to the
                                * next blk
                                */
                }

                if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
                        pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
                                                        ppa->g.ch,
                                                        ppa->g.lun,
                                                        ppa->g.blk,
                                                        ppa->g.pg);
                        break; /* last valid page already found */
                }

                if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
                        continue;

                memcpy(sblk, cur, sizeof(struct nvm_system_block));
                found = 1;
        }

        kfree(cur);

        return found;
}

static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
                                                                int type)
{
        return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
}

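/*
 * Write the system block to the active block of every row in SLC mode and
 * read it back to verify the contents.  The payload sits at the start of a
 * full flash page buffer; the remainder of the page is zero padding.
 */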
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
                                                        struct sysblk_scan *s)
{
        struct nvm_geo *geo = &dev->geo;
        struct nvm_system_block nvmsb;
        void *buf;
        int i, sect, ret = 0;
        struct ppa_addr *ppas;

        nvm_cpu_to_sysblk(&nvmsb, info);

        buf = kzalloc(geo->pfpg_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));

        ppas = kcalloc(geo->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!ppas) {
                ret = -ENOMEM;
                goto err;
        }

        /* Write and verify */
        for (i = 0; i < s->nr_rows; i++) {
                ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];

                pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
                                                        ppas[0].g.ch,
                                                        ppas[0].g.lun,
                                                        ppas[0].g.blk,
                                                        ppas[0].g.pg);

                /* Expand to all sectors within a flash page */
                if (geo->sec_per_pg > 1) {
                        for (sect = 1; sect < geo->sec_per_pg; sect++) {
                                ppas[sect].ppa = ppas[0].ppa;
                                ppas[sect].g.sec = sect;
                        }
                }

                ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PWRITE,
                                        NVM_IO_SLC_MODE, buf, geo->pfpg_size);
                if (ret) {
                        pr_err("nvm: sysblk failed program (%u %u %u)\n",
                                                        ppas[0].g.ch,
                                                        ppas[0].g.lun,
                                                        ppas[0].g.blk);
                        break;
                }

                ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PREAD,
                                        NVM_IO_SLC_MODE, buf, geo->pfpg_size);
                if (ret) {
                        pr_err("nvm: sysblk failed read (%u %u %u)\n",
                                                        ppas[0].g.ch,
                                                        ppas[0].g.lun,
                                                        ppas[0].g.blk);
                        break;
                }

                if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
                        pr_err("nvm: sysblk failed verify (%u %u %u)\n",
                                                        ppas[0].g.ch,
                                                        ppas[0].g.lun,
                                                        ppas[0].g.blk);
                        ret = -EINVAL;
                        break;
                }
        }

        kfree(ppas);
err:
        kfree(buf);

        return ret;
}

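/*
 * Round-robin to the next block in each row: erase it, point its PPA at the
 * first SLC page and make it the new active block.  Used when the current
 * active block has no pages left.
 */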
static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
{
        int i, ret;
        unsigned long nxt_blk;
        struct ppa_addr *ppa;

        for (i = 0; i < s->nr_rows; i++) {
                nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
                ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
                ppa->g.pg = ppa_to_slc(dev, 0);

                ret = nvm_erase_ppa(dev, ppa, 1, 0);
                if (ret)
                        return ret;

                s->act_blk[i] = nxt_blk;
        }

        return 0;
}

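/*
 * Read the most recent system block from the device.  Returns 1 and fills
 * *info when a system block is found, 0 when none is present, or a negative
 * error code.
 */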
int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        struct nvm_system_block *cur;
        int i, j, found = 0;
        int ret;

        /*
         * 1. setup sysblk locations
         * 2. get bad block list
         * 3. filter on host-specific blocks (type 3)
         * 4. iterate through all of them and find the one with the
         *    highest sequence number
         * 5. return the superblock information
         */

        if (!dev->ops->get_bb_tbl)
                return -EINVAL;

        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
        if (ret)
                goto err_sysblk;

        /* no sysblocks initialized */
        if (!s.nr_ppas)
                goto err_sysblk;

        cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
        if (!cur) {
                ret = -ENOMEM;
                goto err_sysblk;
        }

        /* find the latest block across all sysblocks */
        for (i = 0; i < s.nr_rows; i++) {
                for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
                        struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];

                        ret = nvm_scan_block(dev, &ppa, cur);
                        if (ret > 0)
                                found = 1;
                        else if (ret < 0)
                                break;
                }
        }

        nvm_sysblk_to_cpu(info, cur);

        kfree(cur);
err_sysblk:
        mutex_unlock(&dev->mlock);

        if (found)
                return 1;
        return ret;
}

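/*
 * Append an updated system block.  The caller is expected to pass a seqnr
 * exactly one higher than the one currently on media; anything else is
 * rejected as a concurrent update.  Returns 0 on success or a negative
 * error code.
 */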
int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
{
        /* 1. for each latest superblock
         * 2. if room
         *    a. write new flash page entry with the updated information
         * 3. if no room
         *    a. find next available block on lun (linear search)
         *       if none, continue to next lun
         *       if none at all, report error. also report that it wasn't
         *       possible to write to all superblocks.
         *    b. write data to block.
         */
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        struct nvm_system_block *cur;
        int i, j, ppaidx, found = 0;
        int ret;

        if (!dev->ops->get_bb_tbl)
                return -EINVAL;

        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
        if (ret)
                goto err_sysblk;

        cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
        if (!cur) {
                ret = -ENOMEM;
                goto err_sysblk;
        }

        /* Get the latest sysblk for each sysblk row */
        for (i = 0; i < s.nr_rows; i++) {
                found = 0;
                for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
                        ppaidx = scan_ppa_idx(i, j);
                        ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
                        if (ret > 0) {
                                s.act_blk[i] = j;
                                found = 1;
                        } else if (ret < 0)
                                break;
                }
        }

        if (!found) {
                pr_err("nvm: no valid sysblks found to update\n");
                ret = -EINVAL;
                goto err_cur;
        }

        /*
         * All sysblocks found. Check that they have the same page id in
         * their flash blocks
         */
        for (i = 1; i < s.nr_rows; i++) {
                struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
                struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];

                if (l.g.pg != r.g.pg) {
                        pr_err("nvm: sysblks not on same page. Previous update failed.\n");
                        ret = -EINVAL;
                        goto err_cur;
                }
        }

        /*
         * Check that there hasn't been another update to the seqnr since we
         * began
         */
        if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
                pr_err("nvm: seq is not sequential\n");
                ret = -EINVAL;
                goto err_cur;
        }

        /*
         * When all pages in a block have been written, a new block is
         * selected and writing is performed on the new block.
         */
        if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
                                                dev->lps_per_blk - 1) {
                ret = nvm_prepare_new_sysblks(dev, &s);
                if (ret)
                        goto err_cur;
        }

        ret = nvm_write_and_verify(dev, new, &s);
err_cur:
        kfree(cur);
err_sysblk:
        mutex_unlock(&dev->mlock);

        return ret;
}

int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        int ret;

        /*
         * 1. select master block locations and pick the first available blks
         * 2. get bad block list
         * 3. mark the MAX_SYSBLKS blocks as host-based device allocated.
         * 4. write and verify data to block
         */

        if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
                return -EINVAL;

        if (!(geo->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
                pr_err("nvm: memory does not support SLC access\n");
                return -EINVAL;
        }

        /* Index all sysblocks and mark them as host-driven */
        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
        if (ret)
                goto err_mark;

        ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
        if (ret)
                goto err_mark;

        /* Write to the first block of each row */
        ret = nvm_write_and_verify(dev, info, &s);
err_mark:
        mutex_unlock(&dev->mlock);
        return ret;
}

static int factory_nblks(int nblks)
{
        /* Round up to nearest BITS_PER_LONG */
        return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
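
/*
 * For example, with BITS_PER_LONG == 64 a geometry of 1020 blks_per_lun is
 * rounded up to 1024, so every LUN occupies a whole number of longs in the
 * factory bitmap below.
 */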

static unsigned int factory_blk_offset(struct nvm_geo *geo, struct ppa_addr ppa)
{
        int nblks = factory_nblks(geo->blks_per_lun);

        return ((ppa.g.ch * geo->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
                                                                BITS_PER_LONG;
}

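/*
 * Mark, per LUN, which blocks must be left alone.  Bits that remain cleared
 * in blk_bitmap are the blocks that will be erased; which block types are
 * spared depends on the NVM_FACTORY_* flags passed in.
 */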
static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                                        u8 *blks, int nr_blks,
                                        unsigned long *blk_bitmap, int flags)
{
        int i, lunoff;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        lunoff = factory_blk_offset(&dev->geo, ppa);

        /* cleared bits correspond to blocks that must be erased */
        for (i = 0; i < nr_blks; i++) {
                switch (blks[i]) {
                case NVM_BLK_T_FREE:
                        if (flags & NVM_FACTORY_ERASE_ONLY_USER)
                                set_bit(i, &blk_bitmap[lunoff]);
                        break;
                case NVM_BLK_T_HOST:
                        if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
                                set_bit(i, &blk_bitmap[lunoff]);
                        break;
                case NVM_BLK_T_GRWN_BAD:
                        if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
                                set_bit(i, &blk_bitmap[lunoff]);
                        break;
                default:
                        set_bit(i, &blk_bitmap[lunoff]);
                        break;
                }
        }

        return 0;
}

static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
                                        int max_ppas, unsigned long *blk_bitmap)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
        unsigned long *offset;

        while (!done) {
                done = 1;
                nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
                        idx = factory_blk_offset(geo, ppa);
                        offset = &blk_bitmap[idx];

                        blkid = find_first_zero_bit(offset, geo->blks_per_lun);
                        if (blkid >= geo->blks_per_lun)
                                continue;
                        set_bit(blkid, offset);

                        ppa.g.blk = blkid;
                        pr_debug("nvm: erase ppa (%u %u %u)\n",
                                                        ppa.g.ch,
                                                        ppa.g.lun,
                                                        ppa.g.blk);

                        erase_list[ppa_cnt] = ppa;
                        ppa_cnt++;
                        done = 0;

                        if (ppa_cnt == max_ppas)
                                return ppa_cnt;
                }
        }

        return ppa_cnt;
}

static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
                                                                int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        int ch, lun, nr_blks, ret = 0;
        u8 *blks;

        nr_blks = geo->blks_per_lun * geo->plane_mode;
        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
                ret = nvm_get_bb_tbl(dev, ppa, blks);
                if (ret)
                        pr_err("nvm: failed bb tbl for ch%u lun%u\n",
                                                        ppa.g.ch, ppa.g.lun);

                ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
                                                                        flags);
                if (ret)
                        break;
        }

        kfree(blks);
        return ret;
}

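/*
 * Factory-reset the device: build a bitmap of blocks to spare, erase
 * everything else in batches, and, when NVM_FACTORY_RESET_HOST_BLKS is set,
 * also return the previously host-reserved system blocks to the free pool.
 */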
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppas;
        int ppa_cnt, ret = -ENOMEM;
        int max_ppas = dev->ops->max_phys_sect / geo->nr_planes;
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        unsigned long *blk_bitmap;

        blk_bitmap = kzalloc(factory_nblks(geo->blks_per_lun) * geo->nr_luns,
                                                                GFP_KERNEL);
        if (!blk_bitmap)
                return ret;

        ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!ppas)
                goto err_blks;

        /* create list of blks to be erased */
        ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
        if (ret)
                goto err_ppas;

        /* keep erasing until the list of blks is empty */
        while ((ppa_cnt =
                        nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
                nvm_erase_ppa(dev, ppas, ppa_cnt, 0);

        /* mark host reserved blocks free */
        if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
                nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
                mutex_lock(&dev->mlock);
                ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
                if (!ret)
                        ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
                mutex_unlock(&dev->mlock);
        }
err_ppas:
        kfree(ppas);
err_blks:
        kfree(blk_bitmap);
        return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);