1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/drivers/staging/erofs/unzip_vle.c
4  *
5  * Copyright (C) 2018 HUAWEI, Inc.
6  *             http://www.huawei.com/
7  * Created by Gao Xiang <gaoxiang25@huawei.com>
8  *
9  * This file is subject to the terms and conditions of the GNU General Public
10  * License.  See the file COPYING in the main directory of the Linux
11  * distribution for more details.
12  */
13 #include "unzip_vle.h"
14 #include "compress.h"
15 #include <linux/prefetch.h>
16
17 #include <trace/events/erofs.h>
18
19 /*
20  * a compressed_pages[] placeholder used to avoid the slot being
21  * filled with file pages for in-place decompression.
22  */
23 #define PAGE_UNALLOCATED     ((void *)0x5F0E4B1D)
24
25 /* how to allocate cached pages for a workgroup */
26 enum z_erofs_cache_alloctype {
27         DONTALLOC,      /* don't allocate any cached pages */
28         DELAYEDALLOC,   /* delayed allocation (at the time of submitting io) */
29 };
30
31 /*
32  * tagged pointer with 1-bit tag for all compressed pages
33  * tag 0 - the page is just found with an extra page reference
34  */
35 typedef tagptr1_t compressed_page_t;
36
37 #define tag_compressed_page_justfound(page) \
38         tagptr_fold(compressed_page_t, page, 1)
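/* the tag is recovered as `justfound' in pickup_page_for_submission() below */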
39
40 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
41 static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
42
43 void z_erofs_exit_zip_subsystem(void)
44 {
45         destroy_workqueue(z_erofs_workqueue);
46         kmem_cache_destroy(z_erofs_workgroup_cachep);
47 }
48
49 static inline int init_unzip_workqueue(void)
50 {
51         const unsigned int onlinecpus = num_possible_cpus();
52
53         /*
54          * we don't need too many threads; limiting the number of
55          * threads could improve scheduling performance.
56          */
57         z_erofs_workqueue =
58                 alloc_workqueue("erofs_unzipd",
59                                 WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
60                                 onlinecpus + onlinecpus / 4);
61
62         return z_erofs_workqueue ? 0 : -ENOMEM;
63 }
64
65 static void init_once(void *ptr)
66 {
67         struct z_erofs_vle_workgroup *grp = ptr;
68         struct z_erofs_vle_work *const work =
69                 z_erofs_vle_grab_primary_work(grp);
70         unsigned int i;
71
72         mutex_init(&work->lock);
73         work->nr_pages = 0;
74         work->vcnt = 0;
75         for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
76                 grp->compressed_pages[i] = NULL;
77 }
78
79 static void init_always(struct z_erofs_vle_workgroup *grp)
80 {
81         struct z_erofs_vle_work *const work =
82                 z_erofs_vle_grab_primary_work(grp);
83
84         atomic_set(&grp->obj.refcount, 1);
85         grp->flags = 0;
86
87         DBG_BUGON(work->nr_pages);
88         DBG_BUGON(work->vcnt);
89 }
90
91 int __init z_erofs_init_zip_subsystem(void)
92 {
93         z_erofs_workgroup_cachep =
94                 kmem_cache_create("erofs_compress",
95                                   Z_EROFS_WORKGROUP_SIZE, 0,
96                                   SLAB_RECLAIM_ACCOUNT, init_once);
97
98         if (z_erofs_workgroup_cachep) {
99                 if (!init_unzip_workqueue())
100                         return 0;
101
102                 kmem_cache_destroy(z_erofs_workgroup_cachep);
103         }
104         return -ENOMEM;
105 }
106
107 enum z_erofs_vle_work_role {
108         Z_EROFS_VLE_WORK_SECONDARY,
109         Z_EROFS_VLE_WORK_PRIMARY,
110         /*
111          * The current work was the tail of an existing chain, and the
112          * previously processed chained works have all been hooked up to it.
113          * A new chain will be created for the remaining unprocessed works;
114          * therefore, unlike Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED, the next work
115          * cannot reuse the whole page in the following scenario:
116          *  ________________________________________________________________
117          * |      tail (partial) page     |       head (partial) page       |
118          * |  (belongs to the next work)  |  (belongs to the current work)  |
119          * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
120          */
121         Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
122         /*
123          * The current work has been linked with the processed chained works,
124          * and could also be linked with the potentially remaining works, which
125          * means if the processing page is the tail partial page of the work,
126          * the current work can safely use the whole page (since the next work
127          * is under control) for in-place decompression, as illustrated below:
128          *  ________________________________________________________________
129          * |  tail (partial) page  |          head (partial) page           |
130          * | (of the current work) |         (of the previous work)         |
131          * |  PRIMARY_FOLLOWED or  |                                        |
132          * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
133          *
134          * [  (*) the above page can be used for the current work itself.  ]
135          */
136         Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
137         Z_EROFS_VLE_WORK_MAX
138 };
139
140 struct z_erofs_vle_work_builder {
141         enum z_erofs_vle_work_role role;
142         /*
143          * 'hosted = false' means that the current workgroup doesn't belong to
144          * the owned chained workgroups. In other words, it is none of our
145          * business to submit this workgroup.
146          */
147         bool hosted;
148
149         struct z_erofs_vle_workgroup *grp;
150         struct z_erofs_vle_work *work;
151         struct z_erofs_pagevec_ctor vector;
152
153         /* pages used for reading the compressed data */
154         struct page **compressed_pages;
155         unsigned int compressed_deficit;
156 };
157
158 #define VLE_WORK_BUILDER_INIT() \
159         { .work = NULL, .role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED }
160
161 #ifdef EROFS_FS_HAS_MANAGED_CACHE
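/*
 * Try to fill the builder's remaining compressed_pages[] slots from the
 * managed cache mapping `mc', or mark them PAGE_UNALLOCATED for delayed
 * allocation at bio submission time. If every slot could be resolved this
 * way, the role is downgraded to PRIMARY since in-place file pages are no
 * longer needed for the compressed data.
 */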
162 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
163                                      struct address_space *mc,
164                                      pgoff_t index,
165                                      unsigned int clusterpages,
166                                      enum z_erofs_cache_alloctype type,
167                                      struct list_head *pagepool,
168                                      gfp_t gfp)
169 {
170         struct page **const pages = bl->compressed_pages;
171         const unsigned int remaining = bl->compressed_deficit;
172         bool standalone = true;
173         unsigned int i, j = 0;
174
175         if (bl->role < Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
176                 return;
177
178         gfp = mapping_gfp_constraint(mc, gfp) & ~__GFP_RECLAIM;
179
180         index += clusterpages - remaining;
181
182         for (i = 0; i < remaining; ++i) {
183                 struct page *page;
184                 compressed_page_t t;
185
186                 /* the compressed page was loaded before */
187                 if (READ_ONCE(pages[i]))
188                         continue;
189
190                 page = find_get_page(mc, index + i);
191
192                 if (page) {
193                         t = tag_compressed_page_justfound(page);
194                 } else if (type == DELAYEDALLOC) {
195                         t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
196                 } else {        /* DONTALLOC */
197                         if (standalone)
198                                 j = i;
199                         standalone = false;
200                         continue;
201                 }
202
203                 if (!cmpxchg_relaxed(&pages[i], NULL, tagptr_cast_ptr(t)))
204                         continue;
205
206                 if (page)
207                         put_page(page);
208         }
209         bl->compressed_pages += j;
210         bl->compressed_deficit = remaining - j;
211
212         if (standalone)
213                 bl->role = Z_EROFS_VLE_WORK_PRIMARY;
214 }
215
216 /* called by erofs_shrinker to get rid of all compressed_pages */
217 int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
218                                        struct erofs_workgroup *egrp)
219 {
220         struct z_erofs_vle_workgroup *const grp =
221                 container_of(egrp, struct z_erofs_vle_workgroup, obj);
222         struct address_space *const mapping = MNGD_MAPPING(sbi);
223         const int clusterpages = erofs_clusterpages(sbi);
224         int i;
225
226         /*
227          * the workgroup's refcount is now frozen at 1,
228          * therefore there is no need to worry about other decompression users.
229          */
230         for (i = 0; i < clusterpages; ++i) {
231                 struct page *page = grp->compressed_pages[i];
232
233                 if (!page || page->mapping != mapping)
234                         continue;
235
236                 /* block other users from reclaiming or migrating the page */
237                 if (!trylock_page(page))
238                         return -EBUSY;
239
240                 /* barrier is implied in the following 'unlock_page' */
241                 WRITE_ONCE(grp->compressed_pages[i], NULL);
242
243                 set_page_private(page, 0);
244                 ClearPagePrivate(page);
245
246                 unlock_page(page);
247                 put_page(page);
248         }
249         return 0;
250 }
251
252 int erofs_try_to_free_cached_page(struct address_space *mapping,
253                                   struct page *page)
254 {
255         struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb);
256         const unsigned int clusterpages = erofs_clusterpages(sbi);
257         struct z_erofs_vle_workgroup *const grp = (void *)page_private(page);
258         int ret = 0;    /* 0 - busy */
259
260         if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) {
261                 unsigned int i;
262
263                 for (i = 0; i < clusterpages; ++i) {
264                         if (grp->compressed_pages[i] == page) {
265                                 WRITE_ONCE(grp->compressed_pages[i], NULL);
266                                 ret = 1;
267                                 break;
268                         }
269                 }
270                 erofs_workgroup_unfreeze(&grp->obj, 1);
271
272                 if (ret) {
273                         ClearPagePrivate(page);
274                         put_page(page);
275                 }
276         }
277         return ret;
278 }
279 #else
280 static void preload_compressed_pages(struct z_erofs_vle_work_builder *bl,
281                                      struct address_space *mc,
282                                      pgoff_t index,
283                                      unsigned int clusterpages,
284                                      enum z_erofs_cache_alloctype type,
285                                      struct list_head *pagepool,
286                                      gfp_t gfp)
287 {
288         /* nowhere to load compressed pages from */
289 }
290 #endif
291
292 /* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
293 static inline bool try_to_reuse_as_compressed_page(
294         struct z_erofs_vle_work_builder *b,
295         struct page *page)
296 {
297         while (b->compressed_deficit) {
298                 --b->compressed_deficit;
299                 if (!cmpxchg(b->compressed_pages++, NULL, page))
300                         return true;
301         }
302
303         return false;
304 }
305
306 /* callers must hold work->lock */
307 static int z_erofs_vle_work_add_page(
308         struct z_erofs_vle_work_builder *builder,
309         struct page *page,
310         enum z_erofs_page_type type)
311 {
312         int ret;
313         bool occupied;
314
315         /* give priority to the compressed data storage */
316         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
317             type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
318             try_to_reuse_as_compressed_page(builder, page))
319                 return 0;
320
321         ret = z_erofs_pagevec_ctor_enqueue(&builder->vector,
322                                            page, type, &occupied);
323         builder->work->vcnt += (unsigned int)ret;
324
325         return ret ? 0 : -EAGAIN;
326 }
327
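/*
 * Try to attach this workgroup to the chain owned by the current request:
 * an unclaimed (NIL) workgroup is taken over entirely (PRIMARY_FOLLOWED);
 * if it is the open tail of another chain, the remaining works are hooked
 * behind it (PRIMARY_HOOKED); otherwise it stays a plain PRIMARY work.
 */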
328 static enum z_erofs_vle_work_role
329 try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
330                        z_erofs_vle_owned_workgrp_t *owned_head,
331                        bool *hosted)
332 {
333         DBG_BUGON(*hosted);
334
335         /* let's claim the following types of workgroups */
336 retry:
337         if (grp->next == Z_EROFS_VLE_WORKGRP_NIL) {
338                 /* type 1, nil workgroup */
339                 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_NIL,
340                             *owned_head) != Z_EROFS_VLE_WORKGRP_NIL)
341                         goto retry;
342
343                 *owned_head = &grp->next;
344                 *hosted = true;
345                 /* lucky, I am the followee :) */
346                 return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
347
348         } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
349                 /*
350                  * type 2, link to the end of an existing open chain,
351                  * be careful that its submission itself is governed
352                  * by the original owned chain.
353                  */
354                 if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
355                             *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL)
356                         goto retry;
357                 *owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
358                 return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
359         }
360
361         return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
362 }
363
364 struct z_erofs_vle_work_finder {
365         struct super_block *sb;
366         pgoff_t idx;
367         unsigned int pageofs;
368
369         struct z_erofs_vle_workgroup **grp_ret;
370         enum z_erofs_vle_work_role *role;
371         z_erofs_vle_owned_workgrp_t *owned_head;
372         bool *hosted;
373 };
374
375 static struct z_erofs_vle_work *
376 z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f)
377 {
378         bool tag, primary;
379         struct erofs_workgroup *egrp;
380         struct z_erofs_vle_workgroup *grp;
381         struct z_erofs_vle_work *work;
382
383         egrp = erofs_find_workgroup(f->sb, f->idx, &tag);
384         if (!egrp) {
385                 *f->grp_ret = NULL;
386                 return NULL;
387         }
388
389         grp = container_of(egrp, struct z_erofs_vle_workgroup, obj);
390         *f->grp_ret = grp;
391
392         work = z_erofs_vle_grab_work(grp, f->pageofs);
393         /* if multiref is disabled, `primary' is always true */
394         primary = true;
395
396         if (work->pageofs != f->pageofs) {
397                 DBG_BUGON(1);
398                 erofs_workgroup_put(egrp);
399                 return ERR_PTR(-EIO);
400         }
401
402         /*
403          * lock must be taken first to avoid grp->next == NIL between
404          * claiming workgroup and adding pages:
405          *                        grp->next != NIL
406          *   grp->next = NIL
407          *   mutex_unlock_all
408          *                        mutex_lock(&work->lock)
409          *                        add all pages to pagevec
410          *
411          * [correct locking case 1]:
412          *   mutex_lock(grp->work[a])
413          *   ...
414          *   mutex_lock(grp->work[b])     mutex_lock(grp->work[c])
415          *   ...                          *role = SECONDARY
416          *                                add all pages to pagevec
417          *                                ...
418          *                                mutex_unlock(grp->work[c])
419          *   mutex_lock(grp->work[c])
420          *   ...
421          *   grp->next = NIL
422          *   mutex_unlock_all
423          *
424          * [correct locking case 2]:
425          *   mutex_lock(grp->work[b])
426          *   ...
427          *   mutex_lock(grp->work[a])
428          *   ...
429          *   mutex_lock(grp->work[c])
430          *   ...
431          *   grp->next = NIL
432          *   mutex_unlock_all
433          *                                mutex_lock(grp->work[a])
434          *                                *role = PRIMARY_OWNER
435          *                                add all pages to pagevec
436          *                                ...
437          */
438         mutex_lock(&work->lock);
439
440         *f->hosted = false;
441         if (!primary)
442                 *f->role = Z_EROFS_VLE_WORK_SECONDARY;
443         else    /* claim the workgroup if possible */
444                 *f->role = try_to_claim_workgroup(grp, f->owned_head,
445                                                   f->hosted);
446         return work;
447 }
448
449 static struct z_erofs_vle_work *
450 z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
451                           struct erofs_map_blocks *map)
452 {
453         bool gnew = false;
454         struct z_erofs_vle_workgroup *grp = *f->grp_ret;
455         struct z_erofs_vle_work *work;
456
457         /* since multiref is disabled, grp must be NULL here (the lookup failed) */
458         if (unlikely(grp)) {
459                 DBG_BUGON(1);
460                 return ERR_PTR(-EINVAL);
461         }
462
463         /* no available workgroup, let's allocate one */
464         grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
465         if (unlikely(!grp))
466                 return ERR_PTR(-ENOMEM);
467
468         init_always(grp);
469         grp->obj.index = f->idx;
470         grp->llen = map->m_llen;
471
472         z_erofs_vle_set_workgrp_fmt(grp, (map->m_flags & EROFS_MAP_ZIPPED) ?
473                                     Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
474                                     Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
475
476         if (map->m_flags & EROFS_MAP_FULL_MAPPED)
477                 grp->flags |= Z_EROFS_VLE_WORKGRP_FULL_LENGTH;
478
479         /* new workgrps have been claimed as type 1 */
480         WRITE_ONCE(grp->next, *f->owned_head);
481         /* primary and followed work for all new workgrps */
482         *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
483         /* it should be submitted by ourselves */
484         *f->hosted = true;
485
486         gnew = true;
487         work = z_erofs_vle_grab_primary_work(grp);
488         work->pageofs = f->pageofs;
489
490         /*
491          * lock all primary followed works before they become visible to others;
492          * note that mutex_trylock *never* fails for a new workgroup.
493          */
494         mutex_trylock(&work->lock);
495
496         if (gnew) {
497                 int err = erofs_register_workgroup(f->sb, &grp->obj, 0);
498
499                 if (err) {
500                         mutex_unlock(&work->lock);
501                         kmem_cache_free(z_erofs_workgroup_cachep, grp);
502                         return ERR_PTR(-EAGAIN);
503                 }
504         }
505
506         *f->owned_head = &grp->next;
507         *f->grp_ret = grp;
508         return work;
509 }
510
511 #define builder_is_hooked(builder) \
512         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
513
514 #define builder_is_followed(builder) \
515         ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
516
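/*
 * Look up (or register) the work that covers map->m_pa, enlarge the
 * workgroup's logical length if needed, and prepare the builder's inline
 * pagevec plus the compressed page slots used for in-place decompression.
 */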
517 static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
518                                        struct super_block *sb,
519                                        struct erofs_map_blocks *map,
520                                        z_erofs_vle_owned_workgrp_t *owned_head)
521 {
522         const unsigned int clusterpages = erofs_clusterpages(EROFS_SB(sb));
523         struct z_erofs_vle_workgroup *grp;
524         const struct z_erofs_vle_work_finder finder = {
525                 .sb = sb,
526                 .idx = erofs_blknr(map->m_pa),
527                 .pageofs = map->m_la & ~PAGE_MASK,
528                 .grp_ret = &grp,
529                 .role = &builder->role,
530                 .owned_head = owned_head,
531                 .hosted = &builder->hosted
532         };
533         struct z_erofs_vle_work *work;
534
535         DBG_BUGON(builder->work);
536
537         /* must be Z_EROFS_VLE_WORKGRP_TAIL or a valid next chained workgroup */
538         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_NIL);
539         DBG_BUGON(*owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
540
541         DBG_BUGON(erofs_blkoff(map->m_pa));
542
543 repeat:
544         work = z_erofs_vle_work_lookup(&finder);
545         if (work) {
546                 unsigned int orig_llen;
547
548                 /* increase workgroup `llen' if needed */
549                 while ((orig_llen = READ_ONCE(grp->llen)) < map->m_llen &&
550                        orig_llen != cmpxchg_relaxed(&grp->llen,
551                                                     orig_llen, map->m_llen))
552                         cpu_relax();
553                 goto got_it;
554         }
555
556         work = z_erofs_vle_work_register(&finder, map);
557         if (unlikely(work == ERR_PTR(-EAGAIN)))
558                 goto repeat;
559
560         if (IS_ERR(work))
561                 return PTR_ERR(work);
562 got_it:
563         z_erofs_pagevec_ctor_init(&builder->vector, Z_EROFS_NR_INLINE_PAGEVECS,
564                                   work->pagevec, work->vcnt);
565
566         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
567                 /* enable possible in-place decompression */
568                 builder->compressed_pages = grp->compressed_pages;
569                 builder->compressed_deficit = clusterpages;
570         } else {
571                 builder->compressed_pages = NULL;
572                 builder->compressed_deficit = 0;
573         }
574
575         builder->grp = grp;
576         builder->work = work;
577         return 0;
578 }
579
580 /*
581  * keep in mind that unreferenced workgroups are freed
582  * only after an RCU grace period, so rcu_read_lock() can
583  * prevent a workgroup from being freed.
584  */
585 static void z_erofs_rcu_callback(struct rcu_head *head)
586 {
587         struct z_erofs_vle_work *work = container_of(head,
588                 struct z_erofs_vle_work, rcu);
589         struct z_erofs_vle_workgroup *grp =
590                 z_erofs_vle_work_workgroup(work, true);
591
592         kmem_cache_free(z_erofs_workgroup_cachep, grp);
593 }
594
595 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
596 {
597         struct z_erofs_vle_workgroup *const vgrp = container_of(grp,
598                 struct z_erofs_vle_workgroup, obj);
599         struct z_erofs_vle_work *const work = &vgrp->work;
600
601         call_rcu(&work->rcu, z_erofs_rcu_callback);
602 }
603
604 static void
605 __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
606                            struct z_erofs_vle_work *work __maybe_unused)
607 {
608         erofs_workgroup_put(&grp->obj);
609 }
610
611 static void z_erofs_vle_work_release(struct z_erofs_vle_work *work)
612 {
613         struct z_erofs_vle_workgroup *grp =
614                 z_erofs_vle_work_workgroup(work, true);
615
616         __z_erofs_vle_work_release(grp, work);
617 }
618
619 static inline bool
620 z_erofs_vle_work_iter_end(struct z_erofs_vle_work_builder *builder)
621 {
622         struct z_erofs_vle_work *work = builder->work;
623
624         if (!work)
625                 return false;
626
627         z_erofs_pagevec_ctor_exit(&builder->vector, false);
628         mutex_unlock(&work->lock);
629
630         /*
631          * once all pending pages are added, don't hold the work reference
632          * any longer if the current work isn't hosted by ourselves.
633          */
634         if (!builder->hosted)
635                 __z_erofs_vle_work_release(builder->grp, work);
636
637         builder->work = NULL;
638         builder->grp = NULL;
639         return true;
640 }
641
642 static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
643                                                gfp_t gfp)
644 {
645         struct page *page = erofs_allocpage(pagepool, gfp);
646
647         if (unlikely(!page))
648                 return NULL;
649
650         page->mapping = Z_EROFS_MAPPING_STAGING;
651         return page;
652 }
653
654 struct z_erofs_vle_frontend {
655         struct inode *const inode;
656
657         struct z_erofs_vle_work_builder builder;
658         struct erofs_map_blocks map;
659
660         z_erofs_vle_owned_workgrp_t owned_head;
661
662         /* used for applying cache strategy on the fly */
663         bool backmost;
664         erofs_off_t headoffset;
665 };
666
667 #define VLE_FRONTEND_INIT(__i) { \
668         .inode = __i, \
669         .map = { \
670                 .m_llen = 0, \
671                 .m_plen = 0, \
672                 .mpage = NULL \
673         }, \
674         .builder = VLE_WORK_BUILDER_INIT(), \
675         .owned_head = Z_EROFS_VLE_WORKGRP_TAIL, \
676         .backmost = true, }
677
678 #ifdef EROFS_FS_HAS_MANAGED_CACHE
679 static inline bool
680 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
681 {
682         if (fe->backmost)
683                 return true;
684
685         if (EROFS_FS_ZIP_CACHE_LVL >= 2)
686                 return la < fe->headoffset;
687
688         return false;
689 }
690 #else
691 static inline bool
692 should_alloc_managed_pages(struct z_erofs_vle_frontend *fe, erofs_off_t la)
693 {
694         return false;
695 }
696 #endif
697
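/*
 * Process a single locked file page: walk it from its end towards offset 0,
 * switching map_blocks extents (and therefore works) as needed and zeroing
 * any unmapped range, so one page may be split across several works.
 */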
698 static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
699                                 struct page *page,
700                                 struct list_head *page_pool)
701 {
702         struct super_block *const sb = fe->inode->i_sb;
703         struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
704         struct erofs_map_blocks *const map = &fe->map;
705         struct z_erofs_vle_work_builder *const builder = &fe->builder;
706         const loff_t offset = page_offset(page);
707
708         bool tight = builder_is_hooked(builder);
709         struct z_erofs_vle_work *work = builder->work;
710
711         enum z_erofs_cache_alloctype cache_strategy;
712         enum z_erofs_page_type page_type;
713         unsigned int cur, end, spiltted, index;
714         int err = 0;
715
716         /* register locked file pages as online pages in pack */
717         z_erofs_onlinepage_init(page);
718
719         spiltted = 0;
720         end = PAGE_SIZE;
721 repeat:
722         cur = end - 1;
723
724         /* lucky, within the range of the current map_blocks */
725         if (offset + cur >= map->m_la &&
726             offset + cur < map->m_la + map->m_llen) {
727                 /* didn't get a valid unzip work previously (very rare) */
728                 if (!builder->work)
729                         goto restart_now;
730                 goto hitted;
731         }
732
733         /* go on with the next map_blocks */
734         debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
735
736         if (z_erofs_vle_work_iter_end(builder))
737                 fe->backmost = false;
738
739         map->m_la = offset + cur;
740         map->m_llen = 0;
741         err = z_erofs_map_blocks_iter(fe->inode, map, 0);
742         if (unlikely(err))
743                 goto err_out;
744
745 restart_now:
746         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
747                 goto hitted;
748
749         DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
750         DBG_BUGON(erofs_blkoff(map->m_pa));
751
752         err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
753         if (unlikely(err))
754                 goto err_out;
755
756         /* preload all compressed pages (the role may be downgraded if necessary) */
757         if (should_alloc_managed_pages(fe, map->m_la))
758                 cache_strategy = DELAYEDALLOC;
759         else
760                 cache_strategy = DONTALLOC;
761
762         preload_compressed_pages(builder, MNGD_MAPPING(sbi),
763                                  map->m_pa / PAGE_SIZE,
764                                  map->m_plen / PAGE_SIZE,
765                                  cache_strategy, page_pool, GFP_KERNEL);
766
767         tight &= builder_is_hooked(builder);
768         work = builder->work;
769 hitted:
770         cur = end - min_t(unsigned int, offset + end - map->m_la, end);
771         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
772                 zero_user_segment(page, cur, end);
773                 goto next_part;
774         }
775
776         /* let's derive page type */
777         page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
778                 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
779                         (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
780                                 Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
781
782         if (cur)
783                 tight &= builder_is_followed(builder);
784
785 retry:
786         err = z_erofs_vle_work_add_page(builder, page, page_type);
787         /* should allocate an additional staging page for pagevec */
788         if (err == -EAGAIN) {
789                 struct page *const newpage =
790                         __stagingpage_alloc(page_pool, GFP_NOFS);
791
792                 err = z_erofs_vle_work_add_page(builder, newpage,
793                                                 Z_EROFS_PAGE_TYPE_EXCLUSIVE);
794                 if (likely(!err))
795                         goto retry;
796         }
797
798         if (unlikely(err))
799                 goto err_out;
800
801         index = page->index - map->m_la / PAGE_SIZE;
802
803         /* FIXME! avoid the last redundant fixup & endio */
804         z_erofs_onlinepage_fixup(page, index, true);
805
806         /* bump up the number of split parts of a page */
807         ++spiltted;
808         /* also update nr_pages */
809         work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
810 next_part:
811         /* can be used for verification */
812         map->m_llen = offset + cur - map->m_la;
813
814         end = cur;
815         if (end > 0)
816                 goto repeat;
817
818 out:
819         /* FIXME! avoid the last redundant fixup & endio */
820         z_erofs_onlinepage_endio(page);
821
822         debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
823                 __func__, page, spiltted, map->m_llen);
824         return err;
825
826         /* if some error occurred while processing this page */
827 err_out:
828         SetPageError(page);
829         goto out;
830 }
831
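/*
 * Adjust the pending bio count by `bios'; once it reaches zero, either wake
 * up the foreground waiter or queue the background decompression work,
 * depending on the tag folded into the private pointer.
 */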
832 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
833 {
834         tagptr1_t t = tagptr_init(tagptr1_t, ptr);
835         struct z_erofs_vle_unzip_io *io = tagptr_unfold_ptr(t);
836         bool background = tagptr_unfold_tags(t);
837
838         if (!background) {
839                 unsigned long flags;
840
841                 spin_lock_irqsave(&io->u.wait.lock, flags);
842                 if (!atomic_add_return(bios, &io->pending_bios))
843                         wake_up_locked(&io->u.wait);
844                 spin_unlock_irqrestore(&io->u.wait.lock, flags);
845                 return;
846         }
847
848         if (!atomic_add_return(bios, &io->pending_bios))
849                 queue_work(z_erofs_workqueue, &io->u.work);
850 }
851
852 static inline void z_erofs_vle_read_endio(struct bio *bio)
853 {
854         struct erofs_sb_info *sbi = NULL;
855         blk_status_t err = bio->bi_status;
856         struct bio_vec *bvec;
857         struct bvec_iter_all iter_all;
858
859         bio_for_each_segment_all(bvec, bio, iter_all) {
860                 struct page *page = bvec->bv_page;
861                 bool cachemngd = false;
862
863                 DBG_BUGON(PageUptodate(page));
864                 DBG_BUGON(!page->mapping);
865
866                 if (unlikely(!sbi && !z_erofs_page_is_staging(page))) {
867                         sbi = EROFS_SB(page->mapping->host->i_sb);
868
869                         if (time_to_inject(sbi, FAULT_READ_IO)) {
870                                 erofs_show_injection_info(FAULT_READ_IO);
871                                 err = BLK_STS_IOERR;
872                         }
873                 }
874
875                 /* sbi should already have been obtained if the page is managed */
876                 if (sbi)
877                         cachemngd = erofs_page_is_managed(sbi, page);
878
879                 if (unlikely(err))
880                         SetPageError(page);
881                 else if (cachemngd)
882                         SetPageUptodate(page);
883
884                 if (cachemngd)
885                         unlock_page(page);
886         }
887
888         z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
889         bio_put(bio);
890 }
891
892 static struct page *z_pagemap_global[Z_EROFS_VLE_VMAP_GLOBAL_PAGES];
893 static DEFINE_MUTEX(z_pagemap_global_lock);
894
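/*
 * Decompress one workgroup: gather the output pages recorded in the inline
 * pagevec together with any in-place file pages found in compressed_pages[],
 * run the decompressor, then recycle staging pages and end all online pages.
 * The temporary pages[] array lives on the stack, in a shared global map,
 * or in a kvmalloc'ed buffer depending on nr_pages.
 */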
895 static int z_erofs_vle_unzip(struct super_block *sb,
896                              struct z_erofs_vle_workgroup *grp,
897                              struct list_head *page_pool)
898 {
899         struct erofs_sb_info *const sbi = EROFS_SB(sb);
900         const unsigned int clusterpages = erofs_clusterpages(sbi);
901
902         struct z_erofs_pagevec_ctor ctor;
903         unsigned int nr_pages;
904         unsigned int sparsemem_pages = 0;
905         struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
906         struct page **pages, **compressed_pages, *page;
907         unsigned int algorithm;
908         unsigned int i, outputsize;
909
910         enum z_erofs_page_type page_type;
911         bool overlapped, partial;
912         struct z_erofs_vle_work *work;
913         int err;
914
915         might_sleep();
916         work = z_erofs_vle_grab_primary_work(grp);
917         DBG_BUGON(!READ_ONCE(work->nr_pages));
918
919         mutex_lock(&work->lock);
920         nr_pages = work->nr_pages;
921
922         if (likely(nr_pages <= Z_EROFS_VLE_VMAP_ONSTACK_PAGES))
923                 pages = pages_onstack;
924         else if (nr_pages <= Z_EROFS_VLE_VMAP_GLOBAL_PAGES &&
925                  mutex_trylock(&z_pagemap_global_lock))
926                 pages = z_pagemap_global;
927         else {
928 repeat:
929                 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
930                                        GFP_KERNEL);
931
932                 /* fallback to global pagemap for the lowmem scenario */
933                 if (unlikely(!pages)) {
934                         if (nr_pages > Z_EROFS_VLE_VMAP_GLOBAL_PAGES)
935                                 goto repeat;
936                         else {
937                                 mutex_lock(&z_pagemap_global_lock);
938                                 pages = z_pagemap_global;
939                         }
940                 }
941         }
942
943         for (i = 0; i < nr_pages; ++i)
944                 pages[i] = NULL;
945
946         err = 0;
947         z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
948                                   work->pagevec, 0);
949
950         for (i = 0; i < work->vcnt; ++i) {
951                 unsigned int pagenr;
952
953                 page = z_erofs_pagevec_ctor_dequeue(&ctor, &page_type);
954
955                 /* all pages in pagevec ought to be valid */
956                 DBG_BUGON(!page);
957                 DBG_BUGON(!page->mapping);
958
959                 if (z_erofs_put_stagingpage(page_pool, page))
960                         continue;
961
962                 if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
963                         pagenr = 0;
964                 else
965                         pagenr = z_erofs_onlinepage_index(page);
966
967                 DBG_BUGON(pagenr >= nr_pages);
968
969                 /*
970                  * currently EROFS doesn't support multiref (dedup),
971                  * so error out if a multiref page is found here.
972                  */
973                 if (pages[pagenr]) {
974                         DBG_BUGON(1);
975                         SetPageError(pages[pagenr]);
976                         z_erofs_onlinepage_endio(pages[pagenr]);
977                         err = -EIO;
978                 }
979                 pages[pagenr] = page;
980         }
981         sparsemem_pages = i;
982
983         z_erofs_pagevec_ctor_exit(&ctor, true);
984
985         overlapped = false;
986         compressed_pages = grp->compressed_pages;
987
988         for (i = 0; i < clusterpages; ++i) {
989                 unsigned int pagenr;
990
991                 page = compressed_pages[i];
992
993                 /* all compressed pages ought to be valid */
994                 DBG_BUGON(!page);
995                 DBG_BUGON(!page->mapping);
996
997                 if (!z_erofs_page_is_staging(page)) {
998                         if (erofs_page_is_managed(sbi, page)) {
999                                 if (unlikely(!PageUptodate(page)))
1000                                         err = -EIO;
1001                                 continue;
1002                         }
1003
1004                         /*
1005                          * only non-head pages can be selected
1006                          * for in-place decompression
1007                          */
1008                         pagenr = z_erofs_onlinepage_index(page);
1009
1010                         DBG_BUGON(pagenr >= nr_pages);
1011                         if (pages[pagenr]) {
1012                                 DBG_BUGON(1);
1013                                 SetPageError(pages[pagenr]);
1014                                 z_erofs_onlinepage_endio(pages[pagenr]);
1015                                 err = -EIO;
1016                         }
1017                         ++sparsemem_pages;
1018                         pages[pagenr] = page;
1019
1020                         overlapped = true;
1021                 }
1022
1023                 /* PG_error needs checking for in-place and staging pages */
1024                 if (unlikely(PageError(page))) {
1025                         DBG_BUGON(PageUptodate(page));
1026                         err = -EIO;
1027                 }
1028         }
1029
1030         if (unlikely(err))
1031                 goto out;
1032
1033         if (nr_pages << PAGE_SHIFT >= work->pageofs + grp->llen) {
1034                 outputsize = grp->llen;
1035                 partial = !(grp->flags & Z_EROFS_VLE_WORKGRP_FULL_LENGTH);
1036         } else {
1037                 outputsize = (nr_pages << PAGE_SHIFT) - work->pageofs;
1038                 partial = true;
1039         }
1040
1041         if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN)
1042                 algorithm = Z_EROFS_COMPRESSION_SHIFTED;
1043         else
1044                 algorithm = Z_EROFS_COMPRESSION_LZ4;
1045
1046         err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
1047                                         .sb = sb,
1048                                         .in = compressed_pages,
1049                                         .out = pages,
1050                                         .pageofs_out = work->pageofs,
1051                                         .inputsize = PAGE_SIZE,
1052                                         .outputsize = outputsize,
1053                                         .alg = algorithm,
1054                                         .inplace_io = overlapped,
1055                                         .partial_decoding = partial
1056                                  }, page_pool);
1057
1058 out:
1059         /* must handle all compressed pages before ending the output pages */
1060         for (i = 0; i < clusterpages; ++i) {
1061                 page = compressed_pages[i];
1062
1063                 if (erofs_page_is_managed(sbi, page))
1064                         continue;
1065
1066                 /* recycle all individual staging pages */
1067                 (void)z_erofs_put_stagingpage(page_pool, page);
1068
1069                 WRITE_ONCE(compressed_pages[i], NULL);
1070         }
1071
1072         for (i = 0; i < nr_pages; ++i) {
1073                 page = pages[i];
1074                 if (!page)
1075                         continue;
1076
1077                 DBG_BUGON(!page->mapping);
1078
1079                 /* recycle all individual staging pages */
1080                 if (z_erofs_put_stagingpage(page_pool, page))
1081                         continue;
1082
1083                 if (unlikely(err < 0))
1084                         SetPageError(page);
1085
1086                 z_erofs_onlinepage_endio(page);
1087         }
1088
1089         if (pages == z_pagemap_global)
1090                 mutex_unlock(&z_pagemap_global_lock);
1091         else if (unlikely(pages != pages_onstack))
1092                 kvfree(pages);
1093
1094         work->nr_pages = 0;
1095         work->vcnt = 0;
1096
1097         /* all work locks MUST be taken before the following line */
1098
1099         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_NIL);
1100
1101         /* all work locks SHOULD be released right now */
1102         mutex_unlock(&work->lock);
1103
1104         z_erofs_vle_work_release(work);
1105         return err;
1106 }
1107
1108 static void z_erofs_vle_unzip_all(struct super_block *sb,
1109                                   struct z_erofs_vle_unzip_io *io,
1110                                   struct list_head *page_pool)
1111 {
1112         z_erofs_vle_owned_workgrp_t owned = io->head;
1113
1114         while (owned != Z_EROFS_VLE_WORKGRP_TAIL_CLOSED) {
1115                 struct z_erofs_vle_workgroup *grp;
1116
1117                 /* it is impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_TAIL */
1118                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_TAIL);
1119
1120                 /* it is impossible that 'owned' equals Z_EROFS_VLE_WORKGRP_NIL */
1121                 DBG_BUGON(owned == Z_EROFS_VLE_WORKGRP_NIL);
1122
1123                 grp = container_of(owned, struct z_erofs_vle_workgroup, next);
1124                 owned = READ_ONCE(grp->next);
1125
1126                 z_erofs_vle_unzip(sb, grp, page_pool);
1127         }
1128 }
1129
1130 static void z_erofs_vle_unzip_wq(struct work_struct *work)
1131 {
1132         struct z_erofs_vle_unzip_io_sb *iosb = container_of(work,
1133                 struct z_erofs_vle_unzip_io_sb, io.u.work);
1134         LIST_HEAD(page_pool);
1135
1136         DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1137         z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
1138
1139         put_pages_list(&page_pool);
1140         kvfree(iosb);
1141 }
1142
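/*
 * Pick the compressed page for slot `nr' of a workgroup being submitted:
 * reuse the cached page if it is still attached to the managed mapping,
 * otherwise allocate a staging page (optionally adding it to the managed
 * cache) and swap it into compressed_pages[nr]; returning NULL means the
 * page is already up-to-date and no io is needed.
 */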
1143 static struct page *
1144 pickup_page_for_submission(struct z_erofs_vle_workgroup *grp,
1145                            unsigned int nr,
1146                            struct list_head *pagepool,
1147                            struct address_space *mc,
1148                            gfp_t gfp)
1149 {
1150         /* determined at compile time to avoid too many #ifdefs */
1151         const bool nocache = __builtin_constant_p(mc) ? !mc : false;
1152         const pgoff_t index = grp->obj.index;
1153         bool tocache = false;
1154
1155         struct address_space *mapping;
1156         struct page *oldpage, *page;
1157
1158         compressed_page_t t;
1159         int justfound;
1160
1161 repeat:
1162         page = READ_ONCE(grp->compressed_pages[nr]);
1163         oldpage = page;
1164
1165         if (!page)
1166                 goto out_allocpage;
1167
1168         /*
1169          * the cached page has not been allocated and
1170          * a placeholder is out there, prepare it now.
1171          */
1172         if (!nocache && page == PAGE_UNALLOCATED) {
1173                 tocache = true;
1174                 goto out_allocpage;
1175         }
1176
1177         /* process the target tagged pointer */
1178         t = tagptr_init(compressed_page_t, page);
1179         justfound = tagptr_unfold_tags(t);
1180         page = tagptr_unfold_ptr(t);
1181
1182         mapping = READ_ONCE(page->mapping);
1183
1184         /*
1185          * if managed cache is disabled, there is no way to
1186          * get such a cached-like page.
1187          */
1188         if (nocache) {
1189                 /* if managed cache is disabled, `justfound' is impossible here */
1190                 DBG_BUGON(justfound);
1191
1192                 /* and it should be locked, not uptodate, and not truncated */
1193                 DBG_BUGON(!PageLocked(page));
1194                 DBG_BUGON(PageUptodate(page));
1195                 DBG_BUGON(!mapping);
1196                 goto out;
1197         }
1198
1199         /*
1200          * unmanaged (file) pages are all locked solidly,
1201          * therefore it is impossible for `mapping' to be NULL.
1202          */
1203         if (mapping && mapping != mc)
1204                 /* ought to be unmanaged pages */
1205                 goto out;
1206
1207         lock_page(page);
1208
1209         /* only true if page reclaim goes wrong, should never happen */
1210         DBG_BUGON(justfound && PagePrivate(page));
1211
1212         /* the page is still in the managed cache */
1213         if (page->mapping == mc) {
1214                 WRITE_ONCE(grp->compressed_pages[nr], page);
1215
1216                 ClearPageError(page);
1217                 if (!PagePrivate(page)) {
1218                         /*
1219                          * under the current restriction, !PagePrivate(page)
1220                          * is impossible if the page were already
1221                          * in compressed_pages[].
1222                          */
1223                         DBG_BUGON(!justfound);
1224
1225                         justfound = 0;
1226                         set_page_private(page, (unsigned long)grp);
1227                         SetPagePrivate(page);
1228                 }
1229
1230                 /* no need to submit io if it is already up-to-date */
1231                 if (PageUptodate(page)) {
1232                         unlock_page(page);
1233                         page = NULL;
1234                 }
1235                 goto out;
1236         }
1237
1238         /*
1239          * the managed page has been truncated, it's unsafe to
1240          * reuse this one, let's allocate a new cache-managed page.
1241          */
1242         DBG_BUGON(page->mapping);
1243         DBG_BUGON(!justfound);
1244
1245         tocache = true;
1246         unlock_page(page);
1247         put_page(page);
1248 out_allocpage:
1249         page = __stagingpage_alloc(pagepool, gfp);
1250         if (oldpage != cmpxchg(&grp->compressed_pages[nr], oldpage, page)) {
1251                 list_add(&page->lru, pagepool);
1252                 cpu_relax();
1253                 goto repeat;
1254         }
1255         if (nocache || !tocache)
1256                 goto out;
1257         if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1258                 page->mapping = Z_EROFS_MAPPING_STAGING;
1259                 goto out;
1260         }
1261
1262         set_page_private(page, (unsigned long)grp);
1263         SetPagePrivate(page);
1264 out:    /* the only exit (for tracing and debugging) */
1265         return page;
1266 }
1267
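/*
 * Set up an unzip io descriptor: foreground requests reuse the caller-provided
 * descriptor with a waitqueue, while background ones get a kvzalloc'ed io_sb
 * whose work item runs z_erofs_vle_unzip_wq().
 */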
1268 static struct z_erofs_vle_unzip_io *
1269 jobqueue_init(struct super_block *sb,
1270               struct z_erofs_vle_unzip_io *io,
1271               bool foreground)
1272 {
1273         struct z_erofs_vle_unzip_io_sb *iosb;
1274
1275         if (foreground) {
1276                 /* waitqueue available for foreground io */
1277                 DBG_BUGON(!io);
1278
1279                 init_waitqueue_head(&io->u.wait);
1280                 atomic_set(&io->pending_bios, 0);
1281                 goto out;
1282         }
1283
1284         iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
1285         DBG_BUGON(!iosb);
1286
1287         /* initialize fields in the allocated descriptor */
1288         io = &iosb->io;
1289         iosb->sb = sb;
1290         INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
1291 out:
1292         io->head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1293         return io;
1294 }
1295
1296 /* define workgroup jobqueue types */
1297 enum {
1298 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1299         JQ_BYPASS,
1300 #endif
1301         JQ_SUBMIT,
1302         NR_JOBQUEUES,
1303 };
1304
1305 static void *jobqueueset_init(struct super_block *sb,
1306                               z_erofs_vle_owned_workgrp_t qtail[],
1307                               struct z_erofs_vle_unzip_io *q[],
1308                               struct z_erofs_vle_unzip_io *fgq,
1309                               bool forcefg)
1310 {
1311 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1312         /*
1313          * if managed cache is enabled, a bypass jobqueue is needed:
1314          * workgroups in this queue need no reads from the device at all.
1315          */
1316         q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
1317         qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
1318 #endif
1319
1320         q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
1321         qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
1322
1323         return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
1324 }
1325
1326 #ifdef EROFS_FS_HAS_MANAGED_CACHE
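/*
 * Move a fully cached workgroup from the submit chain to the bypass jobqueue
 * while keeping the remaining submit chain intact, so it can be decompressed
 * without issuing any io.
 */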
1327 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1328                                     z_erofs_vle_owned_workgrp_t qtail[],
1329                                     z_erofs_vle_owned_workgrp_t owned_head)
1330 {
1331         z_erofs_vle_owned_workgrp_t *const submit_qtail = qtail[JQ_SUBMIT];
1332         z_erofs_vle_owned_workgrp_t *const bypass_qtail = qtail[JQ_BYPASS];
1333
1334         DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1335         if (owned_head == Z_EROFS_VLE_WORKGRP_TAIL)
1336                 owned_head = Z_EROFS_VLE_WORKGRP_TAIL_CLOSED;
1337
1338         WRITE_ONCE(grp->next, Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1339
1340         WRITE_ONCE(*submit_qtail, owned_head);
1341         WRITE_ONCE(*bypass_qtail, &grp->next);
1342
1343         qtail[JQ_BYPASS] = &grp->next;
1344 }
1345
1346 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1347                                        unsigned int nr_bios,
1348                                        bool force_fg)
1349 {
1350         /*
1351          * although background is preferred, nothing is pending for submission.
1352          * don't kick off the decompression workqueue; drop the jobqueue directly instead.
1353          */
1354         if (force_fg || nr_bios)
1355                 return false;
1356
1357         kvfree(container_of(q[JQ_SUBMIT],
1358                             struct z_erofs_vle_unzip_io_sb,
1359                             io));
1360         return true;
1361 }
1362 #else
1363 static void move_to_bypass_jobqueue(struct z_erofs_vle_workgroup *grp,
1364                                     z_erofs_vle_owned_workgrp_t qtail[],
1365                                     z_erofs_vle_owned_workgrp_t owned_head)
1366 {
1367         /* impossible to bypass submission when managed cache is disabled */
1368         DBG_BUGON(1);
1369 }
1370
1371 static bool postsubmit_is_all_bypassed(struct z_erofs_vle_unzip_io *q[],
1372                                        unsigned int nr_bios,
1373                                        bool force_fg)
1374 {
1375         /* nr_bios should be > 0 if managed cache is disabled */
1376         DBG_BUGON(!nr_bios);
1377         return false;
1378 }
1379 #endif
1380
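/*
 * Walk and close the owned chain, building read bios for every compressed
 * page that still needs io; workgroups whose pages are all available are
 * moved to the bypass jobqueue (with managed cache enabled) so they can be
 * decompressed without touching the device.
 */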
1381 static bool z_erofs_vle_submit_all(struct super_block *sb,
1382                                    z_erofs_vle_owned_workgrp_t owned_head,
1383                                    struct list_head *pagepool,
1384                                    struct z_erofs_vle_unzip_io *fgq,
1385                                    bool force_fg)
1386 {
1387         struct erofs_sb_info *const sbi = EROFS_SB(sb);
1388         const unsigned int clusterpages = erofs_clusterpages(sbi);
1389         const gfp_t gfp = GFP_NOFS;
1390
1391         z_erofs_vle_owned_workgrp_t qtail[NR_JOBQUEUES];
1392         struct z_erofs_vle_unzip_io *q[NR_JOBQUEUES];
1393         struct bio *bio;
1394         void *bi_private;
1395         /* since bio will be NULL, no need to initialize last_index */
1396         pgoff_t uninitialized_var(last_index);
1397         bool force_submit = false;
1398         unsigned int nr_bios;
1399
1400         if (unlikely(owned_head == Z_EROFS_VLE_WORKGRP_TAIL))
1401                 return false;
1402
1403         force_submit = false;
1404         bio = NULL;
1405         nr_bios = 0;
1406         bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
1407
1408         /* by default, all need io submission */
1409         q[JQ_SUBMIT]->head = owned_head;
1410
1411         do {
1412                 struct z_erofs_vle_workgroup *grp;
1413                 pgoff_t first_index;
1414                 struct page *page;
1415                 unsigned int i = 0, bypass = 0;
1416                 int err;
1417
1418                 /* 'owned_head' can never equal either of the following */
1419                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1420                 DBG_BUGON(owned_head == Z_EROFS_VLE_WORKGRP_NIL);
1421
1422                 grp = container_of(owned_head,
1423                                    struct z_erofs_vle_workgroup, next);
1424
1425                 /* close the main owned chain first */
1426                 owned_head = cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL,
1427                                      Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
1428
1429                 first_index = grp->obj.index;
1430                 force_submit |= (first_index != last_index + 1);
1431
1432 repeat:
1433                 page = pickup_page_for_submission(grp, i, pagepool,
1434                                                   MNGD_MAPPING(sbi), gfp);
1435                 if (!page) {
1436                         force_submit = true;
1437                         ++bypass;
1438                         goto skippage;
1439                 }
1440
1441                 if (bio && force_submit) {
1442 submit_bio_retry:
1443                         __submit_bio(bio, REQ_OP_READ, 0);
1444                         bio = NULL;
1445                 }
1446
1447                 if (!bio) {
1448                         bio = erofs_grab_bio(sb, first_index + i,
1449                                              BIO_MAX_PAGES, bi_private,
1450                                              z_erofs_vle_read_endio, true);
1451                         ++nr_bios;
1452                 }
1453
1454                 err = bio_add_page(bio, page, PAGE_SIZE, 0);
1455                 if (err < PAGE_SIZE)
1456                         goto submit_bio_retry;
1457
1458                 force_submit = false;
1459                 last_index = first_index + i;
1460 skippage:
1461                 if (++i < clusterpages)
1462                         goto repeat;
1463
1464                 if (bypass < clusterpages)
1465                         qtail[JQ_SUBMIT] = &grp->next;
1466                 else
1467                         move_to_bypass_jobqueue(grp, qtail, owned_head);
1468         } while (owned_head != Z_EROFS_VLE_WORKGRP_TAIL);
1469
1470         if (bio)
1471                 __submit_bio(bio, REQ_OP_READ, 0);
1472
1473         if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
1474                 return true;
1475
1476         z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
1477         return true;
1478 }
1479
1480 static void z_erofs_submit_and_unzip(struct z_erofs_vle_frontend *f,
1481                                      struct list_head *pagepool,
1482                                      bool force_fg)
1483 {
1484         struct super_block *sb = f->inode->i_sb;
1485         struct z_erofs_vle_unzip_io io[NR_JOBQUEUES];
1486
1487         if (!z_erofs_vle_submit_all(sb, f->owned_head, pagepool, io, force_fg))
1488                 return;
1489
1490 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1491         z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
1492 #endif
1493         if (!force_fg)
1494                 return;
1495
1496         /* wait until all bios are completed */
1497         wait_event(io[JQ_SUBMIT].u.wait,
1498                    !atomic_read(&io[JQ_SUBMIT].pending_bios));
1499
1500         /* let's do synchronous decompression */
1501         z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
1502 }
1503
1504 static int z_erofs_vle_normalaccess_readpage(struct file *file,
1505                                              struct page *page)
1506 {
1507         struct inode *const inode = page->mapping->host;
1508         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1509         int err;
1510         LIST_HEAD(pagepool);
1511
1512         trace_erofs_readpage(page, false);
1513
1514         f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
1515
1516         err = z_erofs_do_read_page(&f, page, &pagepool);
1517         (void)z_erofs_vle_work_iter_end(&f.builder);
1518
1519         /* if some compressed clusters are ready, submit them anyway */
1520         z_erofs_submit_and_unzip(&f, &pagepool, true);
1521
1522         if (err)
1523                 errln("%s, failed to read, err [%d]", __func__, err);
1524
1525         if (f.map.mpage)
1526                 put_page(f.map.mpage);
1527
1528         /* clean up the remaining free pages */
1529         put_pages_list(&pagepool);
1530         return err;
1531 }
1532
1533 static int z_erofs_vle_normalaccess_readpages(struct file *filp,
1534                                               struct address_space *mapping,
1535                                               struct list_head *pages,
1536                                               unsigned int nr_pages)
1537 {
1538         struct inode *const inode = mapping->host;
1539         struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
1540
1541         bool sync = __should_decompress_synchronously(sbi, nr_pages);
1542         struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
1543         gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
1544         struct page *head = NULL;
1545         LIST_HEAD(pagepool);
1546
1547         trace_erofs_readpages(mapping->host, lru_to_page(pages),
1548                               nr_pages, false);
1549
1550         f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
1551
1552         for (; nr_pages; --nr_pages) {
1553                 struct page *page = lru_to_page(pages);
1554
1555                 prefetchw(&page->flags);
1556                 list_del(&page->lru);
1557
1558                 /*
1559                  * A pure asynchronous readahead is indicated if
1560                  * a PG_readahead marked page is hit first.
1561                  * Let's also do asynchronous decompression for this case.
1562                  */
1563                 sync &= !(PageReadahead(page) && !head);
1564
1565                 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
1566                         list_add(&page->lru, &pagepool);
1567                         continue;
1568                 }
1569
1570                 set_page_private(page, (unsigned long)head);
1571                 head = page;
1572         }
1573
1574         while (head) {
1575                 struct page *page = head;
1576                 int err;
1577
1578                 /* traversal in reverse order */
1579                 head = (void *)page_private(page);
1580
1581                 err = z_erofs_do_read_page(&f, page, &pagepool);
1582                 if (err) {
1583                         struct erofs_vnode *vi = EROFS_V(inode);
1584
1585                         errln("%s, readahead error at page %lu of nid %llu",
1586                               __func__, page->index, vi->nid);
1587                 }
1588
1589                 put_page(page);
1590         }
1591
1592         (void)z_erofs_vle_work_iter_end(&f.builder);
1593
1594         z_erofs_submit_and_unzip(&f, &pagepool, sync);
1595
1596         if (f.map.mpage)
1597                 put_page(f.map.mpage);
1598
1599         /* clean up the remaining free pages */
1600         put_pages_list(&pagepool);
1601         return 0;
1602 }
1603
1604 const struct address_space_operations z_erofs_vle_normalaccess_aops = {
1605         .readpage = z_erofs_vle_normalaccess_readpage,
1606         .readpages = z_erofs_vle_normalaccess_readpages,
1607 };
1608