1 /* SPDX-License-Identifier: GPL-2.0
3 * linux/drivers/staging/erofs/unzip_vle.h
5 * Copyright (C) 2018 HUAWEI, Inc.
6 * http://www.huawei.com/
7 * Created by Gao Xiang <gaoxiang25@huawei.com>
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of the Linux
11 * distribution for more details.
 */
13 #ifndef __EROFS_FS_UNZIP_VLE_H
14 #define __EROFS_FS_UNZIP_VLE_H
17 #include "unzip_pagevec.h"
19 #define Z_EROFS_NR_INLINE_PAGEVECS 3
/*
22 * Structure fields follow one of the following exclusion rules.
24 * I: Modifiable by initialization/destruction paths and read-only
 *    otherwise (NOTE(review): the rest of this rule list appears to be
 *    missing from this extract)
 */
/*
 * Per-cluster decompression work state, embedded in each workgroup.
 * NOTE(review): the closing "};" is not visible in this extract; the
 * field list may be incomplete.
 */
29 struct z_erofs_vle_work {
32 /* I: decompression offset in page */
33 unsigned short pageofs;
	/* number of output (decompressed) pages — TODO(review): confirm at init site */
34 unsigned short nr_pages;
36 /* L: queued pages in pagevec[] */
41 erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
/* compression format of a workgroup, kept in the low bit of "flags" */
46 #define Z_EROFS_VLE_WORKGRP_FMT_PLAIN 0
47 #define Z_EROFS_VLE_WORKGRP_FMT_LZ4 1
48 #define Z_EROFS_VLE_WORKGRP_FMT_MASK 1
/* flag bit above the format bit — presumably set once the decompressed
 * length ("llen") covers the full extent; TODO(review): confirm in the .c users */
49 #define Z_EROFS_VLE_WORKGRP_FULL_LENGTH 2
/* opaque pointer type used to chain owned workgroups (stored in "next") */
51 typedef void *z_erofs_vle_owned_workgrp_t;
/*
 * A VLE workgroup: one compressed cluster plus its decompression state.
 * NOTE(review): the closing "};" is not visible in this extract.
 */
53 struct z_erofs_vle_workgroup {
	/* generic erofs workgroup header (declared elsewhere in the project) */
54 struct erofs_workgroup obj;
55 struct z_erofs_vle_work work;
57 /* point to next owned_workgrp_t */
58 z_erofs_vle_owned_workgrp_t next;
60 /* compressed pages (including multi-usage pages) */
61 struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
	/* llen: decompressed length; flags: FMT_* / FULL_LENGTH bits */
62 unsigned int llen, flags;
/* sentinel values for the "next" chain field; the magic constants are
 * chosen so they can never collide with valid kernel pointers */
65 /* let's avoid the valid 32-bit kernel addresses */
67 /* the chained workgroup hasn't submitted io (still open) */
68 #define Z_EROFS_VLE_WORKGRP_TAIL ((void *)0x5F0ECAFE)
69 /* the chained workgroup has already submitted io */
70 #define Z_EROFS_VLE_WORKGRP_TAIL_CLOSED ((void *)0x5F0EDEAD)
/* end of a workgroup chain */
72 #define Z_EROFS_VLE_WORKGRP_NIL (NULL)
/* extract the format bit (PLAIN or LZ4) from grp->flags */
74 #define z_erofs_vle_workgrp_fmt(grp) \
75 ((grp)->flags & Z_EROFS_VLE_WORKGRP_FMT_MASK)
/*
 * Set the compression-format bits of grp->flags while preserving all
 * other flag bits.
 * NOTE(review): the "fmt" parameter declaration and the function braces
 * are missing from this extract.
 */
77 static inline void z_erofs_vle_set_workgrp_fmt(
78 struct z_erofs_vle_workgroup *grp,
81 grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
85 /* definitions if multiref is disabled */
/* without multiref every workgroup has exactly one work, so any pageofs
 * lookup degenerates to the primary (embedded) work */
86 #define z_erofs_vle_grab_primary_work(grp) (&(grp)->work)
87 #define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work)
/* map a work back to its containing workgroup; only the primary work is
 * valid here — a non-primary request is a bug */
88 #define z_erofs_vle_work_workgroup(wrk, primary) \
89 ((primary) ? container_of(wrk, \
90 struct z_erofs_vle_workgroup, work) : \
91 ({ BUG(); (void *)NULL; }))
94 #define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup)
/*
 * Per-submission decompression I/O descriptor.
 * NOTE(review): the closing "};" is not visible in this extract.
 */
96 struct z_erofs_vle_unzip_io {
	/* number of bios still in flight for this chain */
97 atomic_t pending_bios;
	/* head of the chained (owned) workgroup list */
98 z_erofs_vle_owned_workgrp_t head;
	/* NOTE(review): "wait"/"work" look like members of an anonymous union
	 * whose braces are missing from this extract — verify against the
	 * original file */
101 wait_queue_head_t wait;
102 struct work_struct work;
/*
 * I/O descriptor together with its owning superblock.
 * NOTE(review): the closing "};" is not visible in this extract.
 */
106 struct z_erofs_vle_unzip_io_sb {
107 struct z_erofs_vle_unzip_io io;
108 struct super_block *sb;
/* "online page" state, packed into a single atomic stored in
 * page_private(): low COUNT_BITS hold a pending count, the remaining
 * high bits hold a sub-index */
111 #define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
112 #define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
113 #define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
/*
116 * waiters (aka. ongoing_packs): # to unlock the page
117 * sub-index: 0 - for partial page, >= 1 full page sub-index
 */
119 typedef atomic_t z_erofs_onlinepage_t;
/* view the page_private word in place as an atomic counter.
 * NOTE(review): the union's second member (used as "u.v" by the helpers
 * below) and the closing "};" are missing from this extract. */
122 union z_erofs_onlinepage_converter {
123 z_erofs_onlinepage_t *o;
/* fetch the sub-index stored in an online page's packed private value.
 * NOTE(review): the function braces are missing from this extract. */
127 static inline unsigned z_erofs_onlinepage_index(struct page *page)
129 union z_erofs_onlinepage_converter u;
131 DBG_BUGON(!PagePrivate(page));
	/* reinterpret &page_private(page) as an atomic_t in place */
132 u.v = &page_private(page);
134 return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
/*
 * Attach packed online-page state to a page: the count starts at 1 so
 * the page cannot be unlocked before submission completes.
 * NOTE(review): the opening of the local union (and its "v" member used
 * by set_page_private) plus the function braces are missing from this
 * extract.
 */
137 static inline void z_erofs_onlinepage_init(struct page *page)
140 z_erofs_onlinepage_t o;
142 /* keep from being unlocked in advance */
143 } u = { .o = ATOMIC_INIT(1) };
145 set_page_private(page, u.v);
147 SetPagePrivate(page);
/*
 * Verify the sub-index recorded for an online page and, when "down" is
 * true, bump its pending count by one, via a cmpxchg loop on the
 * page_private word.
 * NOTE(review): the load of "o", the retry label targeted after a
 * failed cmpxchg, and the function braces are missing from this extract.
 */
150 static inline void z_erofs_onlinepage_fixup(struct page *page,
151 uintptr_t index, bool down)
153 unsigned long *p, o, v, id;
155 p = &page_private(page);
	/* decode the stored sub-index from the packed value */
158 id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
163 DBG_BUGON(id != index);
	/* repack: same index, count incremented by "down" (bool -> 0/1) */
166 v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
167 ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned)down);
	/* another updater raced us: retry with the fresh value */
168 if (cmpxchg(p, o, v) != o)
/*
 * Completion path for an online page: drop one pending count; when the
 * count reaches zero, detach the private state and mark the page
 * uptodate unless an error was flagged on it.
 * NOTE(review): the declaration of "v", the closing braces, and
 * presumably an unlock of the page are missing from this extract —
 * verify against the original file.
 */
172 static inline void z_erofs_onlinepage_endio(struct page *page)
174 union z_erofs_onlinepage_converter u;
177 DBG_BUGON(!PagePrivate(page));
178 u.v = &page_private(page);
180 v = atomic_dec_return(u.o);
181 if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
182 ClearPagePrivate(page);
183 if (!PageError(page))
184 SetPageUptodate(page);
188 debugln("%s, page %p value %x", __func__, page, atomic_read(u.o));
/* page-pointer array kept on the kernel stack for decompression,
 * bounded by both 1/8 of the thread stack and a hard cap of 96 */
191 #define Z_EROFS_VLE_VMAP_ONSTACK_PAGES \
192 min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
/* size of the larger non-stack page array — TODO(review): confirm how
 * the callers use this fallback */
193 #define Z_EROFS_VLE_VMAP_GLOBAL_PAGES 2048