// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"

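/*
 * Duplicate the refcount btree cursor, pointing the new cursor at the
 * same transaction, AGF buffer, and AG as the original.
 */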
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
        struct xfs_btree_cur    *cur)
{
        return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_private.a.agbp, cur->bc_private.a.agno);
}

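/*
 * Point the AGF at a new refcount btree root block, adjust the recorded
 * tree height in both the AGF and the per-AG structure, and log the
 * changed AGF fields.
 */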
STATIC void
xfs_refcountbt_set_root(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr,
        int                     inc)
{
        struct xfs_buf          *agbp = cur->bc_private.a.agbp;
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        xfs_agnumber_t          seqno = be32_to_cpu(agf->agf_seqno);
        struct xfs_perag        *pag = xfs_perag_get(cur->bc_mp, seqno);

        ASSERT(ptr->s != 0);

        agf->agf_refcount_root = ptr->s;
        be32_add_cpu(&agf->agf_refcount_level, inc);
        pag->pagf_refcount_level += inc;
        xfs_perag_put(pag);

        xfs_alloc_log_agf(cur->bc_tp, agbp,
                        XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

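/*
 * Allocate a new refcount btree block, placed near the refcount btree
 * root and charged to the per-AG metadata reservation.  *stat is set to
 * zero if no block could be allocated.
 */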
STATIC int
xfs_refcountbt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
        int                     *stat)
{
        struct xfs_buf          *agbp = cur->bc_private.a.agbp;
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        struct xfs_alloc_arg    args;           /* block allocation args */
        int                     error;          /* error return value */

        memset(&args, 0, sizeof(args));
        args.tp = cur->bc_tp;
        args.mp = cur->bc_mp;
        args.type = XFS_ALLOCTYPE_NEAR_BNO;
        args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
                        xfs_refc_block(args.mp));
        args.oinfo = XFS_RMAP_OINFO_REFC;
        args.minlen = args.maxlen = args.prod = 1;
        args.resv = XFS_AG_RESV_METADATA;

        error = xfs_alloc_vextent(&args);
        if (error)
                goto out_error;
        trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
                        args.agbno, 1);
        if (args.fsbno == NULLFSBLOCK) {
                *stat = 0;
                return 0;
        }
        ASSERT(args.agno == cur->bc_private.a.agno);
        ASSERT(args.len == 1);

        new->s = cpu_to_be32(args.agbno);
        be32_add_cpu(&agf->agf_refcount_blocks, 1);
        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

        *stat = 1;
        return 0;

out_error:
        return error;
}

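/*
 * Free a refcount btree block back to the per-AG metadata reservation
 * and decrement the btree block count in the AGF.
 */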
STATIC int
xfs_refcountbt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = cur->bc_mp;
        struct xfs_buf          *agbp = cur->bc_private.a.agbp;
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        xfs_fsblock_t           fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));

        trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
                        XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
        be32_add_cpu(&agf->agf_refcount_blocks, -1);
        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
        return xfs_free_extent(cur->bc_tp, fsbno, 1, &XFS_RMAP_OINFO_REFC,
                        XFS_AG_RESV_METADATA);
}

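/* Return the minimum number of records per block for this btree level. */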
STATIC int
xfs_refcountbt_get_minrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_refc_mnr[level != 0];
}

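/* Return the maximum number of records per block for this btree level. */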
STATIC int
xfs_refcountbt_get_maxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_refc_mxr[level != 0];
}

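/* Initialize a btree key (the start block) from a refcount record. */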
STATIC void
xfs_refcountbt_init_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        key->refc.rc_startblock = rec->refc.rc_startblock;
}

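/* Initialize the high key (the last block covered) from a refcount record. */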
STATIC void
xfs_refcountbt_init_high_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        __u32                   x;

        x = be32_to_cpu(rec->refc.rc_startblock);
        x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
        key->refc.rc_startblock = cpu_to_be32(x);
}

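/* Fill out an on-disk refcount record from the cursor's in-core record. */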
STATIC void
xfs_refcountbt_init_rec_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec)
{
        rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
        rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
        rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

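/* Initialize a btree pointer from the refcount root stored in the AGF. */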
STATIC void
xfs_refcountbt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

        ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));

        ptr->s = agf->agf_refcount_root;
}

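/*
 * Return the key's start block minus the start block of the cursor's
 * in-core record, as a signed difference for btree searches.
 */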
STATIC int64_t
xfs_refcountbt_key_diff(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *key)
{
        struct xfs_refcount_irec        *rec = &cur->bc_rec.rc;
        struct xfs_refcount_key         *kp = &key->refc;

        return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

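/* Compare two keys by start block, as a signed difference. */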
STATIC int64_t
xfs_refcountbt_diff_two_keys(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
                          be32_to_cpu(k2->refc.rc_startblock);
}

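/*
 * Verify the structure of a refcount btree block: magic number, reflink
 * feature bit, v5 short-form block header, tree level, and the checks
 * done by the generic short-form block verifier.
 */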
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        struct xfs_perag        *pag = bp->b_pag;
        xfs_failaddr_t          fa;
        unsigned int            level;

        if (!xfs_verify_magic(bp, block->bb_magic))
                return __this_address;

        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return __this_address;
        fa = xfs_btree_sblock_v5hdr_verify(bp);
        if (fa)
                return fa;

        level = be16_to_cpu(block->bb_level);
        if (pag && pag->pagf_init) {
                if (level >= pag->pagf_refcount_level)
                        return __this_address;
        } else if (level >= mp->m_refc_maxlevels)
                return __this_address;

        return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

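/*
 * Read verifier: check the CRC and then the block structure, flagging
 * any failure on the buffer.
 */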
STATIC void
xfs_refcountbt_read_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        if (!xfs_btree_sblock_verify_crc(bp))
                xfs_verifier_error(bp, -EFSBADCRC, __this_address);
        else {
                fa = xfs_refcountbt_verify(bp);
                if (fa)
                        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
        }

        if (bp->b_error)
                trace_xfs_btree_corrupt(bp, _RET_IP_);
}

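/*
 * Write verifier: check the block structure and, if it is sane, compute
 * the CRC before the buffer is written out.
 */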
STATIC void
xfs_refcountbt_write_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        fa = xfs_refcountbt_verify(bp);
        if (fa) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        }
        xfs_btree_sblock_calc_crc(bp);
}

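/* Buffer verifier operations for refcount btree blocks. */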
const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
        .name                   = "xfs_refcountbt",
        .magic                  = { 0, cpu_to_be32(XFS_REFC_CRC_MAGIC) },
        .verify_read            = xfs_refcountbt_read_verify,
        .verify_write           = xfs_refcountbt_write_verify,
        .verify_struct          = xfs_refcountbt_verify,
};

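/* Check that two keys are in ascending start-block order. */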
STATIC int
xfs_refcountbt_keys_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        return be32_to_cpu(k1->refc.rc_startblock) <
               be32_to_cpu(k2->refc.rc_startblock);
}

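/* Check that two records are in order and do not overlap. */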
STATIC int
xfs_refcountbt_recs_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *r1,
        union xfs_btree_rec     *r2)
{
        return  be32_to_cpu(r1->refc.rc_startblock) +
                be32_to_cpu(r1->refc.rc_blockcount) <=
                be32_to_cpu(r2->refc.rc_startblock);
}

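/* Refcount btree operations, used by the generic btree code. */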
static const struct xfs_btree_ops xfs_refcountbt_ops = {
        .rec_len                = sizeof(struct xfs_refcount_rec),
        .key_len                = sizeof(struct xfs_refcount_key),

        .dup_cursor             = xfs_refcountbt_dup_cursor,
        .set_root               = xfs_refcountbt_set_root,
        .alloc_block            = xfs_refcountbt_alloc_block,
        .free_block             = xfs_refcountbt_free_block,
        .get_minrecs            = xfs_refcountbt_get_minrecs,
        .get_maxrecs            = xfs_refcountbt_get_maxrecs,
        .init_key_from_rec      = xfs_refcountbt_init_key_from_rec,
        .init_high_key_from_rec = xfs_refcountbt_init_high_key_from_rec,
        .init_rec_from_cur      = xfs_refcountbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_refcountbt_init_ptr_from_cur,
        .key_diff               = xfs_refcountbt_key_diff,
        .buf_ops                = &xfs_refcountbt_buf_ops,
        .diff_two_keys          = xfs_refcountbt_diff_two_keys,
        .keys_inorder           = xfs_refcountbt_keys_inorder,
        .recs_inorder           = xfs_refcountbt_recs_inorder,
};

/*
 * Allocate a new refcount btree cursor.
 */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_buf          *agbp,
        xfs_agnumber_t          agno)
{
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        struct xfs_btree_cur    *cur;

        ASSERT(agno != NULLAGNUMBER);
        ASSERT(agno < mp->m_sb.sb_agcount);
        cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_btnum = XFS_BTNUM_REFC;
        cur->bc_blocklog = mp->m_sb.sb_blocklog;
        cur->bc_ops = &xfs_refcountbt_ops;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

        cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);

        cur->bc_private.a.agbp = agbp;
        cur->bc_private.a.agno = agno;
        cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

        cur->bc_private.a.priv.refc.nr_ops = 0;
        cur->bc_private.a.priv.refc.shape_changes = 0;

        return cur;
}

/*
 * Calculate the number of records in a refcount btree block.
 */
int
xfs_refcountbt_maxrecs(
        int                     blocklen,
        bool                    leaf)
{
        blocklen -= XFS_REFCOUNT_BLOCK_LEN;

        if (leaf)
                return blocklen / sizeof(struct xfs_refcount_rec);
        return blocklen / (sizeof(struct xfs_refcount_key) +
                           sizeof(xfs_refcount_ptr_t));
}

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
        struct xfs_mount                *mp)
{
        mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
                        mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
        struct xfs_mount        *mp,
        unsigned long long      len)
{
        return xfs_btree_calc_size(mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
        struct xfs_mount        *mp,
        xfs_agblock_t           agblocks)
{
        /* Bail out if we're uninitialized, which can happen in mkfs. */
        if (mp->m_refc_mxr[0] == 0)
                return 0;

        return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_agnumber_t          agno,
        xfs_extlen_t            *ask,
        xfs_extlen_t            *used)
{
        struct xfs_buf          *agbp;
        struct xfs_agf          *agf;
        xfs_agblock_t           agblocks;
        xfs_extlen_t            tree_len;
        int                     error;

        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return 0;

        error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
        if (error)
                return error;

        agf = XFS_BUF_TO_AGF(agbp);
        agblocks = be32_to_cpu(agf->agf_length);
        tree_len = be32_to_cpu(agf->agf_refcount_blocks);
        xfs_trans_brelse(tp, agbp);

        /*
         * The log is permanently allocated, so the space it occupies will
         * never be available for the kinds of things that would require btree
         * expansion.  We therefore can pretend the space isn't there.
         */
        if (mp->m_sb.sb_logstart &&
            XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == agno)
                agblocks -= mp->m_sb.sb_logblocks;

        *ask += xfs_refcountbt_max_size(mp, agblocks);
        *used += tree_len;

        return error;
}