Linux-libre 5.4-rc7-gnu
[librecmc/linux-libre.git] fs/btrfs/block-group.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "misc.h"
4 #include "ctree.h"
5 #include "block-group.h"
6 #include "space-info.h"
7 #include "disk-io.h"
8 #include "free-space-cache.h"
9 #include "free-space-tree.h"
11 #include "volumes.h"
12 #include "transaction.h"
13 #include "ref-verify.h"
14 #include "sysfs.h"
15 #include "tree-log.h"
16 #include "delalloc-space.h"
17
18 /*
19  * Return target flags in extended format or 0 if restripe for this chunk_type
20  * is not in progress
21  *
22  * Should be called with balance_lock held
23  */
24 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
25 {
26         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
27         u64 target = 0;
28
29         if (!bctl)
30                 return 0;
31
32         if (flags & BTRFS_BLOCK_GROUP_DATA &&
33             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
34                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
35         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
36                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
37                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
38         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
39                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
40                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
41         }
42
43         return target;
44 }
45
46 /*
47  * @flags: available profiles in extended format (see ctree.h)
48  *
49  * Return reduced profile in chunk format.  If profile changing is in progress
50  * (either running or paused) picks the target profile (if it's already
51  * available), otherwise falls back to plain reducing.
52  */
53 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
54 {
55         u64 num_devices = fs_info->fs_devices->rw_devices;
56         u64 target;
57         u64 raid_type;
58         u64 allowed = 0;
59
60         /*
61          * See if restripe for this chunk_type is in progress; if so, try to
62          * reduce to the target profile.
63          */
64         spin_lock(&fs_info->balance_lock);
65         target = get_restripe_target(fs_info, flags);
66         if (target) {
67                 /* Pick target profile only if it's already available */
68                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
69                         spin_unlock(&fs_info->balance_lock);
70                         return extended_to_chunk(target);
71                 }
72         }
73         spin_unlock(&fs_info->balance_lock);
74
75         /* First, mask out the RAID levels which aren't possible */
76         for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
77                 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
78                         allowed |= btrfs_raid_array[raid_type].bg_flag;
79         }
80         allowed &= flags;
81
82         if (allowed & BTRFS_BLOCK_GROUP_RAID6)
83                 allowed = BTRFS_BLOCK_GROUP_RAID6;
84         else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
85                 allowed = BTRFS_BLOCK_GROUP_RAID5;
86         else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
87                 allowed = BTRFS_BLOCK_GROUP_RAID10;
88         else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
89                 allowed = BTRFS_BLOCK_GROUP_RAID1;
90         else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
91                 allowed = BTRFS_BLOCK_GROUP_RAID0;
92
93         flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
94
95         return extended_to_chunk(flags | allowed);
96 }
97
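/*
 * Fold the currently available allocation bits for the matching type
 * (data, system or metadata) into @orig_flags and reduce the result to a
 * single profile in chunk format.
 */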
98 static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
99 {
100         unsigned seq;
101         u64 flags;
102
103         do {
104                 flags = orig_flags;
105                 seq = read_seqbegin(&fs_info->profiles_lock);
106
107                 if (flags & BTRFS_BLOCK_GROUP_DATA)
108                         flags |= fs_info->avail_data_alloc_bits;
109                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
110                         flags |= fs_info->avail_system_alloc_bits;
111                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
112                         flags |= fs_info->avail_metadata_alloc_bits;
113         } while (read_seqretry(&fs_info->profiles_lock, seq));
114
115         return btrfs_reduce_alloc_profile(fs_info, flags);
116 }
117
118 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
119 {
120         return get_alloc_profile(fs_info, orig_flags);
121 }
122
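/* Grab a reference on @cache; paired with btrfs_put_block_group(). */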
123 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
124 {
125         atomic_inc(&cache->count);
126 }
127
128 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
129 {
130         if (atomic_dec_and_test(&cache->count)) {
131                 WARN_ON(cache->pinned > 0);
132                 WARN_ON(cache->reserved > 0);
133
134                 /*
135                  * If not empty, someone is still holding the
136                  * full_stripe_lock mutex, which can only be released by
137                  * its caller; freeing the cache now would lead to a
138                  * use-after-free when that caller releases the lock.
139                  *
140                  * There is no better way to resolve this than to warn.
141                  */
142                 WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
143                 kfree(cache->free_space_ctl);
144                 kfree(cache);
145         }
146 }
147
148 /*
149  * This adds the block group to the fs_info rb tree for the block group cache
150  */
151 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
152                                 struct btrfs_block_group_cache *block_group)
153 {
154         struct rb_node **p;
155         struct rb_node *parent = NULL;
156         struct btrfs_block_group_cache *cache;
157
158         spin_lock(&info->block_group_cache_lock);
159         p = &info->block_group_cache_tree.rb_node;
160
161         while (*p) {
162                 parent = *p;
163                 cache = rb_entry(parent, struct btrfs_block_group_cache,
164                                  cache_node);
165                 if (block_group->key.objectid < cache->key.objectid) {
166                         p = &(*p)->rb_left;
167                 } else if (block_group->key.objectid > cache->key.objectid) {
168                         p = &(*p)->rb_right;
169                 } else {
170                         spin_unlock(&info->block_group_cache_lock);
171                         return -EEXIST;
172                 }
173         }
174
175         rb_link_node(&block_group->cache_node, parent, p);
176         rb_insert_color(&block_group->cache_node,
177                         &info->block_group_cache_tree);
178
179         if (info->first_logical_byte > block_group->key.objectid)
180                 info->first_logical_byte = block_group->key.objectid;
181
182         spin_unlock(&info->block_group_cache_lock);
183
184         return 0;
185 }
186
187 /*
188  * This will return the block group at or after bytenr if contains is 0, else
189  * it will return the block group that contains the bytenr
190  */
191 static struct btrfs_block_group_cache *block_group_cache_tree_search(
192                 struct btrfs_fs_info *info, u64 bytenr, int contains)
193 {
194         struct btrfs_block_group_cache *cache, *ret = NULL;
195         struct rb_node *n;
196         u64 end, start;
197
198         spin_lock(&info->block_group_cache_lock);
199         n = info->block_group_cache_tree.rb_node;
200
201         while (n) {
202                 cache = rb_entry(n, struct btrfs_block_group_cache,
203                                  cache_node);
204                 end = cache->key.objectid + cache->key.offset - 1;
205                 start = cache->key.objectid;
206
207                 if (bytenr < start) {
208                         if (!contains && (!ret || start < ret->key.objectid))
209                                 ret = cache;
210                         n = n->rb_left;
211                 } else if (bytenr > start) {
212                         if (contains && bytenr <= end) {
213                                 ret = cache;
214                                 break;
215                         }
216                         n = n->rb_right;
217                 } else {
218                         ret = cache;
219                         break;
220                 }
221         }
222         if (ret) {
223                 btrfs_get_block_group(ret);
224                 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
225                         info->first_logical_byte = ret->key.objectid;
226         }
227         spin_unlock(&info->block_group_cache_lock);
228
229         return ret;
230 }
231
232 /*
233  * Return the block group that starts at or after bytenr
234  */
235 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
236                 struct btrfs_fs_info *info, u64 bytenr)
237 {
238         return block_group_cache_tree_search(info, bytenr, 0);
239 }
240
241 /*
242  * Return the block group that contains the given bytenr
243  */
244 struct btrfs_block_group_cache *btrfs_lookup_block_group(
245                 struct btrfs_fs_info *info, u64 bytenr)
246 {
247         return block_group_cache_tree_search(info, bytenr, 1);
248 }
249
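/*
 * Return the block group after @cache, or NULL if @cache was the last one.
 * Drops the reference on @cache and takes a reference on the block group
 * that is returned.
 */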
250 struct btrfs_block_group_cache *btrfs_next_block_group(
251                 struct btrfs_block_group_cache *cache)
252 {
253         struct btrfs_fs_info *fs_info = cache->fs_info;
254         struct rb_node *node;
255
256         spin_lock(&fs_info->block_group_cache_lock);
257
258         /* If our block group was removed, we need a full search. */
259         if (RB_EMPTY_NODE(&cache->cache_node)) {
260                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
261
262                 spin_unlock(&fs_info->block_group_cache_lock);
263                 btrfs_put_block_group(cache);
264                 return btrfs_lookup_first_block_group(fs_info, next_bytenr);
265         }
266         node = rb_next(&cache->cache_node);
267         btrfs_put_block_group(cache);
268         if (node) {
269                 cache = rb_entry(node, struct btrfs_block_group_cache,
270                                  cache_node);
271                 btrfs_get_block_group(cache);
272         } else
273                 cache = NULL;
274         spin_unlock(&fs_info->block_group_cache_lock);
275         return cache;
276 }
277
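/*
 * Take a NOCOW writer count on the block group containing @bytenr, unless
 * that block group is read-only.  A minimal sketch of a hypothetical
 * caller:
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... do the NOCOW write ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	}
 */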
278 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
279 {
280         struct btrfs_block_group_cache *bg;
281         bool ret = true;
282
283         bg = btrfs_lookup_block_group(fs_info, bytenr);
284         if (!bg)
285                 return false;
286
287         spin_lock(&bg->lock);
288         if (bg->ro)
289                 ret = false;
290         else
291                 atomic_inc(&bg->nocow_writers);
292         spin_unlock(&bg->lock);
293
294         /* No put on block group, done by btrfs_dec_nocow_writers */
295         if (!ret)
296                 btrfs_put_block_group(bg);
297
298         return ret;
299 }
300
301 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
302 {
303         struct btrfs_block_group_cache *bg;
304
305         bg = btrfs_lookup_block_group(fs_info, bytenr);
306         ASSERT(bg);
307         if (atomic_dec_and_test(&bg->nocow_writers))
308                 wake_up_var(&bg->nocow_writers);
309         /*
310          * Once for our lookup and once for the lookup done by a previous call
311          * to btrfs_inc_nocow_writers()
312          */
313         btrfs_put_block_group(bg);
314         btrfs_put_block_group(bg);
315 }
316
317 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg)
318 {
319         wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
320 }
321
322 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
323                                         const u64 start)
324 {
325         struct btrfs_block_group_cache *bg;
326
327         bg = btrfs_lookup_block_group(fs_info, start);
328         ASSERT(bg);
329         if (atomic_dec_and_test(&bg->reservations))
330                 wake_up_var(&bg->reservations);
331         btrfs_put_block_group(bg);
332 }
333
334 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
335 {
336         struct btrfs_space_info *space_info = bg->space_info;
337
338         ASSERT(bg->ro);
339
340         if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
341                 return;
342
343         /*
344          * Our block group is read only but before we set it to read only,
345          * some task might have allocated an extent from it already, but it
346          * has not yet created a respective ordered extent (and added it to a
347          * root's list of ordered extents).
348          * Therefore wait for any task currently allocating extents, since the
349          * block group's reservations counter is incremented while a read lock
350          * on the groups' semaphore is held and decremented after releasing
351          * the read access on that semaphore and creating the ordered extent.
352          */
353         down_write(&space_info->groups_sem);
354         up_write(&space_info->groups_sem);
355
356         wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
357 }
358
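/*
 * Return @cache's caching control with an extra reference held, or NULL if
 * caching is not in progress.  The reference is dropped with
 * btrfs_put_caching_control().
 */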
359 struct btrfs_caching_control *btrfs_get_caching_control(
360                 struct btrfs_block_group_cache *cache)
361 {
362         struct btrfs_caching_control *ctl;
363
364         spin_lock(&cache->lock);
365         if (!cache->caching_ctl) {
366                 spin_unlock(&cache->lock);
367                 return NULL;
368         }
369
370         ctl = cache->caching_ctl;
371         refcount_inc(&ctl->count);
372         spin_unlock(&cache->lock);
373         return ctl;
374 }
375
376 void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
377 {
378         if (refcount_dec_and_test(&ctl->count))
379                 kfree(ctl);
380 }
381
382 /*
383  * When we wait for progress in the block group caching, it's because our
384  * allocation attempt failed at least once.  So, we must sleep and let some
385  * progress happen before we try again.
386  *
387  * This function will sleep at least once waiting for new free space to show
388  * up, and then it will check the block group free space numbers for our min
389  * num_bytes.  Another option is to have it go ahead and look in the rbtree for
390  * a free extent of a given size, but this is a good start.
391  *
392  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
393  * any of the information in this block group.
394  */
395 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
396                                            u64 num_bytes)
397 {
398         struct btrfs_caching_control *caching_ctl;
399
400         caching_ctl = btrfs_get_caching_control(cache);
401         if (!caching_ctl)
402                 return;
403
404         wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) ||
405                    (cache->free_space_ctl->free_space >= num_bytes));
406
407         btrfs_put_caching_control(caching_ctl);
408 }
409
410 int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
411 {
412         struct btrfs_caching_control *caching_ctl;
413         int ret = 0;
414
415         caching_ctl = btrfs_get_caching_control(cache);
416         if (!caching_ctl)
417                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
418
419         wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache));
420         if (cache->cached == BTRFS_CACHE_ERROR)
421                 ret = -EIO;
422         btrfs_put_caching_control(caching_ctl);
423         return ret;
424 }
425
426 #ifdef CONFIG_BTRFS_DEBUG
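/*
 * Debug helper: knock out every other chunk-sized range of @block_group's
 * free space, so that allocator paths can be exercised against heavily
 * fragmented block groups (see btrfs_should_fragment_free_space()).
 */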
427 static void fragment_free_space(struct btrfs_block_group_cache *block_group)
428 {
429         struct btrfs_fs_info *fs_info = block_group->fs_info;
430         u64 start = block_group->key.objectid;
431         u64 len = block_group->key.offset;
432         u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
433                 fs_info->nodesize : fs_info->sectorsize;
434         u64 step = chunk << 1;
435
436         while (len > chunk) {
437                 btrfs_remove_free_space(block_group, start, chunk);
438                 start += step;
439                 if (len < step)
440                         len = 0;
441                 else
442                         len -= step;
443         }
444 }
445 #endif
446
447 /*
448  * This is only called by btrfs_cache_block_group().  Since we could have
449  * freed extents, we need to check pinned_extents for any extents that
450  * can't be used yet, because their free space will be released as soon as
451  * the transaction commits.
452  */
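/*
 * For example (hypothetical numbers): caching the range [0, 100) while a
 * pinned extent covers bytes 40-60 adds [0, 40) as free space, resumes the
 * search at byte 61 and adds the remaining [61, 100) after the loop.
 */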
453 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
454                        u64 start, u64 end)
455 {
456         struct btrfs_fs_info *info = block_group->fs_info;
457         u64 extent_start, extent_end, size, total_added = 0;
458         int ret;
459
460         while (start < end) {
461                 ret = find_first_extent_bit(info->pinned_extents, start,
462                                             &extent_start, &extent_end,
463                                             EXTENT_DIRTY | EXTENT_UPTODATE,
464                                             NULL);
465                 if (ret)
466                         break;
467
468                 if (extent_start <= start) {
469                         start = extent_end + 1;
470                 } else if (extent_start > start && extent_start < end) {
471                         size = extent_start - start;
472                         total_added += size;
473                         ret = btrfs_add_free_space(block_group, start,
474                                                    size);
475                         BUG_ON(ret); /* -ENOMEM or logic error */
476                         start = extent_end + 1;
477                 } else {
478                         break;
479                 }
480         }
481
482         if (start < end) {
483                 size = end - start;
484                 total_added += size;
485                 ret = btrfs_add_free_space(block_group, start, size);
486                 BUG_ON(ret); /* -ENOMEM or logic error */
487         }
488
489         return total_added;
490 }
491
492 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
493 {
494         struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
495         struct btrfs_fs_info *fs_info = block_group->fs_info;
496         struct btrfs_root *extent_root = fs_info->extent_root;
497         struct btrfs_path *path;
498         struct extent_buffer *leaf;
499         struct btrfs_key key;
500         u64 total_found = 0;
501         u64 last = 0;
502         u32 nritems;
503         int ret;
504         bool wakeup = true;
505
506         path = btrfs_alloc_path();
507         if (!path)
508                 return -ENOMEM;
509
510         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
511
512 #ifdef CONFIG_BTRFS_DEBUG
513         /*
514          * If we're fragmenting we don't want to make anybody think we can
515          * allocate from this block group until we've had a chance to fragment
516          * the free space.
517          */
518         if (btrfs_should_fragment_free_space(block_group))
519                 wakeup = false;
520 #endif
521         /*
522          * We don't want to deadlock with somebody trying to allocate a new
523          * extent for the extent root while also trying to search the extent
524          * root to add free space.  So we skip locking and search the commit
525          * root, since it's read-only
526          */
527         path->skip_locking = 1;
528         path->search_commit_root = 1;
529         path->reada = READA_FORWARD;
530
531         key.objectid = last;
532         key.offset = 0;
533         key.type = BTRFS_EXTENT_ITEM_KEY;
534
535 next:
536         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
537         if (ret < 0)
538                 goto out;
539
540         leaf = path->nodes[0];
541         nritems = btrfs_header_nritems(leaf);
542
543         while (1) {
544                 if (btrfs_fs_closing(fs_info) > 1) {
545                         last = (u64)-1;
546                         break;
547                 }
548
549                 if (path->slots[0] < nritems) {
550                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
551                 } else {
552                         ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
553                         if (ret)
554                                 break;
555
556                         if (need_resched() ||
557                             rwsem_is_contended(&fs_info->commit_root_sem)) {
558                                 if (wakeup)
559                                         caching_ctl->progress = last;
560                                 btrfs_release_path(path);
561                                 up_read(&fs_info->commit_root_sem);
562                                 mutex_unlock(&caching_ctl->mutex);
563                                 cond_resched();
564                                 mutex_lock(&caching_ctl->mutex);
565                                 down_read(&fs_info->commit_root_sem);
566                                 goto next;
567                         }
568
569                         ret = btrfs_next_leaf(extent_root, path);
570                         if (ret < 0)
571                                 goto out;
572                         if (ret)
573                                 break;
574                         leaf = path->nodes[0];
575                         nritems = btrfs_header_nritems(leaf);
576                         continue;
577                 }
578
579                 if (key.objectid < last) {
580                         key.objectid = last;
581                         key.offset = 0;
582                         key.type = BTRFS_EXTENT_ITEM_KEY;
583
584                         if (wakeup)
585                                 caching_ctl->progress = last;
586                         btrfs_release_path(path);
587                         goto next;
588                 }
589
590                 if (key.objectid < block_group->key.objectid) {
591                         path->slots[0]++;
592                         continue;
593                 }
594
595                 if (key.objectid >= block_group->key.objectid +
596                     block_group->key.offset)
597                         break;
598
599                 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
600                     key.type == BTRFS_METADATA_ITEM_KEY) {
601                         total_found += add_new_free_space(block_group, last,
602                                                           key.objectid);
603                         if (key.type == BTRFS_METADATA_ITEM_KEY)
604                                 last = key.objectid +
605                                         fs_info->nodesize;
606                         else
607                                 last = key.objectid + key.offset;
608
609                         if (total_found > CACHING_CTL_WAKE_UP) {
610                                 total_found = 0;
611                                 if (wakeup)
612                                         wake_up(&caching_ctl->wait);
613                         }
614                 }
615                 path->slots[0]++;
616         }
617         ret = 0;
618
619         total_found += add_new_free_space(block_group, last,
620                                           block_group->key.objectid +
621                                           block_group->key.offset);
622         caching_ctl->progress = (u64)-1;
623
624 out:
625         btrfs_free_path(path);
626         return ret;
627 }
628
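/*
 * Background worker that fills in a block group's free space cache, using
 * the free space tree when the filesystem has one and falling back to
 * walking the extent tree otherwise.
 */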
629 static noinline void caching_thread(struct btrfs_work *work)
630 {
631         struct btrfs_block_group_cache *block_group;
632         struct btrfs_fs_info *fs_info;
633         struct btrfs_caching_control *caching_ctl;
634         int ret;
635
636         caching_ctl = container_of(work, struct btrfs_caching_control, work);
637         block_group = caching_ctl->block_group;
638         fs_info = block_group->fs_info;
639
640         mutex_lock(&caching_ctl->mutex);
641         down_read(&fs_info->commit_root_sem);
642
643         if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
644                 ret = load_free_space_tree(caching_ctl);
645         else
646                 ret = load_extent_tree_free(caching_ctl);
647
648         spin_lock(&block_group->lock);
649         block_group->caching_ctl = NULL;
650         block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
651         spin_unlock(&block_group->lock);
652
653 #ifdef CONFIG_BTRFS_DEBUG
654         if (btrfs_should_fragment_free_space(block_group)) {
655                 u64 bytes_used;
656
657                 spin_lock(&block_group->space_info->lock);
658                 spin_lock(&block_group->lock);
659                 bytes_used = block_group->key.offset -
660                         btrfs_block_group_used(&block_group->item);
661                 block_group->space_info->bytes_used += bytes_used >> 1;
662                 spin_unlock(&block_group->lock);
663                 spin_unlock(&block_group->space_info->lock);
664                 fragment_free_space(block_group);
665         }
666 #endif
667
668         caching_ctl->progress = (u64)-1;
669
670         up_read(&fs_info->commit_root_sem);
671         btrfs_free_excluded_extents(block_group);
672         mutex_unlock(&caching_ctl->mutex);
673
674         wake_up(&caching_ctl->wait);
675
676         btrfs_put_caching_control(caching_ctl);
677         btrfs_put_block_group(block_group);
678 }
679
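/*
 * Start caching @cache's free space.  With the SPACE_CACHE mount option a
 * fast load of the on-disk space cache is attempted first; otherwise, and
 * whenever the fast load fails, a caching_thread() worker is queued unless
 * @load_cache_only is set.
 */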
680 int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
681                             int load_cache_only)
682 {
683         DEFINE_WAIT(wait);
684         struct btrfs_fs_info *fs_info = cache->fs_info;
685         struct btrfs_caching_control *caching_ctl;
686         int ret = 0;
687
688         caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
689         if (!caching_ctl)
690                 return -ENOMEM;
691
692         INIT_LIST_HEAD(&caching_ctl->list);
693         mutex_init(&caching_ctl->mutex);
694         init_waitqueue_head(&caching_ctl->wait);
695         caching_ctl->block_group = cache;
696         caching_ctl->progress = cache->key.objectid;
697         refcount_set(&caching_ctl->count, 1);
698         btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
699                         caching_thread, NULL, NULL);
700
701         spin_lock(&cache->lock);
702         /*
703          * This should be a rare occasion, but this could happen I think in the
704          * case where one thread starts to load the space cache info, and then
705          * some other thread starts a transaction commit which tries to do an
706          * allocation while the other thread is still loading the space cache
707          * info.  The previous loop should have kept us from choosing this block
708          * group, but if we've moved to the state where we will wait on caching
709          * block groups we need to first check if we're doing a fast load here,
710          * so we can wait for it to finish, otherwise we could end up allocating
711          * from a block group whose cache gets evicted for one reason or
712          * another.
713          */
714         while (cache->cached == BTRFS_CACHE_FAST) {
715                 struct btrfs_caching_control *ctl;
716
717                 ctl = cache->caching_ctl;
718                 refcount_inc(&ctl->count);
719                 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
720                 spin_unlock(&cache->lock);
721
722                 schedule();
723
724                 finish_wait(&ctl->wait, &wait);
725                 btrfs_put_caching_control(ctl);
726                 spin_lock(&cache->lock);
727         }
728
729         if (cache->cached != BTRFS_CACHE_NO) {
730                 spin_unlock(&cache->lock);
731                 kfree(caching_ctl);
732                 return 0;
733         }
734         WARN_ON(cache->caching_ctl);
735         cache->caching_ctl = caching_ctl;
736         cache->cached = BTRFS_CACHE_FAST;
737         spin_unlock(&cache->lock);
738
739         if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
740                 mutex_lock(&caching_ctl->mutex);
741                 ret = load_free_space_cache(cache);
742
743                 spin_lock(&cache->lock);
744                 if (ret == 1) {
745                         cache->caching_ctl = NULL;
746                         cache->cached = BTRFS_CACHE_FINISHED;
747                         cache->last_byte_to_unpin = (u64)-1;
748                         caching_ctl->progress = (u64)-1;
749                 } else {
750                         if (load_cache_only) {
751                                 cache->caching_ctl = NULL;
752                                 cache->cached = BTRFS_CACHE_NO;
753                         } else {
754                                 cache->cached = BTRFS_CACHE_STARTED;
755                                 cache->has_caching_ctl = 1;
756                         }
757                 }
758                 spin_unlock(&cache->lock);
759 #ifdef CONFIG_BTRFS_DEBUG
760                 if (ret == 1 &&
761                     btrfs_should_fragment_free_space(cache)) {
762                         u64 bytes_used;
763
764                         spin_lock(&cache->space_info->lock);
765                         spin_lock(&cache->lock);
766                         bytes_used = cache->key.offset -
767                                 btrfs_block_group_used(&cache->item);
768                         cache->space_info->bytes_used += bytes_used >> 1;
769                         spin_unlock(&cache->lock);
770                         spin_unlock(&cache->space_info->lock);
771                         fragment_free_space(cache);
772                 }
773 #endif
774                 mutex_unlock(&caching_ctl->mutex);
775
776                 wake_up(&caching_ctl->wait);
777                 if (ret == 1) {
778                         btrfs_put_caching_control(caching_ctl);
779                         btrfs_free_excluded_extents(cache);
780                         return 0;
781                 }
782         } else {
783                 /*
784                  * We're either using the free space tree or no caching at all.
785                  * Set cached to the appropriate value and wake up any waiters.
786                  */
787                 spin_lock(&cache->lock);
788                 if (load_cache_only) {
789                         cache->caching_ctl = NULL;
790                         cache->cached = BTRFS_CACHE_NO;
791                 } else {
792                         cache->cached = BTRFS_CACHE_STARTED;
793                         cache->has_caching_ctl = 1;
794                 }
795                 spin_unlock(&cache->lock);
796                 wake_up(&caching_ctl->wait);
797         }
798
799         if (load_cache_only) {
800                 btrfs_put_caching_control(caching_ctl);
801                 return 0;
802         }
803
804         down_write(&fs_info->commit_root_sem);
805         refcount_inc(&caching_ctl->count);
806         list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
807         up_write(&fs_info->commit_root_sem);
808
809         btrfs_get_block_group(cache);
810
811         btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
812
813         return ret;
814 }
815
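/*
 * Clear the extended profile bits of @flags from the per-type
 * avail_*_alloc_bits masks for every block group type set in @flags.
 */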
816 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
817 {
818         u64 extra_flags = chunk_to_extended(flags) &
819                                 BTRFS_EXTENDED_PROFILE_MASK;
820
821         write_seqlock(&fs_info->profiles_lock);
822         if (flags & BTRFS_BLOCK_GROUP_DATA)
823                 fs_info->avail_data_alloc_bits &= ~extra_flags;
824         if (flags & BTRFS_BLOCK_GROUP_METADATA)
825                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
826         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
827                 fs_info->avail_system_alloc_bits &= ~extra_flags;
828         write_sequnlock(&fs_info->profiles_lock);
829 }
830
831 /*
832  * Clear incompat bits for the following feature(s):
833  *
834  * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
835  *            in the whole filesystem
836  */
837 static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
838 {
839         if (flags & BTRFS_BLOCK_GROUP_RAID56_MASK) {
840                 struct list_head *head = &fs_info->space_info;
841                 struct btrfs_space_info *sinfo;
842
843                 list_for_each_entry_rcu(sinfo, head, list) {
844                         bool found = false;
845
846                         down_read(&sinfo->groups_sem);
847                         if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
848                                 found = true;
849                         if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
850                                 found = true;
851                         up_read(&sinfo->groups_sem);
852
853                         if (found)
854                                 return;
855                 }
856                 btrfs_clear_fs_incompat(fs_info, RAID56);
857         }
858 }
859
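/*
 * Remove the in-memory and on-disk state of the block group that starts at
 * @group_start.  The chunk's extent map @em is passed in so that it can be
 * removed from the mapping tree once it is safe to do so (i.e. no trimming
 * is still using the block group).
 */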
860 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
861                              u64 group_start, struct extent_map *em)
862 {
863         struct btrfs_fs_info *fs_info = trans->fs_info;
864         struct btrfs_root *root = fs_info->extent_root;
865         struct btrfs_path *path;
866         struct btrfs_block_group_cache *block_group;
867         struct btrfs_free_cluster *cluster;
868         struct btrfs_root *tree_root = fs_info->tree_root;
869         struct btrfs_key key;
870         struct inode *inode;
871         struct kobject *kobj = NULL;
872         int ret;
873         int index;
874         int factor;
875         struct btrfs_caching_control *caching_ctl = NULL;
876         bool remove_em;
877         bool remove_rsv = false;
878
879         block_group = btrfs_lookup_block_group(fs_info, group_start);
880         BUG_ON(!block_group);
881         BUG_ON(!block_group->ro);
882
883         trace_btrfs_remove_block_group(block_group);
884         /*
885          * Free the reserved super bytes from this block group before
886          * removing it.
887          */
888         btrfs_free_excluded_extents(block_group);
889         btrfs_free_ref_tree_range(fs_info, block_group->key.objectid,
890                                   block_group->key.offset);
891
892         memcpy(&key, &block_group->key, sizeof(key));
893         index = btrfs_bg_flags_to_raid_index(block_group->flags);
894         factor = btrfs_bg_type_to_factor(block_group->flags);
895
896         /* make sure this block group isn't part of an allocation cluster */
897         cluster = &fs_info->data_alloc_cluster;
898         spin_lock(&cluster->refill_lock);
899         btrfs_return_cluster_to_free_space(block_group, cluster);
900         spin_unlock(&cluster->refill_lock);
901
902         /*
903          * make sure this block group isn't part of a metadata
904          * allocation cluster
905          */
906         cluster = &fs_info->meta_alloc_cluster;
907         spin_lock(&cluster->refill_lock);
908         btrfs_return_cluster_to_free_space(block_group, cluster);
909         spin_unlock(&cluster->refill_lock);
910
911         path = btrfs_alloc_path();
912         if (!path) {
913                 ret = -ENOMEM;
914                 goto out;
915         }
916
917         /*
918          * get the inode first so any iput calls done for the io_list
919          * aren't the final iput (no unlinks allowed now)
920          */
921         inode = lookup_free_space_inode(block_group, path);
922
923         mutex_lock(&trans->transaction->cache_write_mutex);
924         /*
925          * Make sure our free space cache IO is done before removing the
926          * free space inode
927          */
928         spin_lock(&trans->transaction->dirty_bgs_lock);
929         if (!list_empty(&block_group->io_list)) {
930                 list_del_init(&block_group->io_list);
931
932                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
933
934                 spin_unlock(&trans->transaction->dirty_bgs_lock);
935                 btrfs_wait_cache_io(trans, block_group, path);
936                 btrfs_put_block_group(block_group);
937                 spin_lock(&trans->transaction->dirty_bgs_lock);
938         }
939
940         if (!list_empty(&block_group->dirty_list)) {
941                 list_del_init(&block_group->dirty_list);
942                 remove_rsv = true;
943                 btrfs_put_block_group(block_group);
944         }
945         spin_unlock(&trans->transaction->dirty_bgs_lock);
946         mutex_unlock(&trans->transaction->cache_write_mutex);
947
948         if (!IS_ERR(inode)) {
949                 ret = btrfs_orphan_add(trans, BTRFS_I(inode));
950                 if (ret) {
951                         btrfs_add_delayed_iput(inode);
952                         goto out;
953                 }
954                 clear_nlink(inode);
955                 /* One for the block group's ref */
956                 spin_lock(&block_group->lock);
957                 if (block_group->iref) {
958                         block_group->iref = 0;
959                         block_group->inode = NULL;
960                         spin_unlock(&block_group->lock);
961                         iput(inode);
962                 } else {
963                         spin_unlock(&block_group->lock);
964                 }
965                 /* One for our lookup ref */
966                 btrfs_add_delayed_iput(inode);
967         }
968
969         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
970         key.offset = block_group->key.objectid;
971         key.type = 0;
972
973         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
974         if (ret < 0)
975                 goto out;
976         if (ret > 0)
977                 btrfs_release_path(path);
978         if (ret == 0) {
979                 ret = btrfs_del_item(trans, tree_root, path);
980                 if (ret)
981                         goto out;
982                 btrfs_release_path(path);
983         }
984
985         spin_lock(&fs_info->block_group_cache_lock);
986         rb_erase(&block_group->cache_node,
987                  &fs_info->block_group_cache_tree);
988         RB_CLEAR_NODE(&block_group->cache_node);
989
990         if (fs_info->first_logical_byte == block_group->key.objectid)
991                 fs_info->first_logical_byte = (u64)-1;
992         spin_unlock(&fs_info->block_group_cache_lock);
993
994         down_write(&block_group->space_info->groups_sem);
995         /*
996          * we must use list_del_init so people can check to see if they
997          * are still on the list after taking the semaphore
998          */
999         list_del_init(&block_group->list);
1000         if (list_empty(&block_group->space_info->block_groups[index])) {
1001                 kobj = block_group->space_info->block_group_kobjs[index];
1002                 block_group->space_info->block_group_kobjs[index] = NULL;
1003                 clear_avail_alloc_bits(fs_info, block_group->flags);
1004         }
1005         up_write(&block_group->space_info->groups_sem);
1006         clear_incompat_bg_bits(fs_info, block_group->flags);
1007         if (kobj) {
1008                 kobject_del(kobj);
1009                 kobject_put(kobj);
1010         }
1011
1012         if (block_group->has_caching_ctl)
1013                 caching_ctl = btrfs_get_caching_control(block_group);
1014         if (block_group->cached == BTRFS_CACHE_STARTED)
1015                 btrfs_wait_block_group_cache_done(block_group);
1016         if (block_group->has_caching_ctl) {
1017                 down_write(&fs_info->commit_root_sem);
1018                 if (!caching_ctl) {
1019                         struct btrfs_caching_control *ctl;
1020
1021                         list_for_each_entry(ctl,
1022                                     &fs_info->caching_block_groups, list)
1023                                 if (ctl->block_group == block_group) {
1024                                         caching_ctl = ctl;
1025                                         refcount_inc(&caching_ctl->count);
1026                                         break;
1027                                 }
1028                 }
1029                 if (caching_ctl)
1030                         list_del_init(&caching_ctl->list);
1031                 up_write(&fs_info->commit_root_sem);
1032                 if (caching_ctl) {
1033                         /* Once for the caching bgs list and once for us. */
1034                         btrfs_put_caching_control(caching_ctl);
1035                         btrfs_put_caching_control(caching_ctl);
1036                 }
1037         }
1038
1039         spin_lock(&trans->transaction->dirty_bgs_lock);
1040         WARN_ON(!list_empty(&block_group->dirty_list));
1041         WARN_ON(!list_empty(&block_group->io_list));
1042         spin_unlock(&trans->transaction->dirty_bgs_lock);
1043
1044         btrfs_remove_free_space_cache(block_group);
1045
1046         spin_lock(&block_group->space_info->lock);
1047         list_del_init(&block_group->ro_list);
1048
1049         if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1050                 WARN_ON(block_group->space_info->total_bytes
1051                         < block_group->key.offset);
1052                 WARN_ON(block_group->space_info->bytes_readonly
1053                         < block_group->key.offset);
1054                 WARN_ON(block_group->space_info->disk_total
1055                         < block_group->key.offset * factor);
1056         }
1057         block_group->space_info->total_bytes -= block_group->key.offset;
1058         block_group->space_info->bytes_readonly -= block_group->key.offset;
1059         block_group->space_info->disk_total -= block_group->key.offset * factor;
1060
1061         spin_unlock(&block_group->space_info->lock);
1062
1063         memcpy(&key, &block_group->key, sizeof(key));
1064
1065         mutex_lock(&fs_info->chunk_mutex);
1066         spin_lock(&block_group->lock);
1067         block_group->removed = 1;
1068         /*
1069          * At this point trimming can't start on this block group, because we
1070          * removed the block group from the tree fs_info->block_group_cache_tree
1071          * so no one can find it anymore, and even if someone already got this
1072          * block group before we removed it from the rbtree, they have already
1073          * incremented block_group->trimming - if they didn't, they won't find
1074          * any free space entries because we already removed them all when we
1075          * called btrfs_remove_free_space_cache().
1076          *
1077          * And we must not remove the extent map from the fs_info->mapping_tree
1078          * to prevent the same logical address range and physical device space
1079          * ranges from being reused for a new block group. This is because our
1080          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
1081          * completely transactionless, so while it is trimming a range the
1082          * currently running transaction might finish and a new one start,
1083          * allowing for new block groups to be created that can reuse the same
1084          * physical device locations unless we take this special care.
1085          *
1086          * There may also be an implicit trim operation if the file system
1087          * is mounted with -odiscard. The same protections must remain
1088          * in place until the extents have been discarded completely when
1089          * the transaction commit has completed.
1090          */
1091         remove_em = (atomic_read(&block_group->trimming) == 0);
1092         spin_unlock(&block_group->lock);
1093
1094         mutex_unlock(&fs_info->chunk_mutex);
1095
1096         ret = remove_block_group_free_space(trans, block_group);
1097         if (ret)
1098                 goto out;
1099
1100         btrfs_put_block_group(block_group);
1101         btrfs_put_block_group(block_group);
1102
1103         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1104         if (ret > 0)
1105                 ret = -EIO;
1106         if (ret < 0)
1107                 goto out;
1108
1109         ret = btrfs_del_item(trans, root, path);
1110         if (ret)
1111                 goto out;
1112
1113         if (remove_em) {
1114                 struct extent_map_tree *em_tree;
1115
1116                 em_tree = &fs_info->mapping_tree;
1117                 write_lock(&em_tree->lock);
1118                 remove_extent_mapping(em_tree, em);
1119                 write_unlock(&em_tree->lock);
1120                 /* once for the tree */
1121                 free_extent_map(em);
1122         }
1123 out:
1124         if (remove_rsv)
1125                 btrfs_delayed_refs_rsv_release(fs_info, 1);
1126         btrfs_free_path(path);
1127         return ret;
1128 }
1129
1130 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
1131                 struct btrfs_fs_info *fs_info, const u64 chunk_offset)
1132 {
1133         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1134         struct extent_map *em;
1135         struct map_lookup *map;
1136         unsigned int num_items;
1137
1138         read_lock(&em_tree->lock);
1139         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1140         read_unlock(&em_tree->lock);
1141         ASSERT(em && em->start == chunk_offset);
1142
1143         /*
1144          * We need to reserve 3 + N units from the metadata space info in order
1145          * to remove a block group (done at btrfs_remove_chunk() and at
1146          * btrfs_remove_block_group()), which are used for:
1147          *
1148          * 1 unit for adding the free space inode's orphan (located in the tree
1149          * of tree roots).
1150          * 1 unit for deleting the block group item (located in the extent
1151          * tree).
1152          * 1 unit for deleting the free space item (located in tree of tree
1153          * roots).
1154          * N units for deleting N device extent items corresponding to each
1155          * stripe (located in the device tree).
1156          *
1157          * In order to remove a block group we also need to reserve units in the
1158          * system space info in order to update the chunk tree (update one or
1159          * more device items and remove one chunk item), but this is done at
1160          * btrfs_remove_chunk() through a call to check_system_chunk().
1161          */
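        /* For example, a chunk with two stripes needs 3 + 2 = 5 units. */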
1162         map = em->map_lookup;
1163         num_items = 3 + map->num_stripes;
1164         free_extent_map(em);
1165
1166         return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
1167                                                            num_items, 1);
1168 }
1169
1170 /*
1171  * Mark block group @cache read-only, so later write won't happen to block
1172  * group @cache.
1173  *
1174  * If @force is not set, this function will only mark the block group readonly
1175  * if we have enough free space (1M) in other metadata/system block groups.
1176  * If @force is set, this function will mark the block group readonly
1177  * without checking free space.
1178  *
1179  * NOTE: This function doesn't care if other block groups can contain all the
1180  * data in this block group. That check should be done by relocation routine,
1181  * not this function.
1182  */
1183 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
1184 {
1185         struct btrfs_space_info *sinfo = cache->space_info;
1186         u64 num_bytes;
1187         u64 sinfo_used;
1188         u64 min_allocable_bytes;
1189         int ret = -ENOSPC;
1190
1191         /*
1192          * We need to keep some metadata and system space available for
1193          * allocating chunks in some corner cases, so unless @force is set,
1194          * require a small cushion before marking the group read-only.
1195          */
1196         if ((sinfo->flags &
1197              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
1198             !force)
1199                 min_allocable_bytes = SZ_1M;
1200         else
1201                 min_allocable_bytes = 0;
1202
1203         spin_lock(&sinfo->lock);
1204         spin_lock(&cache->lock);
1205
1206         if (cache->ro) {
1207                 cache->ro++;
1208                 ret = 0;
1209                 goto out;
1210         }
1211
1212         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
1213                     cache->bytes_super - btrfs_block_group_used(&cache->item);
1214         sinfo_used = btrfs_space_info_used(sinfo, true);
1215
1216         /*
1217          * sinfo_used + num_bytes should always be <= sinfo->total_bytes.
1218          *
1219          * Here we make sure if we mark this bg RO, we still have enough
1220          * free space as buffer (if min_allocable_bytes is not 0).
1221          */
1222         if (sinfo_used + num_bytes + min_allocable_bytes <=
1223             sinfo->total_bytes) {
1224                 sinfo->bytes_readonly += num_bytes;
1225                 cache->ro++;
1226                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1227                 ret = 0;
1228         }
1229 out:
1230         spin_unlock(&cache->lock);
1231         spin_unlock(&sinfo->lock);
1232         if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1233                 btrfs_info(cache->fs_info,
1234                         "unable to make block group %llu ro",
1235                         cache->key.objectid);
1236                 btrfs_info(cache->fs_info,
1237                         "sinfo_used=%llu bg_num_bytes=%llu min_allocable=%llu",
1238                         sinfo_used, num_bytes, min_allocable_bytes);
1239                 btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
1240         }
1241         return ret;
1242 }
1243
1244 /*
1245  * Process the unused_bgs list and remove any that don't have any allocated
1246  * space inside of them.
1247  */
1248 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1249 {
1250         struct btrfs_block_group_cache *block_group;
1251         struct btrfs_space_info *space_info;
1252         struct btrfs_trans_handle *trans;
1253         int ret = 0;
1254
1255         if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1256                 return;
1257
1258         spin_lock(&fs_info->unused_bgs_lock);
1259         while (!list_empty(&fs_info->unused_bgs)) {
1260                 u64 start, end;
1261                 int trimming;
1262
1263                 block_group = list_first_entry(&fs_info->unused_bgs,
1264                                                struct btrfs_block_group_cache,
1265                                                bg_list);
1266                 list_del_init(&block_group->bg_list);
1267
1268                 space_info = block_group->space_info;
1269
1270                 if (ret || btrfs_mixed_space_info(space_info)) {
1271                         btrfs_put_block_group(block_group);
1272                         continue;
1273                 }
1274                 spin_unlock(&fs_info->unused_bgs_lock);
1275
1276                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
1277
1278                 /* Don't want to race with allocators so take the groups_sem */
1279                 down_write(&space_info->groups_sem);
1280                 spin_lock(&block_group->lock);
1281                 if (block_group->reserved || block_group->pinned ||
1282                     btrfs_block_group_used(&block_group->item) ||
1283                     block_group->ro ||
1284                     list_is_singular(&block_group->list)) {
1285                         /*
1286                          * We want to bail if we made new allocations or have
1287                          * outstanding allocations in this block group.  We do
1288                          * the ro check in case balance is currently acting on
1289                          * this block group.
1290                          */
1291                         trace_btrfs_skip_unused_block_group(block_group);
1292                         spin_unlock(&block_group->lock);
1293                         up_write(&space_info->groups_sem);
1294                         goto next;
1295                 }
1296                 spin_unlock(&block_group->lock);
1297
1298                 /* We don't want to force the issue, only flip if it's ok. */
1299                 ret = inc_block_group_ro(block_group, 0);
1300                 up_write(&space_info->groups_sem);
1301                 if (ret < 0) {
1302                         ret = 0;
1303                         goto next;
1304                 }
1305
1306                 /*
1307                  * Want to do this before we do anything else so we can recover
1308                  * properly if we fail to join the transaction.
1309                  */
1310                 trans = btrfs_start_trans_remove_block_group(fs_info,
1311                                                      block_group->key.objectid);
1312                 if (IS_ERR(trans)) {
1313                         btrfs_dec_block_group_ro(block_group);
1314                         ret = PTR_ERR(trans);
1315                         goto next;
1316                 }
1317
1318                 /*
1319                  * We could have pending pinned extents for this block group,
1320                  * so just delete them; we don't care about them anymore.
1321                  */
1322                 start = block_group->key.objectid;
1323                 end = start + block_group->key.offset - 1;
1324                 /*
1325                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
1326                  * btrfs_finish_extent_commit(). If we are at transaction N,
1327                  * another task might be running finish_extent_commit() for the
1328                  * previous transaction N - 1, and have seen a range belonging
1329                  * to the block group in freed_extents[] before we were able to
1330                  * clear the whole block group range from freed_extents[]. This
1331                  * means that task can lookup for the block group after we
1332                  * unpinned it from freed_extents[] and removed it, leading to
1333                  * a BUG_ON() at btrfs_unpin_extent_range().
1334                  */
1335                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
1336                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
1337                                   EXTENT_DIRTY);
1338                 if (ret) {
1339                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1340                         btrfs_dec_block_group_ro(block_group);
1341                         goto end_trans;
1342                 }
1343                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
1344                                   EXTENT_DIRTY);
1345                 if (ret) {
1346                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1347                         btrfs_dec_block_group_ro(block_group);
1348                         goto end_trans;
1349                 }
1350                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1351
1352                 /* Reset pinned so btrfs_put_block_group doesn't complain */
1353                 spin_lock(&space_info->lock);
1354                 spin_lock(&block_group->lock);
1355
1356                 btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1357                                                      -block_group->pinned);
1358                 space_info->bytes_readonly += block_group->pinned;
1359                 percpu_counter_add_batch(&space_info->total_bytes_pinned,
1360                                    -block_group->pinned,
1361                                    BTRFS_TOTAL_BYTES_PINNED_BATCH);
1362                 block_group->pinned = 0;
1363
1364                 spin_unlock(&block_group->lock);
1365                 spin_unlock(&space_info->lock);
1366
1367                 /* DISCARD can flip during remount */
1368                 trimming = btrfs_test_opt(fs_info, DISCARD);
1369
1370                 /* Implicit trim during transaction commit. */
1371                 if (trimming)
1372                         btrfs_get_block_group_trimming(block_group);
1373
1374                 /*
1375                  * btrfs_remove_chunk() will abort the transaction if things go
1376                  * horribly wrong.
1377                  */
1378                 ret = btrfs_remove_chunk(trans, block_group->key.objectid);
1379
1380                 if (ret) {
1381                         if (trimming)
1382                                 btrfs_put_block_group_trimming(block_group);
1383                         goto end_trans;
1384                 }
1385
1386                 /*
1387                  * If we're not mounted with -o discard, we can just forget
1388                  * about this block group. Otherwise we'll need to wait
1389                  * until transaction commit to do the actual discard.
1390                  */
1391                 if (trimming) {
1392                         spin_lock(&fs_info->unused_bgs_lock);
1393                         /*
1394                          * A concurrent scrub might have added us to the list
1395                          * fs_info->unused_bgs, so use a list_move operation
1396                          * to add the block group to the deleted_bgs list.
1397                          */
1398                         list_move(&block_group->bg_list,
1399                                   &trans->transaction->deleted_bgs);
1400                         spin_unlock(&fs_info->unused_bgs_lock);
1401                         btrfs_get_block_group(block_group);
1402                 }
1403 end_trans:
1404                 btrfs_end_transaction(trans);
1405 next:
1406                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
1407                 btrfs_put_block_group(block_group);
1408                 spin_lock(&fs_info->unused_bgs_lock);
1409         }
1410         spin_unlock(&fs_info->unused_bgs_lock);
1411 }
1412
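/*
 * Mark a block group as a candidate for deletion: if it is not already
 * queued, take a reference and add it to fs_info->unused_bgs so that
 * btrfs_delete_unused_bgs() can consider removing it.
 */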
1413 void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
1414 {
1415         struct btrfs_fs_info *fs_info = bg->fs_info;
1416
1417         spin_lock(&fs_info->unused_bgs_lock);
1418         if (list_empty(&bg->bg_list)) {
1419                 btrfs_get_block_group(bg);
1420                 trace_btrfs_add_unused_block_group(bg);
1421                 list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1422         }
1423         spin_unlock(&fs_info->unused_bgs_lock);
1424 }
1425
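/*
 * Find the first block group item at or after @key in the extent tree and
 * verify it against the chunk mapping. Returns 0 with @path pointing at the
 * item on success, > 0 if there is no further block group item, and a
 * negative error (-ENOENT or -EUCLEAN) if the item and its chunk disagree.
 */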
1426 static int find_first_block_group(struct btrfs_fs_info *fs_info,
1427                                   struct btrfs_path *path,
1428                                   struct btrfs_key *key)
1429 {
1430         struct btrfs_root *root = fs_info->extent_root;
1431         int ret = 0;
1432         struct btrfs_key found_key;
1433         struct extent_buffer *leaf;
1434         struct btrfs_block_group_item bg;
1435         u64 flags;
1436         int slot;
1437
1438         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1439         if (ret < 0)
1440                 goto out;
1441
1442         while (1) {
1443                 slot = path->slots[0];
1444                 leaf = path->nodes[0];
1445                 if (slot >= btrfs_header_nritems(leaf)) {
1446                         ret = btrfs_next_leaf(root, path);
1447                         if (ret == 0)
1448                                 continue;
1449                         if (ret < 0)
1450                                 goto out;
1451                         break;
1452                 }
1453                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1454
1455                 if (found_key.objectid >= key->objectid &&
1456                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1457                         struct extent_map_tree *em_tree;
1458                         struct extent_map *em;
1459
1460                         em_tree = &root->fs_info->mapping_tree;
1461                         read_lock(&em_tree->lock);
1462                         em = lookup_extent_mapping(em_tree, found_key.objectid,
1463                                                    found_key.offset);
1464                         read_unlock(&em_tree->lock);
1465                         if (!em) {
1466                                 btrfs_err(fs_info,
1467                         "logical %llu len %llu found bg but no related chunk",
1468                                           found_key.objectid, found_key.offset);
1469                                 ret = -ENOENT;
1470                         } else if (em->start != found_key.objectid ||
1471                                    em->len != found_key.offset) {
1472                                 btrfs_err(fs_info,
1473                 "block group %llu len %llu mismatch with chunk %llu len %llu",
1474                                           found_key.objectid, found_key.offset,
1475                                           em->start, em->len);
1476                                 ret = -EUCLEAN;
1477                         } else {
1478                                 read_extent_buffer(leaf, &bg,
1479                                         btrfs_item_ptr_offset(leaf, slot),
1480                                         sizeof(bg));
1481                                 flags = btrfs_block_group_flags(&bg) &
1482                                         BTRFS_BLOCK_GROUP_TYPE_MASK;
1483
1484                                 if (flags != (em->map_lookup->type &
1485                                               BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1486                                         btrfs_err(fs_info,
1487 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
1488                                                 found_key.objectid,
1489                                                 found_key.offset, flags,
1490                                                 (BTRFS_BLOCK_GROUP_TYPE_MASK &
1491                                                  em->map_lookup->type));
1492                                         ret = -EUCLEAN;
1493                                 } else {
1494                                         ret = 0;
1495                                 }
1496                         }
1497                         free_extent_map(em);
1498                         goto out;
1499                 }
1500                 path->slots[0]++;
1501         }
1502 out:
1503         return ret;
1504 }
1505
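/*
 * Record the extended profile bits of @flags in the per-type (data,
 * metadata, system) masks of available allocation profiles, under the
 * profiles seqlock.
 */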
1506 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1507 {
1508         u64 extra_flags = chunk_to_extended(flags) &
1509                                 BTRFS_EXTENDED_PROFILE_MASK;
1510
1511         write_seqlock(&fs_info->profiles_lock);
1512         if (flags & BTRFS_BLOCK_GROUP_DATA)
1513                 fs_info->avail_data_alloc_bits |= extra_flags;
1514         if (flags & BTRFS_BLOCK_GROUP_METADATA)
1515                 fs_info->avail_metadata_alloc_bits |= extra_flags;
1516         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1517                 fs_info->avail_system_alloc_bits |= extra_flags;
1518         write_sequnlock(&fs_info->profiles_lock);
1519 }
1520
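/*
 * Account and exclude the ranges of all superblock mirrors (and anything
 * below BTRFS_SUPER_INFO_OFFSET) that fall inside this block group, so
 * that free space caching never hands them out as allocatable space.
 */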
1521 static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
1522 {
1523         struct btrfs_fs_info *fs_info = cache->fs_info;
1524         u64 bytenr;
1525         u64 *logical;
1526         int stripe_len;
1527         int i, nr, ret;
1528
1529         if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
1530                 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
1531                 cache->bytes_super += stripe_len;
1532                 ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid,
1533                                                 stripe_len);
1534                 if (ret)
1535                         return ret;
1536         }
1537
1538         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1539                 bytenr = btrfs_sb_offset(i);
1540                 ret = btrfs_rmap_block(fs_info, cache->key.objectid,
1541                                        bytenr, &logical, &nr, &stripe_len);
1542                 if (ret)
1543                         return ret;
1544
1545                 while (nr--) {
1546                         u64 start, len;
1547
1548                         if (logical[nr] > cache->key.objectid +
1549                             cache->key.offset)
1550                                 continue;
1551
1552                         if (logical[nr] + stripe_len <= cache->key.objectid)
1553                                 continue;
1554
1555                         start = logical[nr];
1556                         if (start < cache->key.objectid) {
1557                                 start = cache->key.objectid;
1558                                 len = (logical[nr] + stripe_len) - start;
1559                         } else {
1560                                 len = min_t(u64, stripe_len,
1561                                             cache->key.objectid +
1562                                             cache->key.offset - start);
1563                         }
1564
1565                         cache->bytes_super += len;
1566                         ret = btrfs_add_excluded_extent(fs_info, start, len);
1567                         if (ret) {
1568                                 kfree(logical);
1569                                 return ret;
1570                         }
1571                 }
1572
1573                 kfree(logical);
1574         }
1575         return 0;
1576 }
1577
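/*
 * Add the block group to its space_info's list for the matching RAID level,
 * creating the sysfs entry for that level the first time it appears.
 */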
1578 static void link_block_group(struct btrfs_block_group_cache *cache)
1579 {
1580         struct btrfs_space_info *space_info = cache->space_info;
1581         int index = btrfs_bg_flags_to_raid_index(cache->flags);
1582         bool first = false;
1583
1584         down_write(&space_info->groups_sem);
1585         if (list_empty(&space_info->block_groups[index]))
1586                 first = true;
1587         list_add_tail(&cache->list, &space_info->block_groups[index]);
1588         up_write(&space_info->groups_sem);
1589
1590         if (first)
1591                 btrfs_sysfs_add_block_group_type(cache);
1592 }
1593
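/*
 * Allocate and initialize the in-memory descriptor for the block group
 * starting at @start and spanning @size bytes. Returns NULL on allocation
 * failure; the caller owns the initial reference.
 */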
1594 static struct btrfs_block_group_cache *btrfs_create_block_group_cache(
1595                 struct btrfs_fs_info *fs_info, u64 start, u64 size)
1596 {
1597         struct btrfs_block_group_cache *cache;
1598
1599         cache = kzalloc(sizeof(*cache), GFP_NOFS);
1600         if (!cache)
1601                 return NULL;
1602
1603         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
1604                                         GFP_NOFS);
1605         if (!cache->free_space_ctl) {
1606                 kfree(cache);
1607                 return NULL;
1608         }
1609
1610         cache->key.objectid = start;
1611         cache->key.offset = size;
1612         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1613
1614         cache->fs_info = fs_info;
1615         cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
1616         set_free_space_tree_thresholds(cache);
1617
1618         atomic_set(&cache->count, 1);
1619         spin_lock_init(&cache->lock);
1620         init_rwsem(&cache->data_rwsem);
1621         INIT_LIST_HEAD(&cache->list);
1622         INIT_LIST_HEAD(&cache->cluster_list);
1623         INIT_LIST_HEAD(&cache->bg_list);
1624         INIT_LIST_HEAD(&cache->ro_list);
1625         INIT_LIST_HEAD(&cache->dirty_list);
1626         INIT_LIST_HEAD(&cache->io_list);
1627         btrfs_init_free_space_ctl(cache);
1628         atomic_set(&cache->trimming, 0);
1629         mutex_init(&cache->free_space_lock);
1630         btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
1631
1632         return cache;
1633 }
1634
1635 /*
1636  * Iterate all chunks and verify that each of them has the corresponding block
1637  * group
1638  */
1639 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
1640 {
1641         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
1642         struct extent_map *em;
1643         struct btrfs_block_group_cache *bg;
1644         u64 start = 0;
1645         int ret = 0;
1646
1647         while (1) {
1648                 read_lock(&map_tree->lock);
1649                 /*
1650                  * lookup_extent_mapping will return the first extent map
1651                  * intersecting the range, so setting @len to 1 is enough to
1652                  * get the first chunk.
1653                  */
1654                 em = lookup_extent_mapping(map_tree, start, 1);
1655                 read_unlock(&map_tree->lock);
1656                 if (!em)
1657                         break;
1658
1659                 bg = btrfs_lookup_block_group(fs_info, em->start);
1660                 if (!bg) {
1661                         btrfs_err(fs_info,
1662         "chunk start=%llu len=%llu doesn't have corresponding block group",
1663                                      em->start, em->len);
1664                         ret = -EUCLEAN;
1665                         free_extent_map(em);
1666                         break;
1667                 }
1668                 if (bg->key.objectid != em->start ||
1669                     bg->key.offset != em->len ||
1670                     (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
1671                     (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1672                         btrfs_err(fs_info,
1673 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
1674                                 em->start, em->len,
1675                                 em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
1676                                 bg->key.objectid, bg->key.offset,
1677                                 bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
1678                         ret = -EUCLEAN;
1679                         free_extent_map(em);
1680                         btrfs_put_block_group(bg);
1681                         break;
1682                 }
1683                 start = em->start + em->len;
1684                 free_extent_map(em);
1685                 btrfs_put_block_group(bg);
1686         }
1687         return ret;
1688 }
1689
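/*
 * Build the in-memory block group cache at mount time: walk every block
 * group item in the extent tree, create a cache entry for each, exclude the
 * super stripes, attach the entry to its space_info, and finally verify
 * that every chunk has a matching block group.
 */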
1690 int btrfs_read_block_groups(struct btrfs_fs_info *info)
1691 {
1692         struct btrfs_path *path;
1693         int ret;
1694         struct btrfs_block_group_cache *cache;
1695         struct btrfs_space_info *space_info;
1696         struct btrfs_key key;
1697         struct btrfs_key found_key;
1698         struct extent_buffer *leaf;
1699         int need_clear = 0;
1700         u64 cache_gen;
1701         u64 feature;
1702         int mixed;
1703
1704         feature = btrfs_super_incompat_flags(info->super_copy);
1705         mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
1706
1707         key.objectid = 0;
1708         key.offset = 0;
1709         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
1710         path = btrfs_alloc_path();
1711         if (!path)
1712                 return -ENOMEM;
1713         path->reada = READA_FORWARD;
1714
1715         cache_gen = btrfs_super_cache_generation(info->super_copy);
1716         if (btrfs_test_opt(info, SPACE_CACHE) &&
1717             btrfs_super_generation(info->super_copy) != cache_gen)
1718                 need_clear = 1;
1719         if (btrfs_test_opt(info, CLEAR_CACHE))
1720                 need_clear = 1;
1721
1722         while (1) {
1723                 ret = find_first_block_group(info, path, &key);
1724                 if (ret > 0)
1725                         break;
1726                 if (ret != 0)
1727                         goto error;
1728
1729                 leaf = path->nodes[0];
1730                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1731
1732                 cache = btrfs_create_block_group_cache(info, found_key.objectid,
1733                                                        found_key.offset);
1734                 if (!cache) {
1735                         ret = -ENOMEM;
1736                         goto error;
1737                 }
1738
1739                 if (need_clear) {
1740                         /*
1741                          * When we mount with an old space cache, we need to
1742                          * set BTRFS_DC_CLEAR and set the dirty flag.
1743                          *
1744                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
1745                          *    truncate the old free space cache inode and
1746                          *    set up a new one.
1747                          * b) Setting the dirty flag makes sure that we flush
1748                          *    the new space cache info onto disk.
1749                          */
1750                         if (btrfs_test_opt(info, SPACE_CACHE))
1751                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
1752                 }
1753
1754                 read_extent_buffer(leaf, &cache->item,
1755                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
1756                                    sizeof(cache->item));
1757                 cache->flags = btrfs_block_group_flags(&cache->item);
1758                 if (!mixed &&
1759                     ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
1760                     (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
1761                         btrfs_err(info,
1762 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
1763                                   cache->key.objectid);
1764                         btrfs_put_block_group(cache);
1765                         ret = -EINVAL;
1766                         goto error;
1767                 }
1768
1769                 key.objectid = found_key.objectid + found_key.offset;
1770                 btrfs_release_path(path);
1771
1772                 /*
1773                  * We need to exclude the super stripes now so that the space
1774                  * info has super bytes accounted for; otherwise we'll think
1775                  * we have more space than we actually do.
1776                  */
1777                 ret = exclude_super_stripes(cache);
1778                 if (ret) {
1779                         /*
1780                          * We may have excluded something, so call this just in
1781                          * case.
1782                          */
1783                         btrfs_free_excluded_extents(cache);
1784                         btrfs_put_block_group(cache);
1785                         goto error;
1786                 }
1787
1788                 /*
1789                  * Check for two cases: either we are full, and therefore
1790                  * don't need to bother with the caching work since we won't
1791                  * find any space, or we are empty, and we can just add all
1792                  * the space in and be done with it.  This saves us _a_lot_ of
1793                  * time, particularly in the full case.
1794                  */
1795                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
1796                         cache->last_byte_to_unpin = (u64)-1;
1797                         cache->cached = BTRFS_CACHE_FINISHED;
1798                         btrfs_free_excluded_extents(cache);
1799                 } else if (btrfs_block_group_used(&cache->item) == 0) {
1800                         cache->last_byte_to_unpin = (u64)-1;
1801                         cache->cached = BTRFS_CACHE_FINISHED;
1802                         add_new_free_space(cache, found_key.objectid,
1803                                            found_key.objectid +
1804                                            found_key.offset);
1805                         btrfs_free_excluded_extents(cache);
1806                 }
1807
1808                 ret = btrfs_add_block_group_cache(info, cache);
1809                 if (ret) {
1810                         btrfs_remove_free_space_cache(cache);
1811                         btrfs_put_block_group(cache);
1812                         goto error;
1813                 }
1814
1815                 trace_btrfs_add_block_group(info, cache, 0);
1816                 btrfs_update_space_info(info, cache->flags, found_key.offset,
1817                                         btrfs_block_group_used(&cache->item),
1818                                         cache->bytes_super, &space_info);
1819
1820                 cache->space_info = space_info;
1821
1822                 link_block_group(cache);
1823
1824                 set_avail_alloc_bits(info, cache->flags);
1825                 if (btrfs_chunk_readonly(info, cache->key.objectid)) {
1826                         inc_block_group_ro(cache, 1);
1827                 } else if (btrfs_block_group_used(&cache->item) == 0) {
1828                         ASSERT(list_empty(&cache->bg_list));
1829                         btrfs_mark_bg_unused(cache);
1830                 }
1831         }
1832
1833         list_for_each_entry_rcu(space_info, &info->space_info, list) {
1834                 if (!(btrfs_get_alloc_profile(info, space_info->flags) &
1835                       (BTRFS_BLOCK_GROUP_RAID10 |
1836                        BTRFS_BLOCK_GROUP_RAID1_MASK |
1837                        BTRFS_BLOCK_GROUP_RAID56_MASK |
1838                        BTRFS_BLOCK_GROUP_DUP)))
1839                         continue;
1840                 /*
1841                  * Avoid allocating from un-mirrored block groups if there are
1842                  * mirrored block groups.
1843                  */
1844                 list_for_each_entry(cache,
1845                                 &space_info->block_groups[BTRFS_RAID_RAID0],
1846                                 list)
1847                         inc_block_group_ro(cache, 1);
1848                 list_for_each_entry(cache,
1849                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
1850                                 list)
1851                         inc_block_group_ro(cache, 1);
1852         }
1853
1854         btrfs_init_global_block_rsv(info);
1855         ret = check_chunk_block_group_mappings(info);
1856 error:
1857         btrfs_free_path(path);
1858         return ret;
1859 }
1860
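/*
 * Insert the block group items for all block groups created in this
 * transaction (trans->new_bgs) into the extent tree and finish their chunk
 * allocation; any failure aborts the transaction.
 */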
1861 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
1862 {
1863         struct btrfs_fs_info *fs_info = trans->fs_info;
1864         struct btrfs_block_group_cache *block_group;
1865         struct btrfs_root *extent_root = fs_info->extent_root;
1866         struct btrfs_block_group_item item;
1867         struct btrfs_key key;
1868         int ret = 0;
1869
1870         if (!trans->can_flush_pending_bgs)
1871                 return;
1872
1873         while (!list_empty(&trans->new_bgs)) {
1874                 block_group = list_first_entry(&trans->new_bgs,
1875                                                struct btrfs_block_group_cache,
1876                                                bg_list);
1877                 if (ret)
1878                         goto next;
1879
1880                 spin_lock(&block_group->lock);
1881                 memcpy(&item, &block_group->item, sizeof(item));
1882                 memcpy(&key, &block_group->key, sizeof(key));
1883                 spin_unlock(&block_group->lock);
1884
1885                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
1886                                         sizeof(item));
1887                 if (ret)
1888                         btrfs_abort_transaction(trans, ret);
1889                 ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
1890                 if (ret)
1891                         btrfs_abort_transaction(trans, ret);
1892                 add_block_group_free_space(trans, block_group);
1893                 /* Already aborted the transaction if it failed. */
1894 next:
1895                 btrfs_delayed_refs_rsv_release(fs_info, 1);
1896                 list_del_init(&block_group->bg_list);
1897         }
1898         btrfs_trans_release_chunk_metadata(trans);
1899 }
1900
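/*
 * Create a new block group for the chunk at @chunk_offset spanning @size
 * bytes and queue it on the transaction's new_bgs list; the block group
 * item itself is inserted later by btrfs_create_pending_block_groups().
 */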
1901 int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
1902                            u64 type, u64 chunk_offset, u64 size)
1903 {
1904         struct btrfs_fs_info *fs_info = trans->fs_info;
1905         struct btrfs_block_group_cache *cache;
1906         int ret;
1907
1908         btrfs_set_log_full_commit(trans);
1909
1910         cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
1911         if (!cache)
1912                 return -ENOMEM;
1913
1914         btrfs_set_block_group_used(&cache->item, bytes_used);
1915         btrfs_set_block_group_chunk_objectid(&cache->item,
1916                                              BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1917         btrfs_set_block_group_flags(&cache->item, type);
1918
1919         cache->flags = type;
1920         cache->last_byte_to_unpin = (u64)-1;
1921         cache->cached = BTRFS_CACHE_FINISHED;
1922         cache->needs_free_space = 1;
1923         ret = exclude_super_stripes(cache);
1924         if (ret) {
1925                 /* We may have excluded something, so call this just in case */
1926                 btrfs_free_excluded_extents(cache);
1927                 btrfs_put_block_group(cache);
1928                 return ret;
1929         }
1930
1931         add_new_free_space(cache, chunk_offset, chunk_offset + size);
1932
1933         btrfs_free_excluded_extents(cache);
1934
1935 #ifdef CONFIG_BTRFS_DEBUG
1936         if (btrfs_should_fragment_free_space(cache)) {
1937                 u64 new_bytes_used = size - bytes_used;
1938
1939                 bytes_used += new_bytes_used >> 1;
1940                 fragment_free_space(cache);
1941         }
1942 #endif
1943         /*
1944          * Ensure the corresponding space_info object is created and
1945          * assigned to our block group. We want our bg to be added to the rbtree
1946          * with its ->space_info set.
1947          */
1948         cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
1949         ASSERT(cache->space_info);
1950
1951         ret = btrfs_add_block_group_cache(fs_info, cache);
1952         if (ret) {
1953                 btrfs_remove_free_space_cache(cache);
1954                 btrfs_put_block_group(cache);
1955                 return ret;
1956         }
1957
1958         /*
1959          * Now that our block group has its ->space_info set and is inserted in
1960          * the rbtree, update the space info's counters.
1961          */
1962         trace_btrfs_add_block_group(fs_info, cache, 1);
1963         btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
1964                                 cache->bytes_super, &cache->space_info);
1965         btrfs_update_global_block_rsv(fs_info);
1966
1967         link_block_group(cache);
1968
1969         list_add_tail(&cache->bg_list, &trans->new_bgs);
1970         trans->delayed_ref_updates++;
1971         btrfs_update_delayed_refs_rsv(trans);
1972
1973         set_avail_alloc_bits(fs_info, type);
1974         return 0;
1975 }
1976
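/*
 * Compute the profile to use when relocating this block group's chunk,
 * based on the number of writable devices: e.g. mirroring degrades to DUP
 * when only one device is left, and DUP is promoted to RAID1 when more
 * devices are available.
 */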
1977 static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
1978 {
1979         u64 num_devices;
1980         u64 stripped;
1981
1982         /*
1983          * If restripe for this chunk_type is on, pick the target profile
1984          * and return; otherwise do the usual balance.
1985          */
1986         stripped = get_restripe_target(fs_info, flags);
1987         if (stripped)
1988                 return extended_to_chunk(stripped);
1989
1990         num_devices = fs_info->fs_devices->rw_devices;
1991
1992         stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
1993                 BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;
1994
1995         if (num_devices == 1) {
1996                 stripped |= BTRFS_BLOCK_GROUP_DUP;
1997                 stripped = flags & ~stripped;
1998
1999                 /* turn raid0 into single device chunks */
2000                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
2001                         return stripped;
2002
2003                 /* turn mirroring into duplication */
2004                 if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
2005                              BTRFS_BLOCK_GROUP_RAID10))
2006                         return stripped | BTRFS_BLOCK_GROUP_DUP;
2007         } else {
2008                 /* they already had raid on here, just return */
2009                 if (flags & stripped)
2010                         return flags;
2011
2012                 stripped |= BTRFS_BLOCK_GROUP_DUP;
2013                 stripped = flags & ~stripped;
2014
2015                 /* switch duplicated blocks with raid1 */
2016                 if (flags & BTRFS_BLOCK_GROUP_DUP)
2017                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
2018
2019                 /* this is drive concat, leave it alone */
2020         }
2021
2022         return flags;
2023 }
2024
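/*
 * Mark a block group read-only. If the current profile is being converted,
 * or if there is not enough free space in the rest of the space_info, this
 * may force allocation of a new chunk first so that writes can continue
 * elsewhere.
 */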
2025 int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
2026 {
2028         struct btrfs_fs_info *fs_info = cache->fs_info;
2029         struct btrfs_trans_handle *trans;
2030         u64 alloc_flags;
2031         int ret;
2032
2033 again:
2034         trans = btrfs_join_transaction(fs_info->extent_root);
2035         if (IS_ERR(trans))
2036                 return PTR_ERR(trans);
2037
2038         /*
2039          * we're not allowed to set block groups readonly after the dirty
2040          * block groups cache has started writing.  If it already started,
2041          * back off and let this transaction commit
2042          */
2043         mutex_lock(&fs_info->ro_block_group_mutex);
2044         if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2045                 u64 transid = trans->transid;
2046
2047                 mutex_unlock(&fs_info->ro_block_group_mutex);
2048                 btrfs_end_transaction(trans);
2049
2050                 ret = btrfs_wait_for_commit(fs_info, transid);
2051                 if (ret)
2052                         return ret;
2053                 goto again;
2054         }
2055
2056         /*
2057          * if we are changing raid levels, try to allocate a corresponding
2058          * block group with the new raid level.
2059          */
2060         alloc_flags = update_block_group_flags(fs_info, cache->flags);
2061         if (alloc_flags != cache->flags) {
2062                 ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2063                 /*
2064                  * ENOSPC is allowed here; we may have enough space
2065                  * already allocated at the new raid level to
2066                  * carry on.
2067                  */
2068                 if (ret == -ENOSPC)
2069                         ret = 0;
2070                 if (ret < 0)
2071                         goto out;
2072         }
2073
2074         ret = inc_block_group_ro(cache, 0);
2075         if (!ret)
2076                 goto out;
2077         alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2078         ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2079         if (ret < 0)
2080                 goto out;
2081         ret = inc_block_group_ro(cache, 0);
2082 out:
2083         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2084                 alloc_flags = update_block_group_flags(fs_info, cache->flags);
2085                 mutex_lock(&fs_info->chunk_mutex);
2086                 check_system_chunk(trans, alloc_flags);
2087                 mutex_unlock(&fs_info->chunk_mutex);
2088         }
2089         mutex_unlock(&fs_info->ro_block_group_mutex);
2090
2091         btrfs_end_transaction(trans);
2092         return ret;
2093 }
2094
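/*
 * Drop one read-only reference of the block group. When the last reference
 * is dropped, the unused part of the group stops being accounted as
 * bytes_readonly in its space_info.
 */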
2095 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
2096 {
2097         struct btrfs_space_info *sinfo = cache->space_info;
2098         u64 num_bytes;
2099
2100         BUG_ON(!cache->ro);
2101
2102         spin_lock(&sinfo->lock);
2103         spin_lock(&cache->lock);
2104         if (!--cache->ro) {
2105                 num_bytes = cache->key.offset - cache->reserved -
2106                             cache->pinned - cache->bytes_super -
2107                             btrfs_block_group_used(&cache->item);
2108                 sinfo->bytes_readonly -= num_bytes;
2109                 list_del_init(&cache->ro_list);
2110         }
2111         spin_unlock(&cache->lock);
2112         spin_unlock(&sinfo->lock);
2113 }
2114
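/*
 * Copy the in-memory block group item back into its slot in the extent
 * tree. Returns -ENOENT if the item does not exist yet, which can happen
 * while the block group is still attached to some transaction handle's
 * new_bgs list.
 */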
2115 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2116                                  struct btrfs_path *path,
2117                                  struct btrfs_block_group_cache *cache)
2118 {
2119         struct btrfs_fs_info *fs_info = trans->fs_info;
2120         int ret;
2121         struct btrfs_root *extent_root = fs_info->extent_root;
2122         unsigned long bi;
2123         struct extent_buffer *leaf;
2124
2125         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2126         if (ret) {
2127                 if (ret > 0)
2128                         ret = -ENOENT;
2129                 goto fail;
2130         }
2131
2132         leaf = path->nodes[0];
2133         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2134         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2135         btrfs_mark_buffer_dirty(leaf);
2136 fail:
2137         btrfs_release_path(path);
2138         return ret;
2139 }
2141
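/*
 * Prepare the free space cache inode of a block group for writeout in this
 * transaction: create or truncate the inode, preallocate space for the
 * cache file, and record the resulting disk_cache_state.
 */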
2142 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2143                             struct btrfs_trans_handle *trans,
2144                             struct btrfs_path *path)
2145 {
2146         struct btrfs_fs_info *fs_info = block_group->fs_info;
2147         struct btrfs_root *root = fs_info->tree_root;
2148         struct inode *inode = NULL;
2149         struct extent_changeset *data_reserved = NULL;
2150         u64 alloc_hint = 0;
2151         int dcs = BTRFS_DC_ERROR;
2152         u64 num_pages = 0;
2153         int retries = 0;
2154         int ret = 0;
2155
2156         /*
2157          * If this block group is smaller than 100 megs don't bother caching the
2158          * If this block group is smaller than 100 megs, don't bother caching the
2159          */
2160         if (block_group->key.offset < (100 * SZ_1M)) {
2161                 spin_lock(&block_group->lock);
2162                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2163                 spin_unlock(&block_group->lock);
2164                 return 0;
2165         }
2166
2167         if (trans->aborted)
2168                 return 0;
2169 again:
2170         inode = lookup_free_space_inode(block_group, path);
2171         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2172                 ret = PTR_ERR(inode);
2173                 btrfs_release_path(path);
2174                 goto out;
2175         }
2176
2177         if (IS_ERR(inode)) {
2178                 BUG_ON(retries);
2179                 retries++;
2180
2181                 if (block_group->ro)
2182                         goto out_free;
2183
2184                 ret = create_free_space_inode(trans, block_group, path);
2185                 if (ret)
2186                         goto out_free;
2187                 goto again;
2188         }
2189
2190         /*
2191          * We want to set the generation to 0; that way, if anything goes wrong
2192          * from here on out, we know not to trust this cache when we load up next
2193          * time.
2194          */
2195         BTRFS_I(inode)->generation = 0;
2196         ret = btrfs_update_inode(trans, root, inode);
2197         if (ret) {
2198                 /*
2199                  * So theoretically we could recover from this by simply setting the
2200                  * super cache generation to 0 so we know to invalidate the
2201                  * cache, but then we'd have to keep track of the block groups
2202                  * that fail this way so we know we _have_ to reset this cache
2203                  * before the next commit or risk reading stale cache.  So to
2204                  * limit our exposure to horrible edge cases, let's just abort the
2205                  * transaction; this only happens in really bad situations
2206                  * anyway.
2207                  */
2208                 btrfs_abort_transaction(trans, ret);
2209                 goto out_put;
2210         }
2211         WARN_ON(ret);
2212
2213         /* We've already setup this transaction, go ahead and exit */
2214         if (block_group->cache_generation == trans->transid &&
2215             i_size_read(inode)) {
2216                 dcs = BTRFS_DC_SETUP;
2217                 goto out_put;
2218         }
2219
2220         if (i_size_read(inode) > 0) {
2221                 ret = btrfs_check_trunc_cache_free_space(fs_info,
2222                                         &fs_info->global_block_rsv);
2223                 if (ret)
2224                         goto out_put;
2225
2226                 ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2227                 if (ret)
2228                         goto out_put;
2229         }
2230
2231         spin_lock(&block_group->lock);
2232         if (block_group->cached != BTRFS_CACHE_FINISHED ||
2233             !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2234                 /*
2235                  * Don't bother trying to write stuff out _if_:
2236                  * a) we're not cached,
2237                  * b) we're mounted with the nospace_cache option,
2238                  * c) we're using the v2 space cache (FREE_SPACE_TREE).
2239                  */
2240                 dcs = BTRFS_DC_WRITTEN;
2241                 spin_unlock(&block_group->lock);
2242                 goto out_put;
2243         }
2244         spin_unlock(&block_group->lock);
2245
2246         /*
2247          * We hit an ENOSPC when setting up the cache in this transaction; just
2248          * skip doing the setup, we've already cleared the cache so we're safe.
2249          */
2250         if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2251                 ret = -ENOSPC;
2252                 goto out_put;
2253         }
2254
2255         /*
2256          * Try to preallocate enough space based on how big the block group is.
2257          * Keep in mind this has to include any pinned space which could end up
2258          * taking up quite a bit since it's not folded into the other space
2259          * cache.
2260          */
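                /*
                 * For example, with 4K pages a 1GiB block group gives
                 * div_u64(1G, 256M) = 4, i.e. 4 * 16 = 64 pages, so 256KiB
                 * of cache space is preallocated below.
                 */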
2261         num_pages = div_u64(block_group->key.offset, SZ_256M);
2262         if (!num_pages)
2263                 num_pages = 1;
2264
2265         num_pages *= 16;
2266         num_pages *= PAGE_SIZE;
2267
2268         ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
2269         if (ret)
2270                 goto out_put;
2271
2272         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2273                                               num_pages, num_pages,
2274                                               &alloc_hint);
2275         /*
2276          * Our cache requires contiguous chunks so that we don't modify a bunch
2277          * of metadata or split extents when writing the cache out, which means
2278          * we can hit ENOSPC if we are heavily fragmented, in addition to normal
2279          * out of space conditions.  So if we hit this just skip setting up any
2280          * other block groups for this transaction, maybe we'll unpin enough
2281          * space the next time around.
2282          */
2283         if (!ret)
2284                 dcs = BTRFS_DC_SETUP;
2285         else if (ret == -ENOSPC)
2286                 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
2287
2288 out_put:
2289         iput(inode);
2290 out_free:
2291         btrfs_release_path(path);
2292 out:
2293         spin_lock(&block_group->lock);
2294         if (!ret && dcs == BTRFS_DC_SETUP)
2295                 block_group->cache_generation = trans->transid;
2296         block_group->disk_cache_state = dcs;
2297         spin_unlock(&block_group->lock);
2298
2299         extent_changeset_free(data_reserved);
2300         return ret;
2301 }
2302
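/*
 * Run cache_save_setup() for every dirty block group whose space cache must
 * be cleared, so that the free space cache inodes are in place before block
 * group writeout starts.
 */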
2303 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
2304 {
2305         struct btrfs_fs_info *fs_info = trans->fs_info;
2306         struct btrfs_block_group_cache *cache, *tmp;
2307         struct btrfs_transaction *cur_trans = trans->transaction;
2308         struct btrfs_path *path;
2309
2310         if (list_empty(&cur_trans->dirty_bgs) ||
2311             !btrfs_test_opt(fs_info, SPACE_CACHE))
2312                 return 0;
2313
2314         path = btrfs_alloc_path();
2315         if (!path)
2316                 return -ENOMEM;
2317
2318         /* Could add new block groups, use _safe just in case */
2319         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
2320                                  dirty_list) {
2321                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2322                         cache_save_setup(cache, trans, path);
2323         }
2324
2325         btrfs_free_path(path);
2326         return 0;
2327 }
2328
2329 /*
2330  * Transaction commit does final block group cache writeback during a critical
2331  * section where nothing is allowed to change the FS.  This is required in
2332  * order for the cache to actually match the block group, but can introduce a
2333  * lot of latency into the commit.
2334  *
2335  * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
2336  * There's a chance we'll have to redo some of it if the block group changes
2337  * again during the commit, but it greatly reduces the commit latency by
2338  * getting rid of the easy block groups while we're still allowing others to
2339  * join the commit.
2340  */
2341 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
2342 {
2343         struct btrfs_fs_info *fs_info = trans->fs_info;
2344         struct btrfs_block_group_cache *cache;
2345         struct btrfs_transaction *cur_trans = trans->transaction;
2346         int ret = 0;
2347         int should_put;
2348         struct btrfs_path *path = NULL;
2349         LIST_HEAD(dirty);
2350         struct list_head *io = &cur_trans->io_bgs;
2351         int num_started = 0;
2352         int loops = 0;
2353
2354         spin_lock(&cur_trans->dirty_bgs_lock);
2355         if (list_empty(&cur_trans->dirty_bgs)) {
2356                 spin_unlock(&cur_trans->dirty_bgs_lock);
2357                 return 0;
2358         }
2359         list_splice_init(&cur_trans->dirty_bgs, &dirty);
2360         spin_unlock(&cur_trans->dirty_bgs_lock);
2361
2362 again:
2363         /* Make sure all the block groups on our dirty list actually exist */
2364         btrfs_create_pending_block_groups(trans);
2365
2366         if (!path) {
2367                 path = btrfs_alloc_path();
2368                 if (!path)
2369                         return -ENOMEM;
2370         }
2371
2372         /*
2373          * cache_write_mutex is here only to protect us from balance or the
2374          * automatic removal of empty block groups deleting this block group
2375          * while we are writing out the cache.
2376          */
2377         mutex_lock(&trans->transaction->cache_write_mutex);
2378         while (!list_empty(&dirty)) {
2379                 bool drop_reserve = true;
2380
2381                 cache = list_first_entry(&dirty,
2382                                          struct btrfs_block_group_cache,
2383                                          dirty_list);
2384                 /*
2385                  * This can happen if something re-dirties a block group that
2386                  * is already under IO.  Just wait for it to finish and then do
2387                  * it all again
2388                  */
2389                 if (!list_empty(&cache->io_list)) {
2390                         list_del_init(&cache->io_list);
2391                         btrfs_wait_cache_io(trans, cache, path);
2392                         btrfs_put_block_group(cache);
2393                 }
2394
2396                 /*
2397                  * btrfs_wait_cache_io uses the cache->dirty_list to decide if
2398                  * it should update the cache_state.  Don't delete until after
2399                  * we wait.
2400                  *
2401                  * Since we're not running in the commit critical section,
2402                  * we need the dirty_bgs_lock to protect against btrfs_update_block_group()
2403                  */
2404                 spin_lock(&cur_trans->dirty_bgs_lock);
2405                 list_del_init(&cache->dirty_list);
2406                 spin_unlock(&cur_trans->dirty_bgs_lock);
2407
2408                 should_put = 1;
2409
2410                 cache_save_setup(cache, trans, path);
2411
2412                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
2413                         cache->io_ctl.inode = NULL;
2414                         ret = btrfs_write_out_cache(trans, cache, path);
2415                         if (ret == 0 && cache->io_ctl.inode) {
2416                                 num_started++;
2417                                 should_put = 0;
2418
2419                                 /*
2420                                  * The cache_write_mutex is protecting the
2421                                  * io_list, also refer to the definition of
2422                                  * btrfs_transaction::io_bgs for more details
2423                                  */
2424                                 list_add_tail(&cache->io_list, io);
2425                         } else {
2426                                 /*
2427                                  * If we failed to write the cache, the
2428                                  * generation will be bad and life goes on
2429                                  */
2430                                 ret = 0;
2431                         }
2432                 }
2433                 if (!ret) {
2434                         ret = write_one_cache_group(trans, path, cache);
2435                         /*
2436                          * Our block group might still be attached to the list
2437                          * of new block groups in the transaction handle of some
2438                          * other task (struct btrfs_trans_handle->new_bgs). This
2439                          * means its block group item isn't yet in the extent
2440                          * tree. If this happens ignore the error, as we will
2441                          * try again later in the critical section of the
2442                          * transaction commit.
2443                          */
2444                         if (ret == -ENOENT) {
2445                                 ret = 0;
2446                                 spin_lock(&cur_trans->dirty_bgs_lock);
2447                                 if (list_empty(&cache->dirty_list)) {
2448                                         list_add_tail(&cache->dirty_list,
2449                                                       &cur_trans->dirty_bgs);
2450                                         btrfs_get_block_group(cache);
2451                                         drop_reserve = false;
2452                                 }
2453                                 spin_unlock(&cur_trans->dirty_bgs_lock);
2454                         } else if (ret) {
2455                                 btrfs_abort_transaction(trans, ret);
2456                         }
2457                 }
2458
2459                 /* If it's not on the io list, we need to put the block group */
2460                 if (should_put)
2461                         btrfs_put_block_group(cache);
2462                 if (drop_reserve)
2463                         btrfs_delayed_refs_rsv_release(fs_info, 1);
2464
2465                 if (ret)
2466                         break;
2467
2468                 /*
2469                  * Avoid blocking other tasks for too long. It might even save
2470                  * us from writing caches for block groups that are going to be
2471                  * removed.
2472                  */
2473                 mutex_unlock(&trans->transaction->cache_write_mutex);
2474                 mutex_lock(&trans->transaction->cache_write_mutex);
2475         }
2476         mutex_unlock(&trans->transaction->cache_write_mutex);
2477
2478         /*
2479          * Go through delayed refs for all the stuff we've just kicked off
2480          * and then loop back (just once)
2481          */
2482         ret = btrfs_run_delayed_refs(trans, 0);
2483         if (!ret && loops == 0) {
2484                 loops++;
2485                 spin_lock(&cur_trans->dirty_bgs_lock);
2486                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
2487                 /*
2488                  * dirty_bgs_lock protects us from concurrent block group
2489                  * deletes too (not just cache_write_mutex).
2490                  */
2491                 if (!list_empty(&dirty)) {
2492                         spin_unlock(&cur_trans->dirty_bgs_lock);
2493                         goto again;
2494                 }
2495                 spin_unlock(&cur_trans->dirty_bgs_lock);
2496         } else if (ret < 0) {
2497                 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
2498         }
2499
2500         btrfs_free_path(path);
2501         return ret;
2502 }
2503
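/*
 * Write out the remaining dirty block groups during the critical section of
 * the transaction commit, after btrfs_start_dirty_block_groups() has dealt
 * with the easy ones, and wait for the resulting cache IO to finish.
 */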
2504 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
2505 {
2506         struct btrfs_fs_info *fs_info = trans->fs_info;
2507         struct btrfs_block_group_cache *cache;
2508         struct btrfs_transaction *cur_trans = trans->transaction;
2509         int ret = 0;
2510         int should_put;
2511         struct btrfs_path *path;
2512         struct list_head *io = &cur_trans->io_bgs;
2513         int num_started = 0;
2514
2515         path = btrfs_alloc_path();
2516         if (!path)
2517                 return -ENOMEM;
2518
2519         /*
2520          * Even though we are in the critical section of the transaction commit,
2521          * we can still have concurrent tasks adding elements to this
2522          * transaction's list of dirty block groups. These tasks correspond to
2523          * endio free space workers started when writeback finishes for a
2524          * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
2525          * allocate new block groups as a result of COWing nodes of the root
2526          * tree when updating the free space inode. The writeback for the space
2527          * caches is triggered by an earlier call to
2528          * btrfs_start_dirty_block_groups() and iterations of the following
2529          * loop.
2530          * Also we want to do the cache_save_setup first and then run the
2531          * delayed refs to make sure we have the best chance at doing this all
2532          * in one shot.
2533          */
2534         spin_lock(&cur_trans->dirty_bgs_lock);
2535         while (!list_empty(&cur_trans->dirty_bgs)) {
2536                 cache = list_first_entry(&cur_trans->dirty_bgs,
2537                                          struct btrfs_block_group_cache,
2538                                          dirty_list);
2539
2540                 /*
2541                  * This can happen if cache_save_setup re-dirties a block group
2542                  * that is already under IO.  Just wait for it to finish and
2543                  * then do it all again
2544                  */
2545                 if (!list_empty(&cache->io_list)) {
2546                         spin_unlock(&cur_trans->dirty_bgs_lock);
2547                         list_del_init(&cache->io_list);
2548                         btrfs_wait_cache_io(trans, cache, path);
2549                         btrfs_put_block_group(cache);
2550                         spin_lock(&cur_trans->dirty_bgs_lock);
2551                 }
2552
2553                 /*
2554                  * Don't remove from the dirty list until after we've waited on
2555                  * any pending IO
2556                  */
2557                 list_del_init(&cache->dirty_list);
2558                 spin_unlock(&cur_trans->dirty_bgs_lock);
2559                 should_put = 1;
2560
2561                 cache_save_setup(cache, trans, path);
2562
2563                 if (!ret)
2564                         ret = btrfs_run_delayed_refs(trans,
2565                                                      (unsigned long) -1);
2566
2567                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
2568                         cache->io_ctl.inode = NULL;
2569                         ret = btrfs_write_out_cache(trans, cache, path);
2570                         if (ret == 0 && cache->io_ctl.inode) {
2571                                 num_started++;
2572                                 should_put = 0;
2573                                 list_add_tail(&cache->io_list, io);
2574                         } else {
2575                                 /*
2576                                  * If we failed to write the cache, the
2577                                  * generation will be bad and life goes on
2578                                  */
2579                                 ret = 0;
2580                         }
2581                 }
2582                 if (!ret) {
2583                         ret = write_one_cache_group(trans, path, cache);
2584                         /*
2585                          * One of the free space endio workers might have
2586                          * created a new block group while updating a free space
2587                          * cache's inode (at inode.c:btrfs_finish_ordered_io())
2588                          * and hasn't released its transaction handle yet, in
2589                          * which case the new block group is still attached to
2590                          * its transaction handle and its creation has not
2591                          * finished yet (no block group item in the extent tree
2592                          * yet, etc). If this is the case, wait for all free
2593                          * space endio workers to finish and retry. This is a
2594                          * very rare case, so no need for a more efficient and
2595                          * complex approach.
2596                          */
2597                         if (ret == -ENOENT) {
2598                                 wait_event(cur_trans->writer_wait,
2599                                    atomic_read(&cur_trans->num_writers) == 1);
2600                                 ret = write_one_cache_group(trans, path, cache);
2601                         }
2602                         if (ret)
2603                                 btrfs_abort_transaction(trans, ret);
2604                 }
2605
2606                 /* If it's not on the io list, we need to put the block group */
2607                 if (should_put)
2608                         btrfs_put_block_group(cache);
2609                 btrfs_delayed_refs_rsv_release(fs_info, 1);
2610                 spin_lock(&cur_trans->dirty_bgs_lock);
2611         }
2612         spin_unlock(&cur_trans->dirty_bgs_lock);
2613
2614         /*
2615          * Refer to the definition of the io_bgs member for details on why
2616          * it's safe to use it without any locking.
2617          */
2618         while (!list_empty(io)) {
2619                 cache = list_first_entry(io, struct btrfs_block_group_cache,
2620                                          io_list);
2621                 list_del_init(&cache->io_list);
2622                 btrfs_wait_cache_io(trans, cache, path);
2623                 btrfs_put_block_group(cache);
2624         }
2625
2626         btrfs_free_path(path);
2627         return ret;
2628 }
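
/*
 * Editor's sketch (not part of the original file): the -ENOENT retry idiom
 * used in the loop above, isolated for clarity.  Waiting until num_writers
 * drops to 1 means this task holds the only open transaction handle, so any
 * free space endio worker that was still attaching a new block group to its
 * handle has finished and the block group item is now visible in the extent
 * tree.  The function name is hypothetical.
 */
static int example_write_cache_group_retry(struct btrfs_trans_handle *trans,
                                           struct btrfs_path *path,
                                           struct btrfs_block_group_cache *cache)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        int ret;

        ret = write_one_cache_group(trans, path, cache);
        if (ret == -ENOENT) {
                /* Wait until we are the only writer left on the transaction. */
                wait_event(cur_trans->writer_wait,
                           atomic_read(&cur_trans->num_writers) == 1);
                ret = write_one_cache_group(trans, path, cache);
        }
        return ret;
}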
2629
2630 int btrfs_update_block_group(struct btrfs_trans_handle *trans,
2631                              u64 bytenr, u64 num_bytes, int alloc)
2632 {
2633         struct btrfs_fs_info *info = trans->fs_info;
2634         struct btrfs_block_group_cache *cache = NULL;
2635         u64 total = num_bytes;
2636         u64 old_val;
2637         u64 byte_in_group;
2638         int factor;
2639         int ret = 0;
2640
2641         /* Block accounting for super block */
2642         spin_lock(&info->delalloc_root_lock);
2643         old_val = btrfs_super_bytes_used(info->super_copy);
2644         if (alloc)
2645                 old_val += num_bytes;
2646         else
2647                 old_val -= num_bytes;
2648         btrfs_set_super_bytes_used(info->super_copy, old_val);
2649         spin_unlock(&info->delalloc_root_lock);
2650
2651         while (total) {
2652                 cache = btrfs_lookup_block_group(info, bytenr);
2653                 if (!cache) {
2654                         ret = -ENOENT;
2655                         break;
2656                 }
2657                 factor = btrfs_bg_type_to_factor(cache->flags);
2658
2659                 /*
2660                  * If this block group has free space cache written out, we
2661                  * need to make sure to load it if we are removing space.  This
2662                  * is because we need the unpinning stage to actually add the
2663                  * space back to the block group, otherwise we will leak space.
2664                  */
2665                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
2666                         btrfs_cache_block_group(cache, 1);
2667
2668                 byte_in_group = bytenr - cache->key.objectid;
2669                 WARN_ON(byte_in_group > cache->key.offset);
2670
2671                 spin_lock(&cache->space_info->lock);
2672                 spin_lock(&cache->lock);
2673
2674                 if (btrfs_test_opt(info, SPACE_CACHE) &&
2675                     cache->disk_cache_state < BTRFS_DC_CLEAR)
2676                         cache->disk_cache_state = BTRFS_DC_CLEAR;
2677
2678                 old_val = btrfs_block_group_used(&cache->item);
2679                 num_bytes = min(total, cache->key.offset - byte_in_group);
2680                 if (alloc) {
2681                         old_val += num_bytes;
2682                         btrfs_set_block_group_used(&cache->item, old_val);
2683                         cache->reserved -= num_bytes;
2684                         cache->space_info->bytes_reserved -= num_bytes;
2685                         cache->space_info->bytes_used += num_bytes;
2686                         cache->space_info->disk_used += num_bytes * factor;
2687                         spin_unlock(&cache->lock);
2688                         spin_unlock(&cache->space_info->lock);
2689                 } else {
2690                         old_val -= num_bytes;
2691                         btrfs_set_block_group_used(&cache->item, old_val);
2692                         cache->pinned += num_bytes;
2693                         btrfs_space_info_update_bytes_pinned(info,
2694                                         cache->space_info, num_bytes);
2695                         cache->space_info->bytes_used -= num_bytes;
2696                         cache->space_info->disk_used -= num_bytes * factor;
2697                         spin_unlock(&cache->lock);
2698                         spin_unlock(&cache->space_info->lock);
2699
2700                         percpu_counter_add_batch(
2701                                         &cache->space_info->total_bytes_pinned,
2702                                         num_bytes,
2703                                         BTRFS_TOTAL_BYTES_PINNED_BATCH);
2704                         set_extent_dirty(info->pinned_extents,
2705                                          bytenr, bytenr + num_bytes - 1,
2706                                          GFP_NOFS | __GFP_NOFAIL);
2707                 }
2708
2709                 spin_lock(&trans->transaction->dirty_bgs_lock);
2710                 if (list_empty(&cache->dirty_list)) {
2711                         list_add_tail(&cache->dirty_list,
2712                                       &trans->transaction->dirty_bgs);
2713                         trans->delayed_ref_updates++;
2714                         btrfs_get_block_group(cache);
2715                 }
2716                 spin_unlock(&trans->transaction->dirty_bgs_lock);
2717
2718                 /*
2719                  * No longer have used bytes in this block group, queue it for
2720                  * deletion. We do this after adding the block group to the
2721                  * dirty list to avoid races between cleaner kthread and space
2722                  * cache writeout.
2723                  */
2724                 if (!alloc && old_val == 0)
2725                         btrfs_mark_bg_unused(cache);
2726
2727                 btrfs_put_block_group(cache);
2728                 total -= num_bytes;
2729                 bytenr += num_bytes;
2730         }
2731
2732         /* Modified block groups are accounted for in the delayed_refs_rsv. */
2733         btrfs_update_delayed_refs_rsv(trans);
2734         return ret;
2735 }
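
/*
 * Editor's sketch (not part of the original file): how the RAID factor used
 * above scales the on-disk accounting.  Assuming btrfs_bg_type_to_factor()
 * returns 2 for profiles that keep two copies of every byte (e.g. RAID1 and
 * DUP) and 1 otherwise, allocating num_bytes in such a block group moves
 * space_info->bytes_used by num_bytes but disk_used by twice that.  The
 * helper name is hypothetical.
 */
static u64 example_disk_used_delta(u64 bg_flags, u64 num_bytes)
{
        /* Same lookup btrfs_update_block_group() performs. */
        int factor = btrfs_bg_type_to_factor(bg_flags);

        return num_bytes * factor;
}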
2736
2737 /**
2738  * btrfs_add_reserved_bytes - update the block_group and space info counters
2739  * @cache:      The cache we are manipulating
2740  * @ram_bytes:  The number of bytes of file content; this is the same as
2741  *              @num_bytes except on the compression path.
2742  * @num_bytes:  The number of bytes in question
2743  * @delalloc:   Whether the blocks are allocated for a delalloc write
2744  *
2745  * This is called by the allocator when it reserves space. If the block
2746  * group has become read only we cannot make the reservation and return
2747  * -EAGAIN; otherwise this function always succeeds.
2748  */
2749 int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
2750                              u64 ram_bytes, u64 num_bytes, int delalloc)
2751 {
2752         struct btrfs_space_info *space_info = cache->space_info;
2753         int ret = 0;
2754
2755         spin_lock(&space_info->lock);
2756         spin_lock(&cache->lock);
2757         if (cache->ro) {
2758                 ret = -EAGAIN;
2759         } else {
2760                 cache->reserved += num_bytes;
2761                 space_info->bytes_reserved += num_bytes;
2762                 trace_btrfs_space_reservation(cache->fs_info, "space_info",
2763                                               space_info->flags, num_bytes, 1);
2764                 btrfs_space_info_update_bytes_may_use(cache->fs_info,
2765                                                       space_info, -ram_bytes);
2766                 if (delalloc)
2767                         cache->delalloc_bytes += num_bytes;
2768         }
2769         spin_unlock(&cache->lock);
2770         spin_unlock(&space_info->lock);
2771         return ret;
2772 }
2773
2774 /**
2775  * btrfs_free_reserved_bytes - update the block_group and space info counters
2776  * @cache:      The cache we are manipulating
2777  * @num_bytes:  The number of bytes in question
2778  * @delalloc:   Whether the blocks were allocated for a delalloc write
2779  *
2780  * This is called by somebody who is freeing space that was never actually used
2781  * on disk.  For example, if you reserve space for a new leaf in transaction A
2782  * and then free that leaf before transaction A commits, you call this to
2783  * release the reservation.
2784  */
2785 void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
2786                                u64 num_bytes, int delalloc)
2787 {
2788         struct btrfs_space_info *space_info = cache->space_info;
2789
2790         spin_lock(&space_info->lock);
2791         spin_lock(&cache->lock);
2792         if (cache->ro)
2793                 space_info->bytes_readonly += num_bytes;
2794         cache->reserved -= num_bytes;
2795         space_info->bytes_reserved -= num_bytes;
2796         space_info->max_extent_size = 0;
2797
2798         if (delalloc)
2799                 cache->delalloc_bytes -= num_bytes;
2800         spin_unlock(&cache->lock);
2801         spin_unlock(&space_info->lock);
2802 }
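
/*
 * Editor's sketch (not part of the original file): the usual pairing of the
 * two helpers above.  A caller reserves space before writing an extent and
 * returns the reservation with btrfs_free_reserved_bytes() if the extent is
 * never written, e.g. on an error path.  example_try_reserve() is
 * hypothetical.
 */
static int example_try_reserve(struct btrfs_block_group_cache *cache,
                               u64 bytes, int delalloc)
{
        int ret;

        /* Fails with -EAGAIN if the block group was marked read only. */
        ret = btrfs_add_reserved_bytes(cache, bytes, bytes, delalloc);
        if (ret)
                return ret;

        /*
         * ... attempt the write; if it fails, the reservation must be
         * given back since nothing ever reached disk ...
         */
        btrfs_free_reserved_bytes(cache, bytes, delalloc);
        return 0;
}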
2803
2804 static void force_metadata_allocation(struct btrfs_fs_info *info)
2805 {
2806         struct list_head *head = &info->space_info;
2807         struct btrfs_space_info *found;
2808
2809         rcu_read_lock();
2810         list_for_each_entry_rcu(found, head, list) {
2811                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2812                         found->force_alloc = CHUNK_ALLOC_FORCE;
2813         }
2814         rcu_read_unlock();
2815 }
2816
2817 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
2818                               struct btrfs_space_info *sinfo, int force)
2819 {
2820         u64 bytes_used = btrfs_space_info_used(sinfo, false);
2821         u64 thresh;
2822
2823         if (force == CHUNK_ALLOC_FORCE)
2824                 return 1;
2825
2826         /*
2827          * In limited mode, we want to keep some free space available, up
2828          * to about 1% of the FS size.
2829          */
2830         if (force == CHUNK_ALLOC_LIMITED) {
2831                 thresh = btrfs_super_total_bytes(fs_info->super_copy);
2832                 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
2833
2834                 if (sinfo->total_bytes - bytes_used < thresh)
2835                         return 1;
2836         }
2837
2838         if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
2839                 return 0;
2840         return 1;
2841 }
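
/*
 * Editor's sketch (not part of the original file): the arithmetic behind
 * should_alloc_chunk(), assuming the usual semantics of the misc.h helpers
 * (div_factor_fine(n, f) is roughly n * f / 100 and div_factor(n, f) is
 * roughly n * f / 10).  On a 1 TiB space_info, CHUNK_ALLOC_LIMITED allocates
 * while free space is below max(64M, ~10G), and CHUNK_ALLOC_NO_FORCE
 * allocates once usage crosses roughly 80% of total_bytes, as mirrored by
 * this hypothetical helper.
 */
static bool example_no_force_should_alloc(u64 total_bytes, u64 bytes_used)
{
        /* Mirrors the final check in should_alloc_chunk(). */
        return bytes_used + SZ_2M >= div_factor(total_bytes, 8);
}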
2842
2843 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
2844 {
2845         u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
2846
2847         return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2848 }
2849
2850 /*
2851  * If force is CHUNK_ALLOC_FORCE:
2852  *    - return 1 if it successfully allocates a chunk,
2853  *    - return errors including -ENOSPC otherwise.
2854  * If force is NOT CHUNK_ALLOC_FORCE:
2855  *    - return 0 if it doesn't need to allocate a new chunk,
2856  *    - return 1 if it successfully allocates a chunk,
2857  *    - return errors including -ENOSPC otherwise.
2858  */
2859 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
2860                       enum btrfs_chunk_alloc_enum force)
2861 {
2862         struct btrfs_fs_info *fs_info = trans->fs_info;
2863         struct btrfs_space_info *space_info;
2864         bool wait_for_alloc = false;
2865         bool should_alloc = false;
2866         int ret = 0;
2867
2868         /* Don't re-enter if we're already allocating a chunk */
2869         if (trans->allocating_chunk)
2870                 return -ENOSPC;
2871
2872         space_info = btrfs_find_space_info(fs_info, flags);
2873         ASSERT(space_info);
2874
2875         do {
2876                 spin_lock(&space_info->lock);
2877                 if (force < space_info->force_alloc)
2878                         force = space_info->force_alloc;
2879                 should_alloc = should_alloc_chunk(fs_info, space_info, force);
2880                 if (space_info->full) {
2881                         /* No more free physical space */
2882                         if (should_alloc)
2883                                 ret = -ENOSPC;
2884                         else
2885                                 ret = 0;
2886                         spin_unlock(&space_info->lock);
2887                         return ret;
2888                 } else if (!should_alloc) {
2889                         spin_unlock(&space_info->lock);
2890                         return 0;
2891                 } else if (space_info->chunk_alloc) {
2892                         /*
2893                          * Someone is already allocating, so we need to block
2894                          * until they are finished and then loop to recheck
2895                          * whether we should continue with our allocation
2896                          * attempt.
2897                          */
2898                         wait_for_alloc = true;
2899                         spin_unlock(&space_info->lock);
2900                         mutex_lock(&fs_info->chunk_mutex);
2901                         mutex_unlock(&fs_info->chunk_mutex);
2902                 } else {
2903                         /* Proceed with allocation */
2904                         space_info->chunk_alloc = 1;
2905                         wait_for_alloc = false;
2906                         spin_unlock(&space_info->lock);
2907                 }
2908
2909                 cond_resched();
2910         } while (wait_for_alloc);
2911
2912         mutex_lock(&fs_info->chunk_mutex);
2913         trans->allocating_chunk = true;
2914
2915         /*
2916          * If we have mixed data/metadata chunks we want to make sure we keep
2917          * allocating mixed chunks instead of individual chunks.
2918          */
2919         if (btrfs_mixed_space_info(space_info))
2920                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
2921
2922         /*
2923          * If we're allocating a data chunk, make sure that we keep a
2924          * reasonable number of metadata chunks allocated in the FS as
2925          * well.
2926          */
2927         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
2928                 fs_info->data_chunk_allocations++;
2929                 if (!(fs_info->data_chunk_allocations %
2930                       fs_info->metadata_ratio))
2931                         force_metadata_allocation(fs_info);
2932         }
2933
2934         /*
2935          * Check if we have enough space in the SYSTEM chunk because we may
2936          * need to update device items.
2937          */
2938         check_system_chunk(trans, flags);
2939
2940         ret = btrfs_alloc_chunk(trans, flags);
2941         trans->allocating_chunk = false;
2942
2943         spin_lock(&space_info->lock);
2944         if (ret < 0) {
2945                 if (ret == -ENOSPC)
2946                         space_info->full = 1;
2947                 else
2948                         goto out;
2949         } else {
2950                 ret = 1;
2951                 space_info->max_extent_size = 0;
2952         }
2953
2954         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
2955 out:
2956         space_info->chunk_alloc = 0;
2957         spin_unlock(&space_info->lock);
2958         mutex_unlock(&fs_info->chunk_mutex);
2959         /*
2960          * When we allocate a new chunk we reserve space in the chunk block
2961          * reserve to make sure we can COW nodes/leaves in the chunk tree or
2962          * add new nodes/leaves to it if we end up needing to do it when
2963          * inserting the chunk item and updating device items as part of the
2964          * second phase of chunk allocation, performed by
2965          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
2966          * large number of new block groups to create in our transaction
2967          * handle's new_bgs list to avoid exhausting the chunk block reserve
2968          * in extreme cases - like having a single transaction create many new
2969          * block groups when starting to write out the free space caches of all
2970          * the block groups that were made dirty during the lifetime of the
2971          * transaction.
2972          */
2973         if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
2974                 btrfs_create_pending_block_groups(trans);
2975
2976         return ret;
2977 }
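
/*
 * Editor's sketch (not part of the original file): a typical call into
 * btrfs_chunk_alloc() from a flushing path, assuming a transaction handle is
 * already held.  The profile lookup mirrors btrfs_force_chunk_alloc() above;
 * the function name is hypothetical.
 */
static int example_alloc_data_chunk(struct btrfs_trans_handle *trans)
{
        u64 flags = btrfs_get_alloc_profile(trans->fs_info,
                                            BTRFS_BLOCK_GROUP_DATA);

        /* Returns 1 if a chunk was allocated, 0 if none was needed. */
        return btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_NO_FORCE);
}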
2978
2979 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
2980 {
2981         u64 num_dev;
2982
2983         num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
2984         if (!num_dev)
2985                 num_dev = fs_info->fs_devices->rw_devices;
2986
2987         return num_dev;
2988 }
2989
2990 /*
2991  * Reserve space in the system space_info that will be needed for allocating
2992  * a chunk or removing one; @type is the profile of the chunk being allocated
2993  * or removed.
2994  */
2995 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
2996 {
2997         struct btrfs_fs_info *fs_info = trans->fs_info;
2998         struct btrfs_space_info *info;
2999         u64 left;
3000         u64 thresh;
3001         int ret = 0;
3002         u64 num_devs;
3003
3004         /*
3005          * The chunk mutex must be held: we may allocate a system chunk here,
3006          * and the chunk block reserve reservation must be atomic and race free.
3007          */
3008         lockdep_assert_held(&fs_info->chunk_mutex);
3009
3010         info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3011         spin_lock(&info->lock);
3012         left = info->total_bytes - btrfs_space_info_used(info, true);
3013         spin_unlock(&info->lock);
3014
3015         num_devs = get_profile_num_devs(fs_info, type);
3016
3017         /* num_devs device items to update and 1 chunk item to add or remove */
3018         thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
3019                 btrfs_calc_insert_metadata_size(fs_info, 1);
3020
3021         if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
3022                 btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
3023                            left, thresh, type);
3024                 btrfs_dump_space_info(fs_info, info, 0, 0);
3025         }
3026
3027         if (left < thresh) {
3028                 u64 flags = btrfs_system_alloc_profile(fs_info);
3029
3030                 /*
3031                  * Ignore failure to create system chunk. We might end up not
3032          * needing it, as we might not need to COW all nodes/leaves from
3033                  * the paths we visit in the chunk tree (they were already COWed
3034                  * or created in the current transaction for example).
3035                  */
3036                 ret = btrfs_alloc_chunk(trans, flags);
3037         }
3038
3039         if (!ret) {
3040                 ret = btrfs_block_rsv_add(fs_info->chunk_root,
3041                                           &fs_info->chunk_block_rsv,
3042                                           thresh, BTRFS_RESERVE_NO_FLUSH);
3043                 if (!ret)
3044                         trans->chunk_bytes_reserved += thresh;
3045         }
3046 }
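
/*
 * Editor's sketch (not part of the original file): a rough feel for the
 * threshold above.  Assuming a 16K nodesize, BTRFS_MAX_LEVEL of 8 and the
 * usual helper semantics (btrfs_calc_metadata_size() is about
 * nodesize * BTRFS_MAX_LEVEL per item, btrfs_calc_insert_metadata_size()
 * twice that), updating 2 device items and inserting 1 chunk item reserves
 * on the order of 2 * 128K + 256K = 512K from the SYSTEM space info.  The
 * helper name is hypothetical.
 */
static u64 example_system_thresh(struct btrfs_fs_info *fs_info, u64 num_devs)
{
        /* Same formula check_system_chunk() uses. */
        return btrfs_calc_metadata_size(fs_info, num_devs) +
               btrfs_calc_insert_metadata_size(fs_info, 1);
}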
3047
3048 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
3049 {
3050         struct btrfs_block_group_cache *block_group;
3051         u64 last = 0;
3052
3053         while (1) {
3054                 struct inode *inode;
3055
3056                 block_group = btrfs_lookup_first_block_group(info, last);
3057                 while (block_group) {
3058                         btrfs_wait_block_group_cache_done(block_group);
3059                         spin_lock(&block_group->lock);
3060                         if (block_group->iref)
3061                                 break;
3062                         spin_unlock(&block_group->lock);
3063                         block_group = btrfs_next_block_group(block_group);
3064                 }
3065                 if (!block_group) {
3066                         if (last == 0)
3067                                 break;
3068                         last = 0;
3069                         continue;
3070                 }
3071
3072                 inode = block_group->inode;
3073                 block_group->iref = 0;
3074                 block_group->inode = NULL;
3075                 spin_unlock(&block_group->lock);
3076                 ASSERT(block_group->io_ctl.inode == NULL);
3077                 iput(inode);
3078                 last = block_group->key.objectid + block_group->key.offset;
3079                 btrfs_put_block_group(block_group);
3080         }
3081 }
3082
3083 /*
3084  * Must be called only after stopping all workers, since we could have block
3085  * group caching kthreads running, and therefore they could race with us if we
3086  * freed the block groups before stopping them.
3087  */
3088 int btrfs_free_block_groups(struct btrfs_fs_info *info)
3089 {
3090         struct btrfs_block_group_cache *block_group;
3091         struct btrfs_space_info *space_info;
3092         struct btrfs_caching_control *caching_ctl;
3093         struct rb_node *n;
3094
3095         down_write(&info->commit_root_sem);
3096         while (!list_empty(&info->caching_block_groups)) {
3097                 caching_ctl = list_entry(info->caching_block_groups.next,
3098                                          struct btrfs_caching_control, list);
3099                 list_del(&caching_ctl->list);
3100                 btrfs_put_caching_control(caching_ctl);
3101         }
3102         up_write(&info->commit_root_sem);
3103
3104         spin_lock(&info->unused_bgs_lock);
3105         while (!list_empty(&info->unused_bgs)) {
3106                 block_group = list_first_entry(&info->unused_bgs,
3107                                                struct btrfs_block_group_cache,
3108                                                bg_list);
3109                 list_del_init(&block_group->bg_list);
3110                 btrfs_put_block_group(block_group);
3111         }
3112         spin_unlock(&info->unused_bgs_lock);
3113
3114         spin_lock(&info->block_group_cache_lock);
3115         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
3116                 block_group = rb_entry(n, struct btrfs_block_group_cache,
3117                                        cache_node);
3118                 rb_erase(&block_group->cache_node,
3119                          &info->block_group_cache_tree);
3120                 RB_CLEAR_NODE(&block_group->cache_node);
3121                 spin_unlock(&info->block_group_cache_lock);
3122
3123                 down_write(&block_group->space_info->groups_sem);
3124                 list_del(&block_group->list);
3125                 up_write(&block_group->space_info->groups_sem);
3126
3127                 /*
3128                  * We haven't cached this block group, which means we may
3129                  * still have excluded extents on this block group.
3130                  */
3131                 if (block_group->cached == BTRFS_CACHE_NO ||
3132                     block_group->cached == BTRFS_CACHE_ERROR)
3133                         btrfs_free_excluded_extents(block_group);
3134
3135                 btrfs_remove_free_space_cache(block_group);
3136                 ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
3137                 ASSERT(list_empty(&block_group->dirty_list));
3138                 ASSERT(list_empty(&block_group->io_list));
3139                 ASSERT(list_empty(&block_group->bg_list));
3140                 ASSERT(atomic_read(&block_group->count) == 1);
3141                 btrfs_put_block_group(block_group);
3142
3143                 spin_lock(&info->block_group_cache_lock);
3144         }
3145         spin_unlock(&info->block_group_cache_lock);
3146
3147         /*
3148          * Now that all the block groups are freed, go through and free all the
3149          * space_info structs.  This is only called during the final stages of
3150          * unmount, and so we know nobody is using them.  We call
3151          * synchronize_rcu() once before we start, just to be on the safe side.
3152          */
3153         synchronize_rcu();
3154
3155         btrfs_release_global_block_rsv(info);
3156
3157         while (!list_empty(&info->space_info)) {
3158                 space_info = list_entry(info->space_info.next,
3159                                         struct btrfs_space_info,
3160                                         list);
3161
3162                 /*
3163                  * Do not hide this behind enospc_debug; this is actually
3164                  * important and indicates a real bug if it happens.
3165                  */
3166                 if (WARN_ON(space_info->bytes_pinned > 0 ||
3167                             space_info->bytes_reserved > 0 ||
3168                             space_info->bytes_may_use > 0))
3169                         btrfs_dump_space_info(info, space_info, 0, 0);
3170                 list_del(&space_info->list);
3171                 btrfs_sysfs_remove_space_info(space_info);
3172         }
3173         return 0;
3174 }