// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
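
/*
 * Look up the delayed node cached in the btrfs inode, falling back to the
 * per-root radix tree. On success a reference is taken on the returned node;
 * the caller must drop it with btrfs_release_delayed_node().
 */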
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
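
/*
 * The two iteration helpers below walk delayed_root->node_list; every node
 * they return carries an extra reference that the caller must drop.
 */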
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
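
/*
 * Drop one reference on @delayed_node. If items are still pending, the node
 * is (re)queued for background processing, otherwise it is dequeued; the
 * final reference also removes the node from the radix tree and frees it.
 */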
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}
static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
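
/* The item data directly follows the struct, hence the sizeof + data_len. */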
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the rb-root of the delayed node to search in
 * @key:  the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}
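
/*
 * Insert @ins into the insertion or deletion rb-tree of @delayed_node,
 * ordered by key. For dir index insertions, index_cnt is advanced so that
 * new index numbers are handed out past the pending ones.
 */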
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
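
/*
 * Account one finished item and, once enough progress has been made, wake
 * up anyone waiting in btrfs_balance_delayed_items().
 */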
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
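
/*
 * Reserve metadata space for one delayed item by migrating it from the
 * transaction's reservation; for a joined transaction (which reserved
 * nothing, so trans->bytes_reserved == 0) this is a no-op.
 */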
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from transaction rsv, since we have
	 * already reserved space when starting a transaction. So no need to
	 * reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta_prealloc(root,
				fs_info->nodesize, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node,
						bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
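
/*
 * Flush all pending deletion items of @node: look up each key in the tree
 * and delete it together with any contiguous followers in the same leaf.
 */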
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
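
/*
 * Copy the cached inode_item back into the inode item in the fs tree and,
 * if BTRFS_DELAYED_NODE_DEL_IREF is set, delete the single inode ref that
 * usually sits right next to it (or is found via a separate search).
 */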
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
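
/*
 * Background worker: repeatedly grab a prepared delayed node and commit its
 * items inside a joined transaction, until the backlog drops below
 * BTRFS_DELAYED_BACKGROUND / 2, the prepared list is drained, or the
 * requested number of nodes has been processed.
 */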
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		path->leave_spinning = 1;
		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
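
/*
 * Throttle delayed item producers: above BTRFS_DELAYED_WRITEBACK items the
 * caller blocks until a batch completes; above BTRFS_DELAYED_BACKGROUND a
 * background flush is kicked off instead.
 */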
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
		btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->root_key.objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(node->root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
						  &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	if (ret < 0) {
		btrfs_err(trans->fs_info,
"metadata reservation failed for delayed dir item deletion, should have been reserved");
		btrfs_release_delayed_item(item);
		goto end;
	}

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(trans->fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->root_key.objectid,
			  node->inode_id, ret);
		btrfs_delayed_item_release_metadata(dir->root, item);
		btrfs_release_delayed_item(item);
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	refcount_dec(&delayed_node->refs);

	return true;
}
void btrfs_readdir_put_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);
	}

	/*
	 * The VFS is going to do up_read(), so we need to downgrade back to a
	 * read lock.
	 */
	downgrade_write(&inode->i_rwsem);
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = fs_ftype_to_dtype(di->type);
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item,
				       inode_peek_iversion(inode));
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_stack_inode_sequence(inode_item));
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_delayed_node *delayed_node;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems. This means we also can't do
	 * delayed inode refs
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most case, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about the enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
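
/*
 * Kill every delayed node of @root, walking the radix tree in batches of
 * ARRAY_SIZE(delayed_nodes) entries.
 */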
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			refcount_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}