2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/ratelimit.h>
27 #include <linux/percpu_counter.h>
31 #include "print-tree.h"
35 #include "free-space-cache.h"
40 #undef SCRAMBLE_DELAYED_REFS
43 * control flags for do_chunk_alloc's force field
44 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
45 * if we really need one.
47 * CHUNK_ALLOC_LIMITED means to only try to allocate one
48 * if we have very few chunks already allocated. This is
49 * used as part of the clustering code to help make sure
50 * we have a good pool of storage to cluster in, without
51 * filling the FS with empty chunks.
53 * CHUNK_ALLOC_FORCE means it must try to allocate one
57 CHUNK_ALLOC_NO_FORCE = 0,
58 CHUNK_ALLOC_LIMITED = 1,
59 CHUNK_ALLOC_FORCE = 2,
63 * Control how reservations are dealt with.
65 * RESERVE_FREE - freeing a reservation.
66 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for ENOSPC accounting
68 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
69 * bytes_may_use as the ENOSPC accounting is done elsewhere
74 RESERVE_ALLOC_NO_ACCOUNT = 2,
77 static int update_block_group(struct btrfs_trans_handle *trans,
78 struct btrfs_root *root, u64 bytenr,
79 u64 num_bytes, int alloc);
80 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
81 struct btrfs_root *root,
82 struct btrfs_delayed_ref_node *node, u64 parent,
83 u64 root_objectid, u64 owner_objectid,
84 u64 owner_offset, int refs_to_drop,
85 struct btrfs_delayed_extent_op *extra_op);
86 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
87 struct extent_buffer *leaf,
88 struct btrfs_extent_item *ei);
89 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
90 struct btrfs_root *root,
91 u64 parent, u64 root_objectid,
92 u64 flags, u64 owner, u64 offset,
93 struct btrfs_key *ins, int ref_mod);
94 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
95 struct btrfs_root *root,
96 u64 parent, u64 root_objectid,
97 u64 flags, struct btrfs_disk_key *key,
98 int level, struct btrfs_key *ins);
99 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
100 struct btrfs_root *extent_root, u64 flags,
102 static int find_next_key(struct btrfs_path *path, int level,
103 struct btrfs_key *key);
104 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
105 int dump_block_groups);
106 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
107 u64 num_bytes, int reserve,
109 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
111 int btrfs_pin_extent(struct btrfs_root *root,
112 u64 bytenr, u64 num_bytes, int reserved);
115 block_group_cache_done(struct btrfs_block_group_cache *cache)
118 return cache->cached == BTRFS_CACHE_FINISHED ||
119 cache->cached == BTRFS_CACHE_ERROR;
122 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
124 return (cache->flags & bits) == bits;
127 void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
129 atomic_inc(&cache->count);
132 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
134 if (atomic_dec_and_test(&cache->count)) {
135 WARN_ON(cache->pinned > 0);
136 WARN_ON(cache->reserved > 0);
137 kfree(cache->free_space_ctl);
143 * this adds the block group to the fs_info rb tree for the block group cache
146 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
147 struct btrfs_block_group_cache *block_group)
150 struct rb_node *parent = NULL;
151 struct btrfs_block_group_cache *cache;
153 spin_lock(&info->block_group_cache_lock);
154 p = &info->block_group_cache_tree.rb_node;
158 cache = rb_entry(parent, struct btrfs_block_group_cache,
160 if (block_group->key.objectid < cache->key.objectid) {
162 } else if (block_group->key.objectid > cache->key.objectid) {
165 spin_unlock(&info->block_group_cache_lock);
170 rb_link_node(&block_group->cache_node, parent, p);
171 rb_insert_color(&block_group->cache_node,
172 &info->block_group_cache_tree);
174 if (info->first_logical_byte > block_group->key.objectid)
175 info->first_logical_byte = block_group->key.objectid;
177 spin_unlock(&info->block_group_cache_lock);
183 * This will return the block group at or after bytenr if contains is 0, else
184 * it will return the block group that contains the bytenr
186 static struct btrfs_block_group_cache *
187 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
190 struct btrfs_block_group_cache *cache, *ret = NULL;
194 spin_lock(&info->block_group_cache_lock);
195 n = info->block_group_cache_tree.rb_node;
198 cache = rb_entry(n, struct btrfs_block_group_cache,
200 end = cache->key.objectid + cache->key.offset - 1;
201 start = cache->key.objectid;
203 if (bytenr < start) {
204 if (!contains && (!ret || start < ret->key.objectid))
207 } else if (bytenr > start) {
208 if (contains && bytenr <= end) {
219 btrfs_get_block_group(ret);
220 if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
221 info->first_logical_byte = ret->key.objectid;
223 spin_unlock(&info->block_group_cache_lock);
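/*
 * Note: when a block group is found, block_group_cache_tree_search() returns
 * it with an extra reference taken via btrfs_get_block_group(); callers are
 * expected to drop that reference with btrfs_put_block_group() when done.
 */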
228 static int add_excluded_extent(struct btrfs_root *root,
229 u64 start, u64 num_bytes)
231 u64 end = start + num_bytes - 1;
232 set_extent_bits(&root->fs_info->freed_extents[0],
233 start, end, EXTENT_UPTODATE, GFP_NOFS);
234 set_extent_bits(&root->fs_info->freed_extents[1],
235 start, end, EXTENT_UPTODATE, GFP_NOFS);
239 static void free_excluded_extents(struct btrfs_root *root,
240 struct btrfs_block_group_cache *cache)
244 start = cache->key.objectid;
245 end = start + cache->key.offset - 1;
247 clear_extent_bits(&root->fs_info->freed_extents[0],
248 start, end, EXTENT_UPTODATE, GFP_NOFS);
249 clear_extent_bits(&root->fs_info->freed_extents[1],
250 start, end, EXTENT_UPTODATE, GFP_NOFS);
253 static int exclude_super_stripes(struct btrfs_root *root,
254 struct btrfs_block_group_cache *cache)
261 if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
262 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
263 cache->bytes_super += stripe_len;
264 ret = add_excluded_extent(root, cache->key.objectid,
270 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
271 bytenr = btrfs_sb_offset(i);
272 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
273 cache->key.objectid, bytenr,
274 0, &logical, &nr, &stripe_len);
281 if (logical[nr] > cache->key.objectid +
285 if (logical[nr] + stripe_len <= cache->key.objectid)
289 if (start < cache->key.objectid) {
290 start = cache->key.objectid;
291 len = (logical[nr] + stripe_len) - start;
293 len = min_t(u64, stripe_len,
294 cache->key.objectid +
295 cache->key.offset - start);
298 cache->bytes_super += len;
299 ret = add_excluded_extent(root, start, len);
311 static struct btrfs_caching_control *
312 get_caching_control(struct btrfs_block_group_cache *cache)
314 struct btrfs_caching_control *ctl;
316 spin_lock(&cache->lock);
317 if (!cache->caching_ctl) {
318 spin_unlock(&cache->lock);
322 ctl = cache->caching_ctl;
323 atomic_inc(&ctl->count);
324 spin_unlock(&cache->lock);
328 static void put_caching_control(struct btrfs_caching_control *ctl)
330 if (atomic_dec_and_test(&ctl->count))
334 #ifdef CONFIG_BTRFS_DEBUG
335 static void fragment_free_space(struct btrfs_root *root,
336 struct btrfs_block_group_cache *block_group)
338 u64 start = block_group->key.objectid;
339 u64 len = block_group->key.offset;
340 u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
341 root->nodesize : root->sectorsize;
342 u64 step = chunk << 1;
344 while (len > chunk) {
345 btrfs_remove_free_space(block_group, start, chunk);
356 * This is only called by cache_block_group. Since we could have freed extents,
357 * we need to check the pinned_extents for any extents that can't be used yet,
358 * since their free space will be released as soon as the transaction commits.
360 static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
361 struct btrfs_fs_info *info, u64 start, u64 end)
363 u64 extent_start, extent_end, size, total_added = 0;
366 while (start < end) {
367 ret = find_first_extent_bit(info->pinned_extents, start,
368 &extent_start, &extent_end,
369 EXTENT_DIRTY | EXTENT_UPTODATE,
374 if (extent_start <= start) {
375 start = extent_end + 1;
376 } else if (extent_start > start && extent_start < end) {
377 size = extent_start - start;
379 ret = btrfs_add_free_space(block_group, start,
381 BUG_ON(ret); /* -ENOMEM or logic error */
382 start = extent_end + 1;
391 ret = btrfs_add_free_space(block_group, start, size);
392 BUG_ON(ret); /* -ENOMEM or logic error */
398 static noinline void caching_thread(struct btrfs_work *work)
400 struct btrfs_block_group_cache *block_group;
401 struct btrfs_fs_info *fs_info;
402 struct btrfs_caching_control *caching_ctl;
403 struct btrfs_root *extent_root;
404 struct btrfs_path *path;
405 struct extent_buffer *leaf;
406 struct btrfs_key key;
413 caching_ctl = container_of(work, struct btrfs_caching_control, work);
414 block_group = caching_ctl->block_group;
415 fs_info = block_group->fs_info;
416 extent_root = fs_info->extent_root;
418 path = btrfs_alloc_path();
422 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
424 #ifdef CONFIG_BTRFS_DEBUG
426 * If we're fragmenting, we don't want to make anybody think we can
427 * allocate from this block group until we've had a chance to fragment the free space.
430 if (btrfs_should_fragment_free_space(extent_root, block_group))
434 * We don't want to deadlock with somebody trying to allocate a new
435 * extent for the extent root while also trying to search the extent
436 * root to add free space. So we skip locking and search the commit
437 * root, since it's read-only.
439 path->skip_locking = 1;
440 path->search_commit_root = 1;
445 key.type = BTRFS_EXTENT_ITEM_KEY;
447 mutex_lock(&caching_ctl->mutex);
448 /* need to make sure the commit_root doesn't disappear */
449 down_read(&fs_info->commit_root_sem);
452 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
456 leaf = path->nodes[0];
457 nritems = btrfs_header_nritems(leaf);
460 if (btrfs_fs_closing(fs_info) > 1) {
465 if (path->slots[0] < nritems) {
466 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
468 ret = find_next_key(path, 0, &key);
472 if (need_resched() ||
473 rwsem_is_contended(&fs_info->commit_root_sem)) {
475 caching_ctl->progress = last;
476 btrfs_release_path(path);
477 up_read(&fs_info->commit_root_sem);
478 mutex_unlock(&caching_ctl->mutex);
483 ret = btrfs_next_leaf(extent_root, path);
488 leaf = path->nodes[0];
489 nritems = btrfs_header_nritems(leaf);
493 if (key.objectid < last) {
496 key.type = BTRFS_EXTENT_ITEM_KEY;
499 caching_ctl->progress = last;
500 btrfs_release_path(path);
504 if (key.objectid < block_group->key.objectid) {
509 if (key.objectid >= block_group->key.objectid +
510 block_group->key.offset)
513 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
514 key.type == BTRFS_METADATA_ITEM_KEY) {
515 total_found += add_new_free_space(block_group,
518 if (key.type == BTRFS_METADATA_ITEM_KEY)
519 last = key.objectid +
520 fs_info->tree_root->nodesize;
522 last = key.objectid + key.offset;
524 if (total_found > (1024 * 1024 * 2)) {
527 wake_up(&caching_ctl->wait);
534 total_found += add_new_free_space(block_group, fs_info, last,
535 block_group->key.objectid +
536 block_group->key.offset);
537 spin_lock(&block_group->lock);
538 block_group->caching_ctl = NULL;
539 block_group->cached = BTRFS_CACHE_FINISHED;
540 spin_unlock(&block_group->lock);
542 #ifdef CONFIG_BTRFS_DEBUG
543 if (btrfs_should_fragment_free_space(extent_root, block_group)) {
546 spin_lock(&block_group->space_info->lock);
547 spin_lock(&block_group->lock);
548 bytes_used = block_group->key.offset -
549 btrfs_block_group_used(&block_group->item);
550 block_group->space_info->bytes_used += bytes_used >> 1;
551 spin_unlock(&block_group->lock);
552 spin_unlock(&block_group->space_info->lock);
553 fragment_free_space(extent_root, block_group);
557 caching_ctl->progress = (u64)-1;
559 btrfs_free_path(path);
560 up_read(&fs_info->commit_root_sem);
562 free_excluded_extents(extent_root, block_group);
564 mutex_unlock(&caching_ctl->mutex);
567 spin_lock(&block_group->lock);
568 block_group->caching_ctl = NULL;
569 block_group->cached = BTRFS_CACHE_ERROR;
570 spin_unlock(&block_group->lock);
572 wake_up(&caching_ctl->wait);
574 put_caching_control(caching_ctl);
575 btrfs_put_block_group(block_group);
578 static int cache_block_group(struct btrfs_block_group_cache *cache,
582 struct btrfs_fs_info *fs_info = cache->fs_info;
583 struct btrfs_caching_control *caching_ctl;
586 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
590 INIT_LIST_HEAD(&caching_ctl->list);
591 mutex_init(&caching_ctl->mutex);
592 init_waitqueue_head(&caching_ctl->wait);
593 caching_ctl->block_group = cache;
594 caching_ctl->progress = cache->key.objectid;
595 atomic_set(&caching_ctl->count, 1);
596 btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
597 caching_thread, NULL, NULL);
599 spin_lock(&cache->lock);
601 * This should be a rare occasion, but it could happen in the
602 * case where one thread starts to load the space cache info, and then
603 * some other thread starts a transaction commit which tries to do an
604 * allocation while the other thread is still loading the space cache
605 * info. The previous loop should have kept us from choosing this block
606 * group, but if we've moved to the state where we will wait on caching
607 * block groups we need to first check if we're doing a fast load here,
608 * so we can wait for it to finish, otherwise we could end up allocating
609 * from a block group whose cache gets evicted for one reason or another.
612 while (cache->cached == BTRFS_CACHE_FAST) {
613 struct btrfs_caching_control *ctl;
615 ctl = cache->caching_ctl;
616 atomic_inc(&ctl->count);
617 prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
618 spin_unlock(&cache->lock);
622 finish_wait(&ctl->wait, &wait);
623 put_caching_control(ctl);
624 spin_lock(&cache->lock);
627 if (cache->cached != BTRFS_CACHE_NO) {
628 spin_unlock(&cache->lock);
632 WARN_ON(cache->caching_ctl);
633 cache->caching_ctl = caching_ctl;
634 cache->cached = BTRFS_CACHE_FAST;
635 spin_unlock(&cache->lock);
637 if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
638 mutex_lock(&caching_ctl->mutex);
639 ret = load_free_space_cache(fs_info, cache);
641 spin_lock(&cache->lock);
643 cache->caching_ctl = NULL;
644 cache->cached = BTRFS_CACHE_FINISHED;
645 cache->last_byte_to_unpin = (u64)-1;
646 caching_ctl->progress = (u64)-1;
648 if (load_cache_only) {
649 cache->caching_ctl = NULL;
650 cache->cached = BTRFS_CACHE_NO;
652 cache->cached = BTRFS_CACHE_STARTED;
653 cache->has_caching_ctl = 1;
656 spin_unlock(&cache->lock);
657 #ifdef CONFIG_BTRFS_DEBUG
659 btrfs_should_fragment_free_space(fs_info->extent_root,
663 spin_lock(&cache->space_info->lock);
664 spin_lock(&cache->lock);
665 bytes_used = cache->key.offset -
666 btrfs_block_group_used(&cache->item);
667 cache->space_info->bytes_used += bytes_used >> 1;
668 spin_unlock(&cache->lock);
669 spin_unlock(&cache->space_info->lock);
670 fragment_free_space(fs_info->extent_root, cache);
673 mutex_unlock(&caching_ctl->mutex);
675 wake_up(&caching_ctl->wait);
677 put_caching_control(caching_ctl);
678 free_excluded_extents(fs_info->extent_root, cache);
683 * We are not going to do the fast caching, so set cached to the
684 * appropriate value and wake up any waiters.
686 spin_lock(&cache->lock);
687 if (load_cache_only) {
688 cache->caching_ctl = NULL;
689 cache->cached = BTRFS_CACHE_NO;
691 cache->cached = BTRFS_CACHE_STARTED;
692 cache->has_caching_ctl = 1;
694 spin_unlock(&cache->lock);
695 wake_up(&caching_ctl->wait);
698 if (load_cache_only) {
699 put_caching_control(caching_ctl);
703 down_write(&fs_info->commit_root_sem);
704 atomic_inc(&caching_ctl->count);
705 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
706 up_write(&fs_info->commit_root_sem);
708 btrfs_get_block_group(cache);
710 btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
716 * return the block group that starts at or after bytenr
718 static struct btrfs_block_group_cache *
719 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
721 struct btrfs_block_group_cache *cache;
723 cache = block_group_cache_tree_search(info, bytenr, 0);
729 * return the block group that contains the given bytenr
731 struct btrfs_block_group_cache *btrfs_lookup_block_group(
732 struct btrfs_fs_info *info,
735 struct btrfs_block_group_cache *cache;
737 cache = block_group_cache_tree_search(info, bytenr, 1);
742 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
745 struct list_head *head = &info->space_info;
746 struct btrfs_space_info *found;
748 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
751 list_for_each_entry_rcu(found, head, list) {
752 if (found->flags & flags) {
762 * after adding space to the filesystem, we need to clear the full flags
763 * on all the space infos.
765 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
767 struct list_head *head = &info->space_info;
768 struct btrfs_space_info *found;
771 list_for_each_entry_rcu(found, head, list)
776 /* simple helper to search for an existing data extent at a given offset */
777 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
780 struct btrfs_key key;
781 struct btrfs_path *path;
783 path = btrfs_alloc_path();
787 key.objectid = start;
789 key.type = BTRFS_EXTENT_ITEM_KEY;
790 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
792 btrfs_free_path(path);
797 * helper function to look up the reference count and flags of a tree block.
799 * the head node for a delayed ref is used to store the sum of all the
800 * reference count modifications queued up in the rbtree. the head
801 * node may also store the extent flags to set. This way you can check
802 * to see what the reference count and extent flags would be if all of
803 * the delayed refs had already been processed.
805 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
806 struct btrfs_root *root, u64 bytenr,
807 u64 offset, int metadata, u64 *refs, u64 *flags)
809 struct btrfs_delayed_ref_head *head;
810 struct btrfs_delayed_ref_root *delayed_refs;
811 struct btrfs_path *path;
812 struct btrfs_extent_item *ei;
813 struct extent_buffer *leaf;
814 struct btrfs_key key;
821 * If we don't have skinny metadata, don't bother doing anything different.
824 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
825 offset = root->nodesize;
829 path = btrfs_alloc_path();
834 path->skip_locking = 1;
835 path->search_commit_root = 1;
839 key.objectid = bytenr;
842 key.type = BTRFS_METADATA_ITEM_KEY;
844 key.type = BTRFS_EXTENT_ITEM_KEY;
846 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
851 if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
852 if (path->slots[0]) {
854 btrfs_item_key_to_cpu(path->nodes[0], &key,
856 if (key.objectid == bytenr &&
857 key.type == BTRFS_EXTENT_ITEM_KEY &&
858 key.offset == root->nodesize)
864 leaf = path->nodes[0];
865 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
866 if (item_size >= sizeof(*ei)) {
867 ei = btrfs_item_ptr(leaf, path->slots[0],
868 struct btrfs_extent_item);
869 num_refs = btrfs_extent_refs(leaf, ei);
870 extent_flags = btrfs_extent_flags(leaf, ei);
872 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
873 struct btrfs_extent_item_v0 *ei0;
874 BUG_ON(item_size != sizeof(*ei0));
875 ei0 = btrfs_item_ptr(leaf, path->slots[0],
876 struct btrfs_extent_item_v0);
877 num_refs = btrfs_extent_refs_v0(leaf, ei0);
878 /* FIXME: this isn't correct for data */
879 extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
884 BUG_ON(num_refs == 0);
894 delayed_refs = &trans->transaction->delayed_refs;
895 spin_lock(&delayed_refs->lock);
896 head = btrfs_find_delayed_ref_head(trans, bytenr);
898 if (!mutex_trylock(&head->mutex)) {
899 atomic_inc(&head->node.refs);
900 spin_unlock(&delayed_refs->lock);
902 btrfs_release_path(path);
905 * Mutex was contended, block until it's released and try again.
908 mutex_lock(&head->mutex);
909 mutex_unlock(&head->mutex);
910 btrfs_put_delayed_ref(&head->node);
913 spin_lock(&head->lock);
914 if (head->extent_op && head->extent_op->update_flags)
915 extent_flags |= head->extent_op->flags_to_set;
917 BUG_ON(num_refs == 0);
919 num_refs += head->node.ref_mod;
920 spin_unlock(&head->lock);
921 mutex_unlock(&head->mutex);
923 spin_unlock(&delayed_refs->lock);
925 WARN_ON(num_refs == 0);
929 *flags = extent_flags;
931 btrfs_free_path(path);
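/*
 * Illustrative only (hypothetical caller, not taken from this file): to get
 * the reference count and flags of a tree block, pending delayed ref
 * modifications included, a caller would do something like
 *
 *	u64 refs, flags;
 *	int ret;
 *
 *	ret = btrfs_lookup_extent_info(trans, root, eb->start,
 *				       btrfs_header_level(eb), 1,
 *				       &refs, &flags);
 */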
936 * Back reference rules. Back refs have three main goals:
938 * 1) differentiate between all holders of references to an extent so that
939 * when a reference is dropped we can make sure it was a valid reference
940 * before freeing the extent.
942 * 2) Provide enough information to quickly find the holders of an extent
943 * if we notice a given block is corrupted or bad.
945 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
946 * maintenance. This is actually the same as #2, but with a slightly
947 * different use case.
949 * There are two kinds of back refs. Implicit back refs are optimized
950 * for pointers in non-shared tree blocks. For a given pointer in a block,
951 * back refs of this kind provide information about the block's owner tree
952 * and the pointer's key. This information allows us to find the block by
953 * b-tree searching. Full back refs are for pointers in tree blocks not
954 * referenced by their owner trees. The location of the tree block is recorded
955 * in the back refs. Full back refs are actually generic and can be
956 * used in all cases where implicit back refs are used. The major shortcoming
957 * of full back refs is their overhead: every time a tree block gets
958 * COWed, we have to update the back ref entries for all pointers in it.
960 * For a newly allocated tree block, we use implicit back refs for
961 * pointers in it. This means most tree related operations only involve
962 * implicit back refs. For a tree block created in an old transaction, the
963 * only way to drop a reference to it is to COW it. So we can detect the
964 * event that a tree block loses its owner tree's reference and do the
965 * back refs conversion.
967 * When a tree block is COW'd through a tree, there are four cases:
969 * The reference count of the block is one and the tree is the block's
970 * owner tree. Nothing to do in this case.
972 * The reference count of the block is one and the tree is not the
973 * block's owner tree. In this case, full back refs are used for pointers
974 * in the block. Remove these full back refs, add implicit back refs for
975 * every pointer in the new block.
977 * The reference count of the block is greater than one and the tree is
978 * the block's owner tree. In this case, implicit back refs are used for
979 * pointers in the block. Add full back refs for every pointer in the
980 * block, increase lower level extents' reference counts. The original
981 * implicit back refs are carried over to the new block.
983 * The reference count of the block is greater than one and the tree is
984 * not the block's owner tree. Add implicit back refs for every pointer in
985 * the new block, increase lower level extents' reference count.
987 * Back reference key composition:
989 * The key objectid corresponds to the first byte in the extent.
990 * The key type is used to differentiate between types of back refs.
991 * There are different meanings of the key offset for different types of back refs.
994 * File extents can be referenced by:
996 * - multiple snapshots, subvolumes, or different generations in one subvol
997 * - different files inside a single subvolume
998 * - different offsets inside a file (bookend extents in file.c)
1000 * The extent ref structure for the implicit back refs has fields for:
1002 * - Objectid of the subvolume root
1003 * - objectid of the file holding the reference
1004 * - original offset in the file
1005 * - how many bookend extents
1007 * The key offset for the implicit back refs is the hash of the first three fields.
1010 * The extent ref structure for the full back refs has a field for:
1012 * - number of pointers in the tree leaf
1014 * The key offset for the full back refs is the first byte of the parent tree leaf.
1017 * When a file extent is allocated, implicit back refs are used and
1018 * the fields are filled in:
1020 * (root_key.objectid, inode objectid, offset in file, 1)
1022 * When a file extent is removed by file truncation, we find the
1023 * corresponding implicit back refs and check the following fields:
1025 * (btrfs_header_owner(leaf), inode objectid, offset in file)
1027 * Btree extents can be referenced by:
1029 * - Different subvolumes
1031 * Both the implicit back refs and the full back refs for tree blocks
1032 * only consist of a key. The key offset for the implicit back refs is the
1033 * objectid of the block's owner tree. The key offset for the full back refs
1034 * is the first byte of the parent block.
1036 * When implicit back refs are used, information about the lowest key and
1037 * level of the tree block is required. This information is stored in the
1038 * tree block info structure.
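/*
 * A rough sketch of the resulting key layouts (for illustration; the
 * lookup/insert helpers below are authoritative):
 *
 *   implicit data ref:	(extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *			 hash_extent_data_ref(root objectid, inode objectid,
 *					      file offset))
 *   full data ref:	(extent bytenr, BTRFS_SHARED_DATA_REF_KEY,
 *			 parent leaf bytenr)
 *   implicit tree ref:	(extent bytenr, BTRFS_TREE_BLOCK_REF_KEY,
 *			 owner tree objectid)
 *   full tree ref:	(extent bytenr, BTRFS_SHARED_BLOCK_REF_KEY,
 *			 parent block bytenr)
 */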
1041 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1042 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
1043 struct btrfs_root *root,
1044 struct btrfs_path *path,
1045 u64 owner, u32 extra_size)
1047 struct btrfs_extent_item *item;
1048 struct btrfs_extent_item_v0 *ei0;
1049 struct btrfs_extent_ref_v0 *ref0;
1050 struct btrfs_tree_block_info *bi;
1051 struct extent_buffer *leaf;
1052 struct btrfs_key key;
1053 struct btrfs_key found_key;
1054 u32 new_size = sizeof(*item);
1058 leaf = path->nodes[0];
1059 BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
1061 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1062 ei0 = btrfs_item_ptr(leaf, path->slots[0],
1063 struct btrfs_extent_item_v0);
1064 refs = btrfs_extent_refs_v0(leaf, ei0);
1066 if (owner == (u64)-1) {
1068 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1069 ret = btrfs_next_leaf(root, path);
1072 BUG_ON(ret > 0); /* Corruption */
1073 leaf = path->nodes[0];
1075 btrfs_item_key_to_cpu(leaf, &found_key,
1077 BUG_ON(key.objectid != found_key.objectid);
1078 if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1082 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1083 struct btrfs_extent_ref_v0);
1084 owner = btrfs_ref_objectid_v0(leaf, ref0);
1088 btrfs_release_path(path);
1090 if (owner < BTRFS_FIRST_FREE_OBJECTID)
1091 new_size += sizeof(*bi);
1093 new_size -= sizeof(*ei0);
1094 ret = btrfs_search_slot(trans, root, &key, path,
1095 new_size + extra_size, 1);
1098 BUG_ON(ret); /* Corruption */
1100 btrfs_extend_item(root, path, new_size);
1102 leaf = path->nodes[0];
1103 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1104 btrfs_set_extent_refs(leaf, item, refs);
1105 /* FIXME: get real generation */
1106 btrfs_set_extent_generation(leaf, item, 0);
1107 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1108 btrfs_set_extent_flags(leaf, item,
1109 BTRFS_EXTENT_FLAG_TREE_BLOCK |
1110 BTRFS_BLOCK_FLAG_FULL_BACKREF);
1111 bi = (struct btrfs_tree_block_info *)(item + 1);
1112 /* FIXME: get first key of the block */
1113 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1114 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1116 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1118 btrfs_mark_buffer_dirty(leaf);
1123 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1125 u32 high_crc = ~(u32)0;
1126 u32 low_crc = ~(u32)0;
1129 lenum = cpu_to_le64(root_objectid);
1130 high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1131 lenum = cpu_to_le64(owner);
1132 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1133 lenum = cpu_to_le64(offset);
1134 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1136 return ((u64)high_crc << 31) ^ (u64)low_crc;
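/*
 * Note: this hash is what lookup_extent_data_ref() and
 * insert_extent_data_ref() below use as the key offset of an
 * EXTENT_DATA_REF item; collisions are resolved by walking forward in the
 * tree and checking each candidate with match_extent_data_ref().
 */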
1139 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1140 struct btrfs_extent_data_ref *ref)
1142 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1143 btrfs_extent_data_ref_objectid(leaf, ref),
1144 btrfs_extent_data_ref_offset(leaf, ref));
1147 static int match_extent_data_ref(struct extent_buffer *leaf,
1148 struct btrfs_extent_data_ref *ref,
1149 u64 root_objectid, u64 owner, u64 offset)
1151 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1152 btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1153 btrfs_extent_data_ref_offset(leaf, ref) != offset)
1158 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1159 struct btrfs_root *root,
1160 struct btrfs_path *path,
1161 u64 bytenr, u64 parent,
1163 u64 owner, u64 offset)
1165 struct btrfs_key key;
1166 struct btrfs_extent_data_ref *ref;
1167 struct extent_buffer *leaf;
1173 key.objectid = bytenr;
1175 key.type = BTRFS_SHARED_DATA_REF_KEY;
1176 key.offset = parent;
1178 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1179 key.offset = hash_extent_data_ref(root_objectid,
1184 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1193 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1194 key.type = BTRFS_EXTENT_REF_V0_KEY;
1195 btrfs_release_path(path);
1196 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1207 leaf = path->nodes[0];
1208 nritems = btrfs_header_nritems(leaf);
1210 if (path->slots[0] >= nritems) {
1211 ret = btrfs_next_leaf(root, path);
1217 leaf = path->nodes[0];
1218 nritems = btrfs_header_nritems(leaf);
1222 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1223 if (key.objectid != bytenr ||
1224 key.type != BTRFS_EXTENT_DATA_REF_KEY)
1227 ref = btrfs_item_ptr(leaf, path->slots[0],
1228 struct btrfs_extent_data_ref);
1230 if (match_extent_data_ref(leaf, ref, root_objectid,
1233 btrfs_release_path(path);
1245 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1246 struct btrfs_root *root,
1247 struct btrfs_path *path,
1248 u64 bytenr, u64 parent,
1249 u64 root_objectid, u64 owner,
1250 u64 offset, int refs_to_add)
1252 struct btrfs_key key;
1253 struct extent_buffer *leaf;
1258 key.objectid = bytenr;
1260 key.type = BTRFS_SHARED_DATA_REF_KEY;
1261 key.offset = parent;
1262 size = sizeof(struct btrfs_shared_data_ref);
1264 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1265 key.offset = hash_extent_data_ref(root_objectid,
1267 size = sizeof(struct btrfs_extent_data_ref);
1270 ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1271 if (ret && ret != -EEXIST)
1274 leaf = path->nodes[0];
1276 struct btrfs_shared_data_ref *ref;
1277 ref = btrfs_item_ptr(leaf, path->slots[0],
1278 struct btrfs_shared_data_ref);
1280 btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1282 num_refs = btrfs_shared_data_ref_count(leaf, ref);
1283 num_refs += refs_to_add;
1284 btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1287 struct btrfs_extent_data_ref *ref;
1288 while (ret == -EEXIST) {
1289 ref = btrfs_item_ptr(leaf, path->slots[0],
1290 struct btrfs_extent_data_ref);
1291 if (match_extent_data_ref(leaf, ref, root_objectid,
1294 btrfs_release_path(path);
1296 ret = btrfs_insert_empty_item(trans, root, path, &key,
1298 if (ret && ret != -EEXIST)
1301 leaf = path->nodes[0];
1303 ref = btrfs_item_ptr(leaf, path->slots[0],
1304 struct btrfs_extent_data_ref);
1306 btrfs_set_extent_data_ref_root(leaf, ref,
1308 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1309 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1310 btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1312 num_refs = btrfs_extent_data_ref_count(leaf, ref);
1313 num_refs += refs_to_add;
1314 btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1317 btrfs_mark_buffer_dirty(leaf);
1320 btrfs_release_path(path);
1324 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1325 struct btrfs_root *root,
1326 struct btrfs_path *path,
1327 int refs_to_drop, int *last_ref)
1329 struct btrfs_key key;
1330 struct btrfs_extent_data_ref *ref1 = NULL;
1331 struct btrfs_shared_data_ref *ref2 = NULL;
1332 struct extent_buffer *leaf;
1336 leaf = path->nodes[0];
1337 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1339 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1340 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1341 struct btrfs_extent_data_ref);
1342 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1343 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1344 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1345 struct btrfs_shared_data_ref);
1346 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1347 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1348 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1349 struct btrfs_extent_ref_v0 *ref0;
1350 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1351 struct btrfs_extent_ref_v0);
1352 num_refs = btrfs_ref_count_v0(leaf, ref0);
1358 BUG_ON(num_refs < refs_to_drop);
1359 num_refs -= refs_to_drop;
1361 if (num_refs == 0) {
1362 ret = btrfs_del_item(trans, root, path);
1365 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1366 btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1367 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1368 btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1369 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1371 struct btrfs_extent_ref_v0 *ref0;
1372 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1373 struct btrfs_extent_ref_v0);
1374 btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1377 btrfs_mark_buffer_dirty(leaf);
1382 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1383 struct btrfs_extent_inline_ref *iref)
1385 struct btrfs_key key;
1386 struct extent_buffer *leaf;
1387 struct btrfs_extent_data_ref *ref1;
1388 struct btrfs_shared_data_ref *ref2;
1391 leaf = path->nodes[0];
1392 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1394 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1395 BTRFS_EXTENT_DATA_REF_KEY) {
1396 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1397 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1399 ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1400 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1402 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1403 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1404 struct btrfs_extent_data_ref);
1405 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1406 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1407 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1408 struct btrfs_shared_data_ref);
1409 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1410 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1411 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1412 struct btrfs_extent_ref_v0 *ref0;
1413 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1414 struct btrfs_extent_ref_v0);
1415 num_refs = btrfs_ref_count_v0(leaf, ref0);
1423 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1424 struct btrfs_root *root,
1425 struct btrfs_path *path,
1426 u64 bytenr, u64 parent,
1429 struct btrfs_key key;
1432 key.objectid = bytenr;
1434 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1435 key.offset = parent;
1437 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1438 key.offset = root_objectid;
1441 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1444 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1445 if (ret == -ENOENT && parent) {
1446 btrfs_release_path(path);
1447 key.type = BTRFS_EXTENT_REF_V0_KEY;
1448 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1456 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1457 struct btrfs_root *root,
1458 struct btrfs_path *path,
1459 u64 bytenr, u64 parent,
1462 struct btrfs_key key;
1465 key.objectid = bytenr;
1467 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1468 key.offset = parent;
1470 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1471 key.offset = root_objectid;
1474 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1475 btrfs_release_path(path);
1479 static inline int extent_ref_type(u64 parent, u64 owner)
1482 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1484 type = BTRFS_SHARED_BLOCK_REF_KEY;
1486 type = BTRFS_TREE_BLOCK_REF_KEY;
1489 type = BTRFS_SHARED_DATA_REF_KEY;
1491 type = BTRFS_EXTENT_DATA_REF_KEY;
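/*
 * Walk up the path and return, in @key, the first key that follows the
 * current slot at or above @level. Returns 0 if such a key exists and 1 if
 * the path is already positioned at the very end of the tree.
 */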
1496 static int find_next_key(struct btrfs_path *path, int level,
1497 struct btrfs_key *key)
1500 for (; level < BTRFS_MAX_LEVEL; level++) {
1501 if (!path->nodes[level])
1503 if (path->slots[level] + 1 >=
1504 btrfs_header_nritems(path->nodes[level]))
1507 btrfs_item_key_to_cpu(path->nodes[level], key,
1508 path->slots[level] + 1);
1510 btrfs_node_key_to_cpu(path->nodes[level], key,
1511 path->slots[level] + 1);
1518 * look for an inline back ref. If the back ref is found, *ref_ret is set
1519 * to the address of the inline back ref, and 0 is returned.
1521 * If the back ref isn't found, *ref_ret is set to the address where it
1522 * should be inserted, and -ENOENT is returned.
1524 * If insert is true and there are too many inline back refs, the path
1525 * points to the extent item, and -EAGAIN is returned.
1527 * NOTE: inline back refs are ordered in the same way that back ref
1528 * items in the tree are ordered.
1530 static noinline_for_stack
1531 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1532 struct btrfs_root *root,
1533 struct btrfs_path *path,
1534 struct btrfs_extent_inline_ref **ref_ret,
1535 u64 bytenr, u64 num_bytes,
1536 u64 parent, u64 root_objectid,
1537 u64 owner, u64 offset, int insert)
1539 struct btrfs_key key;
1540 struct extent_buffer *leaf;
1541 struct btrfs_extent_item *ei;
1542 struct btrfs_extent_inline_ref *iref;
1552 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1555 key.objectid = bytenr;
1556 key.type = BTRFS_EXTENT_ITEM_KEY;
1557 key.offset = num_bytes;
1559 want = extent_ref_type(parent, owner);
1561 extra_size = btrfs_extent_inline_ref_size(want);
1562 path->keep_locks = 1;
1567 * Owner is our parent level, so we can just add one to get the level
1568 * for the block we are interested in.
1570 if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1571 key.type = BTRFS_METADATA_ITEM_KEY;
1576 ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1583 * We may be a newly converted file system which still has the old fat
1584 * extent entries for metadata, so try and see if we have one of those.
1586 if (ret > 0 && skinny_metadata) {
1587 skinny_metadata = false;
1588 if (path->slots[0]) {
1590 btrfs_item_key_to_cpu(path->nodes[0], &key,
1592 if (key.objectid == bytenr &&
1593 key.type == BTRFS_EXTENT_ITEM_KEY &&
1594 key.offset == num_bytes)
1598 key.objectid = bytenr;
1599 key.type = BTRFS_EXTENT_ITEM_KEY;
1600 key.offset = num_bytes;
1601 btrfs_release_path(path);
1606 if (ret && !insert) {
1609 } else if (WARN_ON(ret)) {
1614 leaf = path->nodes[0];
1615 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1616 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1617 if (item_size < sizeof(*ei)) {
1622 ret = convert_extent_item_v0(trans, root, path, owner,
1628 leaf = path->nodes[0];
1629 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1632 BUG_ON(item_size < sizeof(*ei));
1634 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1635 flags = btrfs_extent_flags(leaf, ei);
1637 ptr = (unsigned long)(ei + 1);
1638 end = (unsigned long)ei + item_size;
1640 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1641 ptr += sizeof(struct btrfs_tree_block_info);
1651 iref = (struct btrfs_extent_inline_ref *)ptr;
1652 type = btrfs_extent_inline_ref_type(leaf, iref);
1656 ptr += btrfs_extent_inline_ref_size(type);
1660 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1661 struct btrfs_extent_data_ref *dref;
1662 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1663 if (match_extent_data_ref(leaf, dref, root_objectid,
1668 if (hash_extent_data_ref_item(leaf, dref) <
1669 hash_extent_data_ref(root_objectid, owner, offset))
1673 ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1675 if (parent == ref_offset) {
1679 if (ref_offset < parent)
1682 if (root_objectid == ref_offset) {
1686 if (ref_offset < root_objectid)
1690 ptr += btrfs_extent_inline_ref_size(type);
1692 if (err == -ENOENT && insert) {
1693 if (item_size + extra_size >=
1694 BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1699 * To add a new inline back ref, we have to make sure
1700 * there is no corresponding back ref item.
1701 * For simplicity, we just do not add a new inline back
1702 * ref if there is any kind of item for this block.
1704 if (find_next_key(path, 0, &key) == 0 &&
1705 key.objectid == bytenr &&
1706 key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1711 *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1714 path->keep_locks = 0;
1715 btrfs_unlock_up_safe(path, 1);
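/*
 * Illustrative summary of how the return values above are consumed (see
 * insert_inline_extent_backref() and __btrfs_inc_extent_ref() below):
 * 0 means the inline ref already exists and is updated in place, -ENOENT
 * means a new inline ref is set up at *ref_ret, and -EAGAIN makes the
 * caller fall back to a separate, non-inline back ref item.
 */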
1721 * helper to add a new inline back ref
1723 static noinline_for_stack
1724 void setup_inline_extent_backref(struct btrfs_root *root,
1725 struct btrfs_path *path,
1726 struct btrfs_extent_inline_ref *iref,
1727 u64 parent, u64 root_objectid,
1728 u64 owner, u64 offset, int refs_to_add,
1729 struct btrfs_delayed_extent_op *extent_op)
1731 struct extent_buffer *leaf;
1732 struct btrfs_extent_item *ei;
1735 unsigned long item_offset;
1740 leaf = path->nodes[0];
1741 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1742 item_offset = (unsigned long)iref - (unsigned long)ei;
1744 type = extent_ref_type(parent, owner);
1745 size = btrfs_extent_inline_ref_size(type);
1747 btrfs_extend_item(root, path, size);
1749 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1750 refs = btrfs_extent_refs(leaf, ei);
1751 refs += refs_to_add;
1752 btrfs_set_extent_refs(leaf, ei, refs);
1754 __run_delayed_extent_op(extent_op, leaf, ei);
1756 ptr = (unsigned long)ei + item_offset;
1757 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1758 if (ptr < end - size)
1759 memmove_extent_buffer(leaf, ptr + size, ptr,
1762 iref = (struct btrfs_extent_inline_ref *)ptr;
1763 btrfs_set_extent_inline_ref_type(leaf, iref, type);
1764 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1765 struct btrfs_extent_data_ref *dref;
1766 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1767 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1768 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1769 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1770 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1771 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1772 struct btrfs_shared_data_ref *sref;
1773 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1774 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1775 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1776 } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1777 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1779 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1781 btrfs_mark_buffer_dirty(leaf);
1784 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1785 struct btrfs_root *root,
1786 struct btrfs_path *path,
1787 struct btrfs_extent_inline_ref **ref_ret,
1788 u64 bytenr, u64 num_bytes, u64 parent,
1789 u64 root_objectid, u64 owner, u64 offset)
1793 ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1794 bytenr, num_bytes, parent,
1795 root_objectid, owner, offset, 0);
1799 btrfs_release_path(path);
1802 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1803 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1806 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1807 root_objectid, owner, offset);
1813 * helper to update/remove an inline back ref
1815 static noinline_for_stack
1816 void update_inline_extent_backref(struct btrfs_root *root,
1817 struct btrfs_path *path,
1818 struct btrfs_extent_inline_ref *iref,
1820 struct btrfs_delayed_extent_op *extent_op,
1823 struct extent_buffer *leaf;
1824 struct btrfs_extent_item *ei;
1825 struct btrfs_extent_data_ref *dref = NULL;
1826 struct btrfs_shared_data_ref *sref = NULL;
1834 leaf = path->nodes[0];
1835 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1836 refs = btrfs_extent_refs(leaf, ei);
1837 WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1838 refs += refs_to_mod;
1839 btrfs_set_extent_refs(leaf, ei, refs);
1841 __run_delayed_extent_op(extent_op, leaf, ei);
1843 type = btrfs_extent_inline_ref_type(leaf, iref);
1845 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1846 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1847 refs = btrfs_extent_data_ref_count(leaf, dref);
1848 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1849 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1850 refs = btrfs_shared_data_ref_count(leaf, sref);
1853 BUG_ON(refs_to_mod != -1);
1856 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1857 refs += refs_to_mod;
1860 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1861 btrfs_set_extent_data_ref_count(leaf, dref, refs);
1863 btrfs_set_shared_data_ref_count(leaf, sref, refs);
1866 size = btrfs_extent_inline_ref_size(type);
1867 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1868 ptr = (unsigned long)iref;
1869 end = (unsigned long)ei + item_size;
1870 if (ptr + size < end)
1871 memmove_extent_buffer(leaf, ptr, ptr + size,
1874 btrfs_truncate_item(root, path, item_size, 1);
1876 btrfs_mark_buffer_dirty(leaf);
1879 static noinline_for_stack
1880 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1881 struct btrfs_root *root,
1882 struct btrfs_path *path,
1883 u64 bytenr, u64 num_bytes, u64 parent,
1884 u64 root_objectid, u64 owner,
1885 u64 offset, int refs_to_add,
1886 struct btrfs_delayed_extent_op *extent_op)
1888 struct btrfs_extent_inline_ref *iref;
1891 ret = lookup_inline_extent_backref(trans, root, path, &iref,
1892 bytenr, num_bytes, parent,
1893 root_objectid, owner, offset, 1);
1895 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1896 update_inline_extent_backref(root, path, iref,
1897 refs_to_add, extent_op, NULL);
1898 } else if (ret == -ENOENT) {
1899 setup_inline_extent_backref(root, path, iref, parent,
1900 root_objectid, owner, offset,
1901 refs_to_add, extent_op);
1907 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1908 struct btrfs_root *root,
1909 struct btrfs_path *path,
1910 u64 bytenr, u64 parent, u64 root_objectid,
1911 u64 owner, u64 offset, int refs_to_add)
1914 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1915 BUG_ON(refs_to_add != 1);
1916 ret = insert_tree_block_ref(trans, root, path, bytenr,
1917 parent, root_objectid);
1919 ret = insert_extent_data_ref(trans, root, path, bytenr,
1920 parent, root_objectid,
1921 owner, offset, refs_to_add);
1926 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1927 struct btrfs_root *root,
1928 struct btrfs_path *path,
1929 struct btrfs_extent_inline_ref *iref,
1930 int refs_to_drop, int is_data, int *last_ref)
1934 BUG_ON(!is_data && refs_to_drop != 1);
1936 update_inline_extent_backref(root, path, iref,
1937 -refs_to_drop, NULL, last_ref);
1938 } else if (is_data) {
1939 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1943 ret = btrfs_del_item(trans, root, path);
1948 #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
1949 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1950 u64 *discarded_bytes)
1953 u64 bytes_left, end;
1954 u64 aligned_start = ALIGN(start, 1 << 9);
1956 if (WARN_ON(start != aligned_start)) {
1957 len -= aligned_start - start;
1958 len = round_down(len, 1 << 9);
1959 start = aligned_start;
1962 *discarded_bytes = 0;
1970 /* Skip any superblocks on this device. */
1971 for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1972 u64 sb_start = btrfs_sb_offset(j);
1973 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1974 u64 size = sb_start - start;
1976 if (!in_range(sb_start, start, bytes_left) &&
1977 !in_range(sb_end, start, bytes_left) &&
1978 !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1982 * Superblock spans beginning of range. Adjust start and
1985 if (sb_start <= start) {
1986 start += sb_end - start;
1991 bytes_left = end - start;
1996 ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1999 *discarded_bytes += size;
2000 else if (ret != -EOPNOTSUPP)
2009 bytes_left = end - start;
2013 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2016 *discarded_bytes += bytes_left;
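/*
 * Note on the alignment above: blkdev_issue_discard() works in 512-byte
 * sectors (hence the ">> 9" shifts), so the requested range is first
 * trimmed to sector granularity, any part overlapping a superblock copy is
 * skipped, and *discarded_bytes counts only what was actually submitted.
 */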
2021 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2022 u64 num_bytes, u64 *actual_bytes)
2025 u64 discarded_bytes = 0;
2026 struct btrfs_bio *bbio = NULL;
2029 /* Tell the block device(s) that the sectors can be discarded */
2030 ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2031 bytenr, &num_bytes, &bbio, 0);
2032 /* Error condition is -ENOMEM */
2034 struct btrfs_bio_stripe *stripe = bbio->stripes;
2038 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2040 if (!stripe->dev->can_discard)
2043 ret = btrfs_issue_discard(stripe->dev->bdev,
2048 discarded_bytes += bytes;
2049 else if (ret != -EOPNOTSUPP)
2050 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2053 * Just in case we get back EOPNOTSUPP for some reason,
2054 * ignore the return value so we don't break callers
2055 * of discard_extent.
2059 btrfs_put_bbio(bbio);
2063 *actual_bytes = discarded_bytes;
2066 if (ret == -EOPNOTSUPP)
2071 /* Can return -ENOMEM */
2072 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2073 struct btrfs_root *root,
2074 u64 bytenr, u64 num_bytes, u64 parent,
2075 u64 root_objectid, u64 owner, u64 offset)
2078 struct btrfs_fs_info *fs_info = root->fs_info;
2080 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2081 root_objectid == BTRFS_TREE_LOG_OBJECTID);
2083 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2084 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2086 parent, root_objectid, (int)owner,
2087 BTRFS_ADD_DELAYED_REF, NULL);
2089 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2090 num_bytes, parent, root_objectid,
2092 BTRFS_ADD_DELAYED_REF, NULL);
2097 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2098 struct btrfs_root *root,
2099 struct btrfs_delayed_ref_node *node,
2100 u64 parent, u64 root_objectid,
2101 u64 owner, u64 offset, int refs_to_add,
2102 struct btrfs_delayed_extent_op *extent_op)
2104 struct btrfs_fs_info *fs_info = root->fs_info;
2105 struct btrfs_path *path;
2106 struct extent_buffer *leaf;
2107 struct btrfs_extent_item *item;
2108 struct btrfs_key key;
2109 u64 bytenr = node->bytenr;
2110 u64 num_bytes = node->num_bytes;
2114 path = btrfs_alloc_path();
2119 path->leave_spinning = 1;
2120 /* this will set up the path even if it fails to insert the back ref */
2121 ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2122 bytenr, num_bytes, parent,
2123 root_objectid, owner, offset,
2124 refs_to_add, extent_op);
2125 if ((ret < 0 && ret != -EAGAIN) || !ret)
2129 * Ok, we had -EAGAIN, which means we didn't have space to insert an
2130 * inline extent ref, so just update the reference count and add a normal back ref.
2133 leaf = path->nodes[0];
2134 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2135 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2136 refs = btrfs_extent_refs(leaf, item);
2137 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2139 __run_delayed_extent_op(extent_op, leaf, item);
2141 btrfs_mark_buffer_dirty(leaf);
2142 btrfs_release_path(path);
2145 path->leave_spinning = 1;
2146 /* now insert the actual backref */
2147 ret = insert_extent_backref(trans, root->fs_info->extent_root,
2148 path, bytenr, parent, root_objectid,
2149 owner, offset, refs_to_add);
2151 btrfs_abort_transaction(trans, root, ret);
2153 btrfs_free_path(path);
2157 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2158 struct btrfs_root *root,
2159 struct btrfs_delayed_ref_node *node,
2160 struct btrfs_delayed_extent_op *extent_op,
2161 int insert_reserved)
2164 struct btrfs_delayed_data_ref *ref;
2165 struct btrfs_key ins;
2170 ins.objectid = node->bytenr;
2171 ins.offset = node->num_bytes;
2172 ins.type = BTRFS_EXTENT_ITEM_KEY;
2174 ref = btrfs_delayed_node_to_data_ref(node);
2175 trace_run_delayed_data_ref(node, ref, node->action);
2177 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2178 parent = ref->parent;
2179 ref_root = ref->root;
2181 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2183 flags |= extent_op->flags_to_set;
2184 ret = alloc_reserved_file_extent(trans, root,
2185 parent, ref_root, flags,
2186 ref->objectid, ref->offset,
2187 &ins, node->ref_mod);
2188 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2189 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2190 ref_root, ref->objectid,
2191 ref->offset, node->ref_mod,
2193 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2194 ret = __btrfs_free_extent(trans, root, node, parent,
2195 ref_root, ref->objectid,
2196 ref->offset, node->ref_mod,
2204 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2205 struct extent_buffer *leaf,
2206 struct btrfs_extent_item *ei)
2208 u64 flags = btrfs_extent_flags(leaf, ei);
2209 if (extent_op->update_flags) {
2210 flags |= extent_op->flags_to_set;
2211 btrfs_set_extent_flags(leaf, ei, flags);
2214 if (extent_op->update_key) {
2215 struct btrfs_tree_block_info *bi;
2216 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2217 bi = (struct btrfs_tree_block_info *)(ei + 1);
2218 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2222 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2223 struct btrfs_root *root,
2224 struct btrfs_delayed_ref_node *node,
2225 struct btrfs_delayed_extent_op *extent_op)
2227 struct btrfs_key key;
2228 struct btrfs_path *path;
2229 struct btrfs_extent_item *ei;
2230 struct extent_buffer *leaf;
2234 int metadata = !extent_op->is_data;
2239 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2242 path = btrfs_alloc_path();
2246 key.objectid = node->bytenr;
2249 key.type = BTRFS_METADATA_ITEM_KEY;
2250 key.offset = extent_op->level;
2252 key.type = BTRFS_EXTENT_ITEM_KEY;
2253 key.offset = node->num_bytes;
2258 path->leave_spinning = 1;
2259 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2267 if (path->slots[0] > 0) {
2269 btrfs_item_key_to_cpu(path->nodes[0], &key,
2271 if (key.objectid == node->bytenr &&
2272 key.type == BTRFS_EXTENT_ITEM_KEY &&
2273 key.offset == node->num_bytes)
2277 btrfs_release_path(path);
2280 key.objectid = node->bytenr;
2281 key.offset = node->num_bytes;
2282 key.type = BTRFS_EXTENT_ITEM_KEY;
2291 leaf = path->nodes[0];
2292 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2293 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2294 if (item_size < sizeof(*ei)) {
2295 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2301 leaf = path->nodes[0];
2302 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2305 BUG_ON(item_size < sizeof(*ei));
2306 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2307 __run_delayed_extent_op(extent_op, leaf, ei);
2309 btrfs_mark_buffer_dirty(leaf);
2311 btrfs_free_path(path);
2315 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2316 struct btrfs_root *root,
2317 struct btrfs_delayed_ref_node *node,
2318 struct btrfs_delayed_extent_op *extent_op,
2319 int insert_reserved)
2322 struct btrfs_delayed_tree_ref *ref;
2323 struct btrfs_key ins;
2326 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2329 ref = btrfs_delayed_node_to_tree_ref(node);
2330 trace_run_delayed_tree_ref(node, ref, node->action);
2332 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2333 parent = ref->parent;
2334 ref_root = ref->root;
2336 ins.objectid = node->bytenr;
2337 if (skinny_metadata) {
2338 ins.offset = ref->level;
2339 ins.type = BTRFS_METADATA_ITEM_KEY;
2341 ins.offset = node->num_bytes;
2342 ins.type = BTRFS_EXTENT_ITEM_KEY;
2345 BUG_ON(node->ref_mod != 1);
2346 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2347 BUG_ON(!extent_op || !extent_op->update_flags);
2348 ret = alloc_reserved_tree_block(trans, root,
2350 extent_op->flags_to_set,
2353 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2354 ret = __btrfs_inc_extent_ref(trans, root, node,
2358 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2359 ret = __btrfs_free_extent(trans, root, node,
2361 ref->level, 0, 1, extent_op);
2368 /* helper function to actually process a single delayed ref entry */
2369 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2370 struct btrfs_root *root,
2371 struct btrfs_delayed_ref_node *node,
2372 struct btrfs_delayed_extent_op *extent_op,
2373 int insert_reserved)
2377 if (trans->aborted) {
2378 if (insert_reserved)
2379 btrfs_pin_extent(root, node->bytenr,
2380 node->num_bytes, 1);
2384 if (btrfs_delayed_ref_is_head(node)) {
2385 struct btrfs_delayed_ref_head *head;
2387 * we've hit the end of the chain and we were supposed
2388 * to insert this extent into the tree. But, it got
2389 * deleted before we ever needed to insert it, so all
2390 * we have to do is clean up the accounting
2393 head = btrfs_delayed_node_to_head(node);
2394 trace_run_delayed_ref_head(node, head, node->action);
2396 if (insert_reserved) {
2397 btrfs_pin_extent(root, node->bytenr,
2398 node->num_bytes, 1);
2399 if (head->is_data) {
2400 ret = btrfs_del_csums(trans, root,
2406 /* Also free its reserved qgroup space */
2407 btrfs_qgroup_free_delayed_ref(root->fs_info,
2408 head->qgroup_ref_root,
2409 head->qgroup_reserved);
2413 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2414 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2415 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2417 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2418 node->type == BTRFS_SHARED_DATA_REF_KEY)
2419 ret = run_delayed_data_ref(trans, root, node, extent_op,
2426 static inline struct btrfs_delayed_ref_node *
2427 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2429 struct btrfs_delayed_ref_node *ref;
2431 if (list_empty(&head->ref_list))
2435 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2436 * This is to prevent a ref count from going down to zero, which deletes
2437 * the extent item from the extent tree, when there still are references
2438 * to add, which would fail because they would not find the extent item.
2440 list_for_each_entry(ref, &head->ref_list, list) {
2441 if (ref->action == BTRFS_ADD_DELAYED_REF)
2445 return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2450 * Returns 0 on success or if called with an already aborted transaction.
2451 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2453 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2454 struct btrfs_root *root,
2457 struct btrfs_delayed_ref_root *delayed_refs;
2458 struct btrfs_delayed_ref_node *ref;
2459 struct btrfs_delayed_ref_head *locked_ref = NULL;
2460 struct btrfs_delayed_extent_op *extent_op;
2461 struct btrfs_fs_info *fs_info = root->fs_info;
2462 ktime_t start = ktime_get();
2464 unsigned long count = 0;
2465 unsigned long actual_count = 0;
2466 int must_insert_reserved = 0;
2468 delayed_refs = &trans->transaction->delayed_refs;
2474 spin_lock(&delayed_refs->lock);
2475 locked_ref = btrfs_select_ref_head(trans);
2477 spin_unlock(&delayed_refs->lock);
2481 /* grab the lock that says we are going to process
2482 * all the refs for this head */
2483 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2484 spin_unlock(&delayed_refs->lock);
2486 * we may have dropped the spin lock to get the head
2487 * mutex lock, and that might have given someone else
2488 * time to free the head. If that's true, it has been
2489 * removed from our list and we can move on.
2491 if (ret == -EAGAIN) {
2499 * We need to try and merge add/drops of the same ref since we
2500 * can run into issues with relocate dropping the implicit ref
2501 * and then it being added back again before the drop can
2502 * finish. If we merged anything we need to re-loop so we can
2504 * Or we can get node references of the same type that weren't
2505 * merged when created due to bumps in the tree mod seq, and
2506 * we need to merge them to prevent adding an inline extent
2507 * backref before dropping it (triggering a BUG_ON at
2508 * insert_inline_extent_backref()).
2510 spin_lock(&locked_ref->lock);
2511 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2515 * locked_ref is the head node, so we have to go one
2516 * node back for any delayed ref updates
2518 ref = select_delayed_ref(locked_ref);
2520 if (ref && ref->seq &&
2521 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2522 spin_unlock(&locked_ref->lock);
2523 spin_lock(&delayed_refs->lock);
2524 locked_ref->processing = 0;
2525 delayed_refs->num_heads_ready++;
2526 spin_unlock(&delayed_refs->lock);
2527 btrfs_delayed_ref_unlock(locked_ref);
2535 * record the must insert reserved flag before we
2536 * drop the spin lock.
2538 must_insert_reserved = locked_ref->must_insert_reserved;
2539 locked_ref->must_insert_reserved = 0;
2541 extent_op = locked_ref->extent_op;
2542 locked_ref->extent_op = NULL;
2547 /* All delayed refs have been processed. Go ahead
2548 * and send the head node to run_one_delayed_ref,
2549 * so that any accounting fixes can happen
2551 ref = &locked_ref->node;
2553 if (extent_op && must_insert_reserved) {
2554 btrfs_free_delayed_extent_op(extent_op);
2559 spin_unlock(&locked_ref->lock);
2560 ret = run_delayed_extent_op(trans, root,
2562 btrfs_free_delayed_extent_op(extent_op);
2566 * Need to reset must_insert_reserved if
2567 * there was an error so the abort stuff
2568 * can clean up the reserved space
2571 if (must_insert_reserved)
2572 locked_ref->must_insert_reserved = 1;
2573 spin_lock(&delayed_refs->lock);
2574 locked_ref->processing = 0;
2575 delayed_refs->num_heads_ready++;
2576 spin_unlock(&delayed_refs->lock);
2577 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2578 btrfs_delayed_ref_unlock(locked_ref);
2585 * Need to drop our head ref lock and re-acquire the
2586 * delayed ref lock and then re-check to make sure
2589 spin_unlock(&locked_ref->lock);
2590 spin_lock(&delayed_refs->lock);
2591 spin_lock(&locked_ref->lock);
2592 if (!list_empty(&locked_ref->ref_list) ||
2593 locked_ref->extent_op) {
2594 spin_unlock(&locked_ref->lock);
2595 spin_unlock(&delayed_refs->lock);
2599 delayed_refs->num_heads--;
2600 rb_erase(&locked_ref->href_node,
2601 &delayed_refs->href_root);
2602 spin_unlock(&delayed_refs->lock);
2606 list_del(&ref->list);
2608 atomic_dec(&delayed_refs->num_entries);
2610 if (!btrfs_delayed_ref_is_head(ref)) {
2612 * when we play the delayed ref, also correct the
2615 switch (ref->action) {
2616 case BTRFS_ADD_DELAYED_REF:
2617 case BTRFS_ADD_DELAYED_EXTENT:
2618 locked_ref->node.ref_mod -= ref->ref_mod;
2620 case BTRFS_DROP_DELAYED_REF:
2621 locked_ref->node.ref_mod += ref->ref_mod;
2627 spin_unlock(&locked_ref->lock);
2629 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2630 must_insert_reserved);
2632 btrfs_free_delayed_extent_op(extent_op);
2634 locked_ref->processing = 0;
2635 btrfs_delayed_ref_unlock(locked_ref);
2636 btrfs_put_delayed_ref(ref);
2637 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2642 * If this node is a head, that means all the refs in this head
2643 * have been dealt with, and we will pick the next head to deal
2644 * with, so we must unlock the head and drop it from the cluster
2645 * list before we release it.
2647 if (btrfs_delayed_ref_is_head(ref)) {
2648 if (locked_ref->is_data &&
2649 locked_ref->total_ref_mod < 0) {
2650 spin_lock(&delayed_refs->lock);
2651 delayed_refs->pending_csums -= ref->num_bytes;
2652 spin_unlock(&delayed_refs->lock);
2654 btrfs_delayed_ref_unlock(locked_ref);
2657 btrfs_put_delayed_ref(ref);
2663 * We don't want to include ref heads since we can have empty ref heads
2664 * and those will drastically skew our runtime down since we just do
2665 * accounting, no actual extent tree updates.
2667 if (actual_count > 0) {
2668 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2672 * We weigh the current average higher than our current runtime
2673 * to avoid large swings in the average.
2675 spin_lock(&delayed_refs->lock);
2676 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2677 fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
2678 spin_unlock(&delayed_refs->lock);
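/*
 * In effect this keeps an exponential moving average,
 * new_avg = (3 * old_avg + runtime) / 4.  For example, an old average
 * of 8us and a 4us run give (3 * 8 + 4) / 4 = 7us.
 */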
2683 #ifdef SCRAMBLE_DELAYED_REFS
2685 * Normally delayed refs get processed in ascending bytenr order. This
2686 * correlates in most cases to the order added. To expose dependencies on this
2687 * order, we start to process the tree in the middle instead of the beginning
2689 static u64 find_middle(struct rb_root *root)
2691 struct rb_node *n = root->rb_node;
2692 struct btrfs_delayed_ref_node *entry;
2695 u64 first = 0, last = 0;
2699 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2700 first = entry->bytenr;
2704 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2705 last = entry->bytenr;
2710 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2711 WARN_ON(!entry->in_tree);
2713 middle = entry->bytenr;
2726 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2730 num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2731 sizeof(struct btrfs_extent_inline_ref));
2732 if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2733 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2736 * We don't ever fill up leaves all the way so multiply by 2 just to be
2737 * closer to what we're really going to want to use.
2739 return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2743 * Takes the number of bytes to be checksummed and figures out how many leaves it
2744 * would require to store the csums for that many bytes.
2746 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2749 u64 num_csums_per_leaf;
2752 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2753 num_csums_per_leaf = div64_u64(csum_size,
2754 (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2755 num_csums = div64_u64(csum_bytes, root->sectorsize);
2756 num_csums += num_csums_per_leaf - 1;
2757 num_csums = div64_u64(num_csums, num_csums_per_leaf);
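/*
 * For example, assuming the default 16K nodesize and 4-byte crc32c
 * checksums, a leaf holds roughly four thousand csums, so one leaf
 * covers on the order of 16M worth of 4K sectors.
 */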
2761 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2762 struct btrfs_root *root)
2764 struct btrfs_block_rsv *global_rsv;
2765 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2766 u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2767 u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2768 u64 num_bytes, num_dirty_bgs_bytes;
2771 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2772 num_heads = heads_to_leaves(root, num_heads);
2774 num_bytes += (num_heads - 1) * root->nodesize;
2776 num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2777 num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2779 global_rsv = &root->fs_info->global_block_rsv;
2782 * If we can't allocate any more chunks, let's make sure we have _lots_ of
2783 * wiggle room since running delayed refs can create more delayed refs.
2785 if (global_rsv->space_info->full) {
2786 num_dirty_bgs_bytes <<= 1;
2790 spin_lock(&global_rsv->lock);
2791 if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2793 spin_unlock(&global_rsv->lock);
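/*
 * Rough throttling heuristic for the helper below: estimate how long the
 * queued delayed refs would take to run (num_entries * avg_runtime) and
 * start throttling once that estimate approaches a second; otherwise fall
 * back to the space check above.
 */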
2797 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2798 struct btrfs_root *root)
2800 struct btrfs_fs_info *fs_info = root->fs_info;
2802 atomic_read(&trans->transaction->delayed_refs.num_entries);
2807 avg_runtime = fs_info->avg_delayed_ref_runtime;
2808 val = num_entries * avg_runtime;
2809 if (num_entries * avg_runtime >= NSEC_PER_SEC)
2811 if (val >= NSEC_PER_SEC / 2)
2814 return btrfs_check_space_for_delayed_refs(trans, root);
2817 struct async_delayed_refs {
2818 struct btrfs_root *root;
2822 struct completion wait;
2823 struct btrfs_work work;
2826 static void delayed_ref_async_start(struct btrfs_work *work)
2828 struct async_delayed_refs *async;
2829 struct btrfs_trans_handle *trans;
2832 async = container_of(work, struct async_delayed_refs, work);
2834 trans = btrfs_join_transaction(async->root);
2835 if (IS_ERR(trans)) {
2836 async->error = PTR_ERR(trans);
2841 * trans->sync means that when we call end_transaction, we won't
2842 * wait on delayed refs
2845 ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2849 ret = btrfs_end_transaction(trans, async->root);
2850 if (ret && !async->error)
2854 complete(&async->wait);
2859 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2860 unsigned long count, int wait)
2862 struct async_delayed_refs *async;
2865 async = kmalloc(sizeof(*async), GFP_NOFS);
2869 async->root = root->fs_info->tree_root;
2870 async->count = count;
2876 init_completion(&async->wait);
2878 btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2879 delayed_ref_async_start, NULL, NULL);
2881 btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2884 wait_for_completion(&async->wait);
2893 * this starts processing the delayed reference count updates and
2894 * extent insertions we have queued up so far. count can be
2895 * 0, which means to process everything in the tree at the start
2896 * of the run (but not newly added entries), or it can be some target
2897 * number you'd like to process.
2899 * Returns 0 on success or if called with an aborted transaction
2900 * Returns <0 on error and aborts the transaction
2902 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2903 struct btrfs_root *root, unsigned long count)
2905 struct rb_node *node;
2906 struct btrfs_delayed_ref_root *delayed_refs;
2907 struct btrfs_delayed_ref_head *head;
2909 int run_all = count == (unsigned long)-1;
2910 bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2912 /* We'll clean this up in btrfs_cleanup_transaction */
2916 if (root == root->fs_info->extent_root)
2917 root = root->fs_info->tree_root;
2919 delayed_refs = &trans->transaction->delayed_refs;
2921 count = atomic_read(&delayed_refs->num_entries) * 2;
2924 #ifdef SCRAMBLE_DELAYED_REFS
2925 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2927 trans->can_flush_pending_bgs = false;
2928 ret = __btrfs_run_delayed_refs(trans, root, count);
2930 btrfs_abort_transaction(trans, root, ret);
2935 if (!list_empty(&trans->new_bgs))
2936 btrfs_create_pending_block_groups(trans, root);
2938 spin_lock(&delayed_refs->lock);
2939 node = rb_first(&delayed_refs->href_root);
2941 spin_unlock(&delayed_refs->lock);
2944 count = (unsigned long)-1;
2947 head = rb_entry(node, struct btrfs_delayed_ref_head,
2949 if (btrfs_delayed_ref_is_head(&head->node)) {
2950 struct btrfs_delayed_ref_node *ref;
2953 atomic_inc(&ref->refs);
2955 spin_unlock(&delayed_refs->lock);
2957 * Mutex was contended, block until it's
2958 * released and try again
2960 mutex_lock(&head->mutex);
2961 mutex_unlock(&head->mutex);
2963 btrfs_put_delayed_ref(ref);
2969 node = rb_next(node);
2971 spin_unlock(&delayed_refs->lock);
2976 assert_qgroups_uptodate(trans);
2977 trans->can_flush_pending_bgs = can_flush_pending_bgs;
2981 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2982 struct btrfs_root *root,
2983 u64 bytenr, u64 num_bytes, u64 flags,
2984 int level, int is_data)
2986 struct btrfs_delayed_extent_op *extent_op;
2989 extent_op = btrfs_alloc_delayed_extent_op();
2993 extent_op->flags_to_set = flags;
2994 extent_op->update_flags = 1;
2995 extent_op->update_key = 0;
2996 extent_op->is_data = is_data ? 1 : 0;
2997 extent_op->level = level;
2999 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
3000 num_bytes, extent_op);
3002 btrfs_free_delayed_extent_op(extent_op);
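/*
 * Scan the in-memory delayed refs queued up for @bytenr and report
 * whether any of them implies a reference from a root/inode/offset other
 * than the one we were given, i.e. a cross reference that makes the
 * extent shared.
 */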
3006 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3007 struct btrfs_root *root,
3008 struct btrfs_path *path,
3009 u64 objectid, u64 offset, u64 bytenr)
3011 struct btrfs_delayed_ref_head *head;
3012 struct btrfs_delayed_ref_node *ref;
3013 struct btrfs_delayed_data_ref *data_ref;
3014 struct btrfs_delayed_ref_root *delayed_refs;
3017 delayed_refs = &trans->transaction->delayed_refs;
3018 spin_lock(&delayed_refs->lock);
3019 head = btrfs_find_delayed_ref_head(trans, bytenr);
3021 spin_unlock(&delayed_refs->lock);
3025 if (!mutex_trylock(&head->mutex)) {
3026 atomic_inc(&head->node.refs);
3027 spin_unlock(&delayed_refs->lock);
3029 btrfs_release_path(path);
3032 * Mutex was contended, block until it's released and let
3035 mutex_lock(&head->mutex);
3036 mutex_unlock(&head->mutex);
3037 btrfs_put_delayed_ref(&head->node);
3040 spin_unlock(&delayed_refs->lock);
3042 spin_lock(&head->lock);
3043 list_for_each_entry(ref, &head->ref_list, list) {
3044 /* If it's a shared ref we know a cross reference exists */
3045 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3050 data_ref = btrfs_delayed_node_to_data_ref(ref);
3053 * If our ref doesn't match the one we're currently looking at
3054 * then we have a cross reference.
3056 if (data_ref->root != root->root_key.objectid ||
3057 data_ref->objectid != objectid ||
3058 data_ref->offset != offset) {
3063 spin_unlock(&head->lock);
3064 mutex_unlock(&head->mutex);
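/*
 * The committed-tree counterpart of the check above: the extent item for
 * @bytenr must carry exactly one inline EXTENT_DATA_REF backref matching
 * our root/objectid/offset and must not predate the root's last snapshot,
 * otherwise we assume a cross reference may exist.
 */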
3068 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3069 struct btrfs_root *root,
3070 struct btrfs_path *path,
3071 u64 objectid, u64 offset, u64 bytenr)
3073 struct btrfs_root *extent_root = root->fs_info->extent_root;
3074 struct extent_buffer *leaf;
3075 struct btrfs_extent_data_ref *ref;
3076 struct btrfs_extent_inline_ref *iref;
3077 struct btrfs_extent_item *ei;
3078 struct btrfs_key key;
3082 key.objectid = bytenr;
3083 key.offset = (u64)-1;
3084 key.type = BTRFS_EXTENT_ITEM_KEY;
3086 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3089 BUG_ON(ret == 0); /* Corruption */
3092 if (path->slots[0] == 0)
3096 leaf = path->nodes[0];
3097 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3099 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3103 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3104 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3105 if (item_size < sizeof(*ei)) {
3106 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3110 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3112 if (item_size != sizeof(*ei) +
3113 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3116 if (btrfs_extent_generation(leaf, ei) <=
3117 btrfs_root_last_snapshot(&root->root_item))
3120 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3121 if (btrfs_extent_inline_ref_type(leaf, iref) !=
3122 BTRFS_EXTENT_DATA_REF_KEY)
3125 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3126 if (btrfs_extent_refs(leaf, ei) !=
3127 btrfs_extent_data_ref_count(leaf, ref) ||
3128 btrfs_extent_data_ref_root(leaf, ref) !=
3129 root->root_key.objectid ||
3130 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3131 btrfs_extent_data_ref_offset(leaf, ref) != offset)
3139 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3140 struct btrfs_root *root,
3141 u64 objectid, u64 offset, u64 bytenr)
3143 struct btrfs_path *path;
3147 path = btrfs_alloc_path();
3152 ret = check_committed_ref(trans, root, path, objectid,
3154 if (ret && ret != -ENOENT)
3157 ret2 = check_delayed_ref(trans, root, path, objectid,
3159 } while (ret2 == -EAGAIN);
3161 if (ret2 && ret2 != -ENOENT) {
3166 if (ret != -ENOENT || ret2 != -ENOENT)
3169 btrfs_free_path(path);
3170 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3175 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3176 struct btrfs_root *root,
3177 struct extent_buffer *buf,
3178 int full_backref, int inc)
3185 struct btrfs_key key;
3186 struct btrfs_file_extent_item *fi;
3190 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3191 u64, u64, u64, u64, u64, u64);
3194 if (btrfs_test_is_dummy_root(root))
3197 ref_root = btrfs_header_owner(buf);
3198 nritems = btrfs_header_nritems(buf);
3199 level = btrfs_header_level(buf);
3201 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3205 process_func = btrfs_inc_extent_ref;
3207 process_func = btrfs_free_extent;
3210 parent = buf->start;
3214 for (i = 0; i < nritems; i++) {
3216 btrfs_item_key_to_cpu(buf, &key, i);
3217 if (key.type != BTRFS_EXTENT_DATA_KEY)
3219 fi = btrfs_item_ptr(buf, i,
3220 struct btrfs_file_extent_item);
3221 if (btrfs_file_extent_type(buf, fi) ==
3222 BTRFS_FILE_EXTENT_INLINE)
3224 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3228 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3229 key.offset -= btrfs_file_extent_offset(buf, fi);
3230 ret = process_func(trans, root, bytenr, num_bytes,
3231 parent, ref_root, key.objectid,
3236 bytenr = btrfs_node_blockptr(buf, i);
3237 num_bytes = root->nodesize;
3238 ret = process_func(trans, root, bytenr, num_bytes,
3239 parent, ref_root, level - 1, 0);
3249 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3250 struct extent_buffer *buf, int full_backref)
3252 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3255 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3256 struct extent_buffer *buf, int full_backref)
3258 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
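/*
 * Copy the in-memory block group item for @cache into its slot in the
 * extent tree and mark the leaf dirty.
 */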
3261 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3262 struct btrfs_root *root,
3263 struct btrfs_path *path,
3264 struct btrfs_block_group_cache *cache)
3267 struct btrfs_root *extent_root = root->fs_info->extent_root;
3269 struct extent_buffer *leaf;
3271 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3278 leaf = path->nodes[0];
3279 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3280 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3281 btrfs_mark_buffer_dirty(leaf);
3283 btrfs_release_path(path);
3288 static struct btrfs_block_group_cache *
3289 next_block_group(struct btrfs_root *root,
3290 struct btrfs_block_group_cache *cache)
3292 struct rb_node *node;
3294 spin_lock(&root->fs_info->block_group_cache_lock);
3296 /* If our block group was removed, we need a full search. */
3297 if (RB_EMPTY_NODE(&cache->cache_node)) {
3298 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3300 spin_unlock(&root->fs_info->block_group_cache_lock);
3301 btrfs_put_block_group(cache);
3302 cache = btrfs_lookup_first_block_group(root->fs_info,
3306 node = rb_next(&cache->cache_node);
3307 btrfs_put_block_group(cache);
3309 cache = rb_entry(node, struct btrfs_block_group_cache,
3311 btrfs_get_block_group(cache);
3314 spin_unlock(&root->fs_info->block_group_cache_lock);
3318 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3319 struct btrfs_trans_handle *trans,
3320 struct btrfs_path *path)
3322 struct btrfs_root *root = block_group->fs_info->tree_root;
3323 struct inode *inode = NULL;
3325 int dcs = BTRFS_DC_ERROR;
3331 * If this block group is smaller than 100 megs, don't bother caching the
3334 if (block_group->key.offset < (100 * 1024 * 1024)) {
3335 spin_lock(&block_group->lock);
3336 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3337 spin_unlock(&block_group->lock);
3344 inode = lookup_free_space_inode(root, block_group, path);
3345 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3346 ret = PTR_ERR(inode);
3347 btrfs_release_path(path);
3351 if (IS_ERR(inode)) {
3355 if (block_group->ro)
3358 ret = create_free_space_inode(root, trans, block_group, path);
3364 /* We've already set up this transaction, go ahead and exit */
3365 if (block_group->cache_generation == trans->transid &&
3366 i_size_read(inode)) {
3367 dcs = BTRFS_DC_SETUP;
3372 * We want to set the generation to 0, that way if anything goes wrong
3373 * from here on out we know not to trust this cache when we load up next
3376 BTRFS_I(inode)->generation = 0;
3377 ret = btrfs_update_inode(trans, root, inode);
3380 * So theoretically we could recover from this, simply set the
3381 * super cache generation to 0 so we know to invalidate the
3382 * cache, but then we'd have to keep track of the block groups
3383 * that fail this way so we know we _have_ to reset this cache
3384 * before the next commit or risk reading stale cache. So to
3385 * limit our exposure to horrible edge cases, let's just abort the
3386 * transaction; this only happens in really bad situations
3389 btrfs_abort_transaction(trans, root, ret);
3394 if (i_size_read(inode) > 0) {
3395 ret = btrfs_check_trunc_cache_free_space(root,
3396 &root->fs_info->global_block_rsv);
3400 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3405 spin_lock(&block_group->lock);
3406 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3407 !btrfs_test_opt(root, SPACE_CACHE)) {
3409 * don't bother trying to write stuff out _if_
3410 * a) we're not cached,
3411 * b) we're using the nospace_cache mount option.
3413 dcs = BTRFS_DC_WRITTEN;
3414 spin_unlock(&block_group->lock);
3417 spin_unlock(&block_group->lock);
3420 * We hit an ENOSPC when setting up the cache in this transaction, just
3421 * skip doing the setup, we've already cleared the cache so we're safe.
3423 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3429 * Try to preallocate enough space based on how big the block group is.
3430 * Keep in mind this has to include any pinned space which could end up
3431 * taking up quite a bit since it's not folded into the other space
3434 num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3439 num_pages *= PAGE_CACHE_SIZE;
3441 ret = btrfs_check_data_free_space(inode, 0, num_pages);
3445 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3446 num_pages, num_pages,
3449 * Our cache requires contiguous chunks so that we don't modify a bunch
3450 * of metadata or split extents when writing the cache out, which means
3451 * we can hit ENOSPC if we are heavily fragmented, in addition to normal
3452 * out-of-space conditions. So if we hit this, just skip setting up any
3453 * other block groups for this transaction, maybe we'll unpin enough
3454 * space the next time around.
3457 dcs = BTRFS_DC_SETUP;
3458 else if (ret == -ENOSPC)
3459 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3460 btrfs_free_reserved_data_space(inode, 0, num_pages);
3465 btrfs_release_path(path);
3467 spin_lock(&block_group->lock);
3468 if (!ret && dcs == BTRFS_DC_SETUP)
3469 block_group->cache_generation = trans->transid;
3470 block_group->disk_cache_state = dcs;
3471 spin_unlock(&block_group->lock);
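/*
 * Run cache_save_setup() for every dirty block group whose free space
 * cache is still marked BTRFS_DC_CLEAR, so the cache inodes are prepared
 * before the real cache writeback happens below.
 */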
3476 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3477 struct btrfs_root *root)
3479 struct btrfs_block_group_cache *cache, *tmp;
3480 struct btrfs_transaction *cur_trans = trans->transaction;
3481 struct btrfs_path *path;
3483 if (list_empty(&cur_trans->dirty_bgs) ||
3484 !btrfs_test_opt(root, SPACE_CACHE))
3487 path = btrfs_alloc_path();
3491 /* Could add new block groups, use _safe just in case */
3492 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3494 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3495 cache_save_setup(cache, trans, path);
3498 btrfs_free_path(path);
3503 * transaction commit does final block group cache writeback during a
3504 * critical section where nothing is allowed to change the FS. This is
3505 * required in order for the cache to actually match the block group,
3506 * but can introduce a lot of latency into the commit.
3508 * So, btrfs_start_dirty_block_groups is here to kick off block group
3509 * cache IO. There's a chance we'll have to redo some of it if the
3510 * block group changes again during the commit, but it greatly reduces
3511 * the commit latency by getting rid of the easy block groups while
3512 * we're still allowing others to join the commit.
3514 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3515 struct btrfs_root *root)
3517 struct btrfs_block_group_cache *cache;
3518 struct btrfs_transaction *cur_trans = trans->transaction;
3521 struct btrfs_path *path = NULL;
3523 struct list_head *io = &cur_trans->io_bgs;
3524 int num_started = 0;
3527 spin_lock(&cur_trans->dirty_bgs_lock);
3528 if (list_empty(&cur_trans->dirty_bgs)) {
3529 spin_unlock(&cur_trans->dirty_bgs_lock);
3532 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3533 spin_unlock(&cur_trans->dirty_bgs_lock);
3537 * make sure all the block groups on our dirty list actually
3540 btrfs_create_pending_block_groups(trans, root);
3543 path = btrfs_alloc_path();
3549 * cache_write_mutex is here only to save us from balance or automatic
3550 * removal of empty block groups deleting this block group while we are
3551 * writing out the cache
3553 mutex_lock(&trans->transaction->cache_write_mutex);
3554 while (!list_empty(&dirty)) {
3555 cache = list_first_entry(&dirty,
3556 struct btrfs_block_group_cache,
3559 * this can happen if something re-dirties a block
3560 * group that is already under IO. Just wait for it to
3561 * finish and then do it all again
3563 if (!list_empty(&cache->io_list)) {
3564 list_del_init(&cache->io_list);
3565 btrfs_wait_cache_io(root, trans, cache,
3566 &cache->io_ctl, path,
3567 cache->key.objectid);
3568 btrfs_put_block_group(cache);
3573 * btrfs_wait_cache_io uses the cache->dirty_list to decide
3574 * if it should update the cache_state. Don't delete
3575 * until after we wait.
3577 * Since we're not running in the commit critical section
3578 * we need the dirty_bgs_lock to protect from update_block_group
3580 spin_lock(&cur_trans->dirty_bgs_lock);
3581 list_del_init(&cache->dirty_list);
3582 spin_unlock(&cur_trans->dirty_bgs_lock);
3586 cache_save_setup(cache, trans, path);
3588 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3589 cache->io_ctl.inode = NULL;
3590 ret = btrfs_write_out_cache(root, trans, cache, path);
3591 if (ret == 0 && cache->io_ctl.inode) {
3596 * the cache_write_mutex is protecting
3599 list_add_tail(&cache->io_list, io);
3602 * if we failed to write the cache, the
3603 * generation will be bad and life goes on
3609 ret = write_one_cache_group(trans, root, path, cache);
3611 * Our block group might still be attached to the list
3612 * of new block groups in the transaction handle of some
3613 * other task (struct btrfs_trans_handle->new_bgs). This
3614 * means its block group item isn't yet in the extent
3615 * tree. If this happens ignore the error, as we will
3616 * try again later in the critical section of the
3617 * transaction commit.
3619 if (ret == -ENOENT) {
3621 spin_lock(&cur_trans->dirty_bgs_lock);
3622 if (list_empty(&cache->dirty_list)) {
3623 list_add_tail(&cache->dirty_list,
3624 &cur_trans->dirty_bgs);
3625 btrfs_get_block_group(cache);
3627 spin_unlock(&cur_trans->dirty_bgs_lock);
3629 btrfs_abort_transaction(trans, root, ret);
3633 /* if it's not on the io list, we need to put the block group */
3635 btrfs_put_block_group(cache);
3641 * Avoid blocking other tasks for too long. It might even save
3642 * us from writing caches for block groups that are going to be
3645 mutex_unlock(&trans->transaction->cache_write_mutex);
3646 mutex_lock(&trans->transaction->cache_write_mutex);
3648 mutex_unlock(&trans->transaction->cache_write_mutex);
3651 * go through delayed refs for all the stuff we've just kicked off
3652 * and then loop back (just once)
3654 ret = btrfs_run_delayed_refs(trans, root, 0);
3655 if (!ret && loops == 0) {
3657 spin_lock(&cur_trans->dirty_bgs_lock);
3658 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3660 * dirty_bgs_lock protects us from concurrent block group
3661 * deletes too (not just cache_write_mutex).
3663 if (!list_empty(&dirty)) {
3664 spin_unlock(&cur_trans->dirty_bgs_lock);
3667 spin_unlock(&cur_trans->dirty_bgs_lock);
3670 btrfs_free_path(path);
3674 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3675 struct btrfs_root *root)
3677 struct btrfs_block_group_cache *cache;
3678 struct btrfs_transaction *cur_trans = trans->transaction;
3681 struct btrfs_path *path;
3682 struct list_head *io = &cur_trans->io_bgs;
3683 int num_started = 0;
3685 path = btrfs_alloc_path();
3690 * We don't need the lock here since we are protected by the transaction
3691 * commit. We want to do the cache_save_setup first and then run the
3692 * delayed refs to make sure we have the best chance at doing this all
3695 while (!list_empty(&cur_trans->dirty_bgs)) {
3696 cache = list_first_entry(&cur_trans->dirty_bgs,
3697 struct btrfs_block_group_cache,
3701 * this can happen if cache_save_setup re-dirties a block
3702 * group that is already under IO. Just wait for it to
3703 * finish and then do it all again
3705 if (!list_empty(&cache->io_list)) {
3706 list_del_init(&cache->io_list);
3707 btrfs_wait_cache_io(root, trans, cache,
3708 &cache->io_ctl, path,
3709 cache->key.objectid);
3710 btrfs_put_block_group(cache);
3714 * don't remove from the dirty list until after we've waited
3717 list_del_init(&cache->dirty_list);
3720 cache_save_setup(cache, trans, path);
3723 ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3725 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3726 cache->io_ctl.inode = NULL;
3727 ret = btrfs_write_out_cache(root, trans, cache, path);
3728 if (ret == 0 && cache->io_ctl.inode) {
3731 list_add_tail(&cache->io_list, io);
3734 * if we failed to write the cache, the
3735 * generation will be bad and life goes on
3741 ret = write_one_cache_group(trans, root, path, cache);
3743 btrfs_abort_transaction(trans, root, ret);
3746 /* if it's not on the io list, we need to put the block group */
3748 btrfs_put_block_group(cache);
3751 while (!list_empty(io)) {
3752 cache = list_first_entry(io, struct btrfs_block_group_cache,
3754 list_del_init(&cache->io_list);
3755 btrfs_wait_cache_io(root, trans, cache,
3756 &cache->io_ctl, path, cache->key.objectid);
3757 btrfs_put_block_group(cache);
3760 btrfs_free_path(path);
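/* Report whether @bytenr falls inside a missing or read-only block group. */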
3764 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3766 struct btrfs_block_group_cache *block_group;
3769 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3770 if (!block_group || block_group->ro)
3773 btrfs_put_block_group(block_group);
3777 static const char *alloc_name(u64 flags)
3780 case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3782 case BTRFS_BLOCK_GROUP_METADATA:
3784 case BTRFS_BLOCK_GROUP_DATA:
3786 case BTRFS_BLOCK_GROUP_SYSTEM:
3790 return "invalid-combination";
3794 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3795 u64 total_bytes, u64 bytes_used,
3796 struct btrfs_space_info **space_info)
3798 struct btrfs_space_info *found;
3803 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3804 BTRFS_BLOCK_GROUP_RAID10))
3809 found = __find_space_info(info, flags);
3811 spin_lock(&found->lock);
3812 found->total_bytes += total_bytes;
3813 found->disk_total += total_bytes * factor;
3814 found->bytes_used += bytes_used;
3815 found->disk_used += bytes_used * factor;
3816 if (total_bytes > 0)
3818 spin_unlock(&found->lock);
3819 *space_info = found;
3822 found = kzalloc(sizeof(*found), GFP_NOFS);
3826 ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3832 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3833 INIT_LIST_HEAD(&found->block_groups[i]);
3834 init_rwsem(&found->groups_sem);
3835 spin_lock_init(&found->lock);
3836 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3837 found->total_bytes = total_bytes;
3838 found->disk_total = total_bytes * factor;
3839 found->bytes_used = bytes_used;
3840 found->disk_used = bytes_used * factor;
3841 found->bytes_pinned = 0;
3842 found->bytes_reserved = 0;
3843 found->bytes_readonly = 0;
3844 found->bytes_may_use = 0;
3846 found->max_extent_size = 0;
3847 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3848 found->chunk_alloc = 0;
3850 init_waitqueue_head(&found->wait);
3851 INIT_LIST_HEAD(&found->ro_bgs);
3853 ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3854 info->space_info_kobj, "%s",
3855 alloc_name(found->flags));
3861 *space_info = found;
3862 list_add_rcu(&found->list, &info->space_info);
3863 if (flags & BTRFS_BLOCK_GROUP_DATA)
3864 info->data_sinfo = found;
3869 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3871 u64 extra_flags = chunk_to_extended(flags) &
3872 BTRFS_EXTENDED_PROFILE_MASK;
3874 write_seqlock(&fs_info->profiles_lock);
3875 if (flags & BTRFS_BLOCK_GROUP_DATA)
3876 fs_info->avail_data_alloc_bits |= extra_flags;
3877 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3878 fs_info->avail_metadata_alloc_bits |= extra_flags;
3879 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3880 fs_info->avail_system_alloc_bits |= extra_flags;
3881 write_sequnlock(&fs_info->profiles_lock);
3885 * returns target flags in extended format or 0 if restripe for this
3886 * chunk_type is not in progress
3888 * should be called with either volume_mutex or balance_lock held
3890 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3892 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3898 if (flags & BTRFS_BLOCK_GROUP_DATA &&
3899 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3900 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3901 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3902 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3903 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3904 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3905 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3906 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3913 * @flags: available profiles in extended format (see ctree.h)
3915 * Returns reduced profile in chunk format. If profile changing is in
3916 * progress (either running or paused) picks the target profile (if it's
3917 * already available), otherwise falls back to plain reducing.
3919 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3921 u64 num_devices = root->fs_info->fs_devices->rw_devices;
3927 * see if restripe for this chunk_type is in progress, if so
3928 * try to reduce to the target profile
3930 spin_lock(&root->fs_info->balance_lock);
3931 target = get_restripe_target(root->fs_info, flags);
3933 /* pick target profile only if it's already available */
3934 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3935 spin_unlock(&root->fs_info->balance_lock);
3936 return extended_to_chunk(target);
3939 spin_unlock(&root->fs_info->balance_lock);
3941 /* First, mask out the RAID levels which aren't possible */
3942 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3943 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
3944 allowed |= btrfs_raid_group[raid_type];
3948 if (allowed & BTRFS_BLOCK_GROUP_RAID6)
3949 allowed = BTRFS_BLOCK_GROUP_RAID6;
3950 else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
3951 allowed = BTRFS_BLOCK_GROUP_RAID5;
3952 else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
3953 allowed = BTRFS_BLOCK_GROUP_RAID10;
3954 else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
3955 allowed = BTRFS_BLOCK_GROUP_RAID1;
3956 else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
3957 allowed = BTRFS_BLOCK_GROUP_RAID0;
3959 flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
3961 return extended_to_chunk(flags | allowed);
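/*
 * OR the currently available allocation bits for this block group type
 * into @orig_flags and reduce the result to a single usable profile.
 */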
3964 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3971 seq = read_seqbegin(&root->fs_info->profiles_lock);
3973 if (flags & BTRFS_BLOCK_GROUP_DATA)
3974 flags |= root->fs_info->avail_data_alloc_bits;
3975 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3976 flags |= root->fs_info->avail_system_alloc_bits;
3977 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3978 flags |= root->fs_info->avail_metadata_alloc_bits;
3979 } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3981 return btrfs_reduce_alloc_profile(root, flags);
3984 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3990 flags = BTRFS_BLOCK_GROUP_DATA;
3991 else if (root == root->fs_info->chunk_root)
3992 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3994 flags = BTRFS_BLOCK_GROUP_METADATA;
3996 ret = get_alloc_profile(root, flags);
4000 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
4002 struct btrfs_space_info *data_sinfo;
4003 struct btrfs_root *root = BTRFS_I(inode)->root;
4004 struct btrfs_fs_info *fs_info = root->fs_info;
4007 int need_commit = 2;
4008 int have_pinned_space;
4010 /* make sure bytes are sectorsize aligned */
4011 bytes = ALIGN(bytes, root->sectorsize);
4013 if (btrfs_is_free_space_inode(inode)) {
4015 ASSERT(current->journal_info);
4018 data_sinfo = fs_info->data_sinfo;
4023 /* make sure we have enough space to handle the data first */
4024 spin_lock(&data_sinfo->lock);
4025 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4026 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4027 data_sinfo->bytes_may_use;
4029 if (used + bytes > data_sinfo->total_bytes) {
4030 struct btrfs_trans_handle *trans;
4033 * if we don't have enough free bytes in this space then we need
4034 * to alloc a new chunk.
4036 if (!data_sinfo->full) {
4039 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4040 spin_unlock(&data_sinfo->lock);
4042 alloc_target = btrfs_get_alloc_profile(root, 1);
4044 * It is ugly that we don't call nolock join
4045 * transaction for the free space inode case here.
4046 * But it is safe because we only do the data space
4047 * reservation for the free space cache in the
4048 * transaction context; the common join transaction
4049 * just increases the counter of the current transaction
4050 * handle and doesn't try to acquire the trans_lock of
4053 trans = btrfs_join_transaction(root);
4055 return PTR_ERR(trans);
4057 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4059 CHUNK_ALLOC_NO_FORCE);
4060 btrfs_end_transaction(trans, root);
4065 have_pinned_space = 1;
4071 data_sinfo = fs_info->data_sinfo;
4077 * If we don't have enough pinned space to deal with this
4078 * allocation, and no chunk was removed in the current transaction,
4079 * don't bother committing the transaction.
4081 have_pinned_space = percpu_counter_compare(
4082 &data_sinfo->total_bytes_pinned,
4083 used + bytes - data_sinfo->total_bytes);
4084 spin_unlock(&data_sinfo->lock);
4086 /* commit the current transaction and try again */
4089 !atomic_read(&root->fs_info->open_ioctl_trans)) {
4092 if (need_commit > 0) {
4093 btrfs_start_delalloc_roots(fs_info, 0, -1);
4094 btrfs_wait_ordered_roots(fs_info, -1);
4097 trans = btrfs_join_transaction(root);
4099 return PTR_ERR(trans);
4100 if (have_pinned_space >= 0 ||
4101 test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4102 &trans->transaction->flags) ||
4104 ret = btrfs_commit_transaction(trans, root);
4108 * The cleaner kthread might still be doing iput
4109 * operations. Wait for it to finish so that
4110 * more space is released.
4112 mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
4113 mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
4116 btrfs_end_transaction(trans, root);
4120 trace_btrfs_space_reservation(root->fs_info,
4121 "space_info:enospc",
4122 data_sinfo->flags, bytes, 1);
4125 data_sinfo->bytes_may_use += bytes;
4126 trace_btrfs_space_reservation(root->fs_info, "space_info",
4127 data_sinfo->flags, bytes, 1);
4128 spin_unlock(&data_sinfo->lock);
4134 * New check_data_free_space() with the ability for precise data reservation.
4135 * Will replace the old btrfs_check_data_free_space(), but for patch splitting,
4136 * add a new function first and then replace it.
4138 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4140 struct btrfs_root *root = BTRFS_I(inode)->root;
4143 /* align the range */
4144 len = round_up(start + len, root->sectorsize) -
4145 round_down(start, root->sectorsize);
4146 start = round_down(start, root->sectorsize);
4148 ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4153 * Use the new btrfs_qgroup_reserve_data to reserve precise data space
4155 * TODO: Find a good method to avoid reserving data space for a NOCOW
4156 * range, but don't impact performance on the quota-disabled case.
4158 ret = btrfs_qgroup_reserve_data(inode, start, len);
4163 * Called if we need to clear a data reservation for this inode
4164 * Normally in an error case.
4166 * This one will *NOT* use the accurate qgroup reserved space API; it is only
4167 * for cases where we can't sleep and are sure it won't affect qgroup reserved space.
4168 * Like clear_bit_hook().
4170 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4173 struct btrfs_root *root = BTRFS_I(inode)->root;
4174 struct btrfs_space_info *data_sinfo;
4176 /* Make sure the range is aligned to sectorsize */
4177 len = round_up(start + len, root->sectorsize) -
4178 round_down(start, root->sectorsize);
4179 start = round_down(start, root->sectorsize);
4181 data_sinfo = root->fs_info->data_sinfo;
4182 spin_lock(&data_sinfo->lock);
4183 if (WARN_ON(data_sinfo->bytes_may_use < len))
4184 data_sinfo->bytes_may_use = 0;
4186 data_sinfo->bytes_may_use -= len;
4187 trace_btrfs_space_reservation(root->fs_info, "space_info",
4188 data_sinfo->flags, len, 0);
4189 spin_unlock(&data_sinfo->lock);
4193 * Called if we need to clear a data reservation for this inode
4194 * Normally in an error case.
4196 * This one will handle the per-inode data rsv map for accurate reserved
4199 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4201 btrfs_free_reserved_data_space_noquota(inode, start, len);
4202 btrfs_qgroup_free_data(inode, start, len);
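/*
 * Flag every metadata space_info with CHUNK_ALLOC_FORCE so the next
 * allocation attempt is forced to create a new chunk.
 */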
4205 static void force_metadata_allocation(struct btrfs_fs_info *info)
4207 struct list_head *head = &info->space_info;
4208 struct btrfs_space_info *found;
4211 list_for_each_entry_rcu(found, head, list) {
4212 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4213 found->force_alloc = CHUNK_ALLOC_FORCE;
4218 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4220 return (global->size << 1);
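/*
 * Heuristic for the helper below: a CHUNK_ALLOC_FORCE request always
 * allocates, CHUNK_ALLOC_LIMITED only wants a small cushion of free space
 * (roughly 1% of the FS, at least 64M), and otherwise we allocate once
 * the existing chunks are around 80% used.
 */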
4223 static int should_alloc_chunk(struct btrfs_root *root,
4224 struct btrfs_space_info *sinfo, int force)
4226 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4227 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4228 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4231 if (force == CHUNK_ALLOC_FORCE)
4235 * We need to take into account the global rsv because for all intents
4236 * and purposes it's used space. Don't worry about locking the
4237 * global_rsv, it doesn't change except when the transaction commits.
4239 if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4240 num_allocated += calc_global_rsv_need_space(global_rsv);
4243 * in limited mode, we want to have some free space up to
4244 * about 1% of the FS size.
4246 if (force == CHUNK_ALLOC_LIMITED) {
4247 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4248 thresh = max_t(u64, 64 * 1024 * 1024,
4249 div_factor_fine(thresh, 1));
4251 if (num_bytes - num_allocated < thresh)
4255 if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4260 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4264 if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4265 BTRFS_BLOCK_GROUP_RAID0 |
4266 BTRFS_BLOCK_GROUP_RAID5 |
4267 BTRFS_BLOCK_GROUP_RAID6))
4268 num_dev = root->fs_info->fs_devices->rw_devices;
4269 else if (type & BTRFS_BLOCK_GROUP_RAID1)
4272 num_dev = 1; /* DUP or single */
4278 * If @is_allocation is true, reserve space in the system space info necessary
4279 * for allocating a chunk, otherwise if it's false, reserve space necessary for
4282 void check_system_chunk(struct btrfs_trans_handle *trans,
4283 struct btrfs_root *root,
4286 struct btrfs_space_info *info;
4293 * Needed because we can end up allocating a system chunk and need an
4294 * atomic and race-free space reservation in the chunk block reserve.
4296 ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4298 info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4299 spin_lock(&info->lock);
4300 left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4301 info->bytes_reserved - info->bytes_readonly -
4302 info->bytes_may_use;
4303 spin_unlock(&info->lock);
4305 num_devs = get_profile_num_devs(root, type);
4307 /* num_devs device items to update and 1 chunk item to add or remove */
4308 thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4309 btrfs_calc_trans_metadata_size(root, 1);
4311 if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4312 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4313 left, thresh, type);
4314 dump_space_info(info, 0, 0);
4317 if (left < thresh) {
4320 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4322 * Ignore failure to create system chunk. We might end up not
4323 * needing it, as we might not need to COW all nodes/leafs from
4324 * the paths we visit in the chunk tree (they were already COWed
4325 * or created in the current transaction for example).
4327 ret = btrfs_alloc_chunk(trans, root, flags);
4331 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4332 &root->fs_info->chunk_block_rsv,
4333 thresh, BTRFS_RESERVE_NO_FLUSH);
4335 trans->chunk_bytes_reserved += thresh;
4339 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4340 struct btrfs_root *extent_root, u64 flags, int force)
4342 struct btrfs_space_info *space_info;
4343 struct btrfs_fs_info *fs_info = extent_root->fs_info;
4344 int wait_for_alloc = 0;
4347 /* Don't re-enter if we're already allocating a chunk */
4348 if (trans->allocating_chunk)
4351 space_info = __find_space_info(extent_root->fs_info, flags);
4353 ret = update_space_info(extent_root->fs_info, flags,
4355 BUG_ON(ret); /* -ENOMEM */
4357 BUG_ON(!space_info); /* Logic error */
4360 spin_lock(&space_info->lock);
4361 if (force < space_info->force_alloc)
4362 force = space_info->force_alloc;
4363 if (space_info->full) {
4364 if (should_alloc_chunk(extent_root, space_info, force))
4368 spin_unlock(&space_info->lock);
4372 if (!should_alloc_chunk(extent_root, space_info, force)) {
4373 spin_unlock(&space_info->lock);
4375 } else if (space_info->chunk_alloc) {
4378 space_info->chunk_alloc = 1;
4381 spin_unlock(&space_info->lock);
4383 mutex_lock(&fs_info->chunk_mutex);
4386 * The chunk_mutex is held throughout the entirety of a chunk
4387 * allocation, so once we've acquired the chunk_mutex we know that the
4388 * other guy is done and we need to recheck and see if we should
4391 if (wait_for_alloc) {
4392 mutex_unlock(&fs_info->chunk_mutex);
4397 trans->allocating_chunk = true;
4400 * If we have mixed data/metadata chunks we want to make sure we keep
4401 * allocating mixed chunks instead of individual chunks.
4403 if (btrfs_mixed_space_info(space_info))
4404 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4407 * if we're doing a data chunk, go ahead and make sure that
4408 * we keep a reasonable number of metadata chunks allocated in the
4411 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4412 fs_info->data_chunk_allocations++;
4413 if (!(fs_info->data_chunk_allocations %
4414 fs_info->metadata_ratio))
4415 force_metadata_allocation(fs_info);
4419 * Check if we have enough space in SYSTEM chunk because we may need
4420 * to update devices.
4422 check_system_chunk(trans, extent_root, flags);
4424 ret = btrfs_alloc_chunk(trans, extent_root, flags);
4425 trans->allocating_chunk = false;
4427 spin_lock(&space_info->lock);
4428 if (ret < 0 && ret != -ENOSPC)
4431 space_info->full = 1;
4435 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4437 space_info->chunk_alloc = 0;
4438 spin_unlock(&space_info->lock);
4439 mutex_unlock(&fs_info->chunk_mutex);
4441 * When we allocate a new chunk we reserve space in the chunk block
4442 * reserve to make sure we can COW nodes/leafs in the chunk tree or
4443 * add new nodes/leafs to it if we end up needing to do it when
4444 * inserting the chunk item and updating device items as part of the
4445 * second phase of chunk allocation, performed by
4446 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4447 * large number of new block groups to create in our transaction
4448 * handle's new_bgs list to avoid exhausting the chunk block reserve
4449 * in extreme cases - like having a single transaction create many new
4450 * block groups when starting to write out the free space caches of all
4451 * the block groups that were made dirty during the lifetime of the
4454 if (trans->can_flush_pending_bgs &&
4455 trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4456 btrfs_create_pending_block_groups(trans, trans->root);
4457 btrfs_trans_release_chunk_metadata(trans);
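/*
 * Decide whether a reservation of @bytes may overcommit the space_info:
 * committed usage plus the global reserve must still fit, and beyond that
 * we only lean on a fraction of the unallocated device space (an eighth
 * when we can flush everything, half otherwise, halved again for mirrored
 * profiles).
 */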
4462 static int can_overcommit(struct btrfs_root *root,
4463 struct btrfs_space_info *space_info, u64 bytes,
4464 enum btrfs_reserve_flush_enum flush)
4466 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4467 u64 profile = btrfs_get_alloc_profile(root, 0);
4472 used = space_info->bytes_used + space_info->bytes_reserved +
4473 space_info->bytes_pinned + space_info->bytes_readonly;
4476 * We only want to allow overcommitting if we have lots of actual space
4477 * free, but if we don't have enough space to handle the global reserve
4478 * space then we could end up having a real enospc problem when trying
4479 * to allocate a chunk or some other such important allocation.
4481 spin_lock(&global_rsv->lock);
4482 space_size = calc_global_rsv_need_space(global_rsv);
4483 spin_unlock(&global_rsv->lock);
4484 if (used + space_size >= space_info->total_bytes)
4487 used += space_info->bytes_may_use;
4489 spin_lock(&root->fs_info->free_chunk_lock);
4490 avail = root->fs_info->free_chunk_space;
4491 spin_unlock(&root->fs_info->free_chunk_lock);
4494 * If we have dup, raid1 or raid10 then only half of the free
4495 * space is actually usable. For raid56, the space info used
4496 * doesn't include the parity drive, so we don't have to
4499 if (profile & (BTRFS_BLOCK_GROUP_DUP |
4500 BTRFS_BLOCK_GROUP_RAID1 |
4501 BTRFS_BLOCK_GROUP_RAID10))
4505 * If we aren't flushing all things, let us overcommit up to
4506 * half of the space. If we can flush, don't let us overcommit
4507 * too much, let it overcommit up to 1/8 of the space.
4509 if (flush == BTRFS_RESERVE_FLUSH_ALL)
4514 if (used + bytes < space_info->total_bytes + avail)
4519 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4520 unsigned long nr_pages, int nr_items)
4522 struct super_block *sb = root->fs_info->sb;
4524 if (down_read_trylock(&sb->s_umount)) {
4525 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4526 up_read(&sb->s_umount);
4529 * We needn't worry about the filesystem going from r/w to r/o even
4530 * though we don't acquire the ->s_umount mutex, because the filesystem
4531 * should guarantee that the delalloc inode list is empty after the
4532 * filesystem is read-only (all dirty pages are written to
4535 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4536 if (!current->journal_info)
4537 btrfs_wait_ordered_roots(root->fs_info, nr_items);
4541 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4546 bytes = btrfs_calc_trans_metadata_size(root, 1);
4547 nr = (int)div64_u64(to_reclaim, bytes);
4553 #define EXTENT_SIZE_PER_ITEM (256 * 1024)
4556 * shrink metadata reservation for delalloc
4558 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4561 struct btrfs_block_rsv *block_rsv;
4562 struct btrfs_space_info *space_info;
4563 struct btrfs_trans_handle *trans;
4567 unsigned long nr_pages;
4570 enum btrfs_reserve_flush_enum flush;
4572 /* Calc the number of pages we need to flush for space reservation */
4573 items = calc_reclaim_items_nr(root, to_reclaim);
4574 to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4576 trans = (struct btrfs_trans_handle *)current->journal_info;
4577 block_rsv = &root->fs_info->delalloc_block_rsv;
4578 space_info = block_rsv->space_info;
4580 delalloc_bytes = percpu_counter_sum_positive(
4581 &root->fs_info->delalloc_bytes);
4582 if (delalloc_bytes == 0) {
4586 btrfs_wait_ordered_roots(root->fs_info, items);
4591 while (delalloc_bytes && loops < 3) {
4592 max_reclaim = min(delalloc_bytes, to_reclaim);
4593 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4594 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4596 * We need to wait for the async pages to actually start before
4599 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4603 if (max_reclaim <= nr_pages)
4606 max_reclaim -= nr_pages;
4608 wait_event(root->fs_info->async_submit_wait,
4609 atomic_read(&root->fs_info->async_delalloc_pages) <=
4613 flush = BTRFS_RESERVE_FLUSH_ALL;
4615 flush = BTRFS_RESERVE_NO_FLUSH;
4616 spin_lock(&space_info->lock);
4617 if (can_overcommit(root, space_info, orig, flush)) {
4618 spin_unlock(&space_info->lock);
4621 spin_unlock(&space_info->lock);
4624 if (wait_ordered && !trans) {
4625 btrfs_wait_ordered_roots(root->fs_info, items);
4627 time_left = schedule_timeout_killable(1);
4631 delalloc_bytes = percpu_counter_sum_positive(
4632 &root->fs_info->delalloc_bytes);
4637 * may_commit_transaction - possibly commit the transaction if it's ok to
4638 * @root - the root we're allocating for
4639 * @bytes - the number of bytes we want to reserve
4640 * @force - force the commit
4642 * This will check to make sure that committing the transaction will actually
4643 * get us somewhere and then commit the transaction if it does. Otherwise it
4644 * will return -ENOSPC.
4646 static int may_commit_transaction(struct btrfs_root *root,
4647 struct btrfs_space_info *space_info,
4648 u64 bytes, int force)
4650 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4651 struct btrfs_trans_handle *trans;
4653 trans = (struct btrfs_trans_handle *)current->journal_info;
4660 /* See if there is enough pinned space to make this reservation */
4661 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4666 * See if there is some space in the delayed insertion reservation for
4669 if (space_info != delayed_rsv->space_info)
4672 spin_lock(&delayed_rsv->lock);
4673 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4674 bytes - delayed_rsv->size) >= 0) {
4675 spin_unlock(&delayed_rsv->lock);
4678 spin_unlock(&delayed_rsv->lock);
4681 trans = btrfs_join_transaction(root);
4685 return btrfs_commit_transaction(trans, root);
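/*
 * Illustrative note (not part of the original source): this is the last
 * resort of the flushing state machine.  flush_space() below falls back to
 * it in its COMMIT_TRANS case, roughly:
 *
 *	ret = may_commit_transaction(root, space_info, orig_bytes, 0);
 *
 * With force == 0 the commit only happens when the pinned-bytes checks above
 * suggest that committing would actually free enough space for the request.
 */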
4689 FLUSH_DELAYED_ITEMS_NR = 1,
4690 FLUSH_DELAYED_ITEMS = 2,
4692 FLUSH_DELALLOC_WAIT = 4,
4697 static int flush_space(struct btrfs_root *root,
4698 struct btrfs_space_info *space_info, u64 num_bytes,
4699 u64 orig_bytes, int state)
4701 struct btrfs_trans_handle *trans;
4706 case FLUSH_DELAYED_ITEMS_NR:
4707 case FLUSH_DELAYED_ITEMS:
4708 if (state == FLUSH_DELAYED_ITEMS_NR)
4709 nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4713 trans = btrfs_join_transaction(root);
4714 if (IS_ERR(trans)) {
4715 ret = PTR_ERR(trans);
4718 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4719 btrfs_end_transaction(trans, root);
4721 case FLUSH_DELALLOC:
4722 case FLUSH_DELALLOC_WAIT:
4723 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4724 state == FLUSH_DELALLOC_WAIT);
4727 trans = btrfs_join_transaction(root);
4728 if (IS_ERR(trans)) {
4729 ret = PTR_ERR(trans);
4732 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4733 btrfs_get_alloc_profile(root, 0),
4734 CHUNK_ALLOC_NO_FORCE);
4735 btrfs_end_transaction(trans, root);
4740 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4751 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4752 struct btrfs_space_info *space_info)
4758 to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4760 spin_lock(&space_info->lock);
4761 if (can_overcommit(root, space_info, to_reclaim,
4762 BTRFS_RESERVE_FLUSH_ALL)) {
4767 used = space_info->bytes_used + space_info->bytes_reserved +
4768 space_info->bytes_pinned + space_info->bytes_readonly +
4769 space_info->bytes_may_use;
4770 if (can_overcommit(root, space_info, 1024 * 1024,
4771 BTRFS_RESERVE_FLUSH_ALL))
4772 expected = div_factor_fine(space_info->total_bytes, 95);
4774 expected = div_factor_fine(space_info->total_bytes, 90);
4776 if (used > expected)
4777 to_reclaim = used - expected;
4780 to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4781 space_info->bytes_reserved);
4783 spin_unlock(&space_info->lock);
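/*
 * Worked example (illustrative, not part of the original source): suppose
 * total_bytes is 10GiB and used (bytes_used + reserved + pinned + readonly +
 * may_use) is 9.6GiB.  If even a 1MiB overcommit is not possible, expected is
 * 90% of total, i.e. 9GiB, so to_reclaim becomes 9.6GiB - 9GiB = 0.6GiB,
 * further clamped to bytes_may_use + bytes_reserved.
 */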
4788 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4789 struct btrfs_fs_info *fs_info, u64 used)
4791 u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4793 /* If we're just plain full then async reclaim just slows us down. */
4794 if (space_info->bytes_used >= thresh)
4797 return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4798 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
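/*
 * Illustrative note (not part of the original source): the threshold
 * computed above sits at 98% of the space info, e.g. roughly 9.8GiB of a
 * 10GiB metadata space.  Once bytes_used alone crosses that line we are
 * effectively full and background reclaim cannot help, so it is skipped.
 */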
4801 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4802 struct btrfs_fs_info *fs_info,
4807 spin_lock(&space_info->lock);
4809 * We have run out of space and have not gotten any free space via
4810 * flush_space, so don't bother doing async reclaim.
4812 if (flush_state > COMMIT_TRANS && space_info->full) {
4813 spin_unlock(&space_info->lock);
4817 used = space_info->bytes_used + space_info->bytes_reserved +
4818 space_info->bytes_pinned + space_info->bytes_readonly +
4819 space_info->bytes_may_use;
4820 if (need_do_async_reclaim(space_info, fs_info, used)) {
4821 spin_unlock(&space_info->lock);
4824 spin_unlock(&space_info->lock);
4829 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4831 struct btrfs_fs_info *fs_info;
4832 struct btrfs_space_info *space_info;
4836 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4837 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4839 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4844 flush_state = FLUSH_DELAYED_ITEMS_NR;
4846 flush_space(fs_info->fs_root, space_info, to_reclaim,
4847 to_reclaim, flush_state);
4849 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4852 } while (flush_state < COMMIT_TRANS);
4855 void btrfs_init_async_reclaim_work(struct work_struct *work)
4857 INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4861 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4862 * @root - the root we're allocating for
4863 * @block_rsv - the block_rsv we're allocating for
4864 * @orig_bytes - the number of bytes we want
4865 * @flush - whether or not we can flush to make our reservation
4867 * This will reserve orig_bytes number of bytes from the space info associated
4868 * with the block_rsv. If there is not enough space it will make an attempt to
4869 * flush out space to make room. It will do this by flushing delalloc if
4870 * possible or committing the transaction. If flush is 0 then no attempts to
4871 * regain reservations will be made and this will fail if there is not enough
4874 static int reserve_metadata_bytes(struct btrfs_root *root,
4875 struct btrfs_block_rsv *block_rsv,
4877 enum btrfs_reserve_flush_enum flush)
4879 struct btrfs_space_info *space_info = block_rsv->space_info;
4881 u64 num_bytes = orig_bytes;
4882 int flush_state = FLUSH_DELAYED_ITEMS_NR;
4884 bool flushing = false;
4888 spin_lock(&space_info->lock);
4890 * We only want to wait if somebody other than us is flushing and we
4891 * are actually allowed to flush all things.
4893 while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4894 space_info->flush) {
4895 spin_unlock(&space_info->lock);
4897 * If we have a trans handle we can't wait because the flusher
4898 * may have to commit the transaction, which would mean we would
4899 * deadlock since we are waiting for the flusher to finish, but
4900 * hold the current transaction open.
4902 if (current->journal_info)
4904 ret = wait_event_killable(space_info->wait, !space_info->flush);
4905 /* Must have been killed, return */
4909 spin_lock(&space_info->lock);
4913 used = space_info->bytes_used + space_info->bytes_reserved +
4914 space_info->bytes_pinned + space_info->bytes_readonly +
4915 space_info->bytes_may_use;
4918 * The idea here is that if we've not already over-reserved the block
4919 * group then we can go ahead and save our reservation first and then
4920 * start flushing if we need to. Otherwise, if we've already overcommitted,
4921 * let's start flushing stuff first and then come back and try to make
4924 if (used <= space_info->total_bytes) {
4925 if (used + orig_bytes <= space_info->total_bytes) {
4926 space_info->bytes_may_use += orig_bytes;
4927 trace_btrfs_space_reservation(root->fs_info,
4928 "space_info", space_info->flags, orig_bytes, 1);
4932 * Ok, set num_bytes to orig_bytes since we aren't
4933 * overcommitted, this way we only try and reclaim what
4936 num_bytes = orig_bytes;
4940 * Ok, we're overcommitted, set num_bytes to the overcommitted
4941 * amount plus the number of bytes that we need for this
4944 num_bytes = used - space_info->total_bytes +
4948 if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4949 space_info->bytes_may_use += orig_bytes;
4950 trace_btrfs_space_reservation(root->fs_info, "space_info",
4951 space_info->flags, orig_bytes,
4957 * Couldn't make our reservation, save our place so while we're trying
4958 * to reclaim space we can actually use it instead of somebody else
4959 * stealing it from us.
4961 * We make the other tasks wait for the flush only when we can flush
4964 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4966 space_info->flush = 1;
4967 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4970 * We will do the space reservation dance during log replay,
4971 * which means we won't have fs_info->fs_root set, so don't do
4972 * the async reclaim as we will panic.
4974 if (!root->fs_info->log_root_recovering &&
4975 need_do_async_reclaim(space_info, root->fs_info, used) &&
4976 !work_busy(&root->fs_info->async_reclaim_work))
4977 queue_work(system_unbound_wq,
4978 &root->fs_info->async_reclaim_work);
4980 spin_unlock(&space_info->lock);
4982 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4985 ret = flush_space(root, space_info, num_bytes, orig_bytes,
4990 * If we are FLUSH_LIMIT, we cannot flush delalloc, or a deadlock
4991 * would occur. So skip the delalloc flush.
4993 if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4994 (flush_state == FLUSH_DELALLOC ||
4995 flush_state == FLUSH_DELALLOC_WAIT))
4996 flush_state = ALLOC_CHUNK;
5000 else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
5001 flush_state < COMMIT_TRANS)
5003 else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
5004 flush_state <= COMMIT_TRANS)
5008 if (ret == -ENOSPC &&
5009 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5010 struct btrfs_block_rsv *global_rsv =
5011 &root->fs_info->global_block_rsv;
5013 if (block_rsv != global_rsv &&
5014 !block_rsv_use_bytes(global_rsv, orig_bytes))
5018 trace_btrfs_space_reservation(root->fs_info,
5019 "space_info:enospc",
5020 space_info->flags, orig_bytes, 1);
5022 spin_lock(&space_info->lock);
5023 space_info->flush = 0;
5024 wake_up_all(&space_info->wait);
5025 spin_unlock(&space_info->lock);
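/*
 * Illustrative summary (not part of the original source): with
 * BTRFS_RESERVE_FLUSH_ALL the loop above walks flush_state from
 * FLUSH_DELAYED_ITEMS_NR up to and including COMMIT_TRANS, retrying the
 * reservation after each flush_space() call.  BTRFS_RESERVE_FLUSH_LIMIT
 * skips the delalloc states (to avoid deadlocks) and stops before
 * COMMIT_TRANS; BTRFS_RESERVE_NO_FLUSH never enters the loop at all.
 */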
5030 static struct btrfs_block_rsv *get_block_rsv(
5031 const struct btrfs_trans_handle *trans,
5032 const struct btrfs_root *root)
5034 struct btrfs_block_rsv *block_rsv = NULL;
5036 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5037 (root == root->fs_info->csum_root && trans->adding_csums) ||
5038 (root == root->fs_info->uuid_root))
5039 block_rsv = trans->block_rsv;
5042 block_rsv = root->block_rsv;
5045 block_rsv = &root->fs_info->empty_block_rsv;
5050 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5054 spin_lock(&block_rsv->lock);
5055 if (block_rsv->reserved >= num_bytes) {
5056 block_rsv->reserved -= num_bytes;
5057 if (block_rsv->reserved < block_rsv->size)
5058 block_rsv->full = 0;
5061 spin_unlock(&block_rsv->lock);
5065 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5066 u64 num_bytes, int update_size)
5068 spin_lock(&block_rsv->lock);
5069 block_rsv->reserved += num_bytes;
5071 block_rsv->size += num_bytes;
5072 else if (block_rsv->reserved >= block_rsv->size)
5073 block_rsv->full = 1;
5074 spin_unlock(&block_rsv->lock);
5077 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5078 struct btrfs_block_rsv *dest, u64 num_bytes,
5081 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5084 if (global_rsv->space_info != dest->space_info)
5087 spin_lock(&global_rsv->lock);
5088 min_bytes = div_factor(global_rsv->size, min_factor);
5089 if (global_rsv->reserved < min_bytes + num_bytes) {
5090 spin_unlock(&global_rsv->lock);
5093 global_rsv->reserved -= num_bytes;
5094 if (global_rsv->reserved < global_rsv->size)
5095 global_rsv->full = 0;
5096 spin_unlock(&global_rsv->lock);
5098 block_rsv_add_bytes(dest, num_bytes, 1);
5102 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5103 struct btrfs_block_rsv *block_rsv,
5104 struct btrfs_block_rsv *dest, u64 num_bytes)
5106 struct btrfs_space_info *space_info = block_rsv->space_info;
5108 spin_lock(&block_rsv->lock);
5109 if (num_bytes == (u64)-1)
5110 num_bytes = block_rsv->size;
5111 block_rsv->size -= num_bytes;
5112 if (block_rsv->reserved >= block_rsv->size) {
5113 num_bytes = block_rsv->reserved - block_rsv->size;
5114 block_rsv->reserved = block_rsv->size;
5115 block_rsv->full = 1;
5119 spin_unlock(&block_rsv->lock);
5121 if (num_bytes > 0) {
5123 spin_lock(&dest->lock);
5127 bytes_to_add = dest->size - dest->reserved;
5128 bytes_to_add = min(num_bytes, bytes_to_add);
5129 dest->reserved += bytes_to_add;
5130 if (dest->reserved >= dest->size)
5132 num_bytes -= bytes_to_add;
5134 spin_unlock(&dest->lock);
5137 spin_lock(&space_info->lock);
5138 space_info->bytes_may_use -= num_bytes;
5139 trace_btrfs_space_reservation(fs_info, "space_info",
5140 space_info->flags, num_bytes, 0);
5141 spin_unlock(&space_info->lock);
5146 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5147 struct btrfs_block_rsv *dst, u64 num_bytes)
5151 ret = block_rsv_use_bytes(src, num_bytes);
5155 block_rsv_add_bytes(dst, num_bytes, 1);
5159 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5161 memset(rsv, 0, sizeof(*rsv));
5162 spin_lock_init(&rsv->lock);
5166 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5167 unsigned short type)
5169 struct btrfs_block_rsv *block_rsv;
5170 struct btrfs_fs_info *fs_info = root->fs_info;
5172 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5176 btrfs_init_block_rsv(block_rsv, type);
5177 block_rsv->space_info = __find_space_info(fs_info,
5178 BTRFS_BLOCK_GROUP_METADATA);
5182 void btrfs_free_block_rsv(struct btrfs_root *root,
5183 struct btrfs_block_rsv *rsv)
5187 btrfs_block_rsv_release(root, rsv, (u64)-1);
5191 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5196 int btrfs_block_rsv_add(struct btrfs_root *root,
5197 struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5198 enum btrfs_reserve_flush_enum flush)
5205 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5207 block_rsv_add_bytes(block_rsv, num_bytes, 1);
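/*
 * Usage sketch (illustrative, not part of the original source; the
 * BTRFS_BLOCK_RSV_TEMP type is assumed from the headers):
 *
 *	struct btrfs_block_rsv *rsv;
 *	int ret;
 *
 *	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		goto out;
 *	... consume the reservation ...
 * out:
 *	btrfs_free_block_rsv(root, rsv);
 */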
5214 int btrfs_block_rsv_check(struct btrfs_root *root,
5215 struct btrfs_block_rsv *block_rsv, int min_factor)
5223 spin_lock(&block_rsv->lock);
5224 num_bytes = div_factor(block_rsv->size, min_factor);
5225 if (block_rsv->reserved >= num_bytes)
5227 spin_unlock(&block_rsv->lock);
5232 int btrfs_block_rsv_refill(struct btrfs_root *root,
5233 struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5234 enum btrfs_reserve_flush_enum flush)
5242 spin_lock(&block_rsv->lock);
5243 num_bytes = min_reserved;
5244 if (block_rsv->reserved >= num_bytes)
5247 num_bytes -= block_rsv->reserved;
5248 spin_unlock(&block_rsv->lock);
5253 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5255 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5262 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5263 struct btrfs_block_rsv *dst_rsv,
5266 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5269 void btrfs_block_rsv_release(struct btrfs_root *root,
5270 struct btrfs_block_rsv *block_rsv,
5273 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5274 if (global_rsv == block_rsv ||
5275 block_rsv->space_info != global_rsv->space_info)
5277 block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5282 * helper to calculate the size of the global block reservation.
5283 * the desired value is the sum of space used by the extent tree,
5284 * checksum tree and root tree
5286 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5288 struct btrfs_space_info *sinfo;
5292 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5294 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5295 spin_lock(&sinfo->lock);
5296 data_used = sinfo->bytes_used;
5297 spin_unlock(&sinfo->lock);
5299 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5300 spin_lock(&sinfo->lock);
5301 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5303 meta_used = sinfo->bytes_used;
5304 spin_unlock(&sinfo->lock);
5306 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5308 num_bytes += div_u64(data_used + meta_used, 50);
5310 if (num_bytes * 3 > meta_used)
5311 num_bytes = div_u64(meta_used, 3);
5313 return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
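/*
 * Worked example (illustrative, not part of the original source; assumes a
 * 4KiB block size and 4-byte crc32c checksums): with 100GiB of data in use,
 * the checksum term is (100GiB / 4KiB) * 4 = 100MiB, and the 2% term adds
 * (data_used + meta_used) / 50 on top of that.  Whatever the sum, the result
 * is capped at one third of the metadata currently used and then rounded up
 * to a multiple of nodesize << 10.
 */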
5316 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5318 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5319 struct btrfs_space_info *sinfo = block_rsv->space_info;
5322 num_bytes = calc_global_metadata_size(fs_info);
5324 spin_lock(&sinfo->lock);
5325 spin_lock(&block_rsv->lock);
5327 block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5329 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5330 sinfo->bytes_reserved + sinfo->bytes_readonly +
5331 sinfo->bytes_may_use;
5333 if (sinfo->total_bytes > num_bytes) {
5334 num_bytes = sinfo->total_bytes - num_bytes;
5335 block_rsv->reserved += num_bytes;
5336 sinfo->bytes_may_use += num_bytes;
5337 trace_btrfs_space_reservation(fs_info, "space_info",
5338 sinfo->flags, num_bytes, 1);
5341 if (block_rsv->reserved >= block_rsv->size) {
5342 num_bytes = block_rsv->reserved - block_rsv->size;
5343 sinfo->bytes_may_use -= num_bytes;
5344 trace_btrfs_space_reservation(fs_info, "space_info",
5345 sinfo->flags, num_bytes, 0);
5346 block_rsv->reserved = block_rsv->size;
5347 block_rsv->full = 1;
5350 spin_unlock(&block_rsv->lock);
5351 spin_unlock(&sinfo->lock);
5354 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5356 struct btrfs_space_info *space_info;
5358 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5359 fs_info->chunk_block_rsv.space_info = space_info;
5361 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5362 fs_info->global_block_rsv.space_info = space_info;
5363 fs_info->delalloc_block_rsv.space_info = space_info;
5364 fs_info->trans_block_rsv.space_info = space_info;
5365 fs_info->empty_block_rsv.space_info = space_info;
5366 fs_info->delayed_block_rsv.space_info = space_info;
5368 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5369 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5370 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5371 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5372 if (fs_info->quota_root)
5373 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5374 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5376 update_global_block_rsv(fs_info);
5379 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5381 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5383 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5384 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5385 WARN_ON(fs_info->trans_block_rsv.size > 0);
5386 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5387 WARN_ON(fs_info->chunk_block_rsv.size > 0);
5388 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5389 WARN_ON(fs_info->delayed_block_rsv.size > 0);
5390 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5393 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5394 struct btrfs_root *root)
5396 if (!trans->block_rsv)
5399 if (!trans->bytes_reserved)
5402 trace_btrfs_space_reservation(root->fs_info, "transaction",
5403 trans->transid, trans->bytes_reserved, 0);
5404 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5405 trans->bytes_reserved = 0;
5409 * To be called after all the new block groups attached to the transaction
5410 * handle have been created (btrfs_create_pending_block_groups()).
5412 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5414 struct btrfs_fs_info *fs_info = trans->root->fs_info;
5416 if (!trans->chunk_bytes_reserved)
5419 WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5421 block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5422 trans->chunk_bytes_reserved);
5423 trans->chunk_bytes_reserved = 0;
5426 /* Can only return 0 or -ENOSPC */
5427 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5428 struct inode *inode)
5430 struct btrfs_root *root = BTRFS_I(inode)->root;
5431 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5432 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5435 * We need to hold space in order to delete our orphan item once we've
5436 * added it, so this takes the reservation so we can release it later
5437 * when we are truly done with the orphan item.
5439 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5440 trace_btrfs_space_reservation(root->fs_info, "orphan",
5441 btrfs_ino(inode), num_bytes, 1);
5442 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5445 void btrfs_orphan_release_metadata(struct inode *inode)
5447 struct btrfs_root *root = BTRFS_I(inode)->root;
5448 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5449 trace_btrfs_space_reservation(root->fs_info, "orphan",
5450 btrfs_ino(inode), num_bytes, 0);
5451 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5455 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5456 * root: the root of the parent directory
5457 * rsv: block reservation
5458 * items: the number of items that we need to do reservation for
5459 * qgroup_reserved: used to return the reserved size in qgroup
5461 * This function is used to reserve the space for snapshot/subvolume
5462 * creation and deletion. Those operations are different from the
5463 * common file/directory operations: they change two fs/file trees
5464 * and the root tree, and the number of items that the qgroup reserves
5465 * is different from the free space reservation. So we cannot use
5466 * the space reservation mechanism in start_transaction().
5468 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5469 struct btrfs_block_rsv *rsv,
5471 u64 *qgroup_reserved,
5472 bool use_global_rsv)
5476 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5478 if (root->fs_info->quota_enabled) {
5479 /* One for parent inode, two for dir entries */
5480 num_bytes = 3 * root->nodesize;
5481 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5488 *qgroup_reserved = num_bytes;
5490 num_bytes = btrfs_calc_trans_metadata_size(root, items);
5491 rsv->space_info = __find_space_info(root->fs_info,
5492 BTRFS_BLOCK_GROUP_METADATA);
5493 ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5494 BTRFS_RESERVE_FLUSH_ALL);
5496 if (ret == -ENOSPC && use_global_rsv)
5497 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5499 if (ret && *qgroup_reserved)
5500 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5505 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5506 struct btrfs_block_rsv *rsv,
5507 u64 qgroup_reserved)
5509 btrfs_block_rsv_release(root, rsv, (u64)-1);
5513 * drop_outstanding_extent - drop an outstanding extent
5514 * @inode: the inode we're dropping the extent for
5515 * @num_bytes: the number of bytes we're releasing.
5517 * This is called when we are freeing up an outstanding extent, either called
5518 * after an error or after an extent is written. This will return the number of
5519 * reserved extents that need to be freed. This must be called with
5520 * BTRFS_I(inode)->lock held.
5522 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5524 unsigned drop_inode_space = 0;
5525 unsigned dropped_extents = 0;
5526 unsigned num_extents = 0;
5528 num_extents = (unsigned)div64_u64(num_bytes +
5529 BTRFS_MAX_EXTENT_SIZE - 1,
5530 BTRFS_MAX_EXTENT_SIZE);
5531 ASSERT(num_extents);
5532 ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5533 BTRFS_I(inode)->outstanding_extents -= num_extents;
5535 if (BTRFS_I(inode)->outstanding_extents == 0 &&
5536 test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5537 &BTRFS_I(inode)->runtime_flags))
5538 drop_inode_space = 1;
5541 * If we have more than or the same number of outstanding extents as we
5542 * have reserved, then we need to leave the reserved extents count alone.
5544 if (BTRFS_I(inode)->outstanding_extents >=
5545 BTRFS_I(inode)->reserved_extents)
5546 return drop_inode_space;
5548 dropped_extents = BTRFS_I(inode)->reserved_extents -
5549 BTRFS_I(inode)->outstanding_extents;
5550 BTRFS_I(inode)->reserved_extents -= dropped_extents;
5551 return dropped_extents + drop_inode_space;
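/*
 * Illustrative note (not part of the original source): num_extents is the
 * round-up division of num_bytes by BTRFS_MAX_EXTENT_SIZE, so dropping a
 * range of 2 * BTRFS_MAX_EXTENT_SIZE + 1 bytes accounts for 3 outstanding
 * extents.  Only when outstanding_extents drops below reserved_extents does
 * this return extent reservations to release, on top of the optional
 * inode-update reservation.
 */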
5555 * calc_csum_metadata_size - return the amount of metadata space that must be
5556 * reserved/freed for the given bytes.
5557 * @inode: the inode we're manipulating
5558 * @num_bytes: the number of bytes in question
5559 * @reserve: 1 if we are reserving space, 0 if we are freeing space
5561 * This adjusts the number of csum_bytes in the inode and then returns the
5562 * correct amount of metadata that must either be reserved or freed. We
5563 * calculate how many checksums we can fit into one leaf and then divide the
5564 * number of bytes that will need to be checksummed by this value to figure out
5565 * how many checksums will be required. If we are adding bytes then the number
5566 * may go up and we will return the number of additional bytes that must be
5567 * reserved. If it is going down we will return the number of bytes that must
5570 * This must be called with BTRFS_I(inode)->lock held.
5572 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5575 struct btrfs_root *root = BTRFS_I(inode)->root;
5576 u64 old_csums, num_csums;
5578 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5579 BTRFS_I(inode)->csum_bytes == 0)
5582 old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5584 BTRFS_I(inode)->csum_bytes += num_bytes;
5586 BTRFS_I(inode)->csum_bytes -= num_bytes;
5587 num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5589 /* No change, no need to reserve more */
5590 if (old_csums == num_csums)
5594 return btrfs_calc_trans_metadata_size(root,
5595 num_csums - old_csums);
5597 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
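/*
 * Illustrative note (not part of the original source): only whole checksum
 * leaves matter here.  If adding num_bytes pushes the inode's csum_bytes
 * across a leaf boundary so that btrfs_csum_bytes_to_leaves() goes from 2 to
 * 3, this returns the metadata size of one leaf's worth of items; if the
 * leaf count is unchanged it returns 0.
 */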
5600 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5602 struct btrfs_root *root = BTRFS_I(inode)->root;
5603 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5606 unsigned nr_extents = 0;
5607 int extra_reserve = 0;
5608 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5610 bool delalloc_lock = true;
5614 /* If we are a free space inode we need to not flush since we will be in
5615 * the middle of a transaction commit. We also don't need the delalloc
5616 * mutex since we won't race with anybody. We need this mostly to make
5617 * lockdep shut its filthy mouth.
5619 if (btrfs_is_free_space_inode(inode)) {
5620 flush = BTRFS_RESERVE_NO_FLUSH;
5621 delalloc_lock = false;
5624 if (flush != BTRFS_RESERVE_NO_FLUSH &&
5625 btrfs_transaction_in_commit(root->fs_info))
5626 schedule_timeout(1);
5629 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5631 num_bytes = ALIGN(num_bytes, root->sectorsize);
5633 spin_lock(&BTRFS_I(inode)->lock);
5634 nr_extents = (unsigned)div64_u64(num_bytes +
5635 BTRFS_MAX_EXTENT_SIZE - 1,
5636 BTRFS_MAX_EXTENT_SIZE);
5637 BTRFS_I(inode)->outstanding_extents += nr_extents;
5640 if (BTRFS_I(inode)->outstanding_extents >
5641 BTRFS_I(inode)->reserved_extents)
5642 nr_extents = BTRFS_I(inode)->outstanding_extents -
5643 BTRFS_I(inode)->reserved_extents;
5646 * Add an item to reserve for updating the inode when we complete the
5649 if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5650 &BTRFS_I(inode)->runtime_flags)) {
5655 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5656 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5657 csum_bytes = BTRFS_I(inode)->csum_bytes;
5658 spin_unlock(&BTRFS_I(inode)->lock);
5660 if (root->fs_info->quota_enabled) {
5661 ret = btrfs_qgroup_reserve_meta(root,
5662 nr_extents * root->nodesize);
5667 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5668 if (unlikely(ret)) {
5669 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5673 spin_lock(&BTRFS_I(inode)->lock);
5674 if (extra_reserve) {
5675 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5676 &BTRFS_I(inode)->runtime_flags);
5679 BTRFS_I(inode)->reserved_extents += nr_extents;
5680 spin_unlock(&BTRFS_I(inode)->lock);
5683 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5686 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5687 btrfs_ino(inode), to_reserve, 1);
5688 block_rsv_add_bytes(block_rsv, to_reserve, 1);
5693 spin_lock(&BTRFS_I(inode)->lock);
5694 dropped = drop_outstanding_extent(inode, num_bytes);
5696 * If the inode's csum_bytes is the same as the original
5697 * csum_bytes then we know we haven't raced with any free()ers
5698 * so we can just reduce our inode's csum bytes and carry on.
5700 if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5701 calc_csum_metadata_size(inode, num_bytes, 0);
5703 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5707 * This is tricky, but first we need to figure out how much we
5708 * freed from any free-ers that occurred during this
5709 * reservation, so we reset ->csum_bytes to the csum_bytes
5710 * before we dropped our lock, and then call the free for the
5711 * number of bytes that were freed while we were trying our
5714 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5715 BTRFS_I(inode)->csum_bytes = csum_bytes;
5716 to_free = calc_csum_metadata_size(inode, bytes, 0);
5720 * Now we need to see how much we would have freed had we not
5721 * been making this reservation and our ->csum_bytes were not
5722 * artificially inflated.
5724 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5725 bytes = csum_bytes - orig_csum_bytes;
5726 bytes = calc_csum_metadata_size(inode, bytes, 0);
5729 * Now reset ->csum_bytes to what it should be. If bytes is
5730 * more than to_free then we would have freed more space had we
5731 * not had an artificially high ->csum_bytes, so we need to free
5732 * the remainder. If bytes is the same or less then we don't
5733 * need to do anything, the other free-ers did the correct
5736 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5737 if (bytes > to_free)
5738 to_free = bytes - to_free;
5742 spin_unlock(&BTRFS_I(inode)->lock);
5744 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5747 btrfs_block_rsv_release(root, block_rsv, to_free);
5748 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5749 btrfs_ino(inode), to_free, 0);
5752 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5757 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5758 * @inode: the inode to release the reservation for
5759 * @num_bytes: the number of bytes we're releasing
5761 * This will release the metadata reservation for an inode. This can be called
5762 * once we complete IO for a given set of bytes to release their metadata
5765 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5767 struct btrfs_root *root = BTRFS_I(inode)->root;
5771 num_bytes = ALIGN(num_bytes, root->sectorsize);
5772 spin_lock(&BTRFS_I(inode)->lock);
5773 dropped = drop_outstanding_extent(inode, num_bytes);
5776 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5777 spin_unlock(&BTRFS_I(inode)->lock);
5779 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5781 if (btrfs_test_is_dummy_root(root))
5784 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5785 btrfs_ino(inode), to_free, 0);
5787 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5792 * btrfs_delalloc_reserve_space - reserve data and metadata space for
5794 * @inode: inode we're writing to
5795 * @start: start of the range we are writing to
5796 * @len: the length of the range we are writing
5798 * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
5800 * This will do the following things
5802 * o reserve space in data space info for num bytes
5803 * and reserve the corresponding qgroup space
5804 * (Done in check_data_free_space)
5806 * o reserve space for metadata space, based on the number of outstanding
5807 * extents and how much csums will be needed
5808 * also reserve metadata space in a per root over-reserve method.
5809 * o add to the inodes->delalloc_bytes
5810 * o add it to the fs_info's delalloc inodes list.
5811 * (Above 3 all done in delalloc_reserve_metadata)
5813 * Return 0 for success
5814 * Return <0 for error (-ENOSPC or -EDQUOT)
5816 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
5820 ret = btrfs_check_data_free_space(inode, start, len);
5823 ret = btrfs_delalloc_reserve_metadata(inode, len);
5825 btrfs_free_reserved_data_space(inode, start, len);
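/*
 * Usage sketch (illustrative, not part of the original source): a buffered
 * write path would pair this with btrfs_delalloc_release_space() below, e.g.
 *
 *	ret = btrfs_delalloc_reserve_space(inode, start, len);
 *	if (ret)
 *		return ret;
 *	... dirty the pages / set up the delalloc range ...
 *	and on failure before the space is actually used:
 *	btrfs_delalloc_release_space(inode, start, len);
 */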
5830 * btrfs_delalloc_release_space - release data and metadata space for delalloc
5831 * @inode: inode we're releasing space for
5832 * @start: start position of the space already reserved
5833 * @len: the len of the space already reserved
5835 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
5836 * called in the case that we don't need the metadata AND data reservations
5837 * anymore, for example if there is an error or we insert an inline extent.
5839 * This function will release the metadata space that was not used and will
5840 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5841 * list if there are no delalloc bytes left.
5842 * Also it will handle the qgroup reserved space.
5844 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
5846 btrfs_delalloc_release_metadata(inode, len);
5847 btrfs_free_reserved_data_space(inode, start, len);
5850 static int update_block_group(struct btrfs_trans_handle *trans,
5851 struct btrfs_root *root, u64 bytenr,
5852 u64 num_bytes, int alloc)
5854 struct btrfs_block_group_cache *cache = NULL;
5855 struct btrfs_fs_info *info = root->fs_info;
5856 u64 total = num_bytes;
5861 /* block accounting for super block */
5862 spin_lock(&info->delalloc_root_lock);
5863 old_val = btrfs_super_bytes_used(info->super_copy);
5865 old_val += num_bytes;
5867 old_val -= num_bytes;
5868 btrfs_set_super_bytes_used(info->super_copy, old_val);
5869 spin_unlock(&info->delalloc_root_lock);
5872 cache = btrfs_lookup_block_group(info, bytenr);
5875 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5876 BTRFS_BLOCK_GROUP_RAID1 |
5877 BTRFS_BLOCK_GROUP_RAID10))
5882 * If this block group has free space cache written out, we
5883 * need to make sure to load it if we are removing space. This
5884 * is because we need the unpinning stage to actually add the
5885 * space back to the block group, otherwise we will leak space.
5887 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5888 cache_block_group(cache, 1);
5890 byte_in_group = bytenr - cache->key.objectid;
5891 WARN_ON(byte_in_group > cache->key.offset);
5893 spin_lock(&cache->space_info->lock);
5894 spin_lock(&cache->lock);
5896 if (btrfs_test_opt(root, SPACE_CACHE) &&
5897 cache->disk_cache_state < BTRFS_DC_CLEAR)
5898 cache->disk_cache_state = BTRFS_DC_CLEAR;
5900 old_val = btrfs_block_group_used(&cache->item);
5901 num_bytes = min(total, cache->key.offset - byte_in_group);
5903 old_val += num_bytes;
5904 btrfs_set_block_group_used(&cache->item, old_val);
5905 cache->reserved -= num_bytes;
5906 cache->space_info->bytes_reserved -= num_bytes;
5907 cache->space_info->bytes_used += num_bytes;
5908 cache->space_info->disk_used += num_bytes * factor;
5909 spin_unlock(&cache->lock);
5910 spin_unlock(&cache->space_info->lock);
5912 old_val -= num_bytes;
5913 btrfs_set_block_group_used(&cache->item, old_val);
5914 cache->pinned += num_bytes;
5915 cache->space_info->bytes_pinned += num_bytes;
5916 cache->space_info->bytes_used -= num_bytes;
5917 cache->space_info->disk_used -= num_bytes * factor;
5918 spin_unlock(&cache->lock);
5919 spin_unlock(&cache->space_info->lock);
5921 set_extent_dirty(info->pinned_extents,
5922 bytenr, bytenr + num_bytes - 1,
5923 GFP_NOFS | __GFP_NOFAIL);
5926 spin_lock(&trans->transaction->dirty_bgs_lock);
5927 if (list_empty(&cache->dirty_list)) {
5928 list_add_tail(&cache->dirty_list,
5929 &trans->transaction->dirty_bgs);
5930 trans->transaction->num_dirty_bgs++;
5931 btrfs_get_block_group(cache);
5933 spin_unlock(&trans->transaction->dirty_bgs_lock);
5936 * No longer have used bytes in this block group, queue it for
5937 * deletion. We do this after adding the block group to the
5938 * dirty list to avoid races between cleaner kthread and space
5941 if (!alloc && old_val == 0) {
5942 spin_lock(&info->unused_bgs_lock);
5943 if (list_empty(&cache->bg_list)) {
5944 btrfs_get_block_group(cache);
5945 list_add_tail(&cache->bg_list,
5948 spin_unlock(&info->unused_bgs_lock);
5951 btrfs_put_block_group(cache);
5953 bytenr += num_bytes;
5958 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5960 struct btrfs_block_group_cache *cache;
5963 spin_lock(&root->fs_info->block_group_cache_lock);
5964 bytenr = root->fs_info->first_logical_byte;
5965 spin_unlock(&root->fs_info->block_group_cache_lock);
5967 if (bytenr < (u64)-1)
5970 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5974 bytenr = cache->key.objectid;
5975 btrfs_put_block_group(cache);
5980 static int pin_down_extent(struct btrfs_root *root,
5981 struct btrfs_block_group_cache *cache,
5982 u64 bytenr, u64 num_bytes, int reserved)
5984 spin_lock(&cache->space_info->lock);
5985 spin_lock(&cache->lock);
5986 cache->pinned += num_bytes;
5987 cache->space_info->bytes_pinned += num_bytes;
5989 cache->reserved -= num_bytes;
5990 cache->space_info->bytes_reserved -= num_bytes;
5992 spin_unlock(&cache->lock);
5993 spin_unlock(&cache->space_info->lock);
5995 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5996 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5998 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
6003 * this function must be called within a transaction
6005 int btrfs_pin_extent(struct btrfs_root *root,
6006 u64 bytenr, u64 num_bytes, int reserved)
6008 struct btrfs_block_group_cache *cache;
6010 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6011 BUG_ON(!cache); /* Logic error */
6013 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6015 btrfs_put_block_group(cache);
6020 * this function must be called within a transaction
6022 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6023 u64 bytenr, u64 num_bytes)
6025 struct btrfs_block_group_cache *cache;
6028 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6033 * pull in the free space cache (if any) so that our pin
6034 * removes the free space from the cache. We have load_only set
6035 * to one because the slow code to read in the free extents does check
6036 * the pinned extents.
6038 cache_block_group(cache, 1);
6040 pin_down_extent(root, cache, bytenr, num_bytes, 0);
6042 /* remove us from the free space cache (if we're there at all) */
6043 ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6044 btrfs_put_block_group(cache);
6048 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6051 struct btrfs_block_group_cache *block_group;
6052 struct btrfs_caching_control *caching_ctl;
6054 block_group = btrfs_lookup_block_group(root->fs_info, start);
6058 cache_block_group(block_group, 0);
6059 caching_ctl = get_caching_control(block_group);
6063 BUG_ON(!block_group_cache_done(block_group));
6064 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6066 mutex_lock(&caching_ctl->mutex);
6068 if (start >= caching_ctl->progress) {
6069 ret = add_excluded_extent(root, start, num_bytes);
6070 } else if (start + num_bytes <= caching_ctl->progress) {
6071 ret = btrfs_remove_free_space(block_group,
6074 num_bytes = caching_ctl->progress - start;
6075 ret = btrfs_remove_free_space(block_group,
6080 num_bytes = (start + num_bytes) -
6081 caching_ctl->progress;
6082 start = caching_ctl->progress;
6083 ret = add_excluded_extent(root, start, num_bytes);
6086 mutex_unlock(&caching_ctl->mutex);
6087 put_caching_control(caching_ctl);
6089 btrfs_put_block_group(block_group);
6093 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6094 struct extent_buffer *eb)
6096 struct btrfs_file_extent_item *item;
6097 struct btrfs_key key;
6101 if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6104 for (i = 0; i < btrfs_header_nritems(eb); i++) {
6105 btrfs_item_key_to_cpu(eb, &key, i);
6106 if (key.type != BTRFS_EXTENT_DATA_KEY)
6108 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6109 found_type = btrfs_file_extent_type(eb, item);
6110 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6112 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6114 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6115 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6116 __exclude_logged_extent(log, key.objectid, key.offset);
6123 * btrfs_update_reserved_bytes - update the block_group and space info counters
6124 * @cache: The cache we are manipulating
6125 * @num_bytes: The number of bytes in question
6126 * @reserve: One of the reservation enums
6127 * @delalloc: The blocks are allocated for the delalloc write
6129 * This is called by the allocator when it reserves space, or by somebody who is
6130 * freeing space that was never actually used on disk. For example if you
6131 * reserve some space for a new leaf in transaction A and before transaction A
6132 * commits you free that leaf, you call this with reserve set to 0 in order to
6133 * clear the reservation.
6135 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6136 * ENOSPC accounting. For data we handle the reservation through clearing the
6137 * delalloc bits in the io_tree. We have to do this since we could end up
6138 * allocating less disk space for the amount of data we have reserved in the
6139 * case of compression.
6141 * If this is a reservation and the block group has become read only we cannot
6142 * make the reservation and return -EAGAIN, otherwise this function always
6145 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6146 u64 num_bytes, int reserve, int delalloc)
6148 struct btrfs_space_info *space_info = cache->space_info;
6151 spin_lock(&space_info->lock);
6152 spin_lock(&cache->lock);
6153 if (reserve != RESERVE_FREE) {
6157 cache->reserved += num_bytes;
6158 space_info->bytes_reserved += num_bytes;
6159 if (reserve == RESERVE_ALLOC) {
6160 trace_btrfs_space_reservation(cache->fs_info,
6161 "space_info", space_info->flags,
6163 space_info->bytes_may_use -= num_bytes;
6167 cache->delalloc_bytes += num_bytes;
6171 space_info->bytes_readonly += num_bytes;
6172 cache->reserved -= num_bytes;
6173 space_info->bytes_reserved -= num_bytes;
6176 cache->delalloc_bytes -= num_bytes;
6178 spin_unlock(&cache->lock);
6179 spin_unlock(&space_info->lock);
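/*
 * Illustrative note (not part of the original source): the RESERVE_FREE side
 * of this can be seen in btrfs_free_tree_block() later in this file, which
 * drops the reservation for a COWed buffer that was never written:
 *
 *	btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
 */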
6183 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6184 struct btrfs_root *root)
6186 struct btrfs_fs_info *fs_info = root->fs_info;
6187 struct btrfs_caching_control *next;
6188 struct btrfs_caching_control *caching_ctl;
6189 struct btrfs_block_group_cache *cache;
6191 down_write(&fs_info->commit_root_sem);
6193 list_for_each_entry_safe(caching_ctl, next,
6194 &fs_info->caching_block_groups, list) {
6195 cache = caching_ctl->block_group;
6196 if (block_group_cache_done(cache)) {
6197 cache->last_byte_to_unpin = (u64)-1;
6198 list_del_init(&caching_ctl->list);
6199 put_caching_control(caching_ctl);
6201 cache->last_byte_to_unpin = caching_ctl->progress;
6205 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6206 fs_info->pinned_extents = &fs_info->freed_extents[1];
6208 fs_info->pinned_extents = &fs_info->freed_extents[0];
6210 up_write(&fs_info->commit_root_sem);
6212 update_global_block_rsv(fs_info);
6216 * Returns the free cluster for the given space info and sets empty_cluster to
6217 * what it should be based on the mount options.
6219 static struct btrfs_free_cluster *
6220 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6223 struct btrfs_free_cluster *ret = NULL;
6224 bool ssd = btrfs_test_opt(root, SSD);
6227 if (btrfs_mixed_space_info(space_info))
6231 *empty_cluster = 2 * 1024 * 1024;
6232 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6233 ret = &root->fs_info->meta_alloc_cluster;
6235 *empty_cluster = 64 * 1024;
6236 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6237 ret = &root->fs_info->data_alloc_cluster;
6243 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6244 const bool return_free_space)
6246 struct btrfs_fs_info *fs_info = root->fs_info;
6247 struct btrfs_block_group_cache *cache = NULL;
6248 struct btrfs_space_info *space_info;
6249 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6250 struct btrfs_free_cluster *cluster = NULL;
6252 u64 total_unpinned = 0;
6253 u64 empty_cluster = 0;
6256 while (start <= end) {
6259 start >= cache->key.objectid + cache->key.offset) {
6261 btrfs_put_block_group(cache);
6263 cache = btrfs_lookup_block_group(fs_info, start);
6264 BUG_ON(!cache); /* Logic error */
6266 cluster = fetch_cluster_info(root,
6269 empty_cluster <<= 1;
6272 len = cache->key.objectid + cache->key.offset - start;
6273 len = min(len, end + 1 - start);
6275 if (start < cache->last_byte_to_unpin) {
6276 len = min(len, cache->last_byte_to_unpin - start);
6277 if (return_free_space)
6278 btrfs_add_free_space(cache, start, len);
6282 total_unpinned += len;
6283 space_info = cache->space_info;
6286 * If this space cluster has been marked as fragmented and we've
6287 * unpinned enough in this block group to potentially allow a
6288 * cluster to be created inside of it go ahead and clear the
6291 if (cluster && cluster->fragmented &&
6292 total_unpinned > empty_cluster) {
6293 spin_lock(&cluster->lock);
6294 cluster->fragmented = 0;
6295 spin_unlock(&cluster->lock);
6298 spin_lock(&space_info->lock);
6299 spin_lock(&cache->lock);
6300 cache->pinned -= len;
6301 space_info->bytes_pinned -= len;
6302 space_info->max_extent_size = 0;
6303 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6305 space_info->bytes_readonly += len;
6308 spin_unlock(&cache->lock);
6309 if (!readonly && global_rsv->space_info == space_info) {
6310 spin_lock(&global_rsv->lock);
6311 if (!global_rsv->full) {
6312 len = min(len, global_rsv->size -
6313 global_rsv->reserved);
6314 global_rsv->reserved += len;
6315 space_info->bytes_may_use += len;
6316 if (global_rsv->reserved >= global_rsv->size)
6317 global_rsv->full = 1;
6319 spin_unlock(&global_rsv->lock);
6321 spin_unlock(&space_info->lock);
6325 btrfs_put_block_group(cache);
6329 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6330 struct btrfs_root *root)
6332 struct btrfs_fs_info *fs_info = root->fs_info;
6333 struct btrfs_block_group_cache *block_group, *tmp;
6334 struct list_head *deleted_bgs;
6335 struct extent_io_tree *unpin;
6340 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6341 unpin = &fs_info->freed_extents[1];
6343 unpin = &fs_info->freed_extents[0];
6345 while (!trans->aborted) {
6346 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6347 ret = find_first_extent_bit(unpin, 0, &start, &end,
6348 EXTENT_DIRTY, NULL);
6350 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6354 if (btrfs_test_opt(root, DISCARD))
6355 ret = btrfs_discard_extent(root, start,
6356 end + 1 - start, NULL);
6358 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6359 unpin_extent_range(root, start, end, true);
6360 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6365 * Transaction is finished. We don't need the lock anymore. We
6366 * do need to clean up the block groups in case of a transaction
6369 deleted_bgs = &trans->transaction->deleted_bgs;
6370 list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6374 if (!trans->aborted)
6375 ret = btrfs_discard_extent(root,
6376 block_group->key.objectid,
6377 block_group->key.offset,
6380 list_del_init(&block_group->bg_list);
6381 btrfs_put_block_group_trimming(block_group);
6382 btrfs_put_block_group(block_group);
6385 const char *errstr = btrfs_decode_error(ret);
6387 "Discard failed while removing blockgroup: errno=%d %s\n",
6395 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6396 u64 owner, u64 root_objectid)
6398 struct btrfs_space_info *space_info;
6401 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6402 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6403 flags = BTRFS_BLOCK_GROUP_SYSTEM;
6405 flags = BTRFS_BLOCK_GROUP_METADATA;
6407 flags = BTRFS_BLOCK_GROUP_DATA;
6410 space_info = __find_space_info(fs_info, flags);
6411 BUG_ON(!space_info); /* Logic bug */
6412 percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6416 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6417 struct btrfs_root *root,
6418 struct btrfs_delayed_ref_node *node, u64 parent,
6419 u64 root_objectid, u64 owner_objectid,
6420 u64 owner_offset, int refs_to_drop,
6421 struct btrfs_delayed_extent_op *extent_op)
6423 struct btrfs_key key;
6424 struct btrfs_path *path;
6425 struct btrfs_fs_info *info = root->fs_info;
6426 struct btrfs_root *extent_root = info->extent_root;
6427 struct extent_buffer *leaf;
6428 struct btrfs_extent_item *ei;
6429 struct btrfs_extent_inline_ref *iref;
6432 int extent_slot = 0;
6433 int found_extent = 0;
6437 u64 bytenr = node->bytenr;
6438 u64 num_bytes = node->num_bytes;
6440 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6443 path = btrfs_alloc_path();
6448 path->leave_spinning = 1;
6450 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6451 BUG_ON(!is_data && refs_to_drop != 1);
6454 skinny_metadata = 0;
6456 ret = lookup_extent_backref(trans, extent_root, path, &iref,
6457 bytenr, num_bytes, parent,
6458 root_objectid, owner_objectid,
6461 extent_slot = path->slots[0];
6462 while (extent_slot >= 0) {
6463 btrfs_item_key_to_cpu(path->nodes[0], &key,
6465 if (key.objectid != bytenr)
6467 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6468 key.offset == num_bytes) {
6472 if (key.type == BTRFS_METADATA_ITEM_KEY &&
6473 key.offset == owner_objectid) {
6477 if (path->slots[0] - extent_slot > 5)
6481 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6482 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6483 if (found_extent && item_size < sizeof(*ei))
6486 if (!found_extent) {
6488 ret = remove_extent_backref(trans, extent_root, path,
6490 is_data, &last_ref);
6492 btrfs_abort_transaction(trans, extent_root, ret);
6495 btrfs_release_path(path);
6496 path->leave_spinning = 1;
6498 key.objectid = bytenr;
6499 key.type = BTRFS_EXTENT_ITEM_KEY;
6500 key.offset = num_bytes;
6502 if (!is_data && skinny_metadata) {
6503 key.type = BTRFS_METADATA_ITEM_KEY;
6504 key.offset = owner_objectid;
6507 ret = btrfs_search_slot(trans, extent_root,
6509 if (ret > 0 && skinny_metadata && path->slots[0]) {
6511 * Couldn't find our skinny metadata item,
6512 * see if we have ye olde extent item.
6515 btrfs_item_key_to_cpu(path->nodes[0], &key,
6517 if (key.objectid == bytenr &&
6518 key.type == BTRFS_EXTENT_ITEM_KEY &&
6519 key.offset == num_bytes)
6523 if (ret > 0 && skinny_metadata) {
6524 skinny_metadata = false;
6525 key.objectid = bytenr;
6526 key.type = BTRFS_EXTENT_ITEM_KEY;
6527 key.offset = num_bytes;
6528 btrfs_release_path(path);
6529 ret = btrfs_search_slot(trans, extent_root,
6534 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6537 btrfs_print_leaf(extent_root,
6541 btrfs_abort_transaction(trans, extent_root, ret);
6544 extent_slot = path->slots[0];
6546 } else if (WARN_ON(ret == -ENOENT)) {
6547 btrfs_print_leaf(extent_root, path->nodes[0]);
6549 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6550 bytenr, parent, root_objectid, owner_objectid,
6552 btrfs_abort_transaction(trans, extent_root, ret);
6555 btrfs_abort_transaction(trans, extent_root, ret);
6559 leaf = path->nodes[0];
6560 item_size = btrfs_item_size_nr(leaf, extent_slot);
6561 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6562 if (item_size < sizeof(*ei)) {
6563 BUG_ON(found_extent || extent_slot != path->slots[0]);
6564 ret = convert_extent_item_v0(trans, extent_root, path,
6567 btrfs_abort_transaction(trans, extent_root, ret);
6571 btrfs_release_path(path);
6572 path->leave_spinning = 1;
6574 key.objectid = bytenr;
6575 key.type = BTRFS_EXTENT_ITEM_KEY;
6576 key.offset = num_bytes;
6578 ret = btrfs_search_slot(trans, extent_root, &key, path,
6581 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6583 btrfs_print_leaf(extent_root, path->nodes[0]);
6586 btrfs_abort_transaction(trans, extent_root, ret);
6590 extent_slot = path->slots[0];
6591 leaf = path->nodes[0];
6592 item_size = btrfs_item_size_nr(leaf, extent_slot);
6595 BUG_ON(item_size < sizeof(*ei));
6596 ei = btrfs_item_ptr(leaf, extent_slot,
6597 struct btrfs_extent_item);
6598 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6599 key.type == BTRFS_EXTENT_ITEM_KEY) {
6600 struct btrfs_tree_block_info *bi;
6601 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6602 bi = (struct btrfs_tree_block_info *)(ei + 1);
6603 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6606 refs = btrfs_extent_refs(leaf, ei);
6607 if (refs < refs_to_drop) {
6608 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6609 "for bytenr %Lu", refs_to_drop, refs, bytenr);
6611 btrfs_abort_transaction(trans, extent_root, ret);
6614 refs -= refs_to_drop;
6618 __run_delayed_extent_op(extent_op, leaf, ei);
6620 * In the case of inline back ref, reference count will
6621 * be updated by remove_extent_backref
6624 BUG_ON(!found_extent);
6626 btrfs_set_extent_refs(leaf, ei, refs);
6627 btrfs_mark_buffer_dirty(leaf);
6630 ret = remove_extent_backref(trans, extent_root, path,
6632 is_data, &last_ref);
6634 btrfs_abort_transaction(trans, extent_root, ret);
6638 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6642 BUG_ON(is_data && refs_to_drop !=
6643 extent_data_ref_count(path, iref));
6645 BUG_ON(path->slots[0] != extent_slot);
6647 BUG_ON(path->slots[0] != extent_slot + 1);
6648 path->slots[0] = extent_slot;
6654 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6657 btrfs_abort_transaction(trans, extent_root, ret);
6660 btrfs_release_path(path);
6663 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6665 btrfs_abort_transaction(trans, extent_root, ret);
6670 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6672 btrfs_abort_transaction(trans, extent_root, ret);
6676 btrfs_release_path(path);
6679 btrfs_free_path(path);
6684 * when we free a block, it is possible (and likely) that we free the last
6685 * delayed ref for that extent as well. This searches the delayed ref tree for
6686 * a given extent, and if there are no other delayed refs to be processed, it
6687 * removes it from the tree.
6689 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6690 struct btrfs_root *root, u64 bytenr)
6692 struct btrfs_delayed_ref_head *head;
6693 struct btrfs_delayed_ref_root *delayed_refs;
6696 delayed_refs = &trans->transaction->delayed_refs;
6697 spin_lock(&delayed_refs->lock);
6698 head = btrfs_find_delayed_ref_head(trans, bytenr);
6700 goto out_delayed_unlock;
6702 spin_lock(&head->lock);
6703 if (!list_empty(&head->ref_list))
6706 if (head->extent_op) {
6707 if (!head->must_insert_reserved)
6709 btrfs_free_delayed_extent_op(head->extent_op);
6710 head->extent_op = NULL;
6714 * waiting for the lock here would deadlock. If someone else has it
6715 * locked they are already in the process of dropping it anyway
6717 if (!mutex_trylock(&head->mutex))
6721 * at this point we have a head with no other entries. Go
6722 * ahead and process it.
6724 head->node.in_tree = 0;
6725 rb_erase(&head->href_node, &delayed_refs->href_root);
6727 atomic_dec(&delayed_refs->num_entries);
6730 * we don't take a ref on the node because we're removing it from the
6731 * tree, so we just steal the ref the tree was holding.
6733 delayed_refs->num_heads--;
6734 if (head->processing == 0)
6735 delayed_refs->num_heads_ready--;
6736 head->processing = 0;
6737 spin_unlock(&head->lock);
6738 spin_unlock(&delayed_refs->lock);
6740 BUG_ON(head->extent_op);
6741 if (head->must_insert_reserved)
6744 mutex_unlock(&head->mutex);
6745 btrfs_put_delayed_ref(&head->node);
6748 spin_unlock(&head->lock);
6751 spin_unlock(&delayed_refs->lock);
6755 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6756 struct btrfs_root *root,
6757 struct extent_buffer *buf,
6758 u64 parent, int last_ref)
6763 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6764 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6765 buf->start, buf->len,
6766 parent, root->root_key.objectid,
6767 btrfs_header_level(buf),
6768 BTRFS_DROP_DELAYED_REF, NULL);
6769 BUG_ON(ret); /* -ENOMEM */
6775 if (btrfs_header_generation(buf) == trans->transid) {
6776 struct btrfs_block_group_cache *cache;
6778 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6779 ret = check_ref_cleanup(trans, root, buf->start);
6784 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6786 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6787 pin_down_extent(root, cache, buf->start, buf->len, 1);
6788 btrfs_put_block_group(cache);
6792 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6794 btrfs_add_free_space(cache, buf->start, buf->len);
6795 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6796 btrfs_put_block_group(cache);
6797 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6802 add_pinned_bytes(root->fs_info, buf->len,
6803 btrfs_header_level(buf),
6804 root->root_key.objectid);
6807 * Deleting the buffer, clear the corrupt flag since it doesn't matter
6810 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6813 /* Can return -ENOMEM */
6814 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6815 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6816 u64 owner, u64 offset)
6819 struct btrfs_fs_info *fs_info = root->fs_info;
6821 if (btrfs_test_is_dummy_root(root))
6824 add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6827 * tree log blocks never actually go into the extent allocation
6828 * tree, just update pinning info and exit early.
6830 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6831 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6832 /* unlocks the pinned mutex */
6833 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6835 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6836 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6838 parent, root_objectid, (int)owner,
6839 BTRFS_DROP_DELAYED_REF, NULL);
6841 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6843 parent, root_objectid, owner,
6845 BTRFS_DROP_DELAYED_REF, NULL);
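/*
 * Editor's illustrative sketch, not upstream code: dropping one reference
 * on an ordinary (non-shared) file data extent.  'bytenr', 'num_bytes',
 * 'ino' and 'file_offset' are placeholders for the values recorded in the
 * file extent item being removed.
 *
 *	ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
 *				0,			(parent: keyed backref)
 *				root->root_key.objectid,
 *				ino,			(owner: inode number)
 *				file_offset);
 *
 * Because the owner is >= BTRFS_FIRST_FREE_OBJECTID this takes the
 * btrfs_add_delayed_data_ref() branch above; the extent tree itself is only
 * modified later, when the delayed ref is run.
 */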
6851 * when we wait for progress in the block group caching, it's because
6852 * our allocation attempt failed at least once. So, we must sleep
6853 * and let some progress happen before we try again.
6855 * This function will sleep at least once waiting for new free space to
6856 * show up, and then it will check the block group free space numbers
6857 * for our min num_bytes. Another option is to have it go ahead
6858 * and look in the rbtree for a free extent of a given size, but this
6861 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6862 * any of the information in this block group.
6864 static noinline void
6865 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6868 struct btrfs_caching_control *caching_ctl;
6870 caching_ctl = get_caching_control(cache);
6874 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6875 (cache->free_space_ctl->free_space >= num_bytes));
6877 put_caching_control(caching_ctl);
6881 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6883 struct btrfs_caching_control *caching_ctl;
6886 caching_ctl = get_caching_control(cache);
6888 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6890 wait_event(caching_ctl->wait, block_group_cache_done(cache));
6891 if (cache->cached == BTRFS_CACHE_ERROR)
6893 put_caching_control(caching_ctl);
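/*
 * Editor's illustrative sketch, not upstream code: a caller that needs the
 * block group fully cached before trusting its free space counters.
 *
 *	ret = wait_block_group_cache_done(cache);
 *	if (ret)		(-EIO: the caching thread hit an error)
 *		return ret;
 *	... use cache->free_space_ctl->free_space ...
 *
 * wait_block_group_cache_progress() above is the weaker variant: it only
 * waits until at least num_bytes of free space has shown up (or caching
 * finishes), which is why its callers must still check for
 * BTRFS_CACHE_ERROR themselves.
 */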
6897 int __get_raid_index(u64 flags)
6899 if (flags & BTRFS_BLOCK_GROUP_RAID10)
6900 return BTRFS_RAID_RAID10;
6901 else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6902 return BTRFS_RAID_RAID1;
6903 else if (flags & BTRFS_BLOCK_GROUP_DUP)
6904 return BTRFS_RAID_DUP;
6905 else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6906 return BTRFS_RAID_RAID0;
6907 else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6908 return BTRFS_RAID_RAID5;
6909 else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6910 return BTRFS_RAID_RAID6;
6912 return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6915 int get_block_group_index(struct btrfs_block_group_cache *cache)
6917 return __get_raid_index(cache->flags);
6920 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6921 [BTRFS_RAID_RAID10] = "raid10",
6922 [BTRFS_RAID_RAID1] = "raid1",
6923 [BTRFS_RAID_DUP] = "dup",
6924 [BTRFS_RAID_RAID0] = "raid0",
6925 [BTRFS_RAID_SINGLE] = "single",
6926 [BTRFS_RAID_RAID5] = "raid5",
6927 [BTRFS_RAID_RAID6] = "raid6",
6930 static const char *get_raid_name(enum btrfs_raid_types type)
6932 if (type >= BTRFS_NR_RAID_TYPES)
6935 return btrfs_raid_type_names[type];
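/*
 * Editor's illustrative sketch, not upstream code: how the two helpers
 * above are typically combined.
 *
 *	u64 flags = BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID10;
 *	int index = __get_raid_index(flags);		(BTRFS_RAID_RAID10)
 *	const char *name = get_raid_name(index);	("raid10")
 *
 * Flags with none of the raid bits set fall through to BTRFS_RAID_SINGLE,
 * which get_raid_name() reports as "single".
 */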
6938 enum btrfs_loop_type {
6939 LOOP_CACHING_NOWAIT = 0,
6940 LOOP_CACHING_WAIT = 1,
6941 LOOP_ALLOC_CHUNK = 2,
6942 LOOP_NO_EMPTY_SIZE = 3,
6946 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6950 down_read(&cache->data_rwsem);
6954 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6957 btrfs_get_block_group(cache);
6959 down_read(&cache->data_rwsem);
6962 static struct btrfs_block_group_cache *
6963 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6964 struct btrfs_free_cluster *cluster,
6967 struct btrfs_block_group_cache *used_bg;
6968 bool locked = false;
6970 spin_lock(&cluster->refill_lock);
6972 if (used_bg == cluster->block_group)
6975 up_read(&used_bg->data_rwsem);
6976 btrfs_put_block_group(used_bg);
6979 used_bg = cluster->block_group;
6983 if (used_bg == block_group)
6986 btrfs_get_block_group(used_bg);
6991 if (down_read_trylock(&used_bg->data_rwsem))
6994 spin_unlock(&cluster->refill_lock);
6995 down_read(&used_bg->data_rwsem);
7001 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7005 up_read(&cache->data_rwsem);
7006 btrfs_put_block_group(cache);
7010 * walks the btree of allocated extents and finds a hole of a given size.
7011 * The key ins is changed to record the hole:
7012 * ins->objectid == start position
7013 * ins->type == BTRFS_EXTENT_ITEM_KEY
7014 * ins->offset == the size of the hole.
7015 * Any available blocks before search_start are skipped.
7017 * If there is no suitable free space, we will record the max size of
7018 * the free space extent currently.
7020 static noinline int find_free_extent(struct btrfs_root *orig_root,
7021 u64 num_bytes, u64 empty_size,
7022 u64 hint_byte, struct btrfs_key *ins,
7023 u64 flags, int delalloc)
7026 struct btrfs_root *root = orig_root->fs_info->extent_root;
7027 struct btrfs_free_cluster *last_ptr = NULL;
7028 struct btrfs_block_group_cache *block_group = NULL;
7029 u64 search_start = 0;
7030 u64 max_extent_size = 0;
7031 u64 empty_cluster = 0;
7032 struct btrfs_space_info *space_info;
7034 int index = __get_raid_index(flags);
7035 int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7036 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7037 bool failed_cluster_refill = false;
7038 bool failed_alloc = false;
7039 bool use_cluster = true;
7040 bool have_caching_bg = false;
7041 bool orig_have_caching_bg = false;
7042 bool full_search = false;
7044 WARN_ON(num_bytes < root->sectorsize);
7045 ins->type = BTRFS_EXTENT_ITEM_KEY;
7049 trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7051 space_info = __find_space_info(root->fs_info, flags);
7053 btrfs_err(root->fs_info, "No space info for %llu", flags);
7058 * If our free space is heavily fragmented we may not be able to make
7059 * big contiguous allocations, so instead of doing the expensive search
7060 * for free space, simply return ENOSPC with our max_extent_size so we
7061 * can go ahead and search for a more manageable chunk.
7063 * If our max_extent_size is large enough for our allocation simply
7064 * disable clustering since we will likely not be able to find enough
7065 * space to create a cluster and induce latency trying.
7067 if (unlikely(space_info->max_extent_size)) {
7068 spin_lock(&space_info->lock);
7069 if (space_info->max_extent_size &&
7070 num_bytes > space_info->max_extent_size) {
7071 ins->offset = space_info->max_extent_size;
7072 spin_unlock(&space_info->lock);
7074 } else if (space_info->max_extent_size) {
7075 use_cluster = false;
7077 spin_unlock(&space_info->lock);
7080 last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7082 spin_lock(&last_ptr->lock);
7083 if (last_ptr->block_group)
7084 hint_byte = last_ptr->window_start;
7085 if (last_ptr->fragmented) {
7087 * We still set window_start so we can keep track of the
7088 * last place we found an allocation to try and save
7091 hint_byte = last_ptr->window_start;
7092 use_cluster = false;
7094 spin_unlock(&last_ptr->lock);
7097 search_start = max(search_start, first_logical_byte(root, 0));
7098 search_start = max(search_start, hint_byte);
7099 if (search_start == hint_byte) {
7100 block_group = btrfs_lookup_block_group(root->fs_info,
7103 * we don't want to use the block group if it doesn't match our
7104 * allocation bits, or if it's not cached.
7106 * However if we are re-searching with an ideal block group
7107 * picked out then we don't care that the block group is cached.
7109 if (block_group && block_group_bits(block_group, flags) &&
7110 block_group->cached != BTRFS_CACHE_NO) {
7111 down_read(&space_info->groups_sem);
7112 if (list_empty(&block_group->list) ||
7115 * someone is removing this block group,
7116 * we can't jump into the have_block_group
7117 * target because our list pointers are not
7120 btrfs_put_block_group(block_group);
7121 up_read(&space_info->groups_sem);
7123 index = get_block_group_index(block_group);
7124 btrfs_lock_block_group(block_group, delalloc);
7125 goto have_block_group;
7127 } else if (block_group) {
7128 btrfs_put_block_group(block_group);
7132 have_caching_bg = false;
7133 if (index == 0 || index == __get_raid_index(flags))
7135 down_read(&space_info->groups_sem);
7136 list_for_each_entry(block_group, &space_info->block_groups[index],
7141 btrfs_grab_block_group(block_group, delalloc);
7142 search_start = block_group->key.objectid;
7145 * this can happen if we end up cycling through all the
7146 * raid types, but we want to make sure we only allocate
7147 * for the proper type.
7149 if (!block_group_bits(block_group, flags)) {
7150 u64 extra = BTRFS_BLOCK_GROUP_DUP |
7151 BTRFS_BLOCK_GROUP_RAID1 |
7152 BTRFS_BLOCK_GROUP_RAID5 |
7153 BTRFS_BLOCK_GROUP_RAID6 |
7154 BTRFS_BLOCK_GROUP_RAID10;
7157 * if they asked for extra copies and this block group
7158 * doesn't provide them, bail. This does allow us to
7159 * fill raid0 from raid1.
7161 if ((flags & extra) && !(block_group->flags & extra))
7166 cached = block_group_cache_done(block_group);
7167 if (unlikely(!cached)) {
7168 have_caching_bg = true;
7169 ret = cache_block_group(block_group, 0);
7174 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7176 if (unlikely(block_group->ro))
7180 * Ok we want to try and use the cluster allocator, so
7183 if (last_ptr && use_cluster) {
7184 struct btrfs_block_group_cache *used_block_group;
7185 unsigned long aligned_cluster;
7187 * the refill lock keeps out other
7188 * people trying to start a new cluster
7190 used_block_group = btrfs_lock_cluster(block_group,
7193 if (!used_block_group)
7194 goto refill_cluster;
7196 if (used_block_group != block_group &&
7197 (used_block_group->ro ||
7198 !block_group_bits(used_block_group, flags)))
7199 goto release_cluster;
7201 offset = btrfs_alloc_from_cluster(used_block_group,
7204 used_block_group->key.objectid,
7207 /* we have a block, we're done */
7208 spin_unlock(&last_ptr->refill_lock);
7209 trace_btrfs_reserve_extent_cluster(root,
7211 search_start, num_bytes);
7212 if (used_block_group != block_group) {
7213 btrfs_release_block_group(block_group,
7215 block_group = used_block_group;
7220 WARN_ON(last_ptr->block_group != used_block_group);
7222 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7223 * set up a new cluster, so let's just skip it
7224 * and let the allocator find whatever block
7225 * it can find. If we reach this point, we
7226 * will have tried the cluster allocator
7227 * plenty of times and not have found
7228 * anything, so we are likely way too
7229 * fragmented for the clustering stuff to find
7232 * However, if the cluster is taken from the
7233 * current block group, release the cluster
7234 * first, so that we stand a better chance of
7235 * succeeding in the unclustered
7237 if (loop >= LOOP_NO_EMPTY_SIZE &&
7238 used_block_group != block_group) {
7239 spin_unlock(&last_ptr->refill_lock);
7240 btrfs_release_block_group(used_block_group,
7242 goto unclustered_alloc;
7246 * this cluster didn't work out, free it and
7249 btrfs_return_cluster_to_free_space(NULL, last_ptr);
7251 if (used_block_group != block_group)
7252 btrfs_release_block_group(used_block_group,
7255 if (loop >= LOOP_NO_EMPTY_SIZE) {
7256 spin_unlock(&last_ptr->refill_lock);
7257 goto unclustered_alloc;
7260 aligned_cluster = max_t(unsigned long,
7261 empty_cluster + empty_size,
7262 block_group->full_stripe_len);
7264 /* allocate a cluster in this block group */
7265 ret = btrfs_find_space_cluster(root, block_group,
7266 last_ptr, search_start,
7271 * now pull our allocation out of this
7274 offset = btrfs_alloc_from_cluster(block_group,
7280 /* we found one, proceed */
7281 spin_unlock(&last_ptr->refill_lock);
7282 trace_btrfs_reserve_extent_cluster(root,
7283 block_group, search_start,
7287 } else if (!cached && loop > LOOP_CACHING_NOWAIT
7288 && !failed_cluster_refill) {
7289 spin_unlock(&last_ptr->refill_lock);
7291 failed_cluster_refill = true;
7292 wait_block_group_cache_progress(block_group,
7293 num_bytes + empty_cluster + empty_size);
7294 goto have_block_group;
7298 * at this point we either didn't find a cluster
7299 * or we weren't able to allocate a block from our
7300 * cluster. Free the cluster we've been trying
7301 * to use, and go to the next block group
7303 btrfs_return_cluster_to_free_space(NULL, last_ptr);
7304 spin_unlock(&last_ptr->refill_lock);
7310 * We are doing an unclustered alloc, set the fragmented flag so
7311 * we don't bother trying to set up a cluster again until we get
7314 if (unlikely(last_ptr)) {
7315 spin_lock(&last_ptr->lock);
7316 last_ptr->fragmented = 1;
7317 spin_unlock(&last_ptr->lock);
7319 spin_lock(&block_group->free_space_ctl->tree_lock);
7321 block_group->free_space_ctl->free_space <
7322 num_bytes + empty_cluster + empty_size) {
7323 if (block_group->free_space_ctl->free_space >
7326 block_group->free_space_ctl->free_space;
7327 spin_unlock(&block_group->free_space_ctl->tree_lock);
7330 spin_unlock(&block_group->free_space_ctl->tree_lock);
7332 offset = btrfs_find_space_for_alloc(block_group, search_start,
7333 num_bytes, empty_size,
7336 * If we didn't find a chunk, and we haven't failed on this
7337 * block group before, and this block group is in the middle of
7338 * caching and we are ok with waiting, then go ahead and wait
7339 * for progress to be made, and set failed_alloc to true.
7341 * If failed_alloc is true then we've already waited on this
7342 * block group once and should move on to the next block group.
7344 if (!offset && !failed_alloc && !cached &&
7345 loop > LOOP_CACHING_NOWAIT) {
7346 wait_block_group_cache_progress(block_group,
7347 num_bytes + empty_size);
7348 failed_alloc = true;
7349 goto have_block_group;
7350 } else if (!offset) {
7354 search_start = ALIGN(offset, root->stripesize);
7356 /* move on to the next group */
7357 if (search_start + num_bytes >
7358 block_group->key.objectid + block_group->key.offset) {
7359 btrfs_add_free_space(block_group, offset, num_bytes);
7363 if (offset < search_start)
7364 btrfs_add_free_space(block_group, offset,
7365 search_start - offset);
7366 BUG_ON(offset > search_start);
7368 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7369 alloc_type, delalloc);
7370 if (ret == -EAGAIN) {
7371 btrfs_add_free_space(block_group, offset, num_bytes);
7375 /* we are all good, let's return */
7376 ins->objectid = search_start;
7377 ins->offset = num_bytes;
7379 trace_btrfs_reserve_extent(orig_root, block_group,
7380 search_start, num_bytes);
7381 btrfs_release_block_group(block_group, delalloc);
7384 failed_cluster_refill = false;
7385 failed_alloc = false;
7386 BUG_ON(index != get_block_group_index(block_group));
7387 btrfs_release_block_group(block_group, delalloc);
7389 up_read(&space_info->groups_sem);
7391 if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
7392 && !orig_have_caching_bg)
7393 orig_have_caching_bg = true;
7395 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7398 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7402 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7403 * caching kthreads as we move along
7404 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7405 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7406 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7409 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7411 if (loop == LOOP_CACHING_NOWAIT) {
7413 * We want to skip the LOOP_CACHING_WAIT step if we
7414 * don't have any uncached bgs and we've already done a
7415 * full search through.
7417 if (orig_have_caching_bg || !full_search)
7418 loop = LOOP_CACHING_WAIT;
7420 loop = LOOP_ALLOC_CHUNK;
7425 if (loop == LOOP_ALLOC_CHUNK) {
7426 struct btrfs_trans_handle *trans;
7429 trans = current->journal_info;
7433 trans = btrfs_join_transaction(root);
7435 if (IS_ERR(trans)) {
7436 ret = PTR_ERR(trans);
7440 ret = do_chunk_alloc(trans, root, flags,
7444 * If we can't allocate a new chunk we've already looped
7445 * through at least once, move on to the NO_EMPTY_SIZE
7449 loop = LOOP_NO_EMPTY_SIZE;
7452 * Do not bail out on ENOSPC since we
7453 * can do more things.
7455 if (ret < 0 && ret != -ENOSPC)
7456 btrfs_abort_transaction(trans,
7461 btrfs_end_transaction(trans, root);
7466 if (loop == LOOP_NO_EMPTY_SIZE) {
7468 * Don't loop again if we already have no empty_size and
7471 if (empty_size == 0 &&
7472 empty_cluster == 0) {
7481 } else if (!ins->objectid) {
7483 } else if (ins->objectid) {
7484 if (!use_cluster && last_ptr) {
7485 spin_lock(&last_ptr->lock);
7486 last_ptr->window_start = ins->objectid;
7487 spin_unlock(&last_ptr->lock);
7492 if (ret == -ENOSPC) {
7493 spin_lock(&space_info->lock);
7494 space_info->max_extent_size = max_extent_size;
7495 spin_unlock(&space_info->lock);
7496 ins->offset = max_extent_size;
7501 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7502 int dump_block_groups)
7504 struct btrfs_block_group_cache *cache;
7507 spin_lock(&info->lock);
7508 printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7510 info->total_bytes - info->bytes_used - info->bytes_pinned -
7511 info->bytes_reserved - info->bytes_readonly,
7512 (info->full) ? "" : "not ");
7513 printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7514 "reserved=%llu, may_use=%llu, readonly=%llu\n",
7515 info->total_bytes, info->bytes_used, info->bytes_pinned,
7516 info->bytes_reserved, info->bytes_may_use,
7517 info->bytes_readonly);
7518 spin_unlock(&info->lock);
7520 if (!dump_block_groups)
7523 down_read(&info->groups_sem);
7525 list_for_each_entry(cache, &info->block_groups[index], list) {
7526 spin_lock(&cache->lock);
7527 printk(KERN_INFO "BTRFS: "
7528 "block group %llu has %llu bytes, "
7529 "%llu used %llu pinned %llu reserved %s\n",
7530 cache->key.objectid, cache->key.offset,
7531 btrfs_block_group_used(&cache->item), cache->pinned,
7532 cache->reserved, cache->ro ? "[readonly]" : "");
7533 btrfs_dump_free_space(cache, bytes);
7534 spin_unlock(&cache->lock);
7536 if (++index < BTRFS_NR_RAID_TYPES)
7538 up_read(&info->groups_sem);
7541 int btrfs_reserve_extent(struct btrfs_root *root,
7542 u64 num_bytes, u64 min_alloc_size,
7543 u64 empty_size, u64 hint_byte,
7544 struct btrfs_key *ins, int is_data, int delalloc)
7546 bool final_tried = num_bytes == min_alloc_size;
7550 flags = btrfs_get_alloc_profile(root, is_data);
7552 WARN_ON(num_bytes < root->sectorsize);
7553 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7556 if (ret == -ENOSPC) {
7557 if (!final_tried && ins->offset) {
7558 num_bytes = min(num_bytes >> 1, ins->offset);
7559 num_bytes = round_down(num_bytes, root->sectorsize);
7560 num_bytes = max(num_bytes, min_alloc_size);
7561 if (num_bytes == min_alloc_size)
7564 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7565 struct btrfs_space_info *sinfo;
7567 sinfo = __find_space_info(root->fs_info, flags);
7568 btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7571 dump_space_info(sinfo, num_bytes, 1);
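/*
 * Editor's worked example, not upstream code, of the retry logic above
 * with made-up numbers: num_bytes = 1MiB, min_alloc_size = 64KiB,
 * sectorsize = 4KiB.  If find_free_extent() fails with -ENOSPC and reports
 * ins->offset = 300KiB (the largest free extent it saw), the next attempt
 * uses:
 *
 *	num_bytes = min(1MiB >> 1, 300KiB)	= 300KiB
 *	num_bytes = round_down(300KiB, 4KiB)	= 300KiB
 *	num_bytes = max(300KiB, 64KiB)		= 300KiB
 *
 * and the loop keeps shrinking the request this way until an extent is
 * found or num_bytes hits min_alloc_size, after which a further -ENOSPC is
 * returned to the caller.
 */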
7578 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7580 int pin, int delalloc)
7582 struct btrfs_block_group_cache *cache;
7585 cache = btrfs_lookup_block_group(root->fs_info, start);
7587 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7593 pin_down_extent(root, cache, start, len, 1);
7595 if (btrfs_test_opt(root, DISCARD))
7596 ret = btrfs_discard_extent(root, start, len, NULL);
7597 btrfs_add_free_space(cache, start, len);
7598 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7601 btrfs_put_block_group(cache);
7603 trace_btrfs_reserved_extent_free(root, start, len);
7608 int btrfs_free_reserved_extent(struct btrfs_root *root,
7609 u64 start, u64 len, int delalloc)
7611 return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7614 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7617 return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7620 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7621 struct btrfs_root *root,
7622 u64 parent, u64 root_objectid,
7623 u64 flags, u64 owner, u64 offset,
7624 struct btrfs_key *ins, int ref_mod)
7627 struct btrfs_fs_info *fs_info = root->fs_info;
7628 struct btrfs_extent_item *extent_item;
7629 struct btrfs_extent_inline_ref *iref;
7630 struct btrfs_path *path;
7631 struct extent_buffer *leaf;
7636 type = BTRFS_SHARED_DATA_REF_KEY;
7638 type = BTRFS_EXTENT_DATA_REF_KEY;
7640 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7642 path = btrfs_alloc_path();
7646 path->leave_spinning = 1;
7647 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7650 btrfs_free_path(path);
7654 leaf = path->nodes[0];
7655 extent_item = btrfs_item_ptr(leaf, path->slots[0],
7656 struct btrfs_extent_item);
7657 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7658 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7659 btrfs_set_extent_flags(leaf, extent_item,
7660 flags | BTRFS_EXTENT_FLAG_DATA);
7662 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7663 btrfs_set_extent_inline_ref_type(leaf, iref, type);
7665 struct btrfs_shared_data_ref *ref;
7666 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7667 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7668 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7670 struct btrfs_extent_data_ref *ref;
7671 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7672 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7673 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7674 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7675 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7678 btrfs_mark_buffer_dirty(path->nodes[0]);
7679 btrfs_free_path(path);
7681 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7682 if (ret) { /* -ENOENT, logic error */
7683 btrfs_err(fs_info, "update block group failed for %llu %llu",
7684 ins->objectid, ins->offset);
7687 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7691 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7692 struct btrfs_root *root,
7693 u64 parent, u64 root_objectid,
7694 u64 flags, struct btrfs_disk_key *key,
7695 int level, struct btrfs_key *ins)
7698 struct btrfs_fs_info *fs_info = root->fs_info;
7699 struct btrfs_extent_item *extent_item;
7700 struct btrfs_tree_block_info *block_info;
7701 struct btrfs_extent_inline_ref *iref;
7702 struct btrfs_path *path;
7703 struct extent_buffer *leaf;
7704 u32 size = sizeof(*extent_item) + sizeof(*iref);
7705 u64 num_bytes = ins->offset;
7706 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7709 if (!skinny_metadata)
7710 size += sizeof(*block_info);
7712 path = btrfs_alloc_path();
7714 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7719 path->leave_spinning = 1;
7720 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7723 btrfs_free_path(path);
7724 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7729 leaf = path->nodes[0];
7730 extent_item = btrfs_item_ptr(leaf, path->slots[0],
7731 struct btrfs_extent_item);
7732 btrfs_set_extent_refs(leaf, extent_item, 1);
7733 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7734 btrfs_set_extent_flags(leaf, extent_item,
7735 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7737 if (skinny_metadata) {
7738 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7739 num_bytes = root->nodesize;
7741 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7742 btrfs_set_tree_block_key(leaf, block_info, key);
7743 btrfs_set_tree_block_level(leaf, block_info, level);
7744 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7748 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7749 btrfs_set_extent_inline_ref_type(leaf, iref,
7750 BTRFS_SHARED_BLOCK_REF_KEY);
7751 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7753 btrfs_set_extent_inline_ref_type(leaf, iref,
7754 BTRFS_TREE_BLOCK_REF_KEY);
7755 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7758 btrfs_mark_buffer_dirty(leaf);
7759 btrfs_free_path(path);
7761 ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7763 if (ret) { /* -ENOENT, logic error */
7764 btrfs_err(fs_info, "update block group failed for %llu %llu",
7765 ins->objectid, ins->offset);
7769 trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7773 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7774 struct btrfs_root *root,
7775 u64 root_objectid, u64 owner,
7776 u64 offset, u64 ram_bytes,
7777 struct btrfs_key *ins)
7781 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7783 ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7785 root_objectid, owner, offset,
7786 ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
7792 * this is used by the tree logging recovery code. It records that
7793 * an extent has been allocated and makes sure to clear the free
7794 * space cache bits as well
7796 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7797 struct btrfs_root *root,
7798 u64 root_objectid, u64 owner, u64 offset,
7799 struct btrfs_key *ins)
7802 struct btrfs_block_group_cache *block_group;
7805 * Mixed block groups will exclude before processing the log so we only
7806 * need to do the exclude dance if this fs isn't mixed.
7808 if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7809 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7814 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7818 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7819 RESERVE_ALLOC_NO_ACCOUNT, 0);
7820 BUG_ON(ret); /* logic error */
7821 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7822 0, owner, offset, ins, 1);
7823 btrfs_put_block_group(block_group);
7827 static struct extent_buffer *
7828 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7829 u64 bytenr, int level)
7831 struct extent_buffer *buf;
7833 buf = btrfs_find_create_tree_block(root, bytenr);
7835 return ERR_PTR(-ENOMEM);
7836 btrfs_set_header_generation(buf, trans->transid);
7837 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7838 btrfs_tree_lock(buf);
7839 clean_tree_block(trans, root->fs_info, buf);
7840 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7842 btrfs_set_lock_blocking(buf);
7843 btrfs_set_buffer_uptodate(buf);
7845 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7846 buf->log_index = root->log_transid % 2;
7848 * we allow two log transactions at a time, use different
7849 * EXTENT bits to differentiate dirty pages.
7851 if (buf->log_index == 0)
7852 set_extent_dirty(&root->dirty_log_pages, buf->start,
7853 buf->start + buf->len - 1, GFP_NOFS);
7855 set_extent_new(&root->dirty_log_pages, buf->start,
7856 buf->start + buf->len - 1, GFP_NOFS);
7858 buf->log_index = -1;
7859 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7860 buf->start + buf->len - 1, GFP_NOFS);
7862 trans->dirty = true;
7863 /* this returns a buffer locked for blocking */
7867 static struct btrfs_block_rsv *
7868 use_block_rsv(struct btrfs_trans_handle *trans,
7869 struct btrfs_root *root, u32 blocksize)
7871 struct btrfs_block_rsv *block_rsv;
7872 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7874 bool global_updated = false;
7876 block_rsv = get_block_rsv(trans, root);
7878 if (unlikely(block_rsv->size == 0))
7881 ret = block_rsv_use_bytes(block_rsv, blocksize);
7885 if (block_rsv->failfast)
7886 return ERR_PTR(ret);
7888 if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7889 global_updated = true;
7890 update_global_block_rsv(root->fs_info);
7894 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7895 static DEFINE_RATELIMIT_STATE(_rs,
7896 DEFAULT_RATELIMIT_INTERVAL * 10,
7897 /*DEFAULT_RATELIMIT_BURST*/ 1);
7898 if (__ratelimit(&_rs))
7900 "BTRFS: block rsv returned %d\n", ret);
7903 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7904 BTRFS_RESERVE_NO_FLUSH);
7908 * If we couldn't reserve metadata bytes try and use some from
7909 * the global reserve if its space type is the same as the global
7912 if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7913 block_rsv->space_info == global_rsv->space_info) {
7914 ret = block_rsv_use_bytes(global_rsv, blocksize);
7918 return ERR_PTR(ret);
7921 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7922 struct btrfs_block_rsv *block_rsv, u32 blocksize)
7924 block_rsv_add_bytes(block_rsv, blocksize, 0);
7925 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7929 * finds a free extent and does all the dirty work required for allocation
7930 * returns the tree buffer or an ERR_PTR on error.
7932 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7933 struct btrfs_root *root,
7934 u64 parent, u64 root_objectid,
7935 struct btrfs_disk_key *key, int level,
7936 u64 hint, u64 empty_size)
7938 struct btrfs_key ins;
7939 struct btrfs_block_rsv *block_rsv;
7940 struct extent_buffer *buf;
7941 struct btrfs_delayed_extent_op *extent_op;
7944 u32 blocksize = root->nodesize;
7945 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7948 if (btrfs_test_is_dummy_root(root)) {
7949 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7952 root->alloc_bytenr += blocksize;
7956 block_rsv = use_block_rsv(trans, root, blocksize);
7957 if (IS_ERR(block_rsv))
7958 return ERR_CAST(block_rsv);
7960 ret = btrfs_reserve_extent(root, blocksize, blocksize,
7961 empty_size, hint, &ins, 0, 0);
7965 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7968 goto out_free_reserved;
7971 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7973 parent = ins.objectid;
7974 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7978 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7979 extent_op = btrfs_alloc_delayed_extent_op();
7985 memcpy(&extent_op->key, key, sizeof(extent_op->key));
7987 memset(&extent_op->key, 0, sizeof(extent_op->key));
7988 extent_op->flags_to_set = flags;
7989 if (skinny_metadata)
7990 extent_op->update_key = 0;
7992 extent_op->update_key = 1;
7993 extent_op->update_flags = 1;
7994 extent_op->is_data = 0;
7995 extent_op->level = level;
7997 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7998 ins.objectid, ins.offset,
7999 parent, root_objectid, level,
8000 BTRFS_ADD_DELAYED_EXTENT,
8003 goto out_free_delayed;
8008 btrfs_free_delayed_extent_op(extent_op);
8010 free_extent_buffer(buf);
8012 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8014 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8015 return ERR_PTR(ret);
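/*
 * Editor's illustrative sketch, not upstream code: the typical caller
 * pattern for btrfs_alloc_tree_block() above, e.g. when a COW needs a fresh
 * node.  'disk_key', 'level' and 'hint' are placeholders.
 *
 *	struct extent_buffer *cow;
 *
 *	cow = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
 *				     &disk_key, level, hint, 0);
 *	if (IS_ERR(cow))
 *		return PTR_ERR(cow);
 *	...		(the returned buffer comes back write-locked)
 *
 * On success the new block already has a delayed ADD ref queued (unless the
 * root is the tree log), so the caller only has to wire it into its parent.
 */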
8018 struct walk_control {
8019 u64 refs[BTRFS_MAX_LEVEL];
8020 u64 flags[BTRFS_MAX_LEVEL];
8021 struct btrfs_key update_progress;
8032 #define DROP_REFERENCE 1
8033 #define UPDATE_BACKREF 2
8035 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8036 struct btrfs_root *root,
8037 struct walk_control *wc,
8038 struct btrfs_path *path)
8046 struct btrfs_key key;
8047 struct extent_buffer *eb;
8052 if (path->slots[wc->level] < wc->reada_slot) {
8053 wc->reada_count = wc->reada_count * 2 / 3;
8054 wc->reada_count = max(wc->reada_count, 2);
8056 wc->reada_count = wc->reada_count * 3 / 2;
8057 wc->reada_count = min_t(int, wc->reada_count,
8058 BTRFS_NODEPTRS_PER_BLOCK(root));
8061 eb = path->nodes[wc->level];
8062 nritems = btrfs_header_nritems(eb);
8063 blocksize = root->nodesize;
8065 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8066 if (nread >= wc->reada_count)
8070 bytenr = btrfs_node_blockptr(eb, slot);
8071 generation = btrfs_node_ptr_generation(eb, slot);
8073 if (slot == path->slots[wc->level])
8076 if (wc->stage == UPDATE_BACKREF &&
8077 generation <= root->root_key.offset)
8080 /* We don't lock the tree block, it's OK to be racy here */
8081 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8082 wc->level - 1, 1, &refs,
8084 /* We don't care about errors in readahead. */
8089 if (wc->stage == DROP_REFERENCE) {
8093 if (wc->level == 1 &&
8094 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8096 if (!wc->update_ref ||
8097 generation <= root->root_key.offset)
8099 btrfs_node_key_to_cpu(eb, &key, slot);
8100 ret = btrfs_comp_cpu_keys(&key,
8101 &wc->update_progress);
8105 if (wc->level == 1 &&
8106 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8110 readahead_tree_block(root, bytenr);
8113 wc->reada_slot = slot;
8117 * These may not be seen by the usual inc/dec ref code so we have to
8120 static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
8121 struct btrfs_root *root, u64 bytenr,
8124 struct btrfs_qgroup_extent_record *qrecord;
8125 struct btrfs_delayed_ref_root *delayed_refs;
8127 qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
8131 qrecord->bytenr = bytenr;
8132 qrecord->num_bytes = num_bytes;
8133 qrecord->old_roots = NULL;
8135 delayed_refs = &trans->transaction->delayed_refs;
8136 spin_lock(&delayed_refs->lock);
8137 if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
8139 spin_unlock(&delayed_refs->lock);
8144 static int account_leaf_items(struct btrfs_trans_handle *trans,
8145 struct btrfs_root *root,
8146 struct extent_buffer *eb)
8148 int nr = btrfs_header_nritems(eb);
8149 int i, extent_type, ret;
8150 struct btrfs_key key;
8151 struct btrfs_file_extent_item *fi;
8152 u64 bytenr, num_bytes;
8154 /* We can be called directly from walk_up_proc() */
8155 if (!root->fs_info->quota_enabled)
8158 for (i = 0; i < nr; i++) {
8159 btrfs_item_key_to_cpu(eb, &key, i);
8161 if (key.type != BTRFS_EXTENT_DATA_KEY)
8164 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8165 /* filter out non qgroup-accountable extents */
8166 extent_type = btrfs_file_extent_type(eb, fi);
8168 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8171 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8175 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8177 ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
8185 * Walk up the tree from the bottom, freeing leaves and any interior
8186 * nodes which have had all slots visited. If a node (leaf or
8187 * interior) is freed, the node above it will have its slot
8188 * incremented. The root node will never be freed.
8190 * At the end of this function, we should have a path which has all
8191 * slots incremented to the next position for a search. If we need to
8192 * read a new node it will be NULL and the node above it will have the
8193 * correct slot selected for a later read.
8195 * If we increment the root nodes slot counter past the number of
8196 * elements, 1 is returned to signal completion of the search.
8198 static int adjust_slots_upwards(struct btrfs_root *root,
8199 struct btrfs_path *path, int root_level)
8203 struct extent_buffer *eb;
8205 if (root_level == 0)
8208 while (level <= root_level) {
8209 eb = path->nodes[level];
8210 nr = btrfs_header_nritems(eb);
8211 path->slots[level]++;
8212 slot = path->slots[level];
8213 if (slot >= nr || level == 0) {
8215 * Don't free the root - we will detect this
8216 * condition after our loop and return a
8217 * positive value for caller to stop walking the tree.
8219 if (level != root_level) {
8220 btrfs_tree_unlock_rw(eb, path->locks[level]);
8221 path->locks[level] = 0;
8223 free_extent_buffer(eb);
8224 path->nodes[level] = NULL;
8225 path->slots[level] = 0;
8229 * We have a valid slot to walk back down
8230 * from. Stop here so caller can process these
8239 eb = path->nodes[root_level];
8240 if (path->slots[root_level] >= btrfs_header_nritems(eb))
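/*
 * Editor's illustrative sketch, not upstream code, of how the helper above
 * is driven; account_shared_subtree() below is the real user.  Stripped to
 * a skeleton, the walk is:
 *
 *	while (level >= 0) {
 *		(descend, reading and locking path->nodes[level] as needed)
 *		if (level == 0) {
 *			account_leaf_items(trans, root, path->nodes[0]);
 *			ret = adjust_slots_upwards(root, path, root_level);
 *			if (ret)
 *				break;	(nonzero: every slot was visited)
 *			(restart the descent from the freshly advanced slot)
 *		}
 *	}
 */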
8247 * root_eb is the subtree root and is locked before this function is called.
8249 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8250 struct btrfs_root *root,
8251 struct extent_buffer *root_eb,
8257 struct extent_buffer *eb = root_eb;
8258 struct btrfs_path *path = NULL;
8260 BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8261 BUG_ON(root_eb == NULL);
8263 if (!root->fs_info->quota_enabled)
8266 if (!extent_buffer_uptodate(root_eb)) {
8267 ret = btrfs_read_buffer(root_eb, root_gen);
8272 if (root_level == 0) {
8273 ret = account_leaf_items(trans, root, root_eb);
8277 path = btrfs_alloc_path();
8282 * Walk down the tree. Missing extent blocks are filled in as
8283 * we go. Metadata is accounted every time we read a new
8286 * When we reach a leaf, we account for file extent items in it,
8287 * walk back up the tree (adjusting slot pointers as we go)
8288 * and restart the search process.
8290 extent_buffer_get(root_eb); /* For path */
8291 path->nodes[root_level] = root_eb;
8292 path->slots[root_level] = 0;
8293 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8296 while (level >= 0) {
8297 if (path->nodes[level] == NULL) {
8302 /* We need to get child blockptr/gen from
8303 * parent before we can read it. */
8304 eb = path->nodes[level + 1];
8305 parent_slot = path->slots[level + 1];
8306 child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8307 child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8309 eb = read_tree_block(root, child_bytenr, child_gen);
8313 } else if (!extent_buffer_uptodate(eb)) {
8314 free_extent_buffer(eb);
8319 path->nodes[level] = eb;
8320 path->slots[level] = 0;
8322 btrfs_tree_read_lock(eb);
8323 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8324 path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8326 ret = record_one_subtree_extent(trans, root, child_bytenr,
8333 ret = account_leaf_items(trans, root, path->nodes[level]);
8337 /* Nonzero return here means we completed our search */
8338 ret = adjust_slots_upwards(root, path, root_level);
8342 /* Restart search with new slots */
8351 btrfs_free_path(path);
8357 * helper to process tree block while walking down the tree.
8359 * when wc->stage == UPDATE_BACKREF, this function updates
8360 * back refs for pointers in the block.
8362 * NOTE: return value 1 means we should stop walking down.
8364 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8365 struct btrfs_root *root,
8366 struct btrfs_path *path,
8367 struct walk_control *wc, int lookup_info)
8369 int level = wc->level;
8370 struct extent_buffer *eb = path->nodes[level];
8371 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8374 if (wc->stage == UPDATE_BACKREF &&
8375 btrfs_header_owner(eb) != root->root_key.objectid)
8379 * when reference count of tree block is 1, it won't increase
8380 * again. once full backref flag is set, we never clear it.
8383 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8384 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8385 BUG_ON(!path->locks[level]);
8386 ret = btrfs_lookup_extent_info(trans, root,
8387 eb->start, level, 1,
8390 BUG_ON(ret == -ENOMEM);
8393 BUG_ON(wc->refs[level] == 0);
8396 if (wc->stage == DROP_REFERENCE) {
8397 if (wc->refs[level] > 1)
8400 if (path->locks[level] && !wc->keep_locks) {
8401 btrfs_tree_unlock_rw(eb, path->locks[level]);
8402 path->locks[level] = 0;
8407 /* wc->stage == UPDATE_BACKREF */
8408 if (!(wc->flags[level] & flag)) {
8409 BUG_ON(!path->locks[level]);
8410 ret = btrfs_inc_ref(trans, root, eb, 1);
8411 BUG_ON(ret); /* -ENOMEM */
8412 ret = btrfs_dec_ref(trans, root, eb, 0);
8413 BUG_ON(ret); /* -ENOMEM */
8414 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8416 btrfs_header_level(eb), 0);
8417 BUG_ON(ret); /* -ENOMEM */
8418 wc->flags[level] |= flag;
8422 * the block is shared by multiple trees, so it's not good to
8423 * keep the tree lock
8425 if (path->locks[level] && level > 0) {
8426 btrfs_tree_unlock_rw(eb, path->locks[level]);
8427 path->locks[level] = 0;
8433 * helper to process tree block pointer.
8435 * when wc->stage == DROP_REFERENCE, this function checks
8436 * reference count of the block pointed to. if the block
8437 * is shared and we need to update back refs for the subtree
8438 * rooted at the block, this function changes wc->stage to
8439 * UPDATE_BACKREF. if the block is shared and there is no
8440 * need to update back refs, this function drops the reference
8443 * NOTE: return value 1 means we should stop walking down.
8445 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8446 struct btrfs_root *root,
8447 struct btrfs_path *path,
8448 struct walk_control *wc, int *lookup_info)
8454 struct btrfs_key key;
8455 struct extent_buffer *next;
8456 int level = wc->level;
8459 bool need_account = false;
8461 generation = btrfs_node_ptr_generation(path->nodes[level],
8462 path->slots[level]);
8464 * if the lower level block was created before the snapshot
8465 * was created, we know there is no need to update back refs
8468 if (wc->stage == UPDATE_BACKREF &&
8469 generation <= root->root_key.offset) {
8474 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8475 blocksize = root->nodesize;
8477 next = btrfs_find_tree_block(root->fs_info, bytenr);
8479 next = btrfs_find_create_tree_block(root, bytenr);
8482 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8486 btrfs_tree_lock(next);
8487 btrfs_set_lock_blocking(next);
8489 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8490 &wc->refs[level - 1],
8491 &wc->flags[level - 1]);
8495 if (unlikely(wc->refs[level - 1] == 0)) {
8496 btrfs_err(root->fs_info, "Missing references.");
8502 if (wc->stage == DROP_REFERENCE) {
8503 if (wc->refs[level - 1] > 1) {
8504 need_account = true;
8506 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8509 if (!wc->update_ref ||
8510 generation <= root->root_key.offset)
8513 btrfs_node_key_to_cpu(path->nodes[level], &key,
8514 path->slots[level]);
8515 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8519 wc->stage = UPDATE_BACKREF;
8520 wc->shared_level = level - 1;
8524 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8528 if (!btrfs_buffer_uptodate(next, generation, 0)) {
8529 btrfs_tree_unlock(next);
8530 free_extent_buffer(next);
8536 if (reada && level == 1)
8537 reada_walk_down(trans, root, wc, path);
8538 next = read_tree_block(root, bytenr, generation);
8540 return PTR_ERR(next);
8541 } else if (!extent_buffer_uptodate(next)) {
8542 free_extent_buffer(next);
8545 btrfs_tree_lock(next);
8546 btrfs_set_lock_blocking(next);
8550 ASSERT(level == btrfs_header_level(next));
8551 if (level != btrfs_header_level(next)) {
8552 btrfs_err(root->fs_info, "mismatched level");
8556 path->nodes[level] = next;
8557 path->slots[level] = 0;
8558 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8564 wc->refs[level - 1] = 0;
8565 wc->flags[level - 1] = 0;
8566 if (wc->stage == DROP_REFERENCE) {
8567 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8568 parent = path->nodes[level]->start;
8570 ASSERT(root->root_key.objectid ==
8571 btrfs_header_owner(path->nodes[level]));
8572 if (root->root_key.objectid !=
8573 btrfs_header_owner(path->nodes[level])) {
8574 btrfs_err(root->fs_info,
8575 "mismatched block owner");
8583 ret = account_shared_subtree(trans, root, next,
8584 generation, level - 1);
8586 btrfs_err_rl(root->fs_info,
8588 "%d accounting shared subtree. Quota "
8589 "is out of sync, rescan required.",
8593 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8594 root->root_key.objectid, level - 1, 0);
8603 btrfs_tree_unlock(next);
8604 free_extent_buffer(next);
8610 * helper to process tree block while walking up the tree.
8612 * when wc->stage == DROP_REFERENCE, this function drops
8613 * reference count on the block.
8615 * when wc->stage == UPDATE_BACKREF, this function changes
8616 * wc->stage back to DROP_REFERENCE if we changed wc->stage
8617 * to UPDATE_BACKREF previously while processing the block.
8619 * NOTE: return value 1 means we should stop walking up.
8621 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8622 struct btrfs_root *root,
8623 struct btrfs_path *path,
8624 struct walk_control *wc)
8627 int level = wc->level;
8628 struct extent_buffer *eb = path->nodes[level];
8631 if (wc->stage == UPDATE_BACKREF) {
8632 BUG_ON(wc->shared_level < level);
8633 if (level < wc->shared_level)
8636 ret = find_next_key(path, level + 1, &wc->update_progress);
8640 wc->stage = DROP_REFERENCE;
8641 wc->shared_level = -1;
8642 path->slots[level] = 0;
8645 * check reference count again if the block isn't locked.
8646 * we should start walking down the tree again if reference
8649 if (!path->locks[level]) {
8651 btrfs_tree_lock(eb);
8652 btrfs_set_lock_blocking(eb);
8653 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8655 ret = btrfs_lookup_extent_info(trans, root,
8656 eb->start, level, 1,
8660 btrfs_tree_unlock_rw(eb, path->locks[level]);
8661 path->locks[level] = 0;
8664 BUG_ON(wc->refs[level] == 0);
8665 if (wc->refs[level] == 1) {
8666 btrfs_tree_unlock_rw(eb, path->locks[level]);
8667 path->locks[level] = 0;
8673 /* wc->stage == DROP_REFERENCE */
8674 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8676 if (wc->refs[level] == 1) {
8678 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8679 ret = btrfs_dec_ref(trans, root, eb, 1);
8681 ret = btrfs_dec_ref(trans, root, eb, 0);
8682 BUG_ON(ret); /* -ENOMEM */
8683 ret = account_leaf_items(trans, root, eb);
8685 btrfs_err_rl(root->fs_info,
8687 "%d accounting leaf items. Quota "
8688 "is out of sync, rescan required.",
8692 /* make block locked assertion in clean_tree_block happy */
8693 if (!path->locks[level] &&
8694 btrfs_header_generation(eb) == trans->transid) {
8695 btrfs_tree_lock(eb);
8696 btrfs_set_lock_blocking(eb);
8697 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8699 clean_tree_block(trans, root->fs_info, eb);
8702 if (eb == root->node) {
8703 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8706 BUG_ON(root->root_key.objectid !=
8707 btrfs_header_owner(eb));
8709 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8710 parent = path->nodes[level + 1]->start;
8712 BUG_ON(root->root_key.objectid !=
8713 btrfs_header_owner(path->nodes[level + 1]));
8716 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8718 wc->refs[level] = 0;
8719 wc->flags[level] = 0;
8723 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8724 struct btrfs_root *root,
8725 struct btrfs_path *path,
8726 struct walk_control *wc)
8728 int level = wc->level;
8729 int lookup_info = 1;
8732 while (level >= 0) {
8733 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8740 if (path->slots[level] >=
8741 btrfs_header_nritems(path->nodes[level]))
8744 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8746 path->slots[level]++;
8755 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8756 struct btrfs_root *root,
8757 struct btrfs_path *path,
8758 struct walk_control *wc, int max_level)
8760 int level = wc->level;
8763 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8764 while (level < max_level && path->nodes[level]) {
8766 if (path->slots[level] + 1 <
8767 btrfs_header_nritems(path->nodes[level])) {
8768 path->slots[level]++;
8771 ret = walk_up_proc(trans, root, path, wc);
8775 if (path->locks[level]) {
8776 btrfs_tree_unlock_rw(path->nodes[level],
8777 path->locks[level]);
8778 path->locks[level] = 0;
8780 free_extent_buffer(path->nodes[level]);
8781 path->nodes[level] = NULL;
8789 * drop a subvolume tree.
8791 * this function traverses the tree freeing any blocks that are only
8792 * referenced by the tree.
8794 * when a shared tree block is found, this function decreases its
8795 * reference count by one. if update_ref is true, this function
8796 * also makes sure backrefs for the shared block and all lower level
8797 * blocks are properly updated.
8799 * If called with for_reloc == 0, may exit early with -EAGAIN
8801 int btrfs_drop_snapshot(struct btrfs_root *root,
8802 struct btrfs_block_rsv *block_rsv, int update_ref,
8805 struct btrfs_path *path;
8806 struct btrfs_trans_handle *trans;
8807 struct btrfs_root *tree_root = root->fs_info->tree_root;
8808 struct btrfs_root_item *root_item = &root->root_item;
8809 struct walk_control *wc;
8810 struct btrfs_key key;
8814 bool root_dropped = false;
8816 btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8818 path = btrfs_alloc_path();
8824 wc = kzalloc(sizeof(*wc), GFP_NOFS);
8826 btrfs_free_path(path);
8831 trans = btrfs_start_transaction(tree_root, 0);
8832 if (IS_ERR(trans)) {
8833 err = PTR_ERR(trans);
8838 trans->block_rsv = block_rsv;
8840 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8841 level = btrfs_header_level(root->node);
8842 path->nodes[level] = btrfs_lock_root_node(root);
8843 btrfs_set_lock_blocking(path->nodes[level]);
8844 path->slots[level] = 0;
8845 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8846 memset(&wc->update_progress, 0,
8847 sizeof(wc->update_progress));
8849 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8850 memcpy(&wc->update_progress, &key,
8851 sizeof(wc->update_progress));
8853 level = root_item->drop_level;
8855 path->lowest_level = level;
8856 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8857 path->lowest_level = 0;
8865 * unlock our path, this is safe because only this
8866 * function is allowed to delete this snapshot
8868 btrfs_unlock_up_safe(path, 0);
8870 level = btrfs_header_level(root->node);
8872 btrfs_tree_lock(path->nodes[level]);
8873 btrfs_set_lock_blocking(path->nodes[level]);
8874 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8876 ret = btrfs_lookup_extent_info(trans, root,
8877 path->nodes[level]->start,
8878 level, 1, &wc->refs[level],
8884 BUG_ON(wc->refs[level] == 0);
8886 if (level == root_item->drop_level)
8889 btrfs_tree_unlock(path->nodes[level]);
8890 path->locks[level] = 0;
8891 WARN_ON(wc->refs[level] != 1);
8897 wc->shared_level = -1;
8898 wc->stage = DROP_REFERENCE;
8899 wc->update_ref = update_ref;
8901 wc->for_reloc = for_reloc;
8902 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8906 ret = walk_down_tree(trans, root, path, wc);
8912 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8919 BUG_ON(wc->stage != DROP_REFERENCE);
8923 if (wc->stage == DROP_REFERENCE) {
8925 btrfs_node_key(path->nodes[level],
8926 &root_item->drop_progress,
8927 path->slots[level]);
8928 root_item->drop_level = level;
8931 BUG_ON(wc->level == 0);
8932 if (btrfs_should_end_transaction(trans, tree_root) ||
8933 (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8934 ret = btrfs_update_root(trans, tree_root,
8938 btrfs_abort_transaction(trans, tree_root, ret);
8943 btrfs_end_transaction_throttle(trans, tree_root);
8944 if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8945 pr_debug("BTRFS: drop snapshot early exit\n");
8950 trans = btrfs_start_transaction(tree_root, 0);
8951 if (IS_ERR(trans)) {
8952 err = PTR_ERR(trans);
8956 trans->block_rsv = block_rsv;
8959 btrfs_release_path(path);
8963 ret = btrfs_del_root(trans, tree_root, &root->root_key);
8965 btrfs_abort_transaction(trans, tree_root, ret);
8969 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8970 ret = btrfs_find_root(tree_root, &root->root_key, path,
8973 btrfs_abort_transaction(trans, tree_root, ret);
8976 } else if (ret > 0) {
8977 /* if we fail to delete the orphan item this time
8978 * around, it'll get picked up the next time.
8980 * The most common failure here is just -ENOENT.
8982 btrfs_del_orphan_item(trans, tree_root,
8983 root->root_key.objectid);
8987 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8988 btrfs_add_dropped_root(trans, root);
8990 free_extent_buffer(root->node);
8991 free_extent_buffer(root->commit_root);
8992 btrfs_put_fs_root(root);
8994 root_dropped = true;
8996 btrfs_end_transaction_throttle(trans, tree_root);
8999 btrfs_free_path(path);
9002 * So if we need to stop dropping the snapshot for whatever reason we
9003 * need to make sure to add it back to the dead root list so that we
9004 * keep trying to do the work later. This also cleans up roots if we
9005 * don't have it in the radix (like when we recover after a power fail
9006 * or unmount) so we don't leak memory.
9008 if (!for_reloc && root_dropped == false)
9009 btrfs_add_dead_root(root);
9010 if (err && err != -EAGAIN)
9011 btrfs_std_error(root->fs_info, err, NULL);
9016 * drop subtree rooted at tree block 'node'.
9018 * NOTE: this function will unlock and release tree block 'node'
9019 * only used by relocation code
9021 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9022 struct btrfs_root *root,
9023 struct extent_buffer *node,
9024 struct extent_buffer *parent)
9026 struct btrfs_path *path;
9027 struct walk_control *wc;
9033 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9035 path = btrfs_alloc_path();
9039 wc = kzalloc(sizeof(*wc), GFP_NOFS);
9041 btrfs_free_path(path);
9045 btrfs_assert_tree_locked(parent);
9046 parent_level = btrfs_header_level(parent);
9047 extent_buffer_get(parent);
9048 path->nodes[parent_level] = parent;
9049 path->slots[parent_level] = btrfs_header_nritems(parent);
9051 btrfs_assert_tree_locked(node);
9052 level = btrfs_header_level(node);
9053 path->nodes[level] = node;
9054 path->slots[level] = 0;
9055 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9057 wc->refs[parent_level] = 1;
9058 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9060 wc->shared_level = -1;
9061 wc->stage = DROP_REFERENCE;
9065 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9068 wret = walk_down_tree(trans, root, path, wc);
9074 wret = walk_up_tree(trans, root, path, wc, parent_level);
9082 btrfs_free_path(path);
9086 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9092 * if restripe for this chunk_type is on, pick the target profile and
9093 * return; otherwise do the usual balance
9095 stripped = get_restripe_target(root->fs_info, flags);
9097 return extended_to_chunk(stripped);
9099 num_devices = root->fs_info->fs_devices->rw_devices;
9101 stripped = BTRFS_BLOCK_GROUP_RAID0 |
9102 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9103 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9105 if (num_devices == 1) {
9106 stripped |= BTRFS_BLOCK_GROUP_DUP;
9107 stripped = flags & ~stripped;
9109 /* turn raid0 into single device chunks */
9110 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9113 /* turn mirroring into duplication */
9114 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9115 BTRFS_BLOCK_GROUP_RAID10))
9116 return stripped | BTRFS_BLOCK_GROUP_DUP;
9118 /* they already had raid on here, just return */
9119 if (flags & stripped)
9122 stripped |= BTRFS_BLOCK_GROUP_DUP;
9123 stripped = flags & ~stripped;
9125 /* switch duplicated blocks with raid1 */
9126 if (flags & BTRFS_BLOCK_GROUP_DUP)
9127 return stripped | BTRFS_BLOCK_GROUP_RAID1;
9129 /* this is drive concat, leave it alone */
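/*
 * Editor's worked example, not upstream code, for the profile reduction
 * above (DATA/RAID1/DUP stand for the BTRFS_BLOCK_GROUP_* bits), assuming
 * no restripe target is set:
 *
 *	num_devices == 1, flags == DATA|RAID1  ->  returns DATA|DUP
 *		(mirroring is turned into duplication)
 *	num_devices >  1, flags == DATA|DUP    ->  returns DATA|RAID1
 *		(duplication is switched back to raid1)
 *
 * Flags that already carry a raid bit suitable for the device count are
 * returned unchanged ("they already had raid on here").
 */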
9135 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9137 struct btrfs_space_info *sinfo = cache->space_info;
9139 u64 min_allocable_bytes;
9143 * We need some metadata space and system metadata space for
9144 * allocating chunks in some corner cases, unless the caller forces the
9145 * group to be read-only.
9148 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9150 min_allocable_bytes = 1 * 1024 * 1024;
9152 min_allocable_bytes = 0;
9154 spin_lock(&sinfo->lock);
9155 spin_lock(&cache->lock);
9163 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9164 cache->bytes_super - btrfs_block_group_used(&cache->item);
9166 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9167 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9168 min_allocable_bytes <= sinfo->total_bytes) {
9169 sinfo->bytes_readonly += num_bytes;
9171 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9175 spin_unlock(&cache->lock);
9176 spin_unlock(&sinfo->lock);
9180 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9181 struct btrfs_block_group_cache *cache)
9184 struct btrfs_trans_handle *trans;
9189 trans = btrfs_join_transaction(root);
9191 return PTR_ERR(trans);
9194 * we're not allowed to set block groups readonly after the dirty
9195 * block groups cache has started writing. If it has already started,
9196 * back off and let this transaction commit
9198 mutex_lock(&root->fs_info->ro_block_group_mutex);
9199 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9200 u64 transid = trans->transid;
9202 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9203 btrfs_end_transaction(trans, root);
9205 ret = btrfs_wait_for_commit(root, transid);
9212 * if we are changing raid levels, try to allocate a corresponding
9213 * block group with the new raid level.
9215 alloc_flags = update_block_group_flags(root, cache->flags);
9216 if (alloc_flags != cache->flags) {
9217 ret = do_chunk_alloc(trans, root, alloc_flags,
9220 * ENOSPC is allowed here, we may have enough space
9221 * already allocated at the new raid level to
9230 ret = inc_block_group_ro(cache, 0);
9233 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9234 ret = do_chunk_alloc(trans, root, alloc_flags,
9238 ret = inc_block_group_ro(cache, 0);
9240 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9241 alloc_flags = update_block_group_flags(root, cache->flags);
9242 lock_chunks(root->fs_info->chunk_root);
9243 check_system_chunk(trans, root, alloc_flags);
9244 unlock_chunks(root->fs_info->chunk_root);
9246 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9248 btrfs_end_transaction(trans, root);
9252 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9253 struct btrfs_root *root, u64 type)
9255 u64 alloc_flags = get_alloc_profile(root, type);
9256 return do_chunk_alloc(trans, root, alloc_flags,
9261 * helper to account the unused space of all the readonly block groups in the
9262 * space_info. Takes mirrors into account.
9264 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9266 struct btrfs_block_group_cache *block_group;
9270 /* It's df, we don't care if it's racy */
9271 if (list_empty(&sinfo->ro_bgs))
9274 spin_lock(&sinfo->lock);
9275 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9276 spin_lock(&block_group->lock);
9278 if (!block_group->ro) {
9279 spin_unlock(&block_group->lock);
9283 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9284 BTRFS_BLOCK_GROUP_RAID10 |
9285 BTRFS_BLOCK_GROUP_DUP))
9290 free_bytes += (block_group->key.offset -
9291 btrfs_block_group_used(&block_group->item)) *
9294 spin_unlock(&block_group->lock);
9296 spin_unlock(&sinfo->lock);
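/*
 * Sketch (not part of the btrfs code) of the per-group computation
 * above: the unused bytes of a read-only block group are scaled by a
 * mirror factor of 2 for RAID1/RAID10/DUP, since those profiles store
 * every byte twice on disk.
 */
#if 0	/* example only */
static unsigned long long ex_ro_free_bytes(unsigned long long bg_size,
					   unsigned long long bg_used,
					   int mirrored)
{
	int factor = mirrored ? 2 : 1;	/* RAID1/RAID10/DUP => 2 */

	return (bg_size - bg_used) * factor;
}
#endif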
9301 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9302 struct btrfs_block_group_cache *cache)
9304 struct btrfs_space_info *sinfo = cache->space_info;
9309 spin_lock(&sinfo->lock);
9310 spin_lock(&cache->lock);
9312 num_bytes = cache->key.offset - cache->reserved -
9313 cache->pinned - cache->bytes_super -
9314 btrfs_block_group_used(&cache->item);
9315 sinfo->bytes_readonly -= num_bytes;
9316 list_del_init(&cache->ro_list);
9318 spin_unlock(&cache->lock);
9319 spin_unlock(&sinfo->lock);
9323 * checks to see if it's even possible to relocate this block group.
9325 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
9326 * ok to go ahead and try.
9328 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9330 struct btrfs_block_group_cache *block_group;
9331 struct btrfs_space_info *space_info;
9332 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9333 struct btrfs_device *device;
9334 struct btrfs_trans_handle *trans;
9343 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9345 /* odd, couldn't find the block group, leave it alone */
9349 min_free = btrfs_block_group_used(&block_group->item);
9351 /* no bytes used, we're good */
9355 space_info = block_group->space_info;
9356 spin_lock(&space_info->lock);
9358 full = space_info->full;
9361 * if this is the last block group we have in this space, we can't
9362 * relocate it unless we're able to allocate a new chunk below.
9364 * Otherwise, we need to make sure we have room in the space to handle
9365 * all of the extents from this block group. If we can, we're good
9367 if ((space_info->total_bytes != block_group->key.offset) &&
9368 (space_info->bytes_used + space_info->bytes_reserved +
9369 space_info->bytes_pinned + space_info->bytes_readonly +
9370 min_free < space_info->total_bytes)) {
9371 spin_unlock(&space_info->lock);
9374 spin_unlock(&space_info->lock);
9377 * ok we don't have enough space, but maybe we have free space on our
9378 * devices to allocate new chunks for relocation, so loop through our
9379 * alloc devices and guess if we have enough space. if this block
9380 * group is going to be restriped, run checks against the target
9381 * profile instead of the current one.
9393 target = get_restripe_target(root->fs_info, block_group->flags);
9395 index = __get_raid_index(extended_to_chunk(target));
9398 * this is just a balance, so if we were marked as full
9399 * we know there is no space for a new chunk
9404 index = get_block_group_index(block_group);
9407 if (index == BTRFS_RAID_RAID10) {
9411 } else if (index == BTRFS_RAID_RAID1) {
9413 } else if (index == BTRFS_RAID_DUP) {
9416 } else if (index == BTRFS_RAID_RAID0) {
9417 dev_min = fs_devices->rw_devices;
9418 min_free = div64_u64(min_free, dev_min);
9421 /* We need to do this so that we can look at pending chunks */
9422 trans = btrfs_join_transaction(root);
9423 if (IS_ERR(trans)) {
9424 ret = PTR_ERR(trans);
9428 mutex_lock(&root->fs_info->chunk_mutex);
9429 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9433 * check to make sure we can actually find a chunk with enough
9434 * space to fit our block group in.
9436 if (device->total_bytes > device->bytes_used + min_free &&
9437 !device->is_tgtdev_for_dev_replace) {
9438 ret = find_free_dev_extent(trans, device, min_free,
9443 if (dev_nr >= dev_min)
9449 mutex_unlock(&root->fs_info->chunk_mutex);
9450 btrfs_end_transaction(trans, root);
9452 btrfs_put_block_group(block_group);
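/*
 * Sketch (not part of the btrfs code) of the device scan performed
 * above: relocation is only worth trying when at least dev_min devices
 * each have room for min_free bytes of new chunk space (for striped
 * profiles min_free has already been spread across the devices, as
 * with the RAID0 division above).  ex_device is a stand-in structure.
 */
#if 0	/* example only */
struct ex_device {
	unsigned long long total_bytes, bytes_used;
};

static int ex_can_relocate(const struct ex_device *devs, int nr_devs,
			   unsigned long long min_free, int dev_min)
{
	int i, dev_nr = 0;

	for (i = 0; i < nr_devs; i++) {
		if (devs[i].total_bytes > devs[i].bytes_used + min_free)
			dev_nr++;
		if (dev_nr >= dev_min)
			return 0;	/* ok to go ahead and try */
	}
	return -1;			/* probably not a good idea */
}
#endif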
9456 static int find_first_block_group(struct btrfs_root *root,
9457 struct btrfs_path *path, struct btrfs_key *key)
9460 struct btrfs_key found_key;
9461 struct extent_buffer *leaf;
9464 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9469 slot = path->slots[0];
9470 leaf = path->nodes[0];
9471 if (slot >= btrfs_header_nritems(leaf)) {
9472 ret = btrfs_next_leaf(root, path);
9479 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9481 if (found_key.objectid >= key->objectid &&
9482 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9492 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9494 struct btrfs_block_group_cache *block_group;
9498 struct inode *inode;
9500 block_group = btrfs_lookup_first_block_group(info, last);
9501 while (block_group) {
9502 spin_lock(&block_group->lock);
9503 if (block_group->iref)
9505 spin_unlock(&block_group->lock);
9506 block_group = next_block_group(info->tree_root,
9516 inode = block_group->inode;
9517 block_group->iref = 0;
9518 block_group->inode = NULL;
9519 spin_unlock(&block_group->lock);
9521 last = block_group->key.objectid + block_group->key.offset;
9522 btrfs_put_block_group(block_group);
9526 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9528 struct btrfs_block_group_cache *block_group;
9529 struct btrfs_space_info *space_info;
9530 struct btrfs_caching_control *caching_ctl;
9533 down_write(&info->commit_root_sem);
9534 while (!list_empty(&info->caching_block_groups)) {
9535 caching_ctl = list_entry(info->caching_block_groups.next,
9536 struct btrfs_caching_control, list);
9537 list_del(&caching_ctl->list);
9538 put_caching_control(caching_ctl);
9540 up_write(&info->commit_root_sem);
9542 spin_lock(&info->unused_bgs_lock);
9543 while (!list_empty(&info->unused_bgs)) {
9544 block_group = list_first_entry(&info->unused_bgs,
9545 struct btrfs_block_group_cache,
9547 list_del_init(&block_group->bg_list);
9548 btrfs_put_block_group(block_group);
9550 spin_unlock(&info->unused_bgs_lock);
9552 spin_lock(&info->block_group_cache_lock);
9553 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9554 block_group = rb_entry(n, struct btrfs_block_group_cache,
9556 rb_erase(&block_group->cache_node,
9557 &info->block_group_cache_tree);
9558 RB_CLEAR_NODE(&block_group->cache_node);
9559 spin_unlock(&info->block_group_cache_lock);
9561 down_write(&block_group->space_info->groups_sem);
9562 list_del(&block_group->list);
9563 up_write(&block_group->space_info->groups_sem);
9565 if (block_group->cached == BTRFS_CACHE_STARTED)
9566 wait_block_group_cache_done(block_group);
9569 * We haven't cached this block group, which means we could
9570 * possibly have excluded extents on this block group.
9572 if (block_group->cached == BTRFS_CACHE_NO ||
9573 block_group->cached == BTRFS_CACHE_ERROR)
9574 free_excluded_extents(info->extent_root, block_group);
9576 btrfs_remove_free_space_cache(block_group);
9577 btrfs_put_block_group(block_group);
9579 spin_lock(&info->block_group_cache_lock);
9581 spin_unlock(&info->block_group_cache_lock);
9583 /* now that all the block groups are freed, go through and
9584 * free all the space_info structs. This is only called during
9585 * the final stages of unmount, and so we know nobody is
9586 * using them. We call synchronize_rcu() once before we start,
9587 * just to be on the safe side.
9591 release_global_block_rsv(info);
9593 while (!list_empty(&info->space_info)) {
9596 space_info = list_entry(info->space_info.next,
9597 struct btrfs_space_info,
9599 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9600 if (WARN_ON(space_info->bytes_pinned > 0 ||
9601 space_info->bytes_reserved > 0 ||
9602 space_info->bytes_may_use > 0)) {
9603 dump_space_info(space_info, 0, 0);
9606 list_del(&space_info->list);
9607 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9608 struct kobject *kobj;
9609 kobj = space_info->block_group_kobjs[i];
9610 space_info->block_group_kobjs[i] = NULL;
9616 kobject_del(&space_info->kobj);
9617 kobject_put(&space_info->kobj);
9622 static void __link_block_group(struct btrfs_space_info *space_info,
9623 struct btrfs_block_group_cache *cache)
9625 int index = get_block_group_index(cache);
9628 down_write(&space_info->groups_sem);
9629 if (list_empty(&space_info->block_groups[index]))
9631 list_add_tail(&cache->list, &space_info->block_groups[index]);
9632 up_write(&space_info->groups_sem);
9635 struct raid_kobject *rkobj;
9638 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9641 rkobj->raid_type = index;
9642 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9643 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9644 "%s", get_raid_name(index));
9646 kobject_put(&rkobj->kobj);
9649 space_info->block_group_kobjs[index] = &rkobj->kobj;
9654 pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9657 static struct btrfs_block_group_cache *
9658 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9660 struct btrfs_block_group_cache *cache;
9662 cache = kzalloc(sizeof(*cache), GFP_NOFS);
9666 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9668 if (!cache->free_space_ctl) {
9673 cache->key.objectid = start;
9674 cache->key.offset = size;
9675 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9677 cache->sectorsize = root->sectorsize;
9678 cache->fs_info = root->fs_info;
9679 cache->full_stripe_len = btrfs_full_stripe_len(root,
9680 &root->fs_info->mapping_tree,
9682 atomic_set(&cache->count, 1);
9683 spin_lock_init(&cache->lock);
9684 init_rwsem(&cache->data_rwsem);
9685 INIT_LIST_HEAD(&cache->list);
9686 INIT_LIST_HEAD(&cache->cluster_list);
9687 INIT_LIST_HEAD(&cache->bg_list);
9688 INIT_LIST_HEAD(&cache->ro_list);
9689 INIT_LIST_HEAD(&cache->dirty_list);
9690 INIT_LIST_HEAD(&cache->io_list);
9691 btrfs_init_free_space_ctl(cache);
9692 atomic_set(&cache->trimming, 0);
9697 int btrfs_read_block_groups(struct btrfs_root *root)
9699 struct btrfs_path *path;
9701 struct btrfs_block_group_cache *cache;
9702 struct btrfs_fs_info *info = root->fs_info;
9703 struct btrfs_space_info *space_info;
9704 struct btrfs_key key;
9705 struct btrfs_key found_key;
9706 struct extent_buffer *leaf;
9712 feature = btrfs_super_incompat_flags(info->super_copy);
9713 mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
9715 root = info->extent_root;
9718 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9719 path = btrfs_alloc_path();
9724 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9725 if (btrfs_test_opt(root, SPACE_CACHE) &&
9726 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9728 if (btrfs_test_opt(root, CLEAR_CACHE))
9732 ret = find_first_block_group(root, path, &key);
9738 leaf = path->nodes[0];
9739 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9741 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9750 * When we mount with old space cache, we need to
9751 * set BTRFS_DC_CLEAR and set dirty flag.
9753 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9754 * truncate the old free space cache inode and
9756 * b) Setting 'dirty flag' makes sure that we flush
9757 * the new space cache info onto disk.
9759 if (btrfs_test_opt(root, SPACE_CACHE))
9760 cache->disk_cache_state = BTRFS_DC_CLEAR;
9763 read_extent_buffer(leaf, &cache->item,
9764 btrfs_item_ptr_offset(leaf, path->slots[0]),
9765 sizeof(cache->item));
9766 cache->flags = btrfs_block_group_flags(&cache->item);
9768 ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
9769 (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
9771 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
9772 cache->key.objectid);
9777 key.objectid = found_key.objectid + found_key.offset;
9778 btrfs_release_path(path);
9781 * We need to exclude the super stripes now so that the space
9782 * info has super bytes accounted for, otherwise we'll think
9783 * we have more space than we actually do.
9785 ret = exclude_super_stripes(root, cache);
9788 * We may have excluded something, so call this just in
9791 free_excluded_extents(root, cache);
9792 btrfs_put_block_group(cache);
9797 * check for two cases, either we are full, and therefore
9798 * don't need to bother with the caching work since we won't
9799 * find any space, or we are empty, and we can just add all
9800 * the space in and be done with it. This saves us a lot of
9801 * time, particularly in the full case.
9803 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9804 cache->last_byte_to_unpin = (u64)-1;
9805 cache->cached = BTRFS_CACHE_FINISHED;
9806 free_excluded_extents(root, cache);
9807 } else if (btrfs_block_group_used(&cache->item) == 0) {
9808 cache->last_byte_to_unpin = (u64)-1;
9809 cache->cached = BTRFS_CACHE_FINISHED;
9810 add_new_free_space(cache, root->fs_info,
9812 found_key.objectid +
9814 free_excluded_extents(root, cache);
9817 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9819 btrfs_remove_free_space_cache(cache);
9820 btrfs_put_block_group(cache);
9824 ret = update_space_info(info, cache->flags, found_key.offset,
9825 btrfs_block_group_used(&cache->item),
9828 btrfs_remove_free_space_cache(cache);
9829 spin_lock(&info->block_group_cache_lock);
9830 rb_erase(&cache->cache_node,
9831 &info->block_group_cache_tree);
9832 RB_CLEAR_NODE(&cache->cache_node);
9833 spin_unlock(&info->block_group_cache_lock);
9834 btrfs_put_block_group(cache);
9838 cache->space_info = space_info;
9839 spin_lock(&cache->space_info->lock);
9840 cache->space_info->bytes_readonly += cache->bytes_super;
9841 spin_unlock(&cache->space_info->lock);
9843 __link_block_group(space_info, cache);
9845 set_avail_alloc_bits(root->fs_info, cache->flags);
9846 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9847 inc_block_group_ro(cache, 1);
9848 } else if (btrfs_block_group_used(&cache->item) == 0) {
9849 spin_lock(&info->unused_bgs_lock);
9850 /* Should always be true but just in case. */
9851 if (list_empty(&cache->bg_list)) {
9852 btrfs_get_block_group(cache);
9853 list_add_tail(&cache->bg_list,
9856 spin_unlock(&info->unused_bgs_lock);
9860 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9861 if (!(get_alloc_profile(root, space_info->flags) &
9862 (BTRFS_BLOCK_GROUP_RAID10 |
9863 BTRFS_BLOCK_GROUP_RAID1 |
9864 BTRFS_BLOCK_GROUP_RAID5 |
9865 BTRFS_BLOCK_GROUP_RAID6 |
9866 BTRFS_BLOCK_GROUP_DUP)))
9869 * avoid allocating from un-mirrored block group if there are
9870 * mirrored block groups.
9872 list_for_each_entry(cache,
9873 &space_info->block_groups[BTRFS_RAID_RAID0],
9875 inc_block_group_ro(cache, 1);
9876 list_for_each_entry(cache,
9877 &space_info->block_groups[BTRFS_RAID_SINGLE],
9879 inc_block_group_ro(cache, 1);
9882 init_global_block_rsv(info);
9885 btrfs_free_path(path);
9889 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9890 struct btrfs_root *root)
9892 struct btrfs_block_group_cache *block_group, *tmp;
9893 struct btrfs_root *extent_root = root->fs_info->extent_root;
9894 struct btrfs_block_group_item item;
9895 struct btrfs_key key;
9897 bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9899 trans->can_flush_pending_bgs = false;
9900 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9904 spin_lock(&block_group->lock);
9905 memcpy(&item, &block_group->item, sizeof(item));
9906 memcpy(&key, &block_group->key, sizeof(key));
9907 spin_unlock(&block_group->lock);
9909 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9912 btrfs_abort_transaction(trans, extent_root, ret);
9913 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9914 key.objectid, key.offset);
9916 btrfs_abort_transaction(trans, extent_root, ret);
9918 list_del_init(&block_group->bg_list);
9920 trans->can_flush_pending_bgs = can_flush_pending_bgs;
9923 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9924 struct btrfs_root *root, u64 bytes_used,
9925 u64 type, u64 chunk_objectid, u64 chunk_offset,
9929 struct btrfs_root *extent_root;
9930 struct btrfs_block_group_cache *cache;
9932 extent_root = root->fs_info->extent_root;
9934 btrfs_set_log_full_commit(root->fs_info, trans);
9936 cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9940 btrfs_set_block_group_used(&cache->item, bytes_used);
9941 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9942 btrfs_set_block_group_flags(&cache->item, type);
9944 cache->flags = type;
9945 cache->last_byte_to_unpin = (u64)-1;
9946 cache->cached = BTRFS_CACHE_FINISHED;
9947 ret = exclude_super_stripes(root, cache);
9950 * We may have excluded something, so call this just in
9953 free_excluded_extents(root, cache);
9954 btrfs_put_block_group(cache);
9958 add_new_free_space(cache, root->fs_info, chunk_offset,
9959 chunk_offset + size);
9961 free_excluded_extents(root, cache);
9963 #ifdef CONFIG_BTRFS_DEBUG
9964 if (btrfs_should_fragment_free_space(root, cache)) {
9965 u64 new_bytes_used = size - bytes_used;
9967 bytes_used += new_bytes_used >> 1;
9968 fragment_free_space(root, cache);
9972 * Call to ensure the corresponding space_info object is created and
9973 * assigned to our block group, but don't update its counters just yet.
9974 * We want our bg to be added to the rbtree with its ->space_info set.
9976 ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9977 &cache->space_info);
9979 btrfs_remove_free_space_cache(cache);
9980 btrfs_put_block_group(cache);
9984 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9986 btrfs_remove_free_space_cache(cache);
9987 btrfs_put_block_group(cache);
9992 * Now that our block group has its ->space_info set and is inserted in
9993 * the rbtree, update the space info's counters.
9995 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9996 &cache->space_info);
9998 btrfs_remove_free_space_cache(cache);
9999 spin_lock(&root->fs_info->block_group_cache_lock);
10000 rb_erase(&cache->cache_node,
10001 &root->fs_info->block_group_cache_tree);
10002 RB_CLEAR_NODE(&cache->cache_node);
10003 spin_unlock(&root->fs_info->block_group_cache_lock);
10004 btrfs_put_block_group(cache);
10007 update_global_block_rsv(root->fs_info);
10009 spin_lock(&cache->space_info->lock);
10010 cache->space_info->bytes_readonly += cache->bytes_super;
10011 spin_unlock(&cache->space_info->lock);
10013 __link_block_group(cache->space_info, cache);
10015 list_add_tail(&cache->bg_list, &trans->new_bgs);
10017 set_avail_alloc_bits(extent_root->fs_info, type);
10022 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10024 u64 extra_flags = chunk_to_extended(flags) &
10025 BTRFS_EXTENDED_PROFILE_MASK;
10027 write_seqlock(&fs_info->profiles_lock);
10028 if (flags & BTRFS_BLOCK_GROUP_DATA)
10029 fs_info->avail_data_alloc_bits &= ~extra_flags;
10030 if (flags & BTRFS_BLOCK_GROUP_METADATA)
10031 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10032 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10033 fs_info->avail_system_alloc_bits &= ~extra_flags;
10034 write_sequnlock(&fs_info->profiles_lock);
10037 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10038 struct btrfs_root *root, u64 group_start,
10039 struct extent_map *em)
10041 struct btrfs_path *path;
10042 struct btrfs_block_group_cache *block_group;
10043 struct btrfs_free_cluster *cluster;
10044 struct btrfs_root *tree_root = root->fs_info->tree_root;
10045 struct btrfs_key key;
10046 struct inode *inode;
10047 struct kobject *kobj = NULL;
10051 struct btrfs_caching_control *caching_ctl = NULL;
10054 root = root->fs_info->extent_root;
10056 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
10057 BUG_ON(!block_group);
10058 BUG_ON(!block_group->ro);
10061 * Free the reserved super bytes from this block group before
10064 free_excluded_extents(root, block_group);
10066 memcpy(&key, &block_group->key, sizeof(key));
10067 index = get_block_group_index(block_group);
10068 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10069 BTRFS_BLOCK_GROUP_RAID1 |
10070 BTRFS_BLOCK_GROUP_RAID10))
10075 /* make sure this block group isn't part of an allocation cluster */
10076 cluster = &root->fs_info->data_alloc_cluster;
10077 spin_lock(&cluster->refill_lock);
10078 btrfs_return_cluster_to_free_space(block_group, cluster);
10079 spin_unlock(&cluster->refill_lock);
10082 * make sure this block group isn't part of a metadata
10083 * allocation cluster
10085 cluster = &root->fs_info->meta_alloc_cluster;
10086 spin_lock(&cluster->refill_lock);
10087 btrfs_return_cluster_to_free_space(block_group, cluster);
10088 spin_unlock(&cluster->refill_lock);
10090 path = btrfs_alloc_path();
10097 * get the inode first so any iput calls done for the io_list
10098 * aren't the final iput (no unlinks allowed now)
10100 inode = lookup_free_space_inode(tree_root, block_group, path);
10102 mutex_lock(&trans->transaction->cache_write_mutex);
10104 * make sure our free space cache IO is done before removing the
10107 spin_lock(&trans->transaction->dirty_bgs_lock);
10108 if (!list_empty(&block_group->io_list)) {
10109 list_del_init(&block_group->io_list);
10111 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10113 spin_unlock(&trans->transaction->dirty_bgs_lock);
10114 btrfs_wait_cache_io(root, trans, block_group,
10115 &block_group->io_ctl, path,
10116 block_group->key.objectid);
10117 btrfs_put_block_group(block_group);
10118 spin_lock(&trans->transaction->dirty_bgs_lock);
10121 if (!list_empty(&block_group->dirty_list)) {
10122 list_del_init(&block_group->dirty_list);
10123 btrfs_put_block_group(block_group);
10125 spin_unlock(&trans->transaction->dirty_bgs_lock);
10126 mutex_unlock(&trans->transaction->cache_write_mutex);
10128 if (!IS_ERR(inode)) {
10129 ret = btrfs_orphan_add(trans, inode);
10131 btrfs_add_delayed_iput(inode);
10134 clear_nlink(inode);
10135 /* One for the block group's ref */
10136 spin_lock(&block_group->lock);
10137 if (block_group->iref) {
10138 block_group->iref = 0;
10139 block_group->inode = NULL;
10140 spin_unlock(&block_group->lock);
10143 spin_unlock(&block_group->lock);
10145 /* One for our lookup ref */
10146 btrfs_add_delayed_iput(inode);
10149 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10150 key.offset = block_group->key.objectid;
10153 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10157 btrfs_release_path(path);
10159 ret = btrfs_del_item(trans, tree_root, path);
10162 btrfs_release_path(path);
10165 spin_lock(&root->fs_info->block_group_cache_lock);
10166 rb_erase(&block_group->cache_node,
10167 &root->fs_info->block_group_cache_tree);
10168 RB_CLEAR_NODE(&block_group->cache_node);
10170 if (root->fs_info->first_logical_byte == block_group->key.objectid)
10171 root->fs_info->first_logical_byte = (u64)-1;
10172 spin_unlock(&root->fs_info->block_group_cache_lock);
10174 down_write(&block_group->space_info->groups_sem);
10176 * we must use list_del_init so people can check to see if they
10177 * are still on the list after taking the semaphore
10179 list_del_init(&block_group->list);
10180 if (list_empty(&block_group->space_info->block_groups[index])) {
10181 kobj = block_group->space_info->block_group_kobjs[index];
10182 block_group->space_info->block_group_kobjs[index] = NULL;
10183 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10185 up_write(&block_group->space_info->groups_sem);
10191 if (block_group->has_caching_ctl)
10192 caching_ctl = get_caching_control(block_group);
10193 if (block_group->cached == BTRFS_CACHE_STARTED)
10194 wait_block_group_cache_done(block_group);
10195 if (block_group->has_caching_ctl) {
10196 down_write(&root->fs_info->commit_root_sem);
10197 if (!caching_ctl) {
10198 struct btrfs_caching_control *ctl;
10200 list_for_each_entry(ctl,
10201 &root->fs_info->caching_block_groups, list)
10202 if (ctl->block_group == block_group) {
10204 atomic_inc(&caching_ctl->count);
10209 list_del_init(&caching_ctl->list);
10210 up_write(&root->fs_info->commit_root_sem);
10212 /* Once for the caching bgs list and once for us. */
10213 put_caching_control(caching_ctl);
10214 put_caching_control(caching_ctl);
10218 spin_lock(&trans->transaction->dirty_bgs_lock);
10219 if (!list_empty(&block_group->dirty_list)) {
10222 if (!list_empty(&block_group->io_list)) {
10225 spin_unlock(&trans->transaction->dirty_bgs_lock);
10226 btrfs_remove_free_space_cache(block_group);
10228 spin_lock(&block_group->space_info->lock);
10229 list_del_init(&block_group->ro_list);
10231 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
10232 WARN_ON(block_group->space_info->total_bytes
10233 < block_group->key.offset);
10234 WARN_ON(block_group->space_info->bytes_readonly
10235 < block_group->key.offset);
10236 WARN_ON(block_group->space_info->disk_total
10237 < block_group->key.offset * factor);
10239 block_group->space_info->total_bytes -= block_group->key.offset;
10240 block_group->space_info->bytes_readonly -= block_group->key.offset;
10241 block_group->space_info->disk_total -= block_group->key.offset * factor;
10243 spin_unlock(&block_group->space_info->lock);
10245 memcpy(&key, &block_group->key, sizeof(key));
10248 if (!list_empty(&em->list)) {
10249 /* We're in the transaction->pending_chunks list. */
10250 free_extent_map(em);
10252 spin_lock(&block_group->lock);
10253 block_group->removed = 1;
10255 * At this point trimming can't start on this block group, because we
10256 * removed the block group from the tree fs_info->block_group_cache_tree
10257 * so no one can find it anymore, and even if someone already got this
10258 * block group before we removed it from the rbtree, they have already
10259 * incremented block_group->trimming - if they didn't, they won't find
10260 * any free space entries because we already removed them all when we
10261 * called btrfs_remove_free_space_cache().
10263 * And we must not remove the extent map from the fs_info->mapping_tree
10264 * to prevent the same logical address range and physical device space
10265 * ranges from being reused for a new block group. This is because our
10266 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10267 * completely transactionless, so while it is trimming a range the
10268 * currently running transaction might finish and a new one start,
10269 * allowing for new block groups to be created that can reuse the same
10270 * physical device locations unless we take this special care.
10272 * There may also be an implicit trim operation if the file system
10273 * is mounted with -odiscard. The same protections must remain
10274 * in place until the extents have been discarded completely when
10275 * the transaction commit has completed.
10277 remove_em = (atomic_read(&block_group->trimming) == 0);
10279 * Make sure a trimmer task always sees the em in the pinned_chunks list
10280 * if it sees block_group->removed == 1 (needs to lock block_group->lock
10281 * before checking block_group->removed).
10285 * Our em might be in trans->transaction->pending_chunks which
10286 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10287 * and so is the fs_info->pinned_chunks list.
10289 * So at this point we must be holding the chunk_mutex to avoid
10290 * any races with chunk allocation (more specifically at
10291 * volumes.c:contains_pending_extent()), to ensure it always
10292 * sees the em, either in the pending_chunks list or in the
10293 * pinned_chunks list.
10295 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10297 spin_unlock(&block_group->lock);
10300 struct extent_map_tree *em_tree;
10302 em_tree = &root->fs_info->mapping_tree.map_tree;
10303 write_lock(&em_tree->lock);
10305 * The em might be in the pending_chunks list, so make sure the
10306 * chunk mutex is locked, since remove_extent_mapping() will
10307 * delete us from that list.
10309 remove_extent_mapping(em_tree, em);
10310 write_unlock(&em_tree->lock);
10311 /* once for the tree */
10312 free_extent_map(em);
10315 unlock_chunks(root);
10317 btrfs_put_block_group(block_group);
10318 btrfs_put_block_group(block_group);
10320 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10326 ret = btrfs_del_item(trans, root, path);
10328 btrfs_free_path(path);
10332 struct btrfs_trans_handle *
10333 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10334 const u64 chunk_offset)
10336 struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10337 struct extent_map *em;
10338 struct map_lookup *map;
10339 unsigned int num_items;
10341 read_lock(&em_tree->lock);
10342 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10343 read_unlock(&em_tree->lock);
10344 ASSERT(em && em->start == chunk_offset);
10347 * We need to reserve 3 + N units from the metadata space info in order
10348 * to remove a block group (done at btrfs_remove_chunk() and at
10349 * btrfs_remove_block_group()), which are used for:
10351 * 1 unit for adding the free space inode's orphan (located in the tree
10353 * 1 unit for deleting the block group item (located in the extent
10355 * 1 unit for deleting the free space item (located in tree of tree
10357 * N units for deleting N device extent items corresponding to each
10358 * stripe (located in the device tree).
10360 * In order to remove a block group we also need to reserve units in the
10361 * system space info in order to update the chunk tree (update one or
10362 * more device items and remove one chunk item), but this is done at
10363 * btrfs_remove_chunk() through a call to check_system_chunk().
10365 map = (struct map_lookup *)em->bdev;
10366 num_items = 3 + map->num_stripes;
10367 free_extent_map(em);
10369 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
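/*
 * Sketch (not part of the btrfs code) of the reservation sizing
 * described above: 3 metadata units (free space inode orphan, block
 * group item, free space item) plus one unit per device extent item,
 * i.e. one per stripe of the chunk being removed.
 */
#if 0	/* example only */
static unsigned int ex_remove_bg_items(unsigned int num_stripes)
{
	return 3 + num_stripes;	/* e.g. a two-stripe RAID1 chunk needs 5 */
}
#endif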
10374 * Process the unused_bgs list and remove any that don't have any allocated
10375 * space inside of them.
10377 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10379 struct btrfs_block_group_cache *block_group;
10380 struct btrfs_space_info *space_info;
10381 struct btrfs_root *root = fs_info->extent_root;
10382 struct btrfs_trans_handle *trans;
10385 if (!fs_info->open)
10388 spin_lock(&fs_info->unused_bgs_lock);
10389 while (!list_empty(&fs_info->unused_bgs)) {
10393 block_group = list_first_entry(&fs_info->unused_bgs,
10394 struct btrfs_block_group_cache,
10396 list_del_init(&block_group->bg_list);
10398 space_info = block_group->space_info;
10400 if (ret || btrfs_mixed_space_info(space_info)) {
10401 btrfs_put_block_group(block_group);
10404 spin_unlock(&fs_info->unused_bgs_lock);
10406 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10408 /* Don't want to race with allocators so take the groups_sem */
10409 down_write(&space_info->groups_sem);
10410 spin_lock(&block_group->lock);
10411 if (block_group->reserved ||
10412 btrfs_block_group_used(&block_group->item) ||
10414 list_is_singular(&block_group->list)) {
10416 * We want to bail if we made new allocations or have
10417 * outstanding allocations in this block group. We do
10418 * the ro check in case balance is currently acting on
10419 * this block group.
10421 spin_unlock(&block_group->lock);
10422 up_write(&space_info->groups_sem);
10425 spin_unlock(&block_group->lock);
10427 /* We don't want to force the issue, only flip if it's ok. */
10428 ret = inc_block_group_ro(block_group, 0);
10429 up_write(&space_info->groups_sem);
10436 * Want to do this before we do anything else so we can recover
10437 * properly if we fail to join the transaction.
10439 trans = btrfs_start_trans_remove_block_group(fs_info,
10440 block_group->key.objectid);
10441 if (IS_ERR(trans)) {
10442 btrfs_dec_block_group_ro(root, block_group);
10443 ret = PTR_ERR(trans);
10448 * We could have pending pinned extents for this block group,
10449 * just delete them, we don't care about them anymore.
10451 start = block_group->key.objectid;
10452 end = start + block_group->key.offset - 1;
10454 * Hold the unused_bg_unpin_mutex lock to avoid racing with
10455 * btrfs_finish_extent_commit(). If we are at transaction N,
10456 * another task might be running finish_extent_commit() for the
10457 * previous transaction N - 1, and have seen a range belonging
10458 * to the block group in freed_extents[] before we were able to
10459 * clear the whole block group range from freed_extents[]. This
10460 * means that task can look up the block group after we
10461 * unpinned it from freed_extents[] and removed it, leading to
10462 * a BUG_ON() at btrfs_unpin_extent_range().
10464 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10465 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10466 EXTENT_DIRTY, GFP_NOFS);
10468 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10469 btrfs_dec_block_group_ro(root, block_group);
10472 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10473 EXTENT_DIRTY, GFP_NOFS);
10475 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10476 btrfs_dec_block_group_ro(root, block_group);
10479 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10481 /* Reset pinned so btrfs_put_block_group doesn't complain */
10482 spin_lock(&space_info->lock);
10483 spin_lock(&block_group->lock);
10485 space_info->bytes_pinned -= block_group->pinned;
10486 space_info->bytes_readonly += block_group->pinned;
10487 percpu_counter_add(&space_info->total_bytes_pinned,
10488 -block_group->pinned);
10489 block_group->pinned = 0;
10491 spin_unlock(&block_group->lock);
10492 spin_unlock(&space_info->lock);
10494 /* DISCARD can flip during remount */
10495 trimming = btrfs_test_opt(root, DISCARD);
10497 /* Implicit trim during transaction commit. */
10499 btrfs_get_block_group_trimming(block_group);
10502 * btrfs_remove_chunk will abort the transaction if things go
10505 ret = btrfs_remove_chunk(trans, root,
10506 block_group->key.objectid);
10510 btrfs_put_block_group_trimming(block_group);
10515 * If we're not mounted with -odiscard, we can just forget
10516 * about this block group. Otherwise we'll need to wait
10517 * until transaction commit to do the actual discard.
10520 spin_lock(&fs_info->unused_bgs_lock);
10522 * A concurrent scrub might have added us to the list
10523 * fs_info->unused_bgs, so use a list_move operation
10524 * to add the block group to the deleted_bgs list.
10526 list_move(&block_group->bg_list,
10527 &trans->transaction->deleted_bgs);
10528 spin_unlock(&fs_info->unused_bgs_lock);
10529 btrfs_get_block_group(block_group);
10532 btrfs_end_transaction(trans, root);
10534 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10535 btrfs_put_block_group(block_group);
10536 spin_lock(&fs_info->unused_bgs_lock);
10538 spin_unlock(&fs_info->unused_bgs_lock);
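/*
 * Sketch (not part of the btrfs code) of the test made above under
 * block_group->lock: a group only stays a deletion candidate if it has
 * no reserved space, no used bytes, is not read-only and is not the
 * only block group of its kind in the space_info.  Field names are
 * simplified stand-ins.
 */
#if 0	/* example only */
struct ex_bg_state {
	unsigned long long reserved, used;
	int ro, only_group_of_its_kind;
};

static int ex_still_unused(const struct ex_bg_state *bg)
{
	return !bg->reserved && !bg->used && !bg->ro &&
	       !bg->only_group_of_its_kind;
}
#endif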
10541 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10543 struct btrfs_space_info *space_info;
10544 struct btrfs_super_block *disk_super;
10550 disk_super = fs_info->super_copy;
10551 if (!btrfs_super_root(disk_super))
10554 features = btrfs_super_incompat_flags(disk_super);
10555 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10558 flags = BTRFS_BLOCK_GROUP_SYSTEM;
10559 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10564 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10565 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10567 flags = BTRFS_BLOCK_GROUP_METADATA;
10568 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10572 flags = BTRFS_BLOCK_GROUP_DATA;
10573 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10579 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10581 return unpin_extent_range(root, start, end, false);
10585 * It used to be that old block groups would be left around forever.
10586 * Iterating over them would be enough to trim unused space. Since we
10587 * now automatically remove them, we also need to iterate over unallocated
10590 * We don't want a transaction for this since the discard may take a
10591 * substantial amount of time. We don't require that a transaction be
10592 * running, but we do need to take a running transaction into account
10593 * to ensure that we're not discarding chunks that were released in
10594 * the current transaction.
10596 * Holding the chunks lock will prevent other threads from allocating
10597 * or releasing chunks, but it won't prevent a running transaction
10598 * from committing and releasing the memory that the pending chunks
10599 * list head uses. For that, we need to take a reference to the
10602 static int btrfs_trim_free_extents(struct btrfs_device *device,
10603 u64 minlen, u64 *trimmed)
10605 u64 start = 0, len = 0;
10610 /* Not writeable = nothing to do. */
10611 if (!device->writeable)
10614 /* No free space = nothing to do. */
10615 if (device->total_bytes <= device->bytes_used)
10621 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10622 struct btrfs_transaction *trans;
10625 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10629 down_read(&fs_info->commit_root_sem);
10631 spin_lock(&fs_info->trans_lock);
10632 trans = fs_info->running_transaction;
10634 atomic_inc(&trans->use_count);
10635 spin_unlock(&fs_info->trans_lock);
10637 ret = find_free_dev_extent_start(trans, device, minlen, start,
10640 btrfs_put_transaction(trans);
10643 up_read(&fs_info->commit_root_sem);
10644 mutex_unlock(&fs_info->chunk_mutex);
10645 if (ret == -ENOSPC)
10650 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10651 up_read(&fs_info->commit_root_sem);
10652 mutex_unlock(&fs_info->chunk_mutex);
10660 if (fatal_signal_pending(current)) {
10661 ret = -ERESTARTSYS;
10671 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10673 struct btrfs_fs_info *fs_info = root->fs_info;
10674 struct btrfs_block_group_cache *cache = NULL;
10675 struct btrfs_device *device;
10676 struct list_head *devices;
10681 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10685 * try to trim all FS space; our first block group may start at a non-zero offset.
10687 if (range->len == total_bytes)
10688 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10690 cache = btrfs_lookup_block_group(fs_info, range->start);
10693 if (cache->key.objectid >= (range->start + range->len)) {
10694 btrfs_put_block_group(cache);
10698 start = max(range->start, cache->key.objectid);
10699 end = min(range->start + range->len,
10700 cache->key.objectid + cache->key.offset);
10702 if (end - start >= range->minlen) {
10703 if (!block_group_cache_done(cache)) {
10704 ret = cache_block_group(cache, 0);
10706 btrfs_put_block_group(cache);
10709 ret = wait_block_group_cache_done(cache);
10711 btrfs_put_block_group(cache);
10715 ret = btrfs_trim_block_group(cache,
10721 trimmed += group_trimmed;
10723 btrfs_put_block_group(cache);
10728 cache = next_block_group(fs_info->tree_root, cache);
10731 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10732 devices = &root->fs_info->fs_devices->alloc_list;
10733 list_for_each_entry(device, devices, dev_alloc_list) {
10734 ret = btrfs_trim_free_extents(device, range->minlen,
10739 trimmed += group_trimmed;
10741 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10743 range->len = trimmed;
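/*
 * Sketch (not part of the btrfs code) of the per-block-group clamping
 * done above: the requested trim range is intersected with the block
 * group's [objectid, objectid + offset) range and the group is only
 * trimmed when the intersection is at least minlen bytes long.  Plain
 * integers stand in for the fstrim_range and block group key.
 */
#if 0	/* example only */
static int ex_trim_window(unsigned long long range_start,
			  unsigned long long range_len,
			  unsigned long long bg_start,
			  unsigned long long bg_len,
			  unsigned long long minlen,
			  unsigned long long *start,
			  unsigned long long *end)
{
	*start = range_start > bg_start ? range_start : bg_start;
	*end = range_start + range_len < bg_start + bg_len ?
	       range_start + range_len : bg_start + bg_len;

	return *end > *start && *end - *start >= minlen;
}
#endif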
10748 * btrfs_{start,end}_write_no_snapshoting() are similar to
10749 * mnt_{want,drop}_write(). They are used to prevent some tasks from writing
10750 * data into the page cache through nocow before the subvolume is snapshotted,
10751 * while still flushing that data to disk after the snapshot creation, and to
10752 * prevent operations while snapshotting is ongoing that would cause the
10753 * snapshot to be inconsistent (e.g. writes followed by expanding truncates).
10755 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10757 percpu_counter_dec(&root->subv_writers->counter);
10759 * Make sure counter is updated before we wake up waiters.
10762 if (waitqueue_active(&root->subv_writers->wait))
10763 wake_up(&root->subv_writers->wait);
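/*
 * Sketch (not part of the btrfs code) of the writer-vs-snapshot
 * exclusion pattern used by btrfs_{start,end}_write_no_snapshoting():
 * a writer publishes itself on a counter and re-checks the "snapshot
 * pending" flag, while the snapshot side sets the flag and then waits
 * for the counter to drain.  C11 atomics stand in for the percpu
 * counter, barriers and wait queue of the real code.
 */
#if 0	/* example only */
#include <stdatomic.h>

static atomic_int ex_writers;
static atomic_int ex_snapshot_pending;

static int ex_start_write(void)
{
	if (atomic_load(&ex_snapshot_pending))
		return 0;			/* don't start nocow writes */
	atomic_fetch_add(&ex_writers, 1);	/* publish before re-checking */
	if (atomic_load(&ex_snapshot_pending)) {
		atomic_fetch_sub(&ex_writers, 1);
		return 0;			/* snapshot raced in, back off */
	}
	return 1;
}

static void ex_end_write(void)
{
	atomic_fetch_sub(&ex_writers, 1);	/* snapshot side polls this */
}
#endif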
10766 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10768 if (atomic_read(&root->will_be_snapshoted))
10771 percpu_counter_inc(&root->subv_writers->counter);
10773 * Make sure counter is updated before we check for snapshot creation.
10776 if (atomic_read(&root->will_be_snapshoted)) {
10777 btrfs_end_write_no_snapshoting(root);