Kernel bump from 4.1.3-rt to 4.1.7-rt.
[kvmfornfv.git] / kernel / fs / btrfs / transaction.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include <linux/uuid.h>
26 #include "ctree.h"
27 #include "disk-io.h"
28 #include "transaction.h"
29 #include "locking.h"
30 #include "tree-log.h"
31 #include "inode-map.h"
32 #include "volumes.h"
33 #include "dev-replace.h"
34 #include "qgroup.h"
35
36 #define BTRFS_ROOT_TRANS_TAG 0
37
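/*
 * For each transaction state, this table lists the handle types that are
 * not allowed to join the running transaction while it is in that state.
 * join_transaction() returns -EBUSY for a blocked type, which makes
 * start_transaction() wait for the transaction to unblock and retry.
 */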
38 static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
39         [TRANS_STATE_RUNNING]           = 0U,
40         [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
41                                            __TRANS_START),
42         [TRANS_STATE_COMMIT_START]      = (__TRANS_USERSPACE |
43                                            __TRANS_START |
44                                            __TRANS_ATTACH),
45         [TRANS_STATE_COMMIT_DOING]      = (__TRANS_USERSPACE |
46                                            __TRANS_START |
47                                            __TRANS_ATTACH |
48                                            __TRANS_JOIN),
49         [TRANS_STATE_UNBLOCKED]         = (__TRANS_USERSPACE |
50                                            __TRANS_START |
51                                            __TRANS_ATTACH |
52                                            __TRANS_JOIN |
53                                            __TRANS_JOIN_NOLOCK),
54         [TRANS_STATE_COMPLETED]         = (__TRANS_USERSPACE |
55                                            __TRANS_START |
56                                            __TRANS_ATTACH |
57                                            __TRANS_JOIN |
58                                            __TRANS_JOIN_NOLOCK),
59 };
60
61 void btrfs_put_transaction(struct btrfs_transaction *transaction)
62 {
63         WARN_ON(atomic_read(&transaction->use_count) == 0);
64         if (atomic_dec_and_test(&transaction->use_count)) {
65                 BUG_ON(!list_empty(&transaction->list));
66                 WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
67                 if (transaction->delayed_refs.pending_csums)
68                         printk(KERN_ERR "pending csums is %llu\n",
69                                transaction->delayed_refs.pending_csums);
70                 while (!list_empty(&transaction->pending_chunks)) {
71                         struct extent_map *em;
72
73                         em = list_first_entry(&transaction->pending_chunks,
74                                               struct extent_map, list);
75                         list_del_init(&em->list);
76                         free_extent_map(em);
77                 }
78                 kmem_cache_free(btrfs_transaction_cachep, transaction);
79         }
80 }
81
82 static void clear_btree_io_tree(struct extent_io_tree *tree)
83 {
84         spin_lock(&tree->lock);
85         while (!RB_EMPTY_ROOT(&tree->state)) {
86                 struct rb_node *node;
87                 struct extent_state *state;
88
89                 node = rb_first(&tree->state);
90                 state = rb_entry(node, struct extent_state, rb_node);
91                 rb_erase(&state->rb_node, &tree->state);
92                 RB_CLEAR_NODE(&state->rb_node);
93                 /*
94                  * btree io trees aren't supposed to have tasks waiting for
95                  * changes in the flags of extent states ever.
96                  */
97                 ASSERT(!waitqueue_active(&state->wq));
98                 free_extent_state(state);
99
100                 cond_resched_lock(&tree->lock);
101         }
102         spin_unlock(&tree->lock);
103 }
104
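/*
 * Swap in the new commit roots.  For every root queued on
 * trans->switch_commits the current root node becomes the commit root,
 * the old commit root is dropped, and any leftover state in the dirty
 * log pages io tree is cleared.  Runs under commit_root_sem so searchers
 * of the commit roots see a consistent switch.
 */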
105 static noinline void switch_commit_roots(struct btrfs_transaction *trans,
106                                          struct btrfs_fs_info *fs_info)
107 {
108         struct btrfs_root *root, *tmp;
109
110         down_write(&fs_info->commit_root_sem);
111         list_for_each_entry_safe(root, tmp, &trans->switch_commits,
112                                  dirty_list) {
113                 list_del_init(&root->dirty_list);
114                 free_extent_buffer(root->commit_root);
115                 root->commit_root = btrfs_root_node(root);
116                 if (is_fstree(root->objectid))
117                         btrfs_unpin_free_ino(root);
118                 clear_btree_io_tree(&root->dirty_log_pages);
119         }
120         up_write(&fs_info->commit_root_sem);
121 }
122
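/*
 * The extwriter counter only tracks handles of the TRANS_EXTWRITERS types
 * (start/attach/userspace), i.e. writers that entered from outside the
 * commit path.  The commit waits for this count to drop to zero while
 * still letting JOIN and JOIN_NOLOCK handles, which the commit itself
 * relies on, come and go.
 */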
123 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
124                                          unsigned int type)
125 {
126         if (type & TRANS_EXTWRITERS)
127                 atomic_inc(&trans->num_extwriters);
128 }
129
130 static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
131                                          unsigned int type)
132 {
133         if (type & TRANS_EXTWRITERS)
134                 atomic_dec(&trans->num_extwriters);
135 }
136
137 static inline void extwriter_counter_init(struct btrfs_transaction *trans,
138                                           unsigned int type)
139 {
140         atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
141 }
142
143 static inline int extwriter_counter_read(struct btrfs_transaction *trans)
144 {
145         return atomic_read(&trans->num_extwriters);
146 }
147
148 /*
149  * either allocate a new transaction or hop into the existing one
150  */
151 static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
152 {
153         struct btrfs_transaction *cur_trans;
154         struct btrfs_fs_info *fs_info = root->fs_info;
155
156         spin_lock(&fs_info->trans_lock);
157 loop:
158         /* The file system has been taken offline. No new transactions. */
159         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
160                 spin_unlock(&fs_info->trans_lock);
161                 return -EROFS;
162         }
163
164         cur_trans = fs_info->running_transaction;
165         if (cur_trans) {
166                 if (cur_trans->aborted) {
167                         spin_unlock(&fs_info->trans_lock);
168                         return cur_trans->aborted;
169                 }
170                 if (btrfs_blocked_trans_types[cur_trans->state] & type) {
171                         spin_unlock(&fs_info->trans_lock);
172                         return -EBUSY;
173                 }
174                 atomic_inc(&cur_trans->use_count);
175                 atomic_inc(&cur_trans->num_writers);
176                 extwriter_counter_inc(cur_trans, type);
177                 spin_unlock(&fs_info->trans_lock);
178                 return 0;
179         }
180         spin_unlock(&fs_info->trans_lock);
181
182         /*
183          * If we are ATTACH, we just want to catch the current transaction,
184          * and commit it. If there is no transaction, just return ENOENT.
185          */
186         if (type == TRANS_ATTACH)
187                 return -ENOENT;
188
189         /*
190          * JOIN_NOLOCK only happens during the transaction commit, so
191          * it is impossible that ->running_transaction is NULL
192          */
193         BUG_ON(type == TRANS_JOIN_NOLOCK);
194
195         cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
196         if (!cur_trans)
197                 return -ENOMEM;
198
199         spin_lock(&fs_info->trans_lock);
200         if (fs_info->running_transaction) {
201                 /*
202                  * someone started a transaction after we unlocked.  Make sure
203                  * to redo the checks above
204                  */
205                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
206                 goto loop;
207         } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
208                 spin_unlock(&fs_info->trans_lock);
209                 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
210                 return -EROFS;
211         }
212
213         atomic_set(&cur_trans->num_writers, 1);
214         extwriter_counter_init(cur_trans, type);
215         init_waitqueue_head(&cur_trans->writer_wait);
216         init_waitqueue_head(&cur_trans->commit_wait);
217         cur_trans->state = TRANS_STATE_RUNNING;
218         /*
219          * One for this trans handle, one so it will live on until we
220          * commit the transaction.
221          */
222         atomic_set(&cur_trans->use_count, 2);
223         cur_trans->have_free_bgs = 0;
224         cur_trans->start_time = get_seconds();
225         cur_trans->dirty_bg_run = 0;
226
227         cur_trans->delayed_refs.href_root = RB_ROOT;
228         atomic_set(&cur_trans->delayed_refs.num_entries, 0);
229         cur_trans->delayed_refs.num_heads_ready = 0;
230         cur_trans->delayed_refs.pending_csums = 0;
231         cur_trans->delayed_refs.num_heads = 0;
232         cur_trans->delayed_refs.flushing = 0;
233         cur_trans->delayed_refs.run_delayed_start = 0;
234
235         /*
236          * although the tree mod log is per file system and not per transaction,
237          * the log must never go across transaction boundaries.
238          */
239         smp_mb();
240         if (!list_empty(&fs_info->tree_mod_seq_list))
241                 WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
242                         "creating a fresh transaction\n");
243         if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
244                 WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
245                         "creating a fresh transaction\n");
246         atomic64_set(&fs_info->tree_mod_seq, 0);
247
248         spin_lock_init(&cur_trans->delayed_refs.lock);
249
250         INIT_LIST_HEAD(&cur_trans->pending_snapshots);
251         INIT_LIST_HEAD(&cur_trans->pending_chunks);
252         INIT_LIST_HEAD(&cur_trans->switch_commits);
253         INIT_LIST_HEAD(&cur_trans->pending_ordered);
254         INIT_LIST_HEAD(&cur_trans->dirty_bgs);
255         INIT_LIST_HEAD(&cur_trans->io_bgs);
256         mutex_init(&cur_trans->cache_write_mutex);
257         cur_trans->num_dirty_bgs = 0;
258         spin_lock_init(&cur_trans->dirty_bgs_lock);
259         list_add_tail(&cur_trans->list, &fs_info->trans_list);
260         extent_io_tree_init(&cur_trans->dirty_pages,
261                              fs_info->btree_inode->i_mapping);
262         fs_info->generation++;
263         cur_trans->transid = fs_info->generation;
264         fs_info->running_transaction = cur_trans;
265         cur_trans->aborted = 0;
266         spin_unlock(&fs_info->trans_lock);
267
268         return 0;
269 }
270
271 /*
272  * this does all the record keeping required to make sure that a reference
273  * counted root is properly recorded in a given transaction.  This is required
274  * to make sure the old root from before we joined the transaction is deleted
275  * when the transaction commits
276  */
277 static int record_root_in_trans(struct btrfs_trans_handle *trans,
278                                struct btrfs_root *root)
279 {
280         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
281             root->last_trans < trans->transid) {
282                 WARN_ON(root == root->fs_info->extent_root);
283                 WARN_ON(root->commit_root != root->node);
284
285                 /*
286                  * see below for IN_TRANS_SETUP usage rules
287                  * we have the reloc mutex held now, so there
288                  * is only one writer in this function
289                  */
290                 set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
291
292                 /* make sure readers find IN_TRANS_SETUP before
293                  * they find our root->last_trans update
294                  */
295                 smp_wmb();
296
297                 spin_lock(&root->fs_info->fs_roots_radix_lock);
298                 if (root->last_trans == trans->transid) {
299                         spin_unlock(&root->fs_info->fs_roots_radix_lock);
300                         return 0;
301                 }
302                 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
303                            (unsigned long)root->root_key.objectid,
304                            BTRFS_ROOT_TRANS_TAG);
305                 spin_unlock(&root->fs_info->fs_roots_radix_lock);
306                 root->last_trans = trans->transid;
307
308                 /* this is pretty tricky.  We don't want to
309                  * take the relocation lock in btrfs_record_root_in_trans
310                  * unless we're really doing the first setup for this root in
311                  * this transaction.
312                  *
313                  * Normally we'd use root->last_trans as a flag to decide
314                  * if we want to take the expensive mutex.
315                  *
316                  * But, we have to set root->last_trans before we
317                  * init the relocation root, otherwise, we trip over warnings
318                  * in ctree.c.  The solution used here is to flag ourselves
319                  * with root IN_TRANS_SETUP.  When this is 1, we're still
320                  * fixing up the reloc trees and everyone must wait.
321                  *
322                  * When this is zero, they can trust root->last_trans and fly
323                  * through btrfs_record_root_in_trans without having to take the
324                  * lock.  smp_wmb() makes sure that all the writes above are
325                  * done before we pop in the zero below
326                  */
327                 btrfs_init_reloc_root(trans, root);
328                 smp_mb__before_atomic();
329                 clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
330         }
331         return 0;
332 }
333
334
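/*
 * Lockless fast path around record_root_in_trans(): if this root is
 * already recorded in the running transaction and the IN_TRANS_SETUP
 * phase has finished, there is nothing to do.  Otherwise take the reloc
 * mutex and do the full setup.
 */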
335 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
336                                struct btrfs_root *root)
337 {
338         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
339                 return 0;
340
341         /*
342          * see record_root_in_trans for comments about IN_TRANS_SETUP usage
343          * and barriers
344          */
345         smp_rmb();
346         if (root->last_trans == trans->transid &&
347             !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
348                 return 0;
349
350         mutex_lock(&root->fs_info->reloc_mutex);
351         record_root_in_trans(trans, root);
352         mutex_unlock(&root->fs_info->reloc_mutex);
353
354         return 0;
355 }
356
357 static inline int is_transaction_blocked(struct btrfs_transaction *trans)
358 {
359         return (trans->state >= TRANS_STATE_BLOCKED &&
360                 trans->state < TRANS_STATE_UNBLOCKED &&
361                 !trans->aborted);
362 }
363
364 /* wait for commit against the current transaction to become unblocked.
365  * When this is done, it is safe to start a new transaction, but the current
366  * transaction might not be fully on disk.
367  */
368 static void wait_current_trans(struct btrfs_root *root)
369 {
370         struct btrfs_transaction *cur_trans;
371
372         spin_lock(&root->fs_info->trans_lock);
373         cur_trans = root->fs_info->running_transaction;
374         if (cur_trans && is_transaction_blocked(cur_trans)) {
375                 atomic_inc(&cur_trans->use_count);
376                 spin_unlock(&root->fs_info->trans_lock);
377
378                 wait_event(root->fs_info->transaction_wait,
379                            cur_trans->state >= TRANS_STATE_UNBLOCKED ||
380                            cur_trans->aborted);
381                 btrfs_put_transaction(cur_trans);
382         } else {
383                 spin_unlock(&root->fs_info->trans_lock);
384         }
385 }
386
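/*
 * Decide whether a new handle of @type should wait for a blocked
 * transaction to unblock before joining: userspace handles always wait,
 * TRANS_START waits unless an ioctl-opened transaction is in flight,
 * and nobody waits while the log tree is being replayed at mount time.
 */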
387 static int may_wait_transaction(struct btrfs_root *root, int type)
388 {
389         if (root->fs_info->log_root_recovering)
390                 return 0;
391
392         if (type == TRANS_USERSPACE)
393                 return 1;
394
395         if (type == TRANS_START &&
396             !atomic_read(&root->fs_info->open_ioctl_trans))
397                 return 1;
398
399         return 0;
400 }
401
402 static inline bool need_reserve_reloc_root(struct btrfs_root *root)
403 {
404         if (!root->fs_info->reloc_ctl ||
405             !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
406             root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
407             root->reloc_root)
408                 return false;
409
410         return true;
411 }
412
413 static struct btrfs_trans_handle *
414 start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
415                   enum btrfs_reserve_flush_enum flush)
416 {
417         struct btrfs_trans_handle *h;
418         struct btrfs_transaction *cur_trans;
419         u64 num_bytes = 0;
420         u64 qgroup_reserved = 0;
421         bool reloc_reserved = false;
422         int ret;
423
424         /* Send isn't supposed to start transactions. */
425         ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
426
427         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
428                 return ERR_PTR(-EROFS);
429
430         if (current->journal_info) {
431                 WARN_ON(type & TRANS_EXTWRITERS);
432                 h = current->journal_info;
433                 h->use_count++;
434                 WARN_ON(h->use_count > 2);
435                 h->orig_rsv = h->block_rsv;
436                 h->block_rsv = NULL;
437                 goto got_it;
438         }
439
440         /*
441          * Do the reservation before we join the transaction so we can do all
442          * the appropriate flushing if need be.
443          */
444         if (num_items > 0 && root != root->fs_info->chunk_root) {
445                 if (root->fs_info->quota_enabled &&
446                     is_fstree(root->root_key.objectid)) {
447                         qgroup_reserved = num_items * root->nodesize;
448                         ret = btrfs_qgroup_reserve(root, qgroup_reserved);
449                         if (ret)
450                                 return ERR_PTR(ret);
451                 }
452
453                 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
454                 /*
455                  * Do the reservation for the relocation root creation
456                  */
457                 if (need_reserve_reloc_root(root)) {
458                         num_bytes += root->nodesize;
459                         reloc_reserved = true;
460                 }
461
462                 ret = btrfs_block_rsv_add(root,
463                                           &root->fs_info->trans_block_rsv,
464                                           num_bytes, flush);
465                 if (ret)
466                         goto reserve_fail;
467         }
468 again:
469         h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
470         if (!h) {
471                 ret = -ENOMEM;
472                 goto alloc_fail;
473         }
474
475         /*
476          * If we are JOIN_NOLOCK we're already committing a transaction and
477          * waiting on this guy, so we don't need to do the sb_start_intwrite
478          * because we're already holding a ref.  We need this because we could
479          * have raced in and did an fsync() on a file which can kick a commit
480          * and then we deadlock with somebody doing a freeze.
481          *
482          * If we are ATTACH, it means we just want to catch the current
483          * transaction and commit it, so we needn't do sb_start_intwrite(). 
484          */
485         if (type & __TRANS_FREEZABLE)
486                 sb_start_intwrite(root->fs_info->sb);
487
488         if (may_wait_transaction(root, type))
489                 wait_current_trans(root);
490
491         do {
492                 ret = join_transaction(root, type);
493                 if (ret == -EBUSY) {
494                         wait_current_trans(root);
495                         if (unlikely(type == TRANS_ATTACH))
496                                 ret = -ENOENT;
497                 }
498         } while (ret == -EBUSY);
499
500         if (ret < 0) {
501                 /* We must get the transaction if we are JOIN_NOLOCK. */
502                 BUG_ON(type == TRANS_JOIN_NOLOCK);
503                 goto join_fail;
504         }
505
506         cur_trans = root->fs_info->running_transaction;
507
508         h->transid = cur_trans->transid;
509         h->transaction = cur_trans;
510         h->blocks_used = 0;
511         h->bytes_reserved = 0;
512         h->root = root;
513         h->delayed_ref_updates = 0;
514         h->use_count = 1;
515         h->adding_csums = 0;
516         h->block_rsv = NULL;
517         h->orig_rsv = NULL;
518         h->aborted = 0;
519         h->qgroup_reserved = 0;
520         h->delayed_ref_elem.seq = 0;
521         h->type = type;
522         h->allocating_chunk = false;
523         h->reloc_reserved = false;
524         h->sync = false;
525         INIT_LIST_HEAD(&h->qgroup_ref_list);
526         INIT_LIST_HEAD(&h->new_bgs);
527         INIT_LIST_HEAD(&h->ordered);
528
529         smp_mb();
530         if (cur_trans->state >= TRANS_STATE_BLOCKED &&
531             may_wait_transaction(root, type)) {
532                 current->journal_info = h;
533                 btrfs_commit_transaction(h, root);
534                 goto again;
535         }
536
537         if (num_bytes) {
538                 trace_btrfs_space_reservation(root->fs_info, "transaction",
539                                               h->transid, num_bytes, 1);
540                 h->block_rsv = &root->fs_info->trans_block_rsv;
541                 h->bytes_reserved = num_bytes;
542                 h->reloc_reserved = reloc_reserved;
543         }
544         h->qgroup_reserved = qgroup_reserved;
545
546 got_it:
547         btrfs_record_root_in_trans(h, root);
548
549         if (!current->journal_info && type != TRANS_USERSPACE)
550                 current->journal_info = h;
551         return h;
552
553 join_fail:
554         if (type & __TRANS_FREEZABLE)
555                 sb_end_intwrite(root->fs_info->sb);
556         kmem_cache_free(btrfs_trans_handle_cachep, h);
557 alloc_fail:
558         if (num_bytes)
559                 btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
560                                         num_bytes);
561 reserve_fail:
562         if (qgroup_reserved)
563                 btrfs_qgroup_free(root, qgroup_reserved);
564         return ERR_PTR(ret);
565 }
566
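/*
 * Typical caller pattern (illustrative sketch only, not taken from this
 * file; error handling trimmed): reserve space for one item, modify the
 * tree, then drop the handle.
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	ret = btrfs_update_inode(trans, root, inode);
 *	btrfs_end_transaction(trans, root);
 *	return ret;
 */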
567 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
568                                                    int num_items)
569 {
570         return start_transaction(root, num_items, TRANS_START,
571                                  BTRFS_RESERVE_FLUSH_ALL);
572 }
573
574 struct btrfs_trans_handle *btrfs_start_transaction_lflush(
575                                         struct btrfs_root *root, int num_items)
576 {
577         return start_transaction(root, num_items, TRANS_START,
578                                  BTRFS_RESERVE_FLUSH_LIMIT);
579 }
580
581 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
582 {
583         return start_transaction(root, 0, TRANS_JOIN, 0);
584 }
585
586 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
587 {
588         return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
589 }
590
591 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
592 {
593         return start_transaction(root, 0, TRANS_USERSPACE, 0);
594 }
595
596 /*
597  * btrfs_attach_transaction() - catch the running transaction
598  *
599  * It is used when we want to commit the current transaction, but
600  * don't want to start a new one.
601  *
602  * Note: if this function returns -ENOENT, it just means there is no
603  * running transaction. But it is possible that an inactive transaction
604  * is still in memory and not yet fully on disk. If you need to be sure
605  * no inactive transaction remains in the fs when -ENOENT is returned,
606  * you should invoke
607  *     btrfs_attach_transaction_barrier()
608  */
609 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
610 {
611         return start_transaction(root, 0, TRANS_ATTACH, 0);
612 }
613
614 /*
615  * btrfs_attach_transaction_barrier() - catch the running transaction
616  *
617  * It is similar to the above function, the difference is that this one
618  * will wait for all the inactive transactions until they fully
619  * complete.
620  */
621 struct btrfs_trans_handle *
622 btrfs_attach_transaction_barrier(struct btrfs_root *root)
623 {
624         struct btrfs_trans_handle *trans;
625
626         trans = start_transaction(root, 0, TRANS_ATTACH, 0);
627         if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
628                 btrfs_wait_for_commit(root, 0);
629
630         return trans;
631 }
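/*
 * Illustrative attach pattern (modeled on sync-style callers elsewhere
 * in btrfs, not part of this file): commit whatever is already running
 * and treat "nothing running" as success.
 *
 *	trans = btrfs_attach_transaction_barrier(root);
 *	if (IS_ERR(trans)) {
 *		if (PTR_ERR(trans) == -ENOENT)
 *			return 0;
 *		return PTR_ERR(trans);
 *	}
 *	return btrfs_commit_transaction(trans, root);
 */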
632
633 /* wait for a transaction commit to be fully complete */
634 static noinline void wait_for_commit(struct btrfs_root *root,
635                                     struct btrfs_transaction *commit)
636 {
637         wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
638 }
639
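/*
 * A transid of 0 means "wait for the newest transaction that has started
 * committing"; a nonzero transid waits for that exact generation to
 * finish and returns -EINVAL if it was never created.
 */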
640 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
641 {
642         struct btrfs_transaction *cur_trans = NULL, *t;
643         int ret = 0;
644
645         if (transid) {
646                 if (transid <= root->fs_info->last_trans_committed)
647                         goto out;
648
649                 /* find specified transaction */
650                 spin_lock(&root->fs_info->trans_lock);
651                 list_for_each_entry(t, &root->fs_info->trans_list, list) {
652                         if (t->transid == transid) {
653                                 cur_trans = t;
654                                 atomic_inc(&cur_trans->use_count);
655                                 ret = 0;
656                                 break;
657                         }
658                         if (t->transid > transid) {
659                                 ret = 0;
660                                 break;
661                         }
662                 }
663                 spin_unlock(&root->fs_info->trans_lock);
664
665                 /*
666                  * The specified transaction doesn't exist, or we
667                  * raced with btrfs_commit_transaction
668                  */
669                 if (!cur_trans) {
670                         if (transid > root->fs_info->last_trans_committed)
671                                 ret = -EINVAL;
672                         goto out;
673                 }
674         } else {
675                 /* find newest transaction that is committing | committed */
676                 spin_lock(&root->fs_info->trans_lock);
677                 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
678                                             list) {
679                         if (t->state >= TRANS_STATE_COMMIT_START) {
680                                 if (t->state == TRANS_STATE_COMPLETED)
681                                         break;
682                                 cur_trans = t;
683                                 atomic_inc(&cur_trans->use_count);
684                                 break;
685                         }
686                 }
687                 spin_unlock(&root->fs_info->trans_lock);
688                 if (!cur_trans)
689                         goto out;  /* nothing committing|committed */
690         }
691
692         wait_for_commit(root, cur_trans);
693         btrfs_put_transaction(cur_trans);
694 out:
695         return ret;
696 }
697
698 void btrfs_throttle(struct btrfs_root *root)
699 {
700         if (!atomic_read(&root->fs_info->open_ioctl_trans))
701                 wait_current_trans(root);
702 }
703
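/*
 * Long-running operations are expected to call
 * btrfs_should_end_transaction() periodically; a nonzero return tells
 * them to end their handle and start a fresh one so a pending commit or
 * a tight global reserve is not held up by a single long-lived handle.
 */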
704 static int should_end_transaction(struct btrfs_trans_handle *trans,
705                                   struct btrfs_root *root)
706 {
707         if (root->fs_info->global_block_rsv.space_info->full &&
708             btrfs_check_space_for_delayed_refs(trans, root))
709                 return 1;
710
711         return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
712 }
713
714 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
715                                  struct btrfs_root *root)
716 {
717         struct btrfs_transaction *cur_trans = trans->transaction;
718         int updates;
719         int err;
720
721         smp_mb();
722         if (cur_trans->state >= TRANS_STATE_BLOCKED ||
723             cur_trans->delayed_refs.flushing)
724                 return 1;
725
726         updates = trans->delayed_ref_updates;
727         trans->delayed_ref_updates = 0;
728         if (updates) {
729                 err = btrfs_run_delayed_refs(trans, root, updates * 2);
730                 if (err) /* Error code will also eval true */
731                         return err;
732         }
733
734         return should_end_transaction(trans, root);
735 }
736
737 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
738                           struct btrfs_root *root, int throttle)
739 {
740         struct btrfs_transaction *cur_trans = trans->transaction;
741         struct btrfs_fs_info *info = root->fs_info;
742         unsigned long cur = trans->delayed_ref_updates;
743         int lock = (trans->type != TRANS_JOIN_NOLOCK);
744         int err = 0;
745         int must_run_delayed_refs = 0;
746
747         if (trans->use_count > 1) {
748                 trans->use_count--;
749                 trans->block_rsv = trans->orig_rsv;
750                 return 0;
751         }
752
753         btrfs_trans_release_metadata(trans, root);
754         trans->block_rsv = NULL;
755
756         if (!list_empty(&trans->new_bgs))
757                 btrfs_create_pending_block_groups(trans, root);
758
759         if (!list_empty(&trans->ordered)) {
760                 spin_lock(&info->trans_lock);
761                 list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
762                 spin_unlock(&info->trans_lock);
763         }
764
765         trans->delayed_ref_updates = 0;
766         if (!trans->sync) {
767                 must_run_delayed_refs =
768                         btrfs_should_throttle_delayed_refs(trans, root);
769                 cur = max_t(unsigned long, cur, 32);
770
771                 /*
772                  * don't make the caller wait if they are from a NOLOCK
773                  * or ATTACH transaction, it will deadlock with commit
774                  */
775                 if (must_run_delayed_refs == 1 &&
776                     (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
777                         must_run_delayed_refs = 2;
778         }
779
780         if (trans->qgroup_reserved) {
781                 /*
782                  * the same root has to be passed here between start_transaction
783                  * and end_transaction. Subvolume quota depends on this.
784                  */
785                 btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
786                 trans->qgroup_reserved = 0;
787         }
788
789         btrfs_trans_release_metadata(trans, root);
790         trans->block_rsv = NULL;
791
792         if (!list_empty(&trans->new_bgs))
793                 btrfs_create_pending_block_groups(trans, root);
794
795         if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
796             should_end_transaction(trans, root) &&
797             ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
798                 spin_lock(&info->trans_lock);
799                 if (cur_trans->state == TRANS_STATE_RUNNING)
800                         cur_trans->state = TRANS_STATE_BLOCKED;
801                 spin_unlock(&info->trans_lock);
802         }
803
804         if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
805                 if (throttle)
806                         return btrfs_commit_transaction(trans, root);
807                 else
808                         wake_up_process(info->transaction_kthread);
809         }
810
811         if (trans->type & __TRANS_FREEZABLE)
812                 sb_end_intwrite(root->fs_info->sb);
813
814         WARN_ON(cur_trans != info->running_transaction);
815         WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
816         atomic_dec(&cur_trans->num_writers);
817         extwriter_counter_dec(cur_trans, trans->type);
818
819         smp_mb();
820         if (waitqueue_active(&cur_trans->writer_wait))
821                 wake_up(&cur_trans->writer_wait);
822         btrfs_put_transaction(cur_trans);
823
824         if (current->journal_info == trans)
825                 current->journal_info = NULL;
826
827         if (throttle)
828                 btrfs_run_delayed_iputs(root);
829
830         if (trans->aborted ||
831             test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
832                 wake_up_process(info->transaction_kthread);
833                 err = -EIO;
834         }
835         assert_qgroups_uptodate(trans);
836
837         kmem_cache_free(btrfs_trans_handle_cachep, trans);
838         if (must_run_delayed_refs) {
839                 btrfs_async_run_delayed_refs(root, cur,
840                                              must_run_delayed_refs == 1);
841         }
842         return err;
843 }
844
845 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
846                           struct btrfs_root *root)
847 {
848         return __btrfs_end_transaction(trans, root, 0);
849 }
850
851 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
852                                    struct btrfs_root *root)
853 {
854         return __btrfs_end_transaction(trans, root, 1);
855 }
856
857 /*
858  * when btree blocks are allocated, they have some corresponding bits set for
859  * them in one of two extent_io trees.  This is used to make sure all of
860  * those extents are sent to disk but does not wait on them
861  */
862 int btrfs_write_marked_extents(struct btrfs_root *root,
863                                struct extent_io_tree *dirty_pages, int mark)
864 {
865         int err = 0;
866         int werr = 0;
867         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
868         struct extent_state *cached_state = NULL;
869         u64 start = 0;
870         u64 end;
871
872         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
873                                       mark, &cached_state)) {
874                 bool wait_writeback = false;
875
876                 err = convert_extent_bit(dirty_pages, start, end,
877                                          EXTENT_NEED_WAIT,
878                                          mark, &cached_state, GFP_NOFS);
879                 /*
880                  * convert_extent_bit can return -ENOMEM, which is most of the
881                  * time a temporary error. So when it happens, ignore the error
882                  * and wait for writeback of this range to finish - because we
883                  * failed to set the bit EXTENT_NEED_WAIT for the range, a call
884                  * to btrfs_wait_marked_extents() would not know that writeback
885                  * for this range started and therefore wouldn't wait for it to
886                  * finish - we don't want to commit a superblock that points to
887                  * btree nodes/leafs for which writeback hasn't finished yet
888                  * (and without errors).
889                  * We cleanup any entries left in the io tree when committing
890                  * the transaction (through clear_btree_io_tree()).
891                  */
892                 if (err == -ENOMEM) {
893                         err = 0;
894                         wait_writeback = true;
895                 }
896                 if (!err)
897                         err = filemap_fdatawrite_range(mapping, start, end);
898                 if (err)
899                         werr = err;
900                 else if (wait_writeback)
901                         werr = filemap_fdatawait_range(mapping, start, end);
902                 free_extent_state(cached_state);
903                 cached_state = NULL;
904                 cond_resched();
905                 start = end + 1;
906         }
907         return werr;
908 }
909
910 /*
911  * when btree blocks are allocated, they have some corresponding bits set for
912  * them in one of two extent_io trees.  This is used to make sure all of
913  * those extents are on disk for transaction or log commit.  We wait
914  * on all the pages and clear them from the dirty pages state tree
915  */
916 int btrfs_wait_marked_extents(struct btrfs_root *root,
917                               struct extent_io_tree *dirty_pages, int mark)
918 {
919         int err = 0;
920         int werr = 0;
921         struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
922         struct extent_state *cached_state = NULL;
923         u64 start = 0;
924         u64 end;
925         struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
926         bool errors = false;
927
928         while (!find_first_extent_bit(dirty_pages, start, &start, &end,
929                                       EXTENT_NEED_WAIT, &cached_state)) {
930                 /*
931                  * Ignore -ENOMEM errors returned by clear_extent_bit().
932                  * When committing the transaction, we'll remove any entries
933                  * left in the io tree. For a log commit, we don't remove them
934                  * after committing the log because the tree can be accessed
935                  * concurrently - we do it only at transaction commit time when
936                  * it's safe to do it (through clear_btree_io_tree()).
937                  */
938                 err = clear_extent_bit(dirty_pages, start, end,
939                                        EXTENT_NEED_WAIT,
940                                        0, 0, &cached_state, GFP_NOFS);
941                 if (err == -ENOMEM)
942                         err = 0;
943                 if (!err)
944                         err = filemap_fdatawait_range(mapping, start, end);
945                 if (err)
946                         werr = err;
947                 free_extent_state(cached_state);
948                 cached_state = NULL;
949                 cond_resched();
950                 start = end + 1;
951         }
952         if (err)
953                 werr = err;
954
955         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
956                 if ((mark & EXTENT_DIRTY) &&
957                     test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
958                                        &btree_ino->runtime_flags))
959                         errors = true;
960
961                 if ((mark & EXTENT_NEW) &&
962                     test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
963                                        &btree_ino->runtime_flags))
964                         errors = true;
965         } else {
966                 if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
967                                        &btree_ino->runtime_flags))
968                         errors = true;
969         }
970
971         if (errors && !werr)
972                 werr = -EIO;
973
974         return werr;
975 }
976
977 /*
978  * when btree blocks are allocated, they have some corresponding bits set for
979  * them in one of two extent_io trees.  This is used to make sure all of
980  * those extents are on disk for transaction or log commit
981  */
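/*
 * The plug batches the writeback submitted by btrfs_write_marked_extents()
 * so the block layer can merge requests before they are issued; the wait
 * pass then runs unplugged.
 */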
982 static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
983                                 struct extent_io_tree *dirty_pages, int mark)
984 {
985         int ret;
986         int ret2;
987         struct blk_plug plug;
988
989         blk_start_plug(&plug);
990         ret = btrfs_write_marked_extents(root, dirty_pages, mark);
991         blk_finish_plug(&plug);
992         ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
993
994         if (ret)
995                 return ret;
996         if (ret2)
997                 return ret2;
998         return 0;
999 }
1000
1001 static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
1002                                      struct btrfs_root *root)
1003 {
1004         int ret;
1005
1006         ret = btrfs_write_and_wait_marked_extents(root,
1007                                            &trans->transaction->dirty_pages,
1008                                            EXTENT_DIRTY);
1009         clear_btree_io_tree(&trans->transaction->dirty_pages);
1010
1011         return ret;
1012 }
1013
1014 /*
1015  * this is used to update the root pointer in the tree of tree roots.
1016  *
1017  * But, in the case of the extent allocation tree, updating the root
1018  * pointer may allocate blocks which may change the root of the extent
1019  * allocation tree.
1020  *
1021  * So, this loops and repeats and makes sure the cowonly root didn't
1022  * change while the root pointer was being updated in the metadata.
1023  */
1024 static int update_cowonly_root(struct btrfs_trans_handle *trans,
1025                                struct btrfs_root *root)
1026 {
1027         int ret;
1028         u64 old_root_bytenr;
1029         u64 old_root_used;
1030         struct btrfs_root *tree_root = root->fs_info->tree_root;
1031
1032         old_root_used = btrfs_root_used(&root->root_item);
1033
1034         while (1) {
1035                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
1036                 if (old_root_bytenr == root->node->start &&
1037                     old_root_used == btrfs_root_used(&root->root_item))
1038                         break;
1039
1040                 btrfs_set_root_node(&root->root_item, root->node);
1041                 ret = btrfs_update_root(trans, tree_root,
1042                                         &root->root_key,
1043                                         &root->root_item);
1044                 if (ret)
1045                         return ret;
1046
1047                 old_root_used = btrfs_root_used(&root->root_item);
1048         }
1049
1050         return 0;
1051 }
1052
1053 /*
1054  * update all the cowonly tree roots on disk
1055  *
1056  * The error handling in this function may not be obvious. Any of the
1057  * failures will cause the file system to go offline. We still need
1058  * to clean up the delayed refs.
1059  */
1060 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
1061                                          struct btrfs_root *root)
1062 {
1063         struct btrfs_fs_info *fs_info = root->fs_info;
1064         struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
1065         struct list_head *io_bgs = &trans->transaction->io_bgs;
1066         struct list_head *next;
1067         struct extent_buffer *eb;
1068         int ret;
1069
1070         eb = btrfs_lock_root_node(fs_info->tree_root);
1071         ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
1072                               0, &eb);
1073         btrfs_tree_unlock(eb);
1074         free_extent_buffer(eb);
1075
1076         if (ret)
1077                 return ret;
1078
1079         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1080         if (ret)
1081                 return ret;
1082
1083         ret = btrfs_run_dev_stats(trans, root->fs_info);
1084         if (ret)
1085                 return ret;
1086         ret = btrfs_run_dev_replace(trans, root->fs_info);
1087         if (ret)
1088                 return ret;
1089         ret = btrfs_run_qgroups(trans, root->fs_info);
1090         if (ret)
1091                 return ret;
1092
1093         ret = btrfs_setup_space_cache(trans, root);
1094         if (ret)
1095                 return ret;
1096
1097         /* run_qgroups might have added some more refs */
1098         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1099         if (ret)
1100                 return ret;
1101 again:
1102         while (!list_empty(&fs_info->dirty_cowonly_roots)) {
1103                 next = fs_info->dirty_cowonly_roots.next;
1104                 list_del_init(next);
1105                 root = list_entry(next, struct btrfs_root, dirty_list);
1106                 clear_bit(BTRFS_ROOT_DIRTY, &root->state);
1107
1108                 if (root != fs_info->extent_root)
1109                         list_add_tail(&root->dirty_list,
1110                                       &trans->transaction->switch_commits);
1111                 ret = update_cowonly_root(trans, root);
1112                 if (ret)
1113                         return ret;
1114                 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1115                 if (ret)
1116                         return ret;
1117         }
1118
1119         while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
1120                 ret = btrfs_write_dirty_block_groups(trans, root);
1121                 if (ret)
1122                         return ret;
1123                 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1124                 if (ret)
1125                         return ret;
1126         }
1127
1128         if (!list_empty(&fs_info->dirty_cowonly_roots))
1129                 goto again;
1130
1131         list_add_tail(&fs_info->extent_root->dirty_list,
1132                       &trans->transaction->switch_commits);
1133         btrfs_after_dev_replace_commit(fs_info);
1134
1135         return 0;
1136 }
1137
1138 /*
1139  * dead roots are old snapshots that need to be deleted.  This allocates
1140  * a dirty root struct and adds it into the list of dead roots that need to
1141  * be deleted
1142  */
1143 void btrfs_add_dead_root(struct btrfs_root *root)
1144 {
1145         spin_lock(&root->fs_info->trans_lock);
1146         if (list_empty(&root->root_list))
1147                 list_add_tail(&root->root_list, &root->fs_info->dead_roots);
1148         spin_unlock(&root->fs_info->trans_lock);
1149 }
1150
1151 /*
1152  * commit all the dirty fs/subvolume (reference counted) tree roots on disk
1153  */
1154 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
1155                                     struct btrfs_root *root)
1156 {
1157         struct btrfs_root *gang[8];
1158         struct btrfs_fs_info *fs_info = root->fs_info;
1159         int i;
1160         int ret;
1161         int err = 0;
1162
1163         spin_lock(&fs_info->fs_roots_radix_lock);
1164         while (1) {
1165                 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
1166                                                  (void **)gang, 0,
1167                                                  ARRAY_SIZE(gang),
1168                                                  BTRFS_ROOT_TRANS_TAG);
1169                 if (ret == 0)
1170                         break;
1171                 for (i = 0; i < ret; i++) {
1172                         root = gang[i];
1173                         radix_tree_tag_clear(&fs_info->fs_roots_radix,
1174                                         (unsigned long)root->root_key.objectid,
1175                                         BTRFS_ROOT_TRANS_TAG);
1176                         spin_unlock(&fs_info->fs_roots_radix_lock);
1177
1178                         btrfs_free_log(trans, root);
1179                         btrfs_update_reloc_root(trans, root);
1180                         btrfs_orphan_commit_root(trans, root);
1181
1182                         btrfs_save_ino_cache(root, trans);
1183
1184                         /* see comments in should_cow_block() */
1185                         clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1186                         smp_mb__after_atomic();
1187
1188                         if (root->commit_root != root->node) {
1189                                 list_add_tail(&root->dirty_list,
1190                                         &trans->transaction->switch_commits);
1191                                 btrfs_set_root_node(&root->root_item,
1192                                                     root->node);
1193                         }
1194
1195                         err = btrfs_update_root(trans, fs_info->tree_root,
1196                                                 &root->root_key,
1197                                                 &root->root_item);
1198                         spin_lock(&fs_info->fs_roots_radix_lock);
1199                         if (err)
1200                                 break;
1201                 }
1202         }
1203         spin_unlock(&fs_info->fs_roots_radix_lock);
1204         return err;
1205 }
1206
1207 /*
1208  * defrag a given btree.
1209  * Every leaf in the btree is read and defragged.
1210  */
1211 int btrfs_defrag_root(struct btrfs_root *root)
1212 {
1213         struct btrfs_fs_info *info = root->fs_info;
1214         struct btrfs_trans_handle *trans;
1215         int ret;
1216
1217         if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
1218                 return 0;
1219
1220         while (1) {
1221                 trans = btrfs_start_transaction(root, 0);
1222                 if (IS_ERR(trans))
1223                         return PTR_ERR(trans);
1224
1225                 ret = btrfs_defrag_leaves(trans, root);
1226
1227                 btrfs_end_transaction(trans, root);
1228                 btrfs_btree_balance_dirty(info->tree_root);
1229                 cond_resched();
1230
1231                 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
1232                         break;
1233
1234                 if (btrfs_defrag_cancelled(root->fs_info)) {
1235                         pr_debug("BTRFS: defrag_root cancelled\n");
1236                         ret = -EAGAIN;
1237                         break;
1238                 }
1239         }
1240         clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
1241         return ret;
1242 }
1243
1244 /*
1245  * new snapshots need to be created at a very specific time in the
1246  * transaction commit.  This does the actual creation.
1247  *
1248  * Note:
1249  * If an error occurs that may affect the commit of the current transaction,
1250  * return the error number. If the error only affects the creation of the
1251  * pending snapshots, return 0.
1252  */
1253 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
1254                                    struct btrfs_fs_info *fs_info,
1255                                    struct btrfs_pending_snapshot *pending)
1256 {
1257         struct btrfs_key key;
1258         struct btrfs_root_item *new_root_item;
1259         struct btrfs_root *tree_root = fs_info->tree_root;
1260         struct btrfs_root *root = pending->root;
1261         struct btrfs_root *parent_root;
1262         struct btrfs_block_rsv *rsv;
1263         struct inode *parent_inode;
1264         struct btrfs_path *path;
1265         struct btrfs_dir_item *dir_item;
1266         struct dentry *dentry;
1267         struct extent_buffer *tmp;
1268         struct extent_buffer *old;
1269         struct timespec cur_time = CURRENT_TIME;
1270         int ret = 0;
1271         u64 to_reserve = 0;
1272         u64 index = 0;
1273         u64 objectid;
1274         u64 root_flags;
1275         uuid_le new_uuid;
1276
1277         path = btrfs_alloc_path();
1278         if (!path) {
1279                 pending->error = -ENOMEM;
1280                 return 0;
1281         }
1282
1283         new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
1284         if (!new_root_item) {
1285                 pending->error = -ENOMEM;
1286                 goto root_item_alloc_fail;
1287         }
1288
1289         pending->error = btrfs_find_free_objectid(tree_root, &objectid);
1290         if (pending->error)
1291                 goto no_free_objectid;
1292
1293         btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
1294
1295         if (to_reserve > 0) {
1296                 pending->error = btrfs_block_rsv_add(root,
1297                                                      &pending->block_rsv,
1298                                                      to_reserve,
1299                                                      BTRFS_RESERVE_NO_FLUSH);
1300                 if (pending->error)
1301                         goto no_free_objectid;
1302         }
1303
1304         key.objectid = objectid;
1305         key.offset = (u64)-1;
1306         key.type = BTRFS_ROOT_ITEM_KEY;
1307
1308         rsv = trans->block_rsv;
1309         trans->block_rsv = &pending->block_rsv;
1310         trans->bytes_reserved = trans->block_rsv->reserved;
1311
1312         dentry = pending->dentry;
1313         parent_inode = pending->dir;
1314         parent_root = BTRFS_I(parent_inode)->root;
1315         record_root_in_trans(trans, parent_root);
1316
1317         /*
1318          * insert the directory item
1319          */
1320         ret = btrfs_set_inode_index(parent_inode, &index);
1321         BUG_ON(ret); /* -ENOMEM */
1322
1323         /* check if there is a file/dir which has the same name. */
1324         dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1325                                          btrfs_ino(parent_inode),
1326                                          dentry->d_name.name,
1327                                          dentry->d_name.len, 0);
1328         if (dir_item != NULL && !IS_ERR(dir_item)) {
1329                 pending->error = -EEXIST;
1330                 goto dir_item_existed;
1331         } else if (IS_ERR(dir_item)) {
1332                 ret = PTR_ERR(dir_item);
1333                 btrfs_abort_transaction(trans, root, ret);
1334                 goto fail;
1335         }
1336         btrfs_release_path(path);
1337
1338         /*
1339          * pull in the delayed directory update
1340          * and the delayed inode item
1341          * otherwise we corrupt the FS during
1342          * snapshot
1343          */
1344         ret = btrfs_run_delayed_items(trans, root);
1345         if (ret) {      /* Transaction aborted */
1346                 btrfs_abort_transaction(trans, root, ret);
1347                 goto fail;
1348         }
1349
1350         record_root_in_trans(trans, root);
1351         btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1352         memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1353         btrfs_check_and_init_root_item(new_root_item);
1354
1355         root_flags = btrfs_root_flags(new_root_item);
1356         if (pending->readonly)
1357                 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1358         else
1359                 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1360         btrfs_set_root_flags(new_root_item, root_flags);
1361
1362         btrfs_set_root_generation_v2(new_root_item,
1363                         trans->transid);
1364         uuid_le_gen(&new_uuid);
1365         memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1366         memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1367                         BTRFS_UUID_SIZE);
1368         if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
1369                 memset(new_root_item->received_uuid, 0,
1370                        sizeof(new_root_item->received_uuid));
1371                 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1372                 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1373                 btrfs_set_root_stransid(new_root_item, 0);
1374                 btrfs_set_root_rtransid(new_root_item, 0);
1375         }
1376         btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
1377         btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
1378         btrfs_set_root_otransid(new_root_item, trans->transid);
1379
1380         old = btrfs_lock_root_node(root);
1381         ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1382         if (ret) {
1383                 btrfs_tree_unlock(old);
1384                 free_extent_buffer(old);
1385                 btrfs_abort_transaction(trans, root, ret);
1386                 goto fail;
1387         }
1388
1389         btrfs_set_lock_blocking(old);
1390
1391         ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1392         /* clean up in any case */
1393         btrfs_tree_unlock(old);
1394         free_extent_buffer(old);
1395         if (ret) {
1396                 btrfs_abort_transaction(trans, root, ret);
1397                 goto fail;
1398         }
1399
1400         /*
1401          * We need to flush delayed refs in order to make sure all of our quota
1402          * operations have been done before we call btrfs_qgroup_inherit.
1403          */
1404         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1405         if (ret) {
1406                 btrfs_abort_transaction(trans, root, ret);
1407                 goto fail;
1408         }
1409
1410         ret = btrfs_qgroup_inherit(trans, fs_info,
1411                                    root->root_key.objectid,
1412                                    objectid, pending->inherit);
1413         if (ret) {
1414                 btrfs_abort_transaction(trans, root, ret);
1415                 goto fail;
1416         }
1417
1418         /* see comments in should_cow_block() */
1419         set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
1420         smp_wmb();
1421
1422         btrfs_set_root_node(new_root_item, tmp);
1423         /* record when the snapshot was created in key.offset */
1424         key.offset = trans->transid;
1425         ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1426         btrfs_tree_unlock(tmp);
1427         free_extent_buffer(tmp);
1428         if (ret) {
1429                 btrfs_abort_transaction(trans, root, ret);
1430                 goto fail;
1431         }
1432
1433         /*
1434          * insert root back/forward references
1435          */
1436         ret = btrfs_add_root_ref(trans, tree_root, objectid,
1437                                  parent_root->root_key.objectid,
1438                                  btrfs_ino(parent_inode), index,
1439                                  dentry->d_name.name, dentry->d_name.len);
1440         if (ret) {
1441                 btrfs_abort_transaction(trans, root, ret);
1442                 goto fail;
1443         }
1444
1445         key.offset = (u64)-1;
1446         pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
1447         if (IS_ERR(pending->snap)) {
1448                 ret = PTR_ERR(pending->snap);
1449                 btrfs_abort_transaction(trans, root, ret);
1450                 goto fail;
1451         }
1452
1453         ret = btrfs_reloc_post_snapshot(trans, pending);
1454         if (ret) {
1455                 btrfs_abort_transaction(trans, root, ret);
1456                 goto fail;
1457         }
1458
1459         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1460         if (ret) {
1461                 btrfs_abort_transaction(trans, root, ret);
1462                 goto fail;
1463         }
1464
1465         ret = btrfs_insert_dir_item(trans, parent_root,
1466                                     dentry->d_name.name, dentry->d_name.len,
1467                                     parent_inode, &key,
1468                                     BTRFS_FT_DIR, index);
1469         /* We have checked the name at the beginning, so it is impossible. */
1470         BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
1471         if (ret) {
1472                 btrfs_abort_transaction(trans, root, ret);
1473                 goto fail;
1474         }
1475
1476         btrfs_i_size_write(parent_inode, parent_inode->i_size +
1477                                          dentry->d_name.len * 2);
1478         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1479         ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1480         if (ret) {
1481                 btrfs_abort_transaction(trans, root, ret);
1482                 goto fail;
1483         }
1484         ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
1485                                   BTRFS_UUID_KEY_SUBVOL, objectid);
1486         if (ret) {
1487                 btrfs_abort_transaction(trans, root, ret);
1488                 goto fail;
1489         }
1490         if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
1491                 ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
1492                                           new_root_item->received_uuid,
1493                                           BTRFS_UUID_KEY_RECEIVED_SUBVOL,
1494                                           objectid);
1495                 if (ret && ret != -EEXIST) {
1496                         btrfs_abort_transaction(trans, root, ret);
1497                         goto fail;
1498                 }
1499         }
1500 fail:
1501         pending->error = ret;
1502 dir_item_existed:
1503         trans->block_rsv = rsv;
1504         trans->bytes_reserved = 0;
1505 no_free_objectid:
1506         kfree(new_root_item);
1507 root_item_alloc_fail:
1508         btrfs_free_path(path);
1509         return ret;
1510 }
1511
1512 /*
1513  * create all the snapshots we've scheduled for creation
1514  */
1515 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1516                                              struct btrfs_fs_info *fs_info)
1517 {
1518         struct btrfs_pending_snapshot *pending, *next;
1519         struct list_head *head = &trans->transaction->pending_snapshots;
1520         int ret = 0;
1521
1522         list_for_each_entry_safe(pending, next, head, list) {
1523                 list_del(&pending->list);
1524                 ret = create_pending_snapshot(trans, fs_info, pending);
1525                 if (ret)
1526                         break;
1527         }
1528         return ret;
1529 }
1530
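/*
 * copy the latest tree root and chunk root pointers (bytenr, generation,
 * level) into the in-memory super block so the upcoming super write
 * picks them up
 */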
1531 static void update_super_roots(struct btrfs_root *root)
1532 {
1533         struct btrfs_root_item *root_item;
1534         struct btrfs_super_block *super;
1535
1536         super = root->fs_info->super_copy;
1537
1538         root_item = &root->fs_info->chunk_root->root_item;
1539         super->chunk_root = root_item->bytenr;
1540         super->chunk_root_generation = root_item->generation;
1541         super->chunk_root_level = root_item->level;
1542
1543         root_item = &root->fs_info->tree_root->root_item;
1544         super->root = root_item->bytenr;
1545         super->generation = root_item->generation;
1546         super->root_level = root_item->level;
1547         if (btrfs_test_opt(root, SPACE_CACHE))
1548                 super->cache_generation = root_item->generation;
1549         if (root->fs_info->update_uuid_tree_gen)
1550                 super->uuid_tree_generation = root_item->generation;
1551 }
1552
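/* report whether the running transaction has reached the commit-start state */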
1553 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1554 {
1555         struct btrfs_transaction *trans;
1556         int ret = 0;
1557
1558         spin_lock(&info->trans_lock);
1559         trans = info->running_transaction;
1560         if (trans)
1561                 ret = (trans->state >= TRANS_STATE_COMMIT_START);
1562         spin_unlock(&info->trans_lock);
1563         return ret;
1564 }
1565
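/* report whether the running transaction is currently in a blocked state */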
1566 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1567 {
1568         struct btrfs_transaction *trans;
1569         int ret = 0;
1570
1571         spin_lock(&info->trans_lock);
1572         trans = info->running_transaction;
1573         if (trans)
1574                 ret = is_transaction_blocked(trans);
1575         spin_unlock(&info->trans_lock);
1576         return ret;
1577 }
1578
1579 /*
1580  * wait for the current transaction commit to start and block subsequent
1581  * transaction joins
1582  */
1583 static void wait_current_trans_commit_start(struct btrfs_root *root,
1584                                             struct btrfs_transaction *trans)
1585 {
1586         wait_event(root->fs_info->transaction_blocked_wait,
1587                    trans->state >= TRANS_STATE_COMMIT_START ||
1588                    trans->aborted);
1589 }
1590
1591 /*
1592  * wait for the current transaction to start and then become unblocked.
1593  * caller holds ref.
1594  */
1595 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1596                                          struct btrfs_transaction *trans)
1597 {
1598         wait_event(root->fs_info->transaction_wait,
1599                    trans->state >= TRANS_STATE_UNBLOCKED ||
1600                    trans->aborted);
1601 }
1602
1603 /*
1604  * commit transactions asynchronously. once btrfs_commit_transaction_async
1605  * returns, any subsequent transaction will not be allowed to join.
1606  */
1607 struct btrfs_async_commit {
1608         struct btrfs_trans_handle *newtrans;
1609         struct btrfs_root *root;
1610         struct work_struct work;
1611 };
1612
1613 static void do_async_commit(struct work_struct *work)
1614 {
1615         struct btrfs_async_commit *ac =
1616                 container_of(work, struct btrfs_async_commit, work);
1617
1618         /*
1619          * We've got freeze protection passed with the transaction.
1620          * Tell lockdep about it.
1621          */
1622         if (ac->newtrans->type & __TRANS_FREEZABLE)
1623                 rwsem_acquire_read(
1624                      &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1625                      0, 1, _THIS_IP_);
1626
1627         current->journal_info = ac->newtrans;
1628
1629         btrfs_commit_transaction(ac->newtrans, ac->root);
1630         kfree(ac);
1631 }
1632
1633 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1634                                    struct btrfs_root *root,
1635                                    int wait_for_unblock)
1636 {
1637         struct btrfs_async_commit *ac;
1638         struct btrfs_transaction *cur_trans;
1639
1640         ac = kmalloc(sizeof(*ac), GFP_NOFS);
1641         if (!ac)
1642                 return -ENOMEM;
1643
1644         INIT_WORK(&ac->work, do_async_commit);
1645         ac->root = root;
1646         ac->newtrans = btrfs_join_transaction(root);
1647         if (IS_ERR(ac->newtrans)) {
1648                 int err = PTR_ERR(ac->newtrans);
1649                 kfree(ac);
1650                 return err;
1651         }
1652
1653         /* take transaction reference */
1654         cur_trans = trans->transaction;
1655         atomic_inc(&cur_trans->use_count);
1656
1657         btrfs_end_transaction(trans, root);
1658
1659         /*
1660          * Tell lockdep we've released the freeze rwsem, since the
1661          * async commit thread will be the one to unlock it.
1662          */
1663         if (ac->newtrans->type & __TRANS_FREEZABLE)
1664                 rwsem_release(
1665                         &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1666                         1, _THIS_IP_);
1667
1668         schedule_work(&ac->work);
1669
1670         /* wait for transaction to start and unblock */
1671         if (wait_for_unblock)
1672                 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1673         else
1674                 wait_current_trans_commit_start(root, cur_trans);
1675
1676         if (current->journal_info == trans)
1677                 current->journal_info = NULL;
1678
1679         btrfs_put_transaction(cur_trans);
1680         return 0;
1681 }
1682
1683
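/*
 * clean up an aborted transaction commit: mark the transaction aborted,
 * wait for the remaining writers, drop it from the transaction list and
 * free the handle
 */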
1684 static void cleanup_transaction(struct btrfs_trans_handle *trans,
1685                                 struct btrfs_root *root, int err)
1686 {
1687         struct btrfs_transaction *cur_trans = trans->transaction;
1688         DEFINE_WAIT(wait);
1689
1690         WARN_ON(trans->use_count > 1);
1691
1692         btrfs_abort_transaction(trans, root, err);
1693
1694         spin_lock(&root->fs_info->trans_lock);
1695
1696         /*
1697          * If the transaction is removed from the list, it means this
1698          * transaction has been committed successfully, so it is impossible
1699          * to call the cleanup function.
1700          */
1701         BUG_ON(list_empty(&cur_trans->list));
1702
1703         list_del_init(&cur_trans->list);
1704         if (cur_trans == root->fs_info->running_transaction) {
1705                 cur_trans->state = TRANS_STATE_COMMIT_DOING;
1706                 spin_unlock(&root->fs_info->trans_lock);
1707                 wait_event(cur_trans->writer_wait,
1708                            atomic_read(&cur_trans->num_writers) == 1);
1709
1710                 spin_lock(&root->fs_info->trans_lock);
1711         }
1712         spin_unlock(&root->fs_info->trans_lock);
1713
1714         btrfs_cleanup_one_transaction(trans->transaction, root);
1715
1716         spin_lock(&root->fs_info->trans_lock);
1717         if (cur_trans == root->fs_info->running_transaction)
1718                 root->fs_info->running_transaction = NULL;
1719         spin_unlock(&root->fs_info->trans_lock);
1720
1721         if (trans->type & __TRANS_FREEZABLE)
1722                 sb_end_intwrite(root->fs_info->sb);
1723         btrfs_put_transaction(cur_trans);
1724         btrfs_put_transaction(cur_trans);
1725
1726         trace_btrfs_transaction_commit(root);
1727
1728         if (current->journal_info == trans)
1729                 current->journal_info = NULL;
1730         btrfs_scrub_cancel(root->fs_info);
1731
1732         kmem_cache_free(btrfs_trans_handle_cachep, trans);
1733 }
1734
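/* with -o flushoncommit, kick off delalloc writeback on all roots before commit */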
1735 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1736 {
1737         if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
1738                 return btrfs_start_delalloc_roots(fs_info, 1, -1);
1739         return 0;
1740 }
1741
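/* with -o flushoncommit, wait for the writeback started above to complete */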
1742 static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
1743 {
1744         if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
1745                 btrfs_wait_ordered_roots(fs_info, -1);
1746 }
1747
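/* wait for every ordered extent attached to this transaction to complete */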
1748 static inline void
1749 btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
1750                            struct btrfs_fs_info *fs_info)
1751 {
1752         struct btrfs_ordered_extent *ordered;
1753
1754         spin_lock(&fs_info->trans_lock);
1755         while (!list_empty(&cur_trans->pending_ordered)) {
1756                 ordered = list_first_entry(&cur_trans->pending_ordered,
1757                                            struct btrfs_ordered_extent,
1758                                            trans_list);
1759                 list_del_init(&ordered->trans_list);
1760                 spin_unlock(&fs_info->trans_lock);
1761
1762                 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
1763                                                    &ordered->flags));
1764                 btrfs_put_ordered_extent(ordered);
1765                 spin_lock(&fs_info->trans_lock);
1766         }
1767         spin_unlock(&fs_info->trans_lock);
1768 }
1769
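/*
 * commit the running transaction: flush delayed refs and delayed items,
 * create any pending snapshots, write out the dirty tree blocks and
 * finally the super blocks
 */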
1770 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1771                              struct btrfs_root *root)
1772 {
1773         struct btrfs_transaction *cur_trans = trans->transaction;
1774         struct btrfs_transaction *prev_trans = NULL;
1775         struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
1776         int ret;
1777
1778         /* Stop the commit early if ->aborted is set */
1779         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1780                 ret = cur_trans->aborted;
1781                 btrfs_end_transaction(trans, root);
1782                 return ret;
1783         }
1784
1785         /* make a pass through all the delayed refs we have so far;
1786          * any running procs may add more while we are here
1787          */
1788         ret = btrfs_run_delayed_refs(trans, root, 0);
1789         if (ret) {
1790                 btrfs_end_transaction(trans, root);
1791                 return ret;
1792         }
1793
1794         btrfs_trans_release_metadata(trans, root);
1795         trans->block_rsv = NULL;
1796         if (trans->qgroup_reserved) {
1797                 btrfs_qgroup_free(root, trans->qgroup_reserved);
1798                 trans->qgroup_reserved = 0;
1799         }
1800
1801         cur_trans = trans->transaction;
1802
1803         /*
1804          * set the flushing flag so procs in this transaction have to
1805          * start sending their work down.
1806          */
1807         cur_trans->delayed_refs.flushing = 1;
1808         smp_wmb();
1809
1810         if (!list_empty(&trans->new_bgs))
1811                 btrfs_create_pending_block_groups(trans, root);
1812
1813         ret = btrfs_run_delayed_refs(trans, root, 0);
1814         if (ret) {
1815                 btrfs_end_transaction(trans, root);
1816                 return ret;
1817         }
1818
1819         if (!cur_trans->dirty_bg_run) {
1820                 int run_it = 0;
1821
1822                 /* this mutex is also taken before trying to set
1823                  * block groups readonly.  We need to make sure
1824                  * that nobody has set a block group readonly
1825                  * after extents from that block group have been
1826                  * allocated for cache files.  btrfs_set_block_group_ro
1827                  * will wait for the transaction to commit if it
1828                  * finds dirty_bg_run = 1
1829                  *
1830                  * The dirty_bg_run flag is also used to make sure only
1831                  * one process starts all the block group IO.  It wouldn't
1832                  * hurt to have more than one go through, but there's no
1833                  * real advantage to it either.
1834                  */
1835                 mutex_lock(&root->fs_info->ro_block_group_mutex);
1836                 if (!cur_trans->dirty_bg_run) {
1837                         run_it = 1;
1838                         cur_trans->dirty_bg_run = 1;
1839                 }
1840                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
1841
1842                 if (run_it)
1843                         ret = btrfs_start_dirty_block_groups(trans, root);
1844         }
1845         if (ret) {
1846                 btrfs_end_transaction(trans, root);
1847                 return ret;
1848         }
1849
1850         spin_lock(&root->fs_info->trans_lock);
1851         list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
1852         if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1853                 spin_unlock(&root->fs_info->trans_lock);
1854                 atomic_inc(&cur_trans->use_count);
1855                 ret = btrfs_end_transaction(trans, root);
1856
1857                 wait_for_commit(root, cur_trans);
1858
1859                 if (unlikely(cur_trans->aborted))
1860                         ret = cur_trans->aborted;
1861
1862                 btrfs_put_transaction(cur_trans);
1863
1864                 return ret;
1865         }
1866
1867         cur_trans->state = TRANS_STATE_COMMIT_START;
1868         wake_up(&root->fs_info->transaction_blocked_wait);
1869
1870         if (cur_trans->list.prev != &root->fs_info->trans_list) {
1871                 prev_trans = list_entry(cur_trans->list.prev,
1872                                         struct btrfs_transaction, list);
1873                 if (prev_trans->state != TRANS_STATE_COMPLETED) {
1874                         atomic_inc(&prev_trans->use_count);
1875                         spin_unlock(&root->fs_info->trans_lock);
1876
1877                         wait_for_commit(root, prev_trans);
1878
1879                         btrfs_put_transaction(prev_trans);
1880                 } else {
1881                         spin_unlock(&root->fs_info->trans_lock);
1882                 }
1883         } else {
1884                 spin_unlock(&root->fs_info->trans_lock);
1885         }
1886
1887         extwriter_counter_dec(cur_trans, trans->type);
1888
1889         ret = btrfs_start_delalloc_flush(root->fs_info);
1890         if (ret)
1891                 goto cleanup_transaction;
1892
1893         ret = btrfs_run_delayed_items(trans, root);
1894         if (ret)
1895                 goto cleanup_transaction;
1896
1897         wait_event(cur_trans->writer_wait,
1898                    extwriter_counter_read(cur_trans) == 0);
1899
1900         /* some pending items might be added after the previous flush. */
1901         ret = btrfs_run_delayed_items(trans, root);
1902         if (ret)
1903                 goto cleanup_transaction;
1904
1905         btrfs_wait_delalloc_flush(root->fs_info);
1906
1907         btrfs_wait_pending_ordered(cur_trans, root->fs_info);
1908
1909         btrfs_scrub_pause(root);
1910         /*
1911          * Ok now we need to make sure to block out any other joins while we
1912          * commit the transaction.  We could have started a join before setting
1913          * COMMIT_DOING, so make sure to wait for num_writers to drop to 1 again.
1914          */
1915         spin_lock(&root->fs_info->trans_lock);
1916         cur_trans->state = TRANS_STATE_COMMIT_DOING;
1917         spin_unlock(&root->fs_info->trans_lock);
1918         wait_event(cur_trans->writer_wait,
1919                    atomic_read(&cur_trans->num_writers) == 1);
1920
1921         /* ->aborted might be set after the previous check, so check it */
1922         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
1923                 ret = cur_trans->aborted;
1924                 goto scrub_continue;
1925         }
1926         /*
1927          * the reloc mutex makes sure that we stop
1928          * the balancing code from coming in and moving
1929          * extents around in the middle of the commit
1930          */
1931         mutex_lock(&root->fs_info->reloc_mutex);
1932
1933         /*
1934          * We needn't worry about the delayed items because we will
1935          * deal with them in create_pending_snapshot(), which is the
1936          * core function of the snapshot creation.
1937          */
1938         ret = create_pending_snapshots(trans, root->fs_info);
1939         if (ret) {
1940                 mutex_unlock(&root->fs_info->reloc_mutex);
1941                 goto scrub_continue;
1942         }
1943
1944         /*
1945          * We insert the dir indexes of the snapshots and update the inode
1946          * of the snapshots' parents after the snapshot creation, so there
1947          * are some delayed items which are not dealt with. Now deal with
1948          * them.
1949          *
1950          * We needn't worry that this operation will corrupt the snapshots,
1951          * because all the trees which are snapshotted will be forced to COW
1952          * the nodes and leaves.
1953          */
1954         ret = btrfs_run_delayed_items(trans, root);
1955         if (ret) {
1956                 mutex_unlock(&root->fs_info->reloc_mutex);
1957                 goto scrub_continue;
1958         }
1959
1960         ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1961         if (ret) {
1962                 mutex_unlock(&root->fs_info->reloc_mutex);
1963                 goto scrub_continue;
1964         }
1965
1966         /*
1967          * make sure none of the code above managed to slip in a
1968          * delayed item
1969          */
1970         btrfs_assert_delayed_root_empty(root);
1971
1972         WARN_ON(cur_trans != trans->transaction);
1973
1974         /* btrfs_commit_tree_roots is responsible for getting the
1975          * various roots consistent with each other.  Every pointer
1976          * in the tree of tree roots has to point to the most up to date
1977          * root for every subvolume and other tree.  So, we have to keep
1978          * the tree logging code from jumping in and changing any
1979          * of the trees.
1980          *
1981          * At this point in the commit, there can't be any tree-log
1982          * writers, but a little lower down we drop the trans mutex
1983          * and let new people in.  By holding the tree_log_mutex
1984          * from now until after the super is written, we avoid races
1985          * with the tree-log code.
1986          */
1987         mutex_lock(&root->fs_info->tree_log_mutex);
1988
1989         ret = commit_fs_roots(trans, root);
1990         if (ret) {
1991                 mutex_unlock(&root->fs_info->tree_log_mutex);
1992                 mutex_unlock(&root->fs_info->reloc_mutex);
1993                 goto scrub_continue;
1994         }
1995
1996         /*
1997          * Since the transaction is done, we can apply the pending changes
1998          * before the next transaction.
1999          */
2000         btrfs_apply_pending_changes(root->fs_info);
2001
2002         /* commit_fs_roots gets rid of all the tree log roots; it is now
2003          * safe to free the root of tree log roots
2004          */
2005         btrfs_free_log_root_tree(trans, root->fs_info);
2006
2007         ret = commit_cowonly_roots(trans, root);
2008         if (ret) {
2009                 mutex_unlock(&root->fs_info->tree_log_mutex);
2010                 mutex_unlock(&root->fs_info->reloc_mutex);
2011                 goto scrub_continue;
2012         }
2013
2014         /*
2015          * The tasks which save the space cache and inode cache may also
2016          * update ->aborted, check it.
2017          */
2018         if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
2019                 ret = cur_trans->aborted;
2020                 mutex_unlock(&root->fs_info->tree_log_mutex);
2021                 mutex_unlock(&root->fs_info->reloc_mutex);
2022                 goto scrub_continue;
2023         }
2024
2025         btrfs_prepare_extent_commit(trans, root);
2026
2027         cur_trans = root->fs_info->running_transaction;
2028
2029         btrfs_set_root_node(&root->fs_info->tree_root->root_item,
2030                             root->fs_info->tree_root->node);
2031         list_add_tail(&root->fs_info->tree_root->dirty_list,
2032                       &cur_trans->switch_commits);
2033
2034         btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
2035                             root->fs_info->chunk_root->node);
2036         list_add_tail(&root->fs_info->chunk_root->dirty_list,
2037                       &cur_trans->switch_commits);
2038
2039         switch_commit_roots(cur_trans, root->fs_info);
2040
2041         assert_qgroups_uptodate(trans);
2042         ASSERT(list_empty(&cur_trans->dirty_bgs));
2043         ASSERT(list_empty(&cur_trans->io_bgs));
2044         update_super_roots(root);
2045
2046         btrfs_set_super_log_root(root->fs_info->super_copy, 0);
2047         btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
2048         memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
2049                sizeof(*root->fs_info->super_copy));
2050
2051         btrfs_update_commit_device_size(root->fs_info);
2052         btrfs_update_commit_device_bytes_used(root, cur_trans);
2053
2054         clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
2055         clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
2056
2057         spin_lock(&root->fs_info->trans_lock);
2058         cur_trans->state = TRANS_STATE_UNBLOCKED;
2059         root->fs_info->running_transaction = NULL;
2060         spin_unlock(&root->fs_info->trans_lock);
2061         mutex_unlock(&root->fs_info->reloc_mutex);
2062
2063         wake_up(&root->fs_info->transaction_wait);
2064
2065         ret = btrfs_write_and_wait_transaction(trans, root);
2066         if (ret) {
2067                 btrfs_error(root->fs_info, ret,
2068                             "Error while writing out transaction");
2069                 mutex_unlock(&root->fs_info->tree_log_mutex);
2070                 goto scrub_continue;
2071         }
2072
2073         ret = write_ctree_super(trans, root, 0);
2074         if (ret) {
2075                 mutex_unlock(&root->fs_info->tree_log_mutex);
2076                 goto scrub_continue;
2077         }
2078
2079         /*
2080          * the super is written, we can safely allow the tree-loggers
2081          * to go about their business
2082          */
2083         mutex_unlock(&root->fs_info->tree_log_mutex);
2084
2085         btrfs_finish_extent_commit(trans, root);
2086
2087         if (cur_trans->have_free_bgs)
2088                 btrfs_clear_space_info_full(root->fs_info);
2089
2090         root->fs_info->last_trans_committed = cur_trans->transid;
2091         /*
2092          * We needn't acquire the lock here because there is no other task
2093          * which can change it.
2094          */
2095         cur_trans->state = TRANS_STATE_COMPLETED;
2096         wake_up(&cur_trans->commit_wait);
2097
2098         spin_lock(&root->fs_info->trans_lock);
2099         list_del_init(&cur_trans->list);
2100         spin_unlock(&root->fs_info->trans_lock);
2101
2102         btrfs_put_transaction(cur_trans);
2103         btrfs_put_transaction(cur_trans);
2104
2105         if (trans->type & __TRANS_FREEZABLE)
2106                 sb_end_intwrite(root->fs_info->sb);
2107
2108         trace_btrfs_transaction_commit(root);
2109
2110         btrfs_scrub_continue(root);
2111
2112         if (current->journal_info == trans)
2113                 current->journal_info = NULL;
2114
2115         kmem_cache_free(btrfs_trans_handle_cachep, trans);
2116
2117         if (current != root->fs_info->transaction_kthread)
2118                 btrfs_run_delayed_iputs(root);
2119
2120         return ret;
2121
2122 scrub_continue:
2123         btrfs_scrub_continue(root);
2124 cleanup_transaction:
2125         btrfs_trans_release_metadata(trans, root);
2126         trans->block_rsv = NULL;
2127         if (trans->qgroup_reserved) {
2128                 btrfs_qgroup_free(root, trans->qgroup_reserved);
2129                 trans->qgroup_reserved = 0;
2130         }
2131         btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
2132         if (current->journal_info == trans)
2133                 current->journal_info = NULL;
2134         cleanup_transaction(trans, root, ret);
2135
2136         return ret;
2137 }
2138
2139 /*
2140  * return < 0 on error
2141  * 0 if there are no more dead_roots at the time of call
2142  * 1 if there are more to be processed, call me again
2143  *
2144  * A return value of 1 indicates there are certainly more snapshots to delete,
2145  * but if a new one arrives during processing, it may return 0. We don't mind,
2146  * because btrfs_commit_super will poke the cleaner thread and it will process
2147  * it a few seconds later.
2148  */
2149 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2150 {
2151         int ret;
2152         struct btrfs_fs_info *fs_info = root->fs_info;
2153
2154         spin_lock(&fs_info->trans_lock);
2155         if (list_empty(&fs_info->dead_roots)) {
2156                 spin_unlock(&fs_info->trans_lock);
2157                 return 0;
2158         }
2159         root = list_first_entry(&fs_info->dead_roots,
2160                         struct btrfs_root, root_list);
2161         list_del_init(&root->root_list);
2162         spin_unlock(&fs_info->trans_lock);
2163
2164         pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);
2165
2166         btrfs_kill_all_delayed_nodes(root);
2167
2168         if (btrfs_header_backref_rev(root->node) <
2169                         BTRFS_MIXED_BACKREF_REV)
2170                 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
2171         else
2172                 ret = btrfs_drop_snapshot(root, NULL, 1, 0);
2173
2174         return (ret < 0) ? 0 : 1;
2175 }
2176
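/* apply mount option changes that were deferred until the transaction commit */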
2177 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2178 {
2179         unsigned long prev;
2180         unsigned long bit;
2181
2182         prev = xchg(&fs_info->pending_changes, 0);
2183         if (!prev)
2184                 return;
2185
2186         bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
2187         if (prev & bit)
2188                 btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2189         prev &= ~bit;
2190
2191         bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
2192         if (prev & bit)
2193                 btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2194         prev &= ~bit;
2195
2196         bit = 1 << BTRFS_PENDING_COMMIT;
2197         if (prev & bit)
2198                 btrfs_debug(fs_info, "pending commit done");
2199         prev &= ~bit;
2200
2201         if (prev)
2202                 btrfs_warn(fs_info,
2203                         "unknown pending changes left 0x%lx, ignoring", prev);
2204 }