kvmfornfv.git: kernel/fs/btrfs/ordered-data.c (raw update to linux-4.4.6-rt14 kernel sources)
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
        if (entry->file_offset + entry->len < entry->file_offset)
                return (u64)-1;
        return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or the existing node whose range
 * already covers the given offset
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_ordered_extent *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

                if (file_offset < entry->file_offset)
                        p = &(*p)->rb_left;
                else if (file_offset >= entry_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

static void ordered_data_tree_panic(struct inode *inode, int errno,
                                               u64 offset)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
                    "%llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
                                     struct rb_node **prev_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *test;
        struct btrfs_ordered_extent *entry;
        struct btrfs_ordered_extent *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                prev = n;
                prev_entry = entry;

                if (file_offset < entry->file_offset)
                        n = n->rb_left;
                else if (file_offset >= entry_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        while (prev && file_offset >= entry_end(prev_entry)) {
                test = rb_next(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                if (file_offset < entry_end(prev_entry))
                        break;

                prev = test;
        }
        if (prev)
                prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
                                      rb_node);
        while (prev && file_offset < entry_end(prev_entry)) {
                test = rb_prev(prev);
                if (!test)
                        break;
                prev_entry = rb_entry(test, struct btrfs_ordered_extent,
                                      rb_node);
                prev = test;
        }
        *prev_ret = prev;
        return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
        if (file_offset < entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

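/*
 * helper to check if a given file range overlaps a given entry
 */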
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
                          u64 len)
{
        if (file_offset + len <= entry->file_offset ||
            entry->file_offset + entry->len <= file_offset)
                return 0;
        return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
                                          u64 file_offset)
{
        struct rb_root *root = &tree->tree;
        struct rb_node *prev = NULL;
        struct rb_node *ret;
        struct btrfs_ordered_extent *entry;

        if (tree->last) {
                entry = rb_entry(tree->last, struct btrfs_ordered_extent,
                                 rb_node);
                if (offset_in_entry(entry, file_offset))
                        return tree->last;
        }
        ret = __tree_search(root, file_offset, &prev);
        if (!ret)
                ret = prev;
        if (ret)
                tree->last = ret;
        return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int dio, int compress_type)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry;

        tree = &BTRFS_I(inode)->ordered_tree;
        entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = igrab(inode);
        entry->compress_type = compress_type;
        entry->truncated_len = (u64)-1;
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);

        if (dio)
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

        /* one ref for the tree */
        atomic_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);
        INIT_LIST_HEAD(&entry->log_list);
        INIT_LIST_HEAD(&entry->trans_list);

        trace_btrfs_ordered_extent_add(inode, entry);

        spin_lock_irq(&tree->lock);
        node = tree_insert(&tree->tree, file_offset,
                           &entry->rb_node);
        if (node)
                ordered_data_tree_panic(inode, -EEXIST, file_offset);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_add_tail(&entry->root_extent_list,
                      &root->ordered_extents);
        root->nr_ordered_extents++;
        if (root->nr_ordered_extents == 1) {
                spin_lock(&root->fs_info->ordered_root_lock);
                BUG_ON(!list_empty(&root->ordered_root));
                list_add_tail(&root->ordered_root,
                              &root->fs_info->ordered_roots);
                spin_unlock(&root->fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);

        return 0;
}

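/*
 * Thin wrappers around __btrfs_add_ordered_extent() for the buffered,
 * direct IO and compressed write paths.
 */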
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
                             u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
                                 u64 start, u64 len, u64 disk_len, int type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 1,
                                          BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
                                      u64 start, u64 len, u64 disk_len,
                                      int type, int compress_type)
{
        return __btrfs_add_ordered_extent(inode, file_offset, start, len,
                                          disk_len, type, 0,
                                          compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
                           struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum)
{
        struct btrfs_ordered_inode_tree *tree;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
        spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 *file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        int ret;
        unsigned long flags;
        u64 dec_end;
        u64 dec_start;
        u64 to_dec;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        node = tree_search(tree, *file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, *file_offset)) {
                ret = 1;
                goto out;
        }

        dec_start = max(*file_offset, entry->file_offset);
        dec_end = min(*file_offset + io_size, entry->file_offset +
                      entry->len);
        *file_offset = dec_end;
        if (dec_start > dec_end) {
                btrfs_crit(BTRFS_I(inode)->root->fs_info,
                        "bad ordering dec_start %llu end %llu", dec_start, dec_end);
        }
        to_dec = dec_end - dec_start;
        if (to_dec > entry->bytes_left) {
                btrfs_crit(BTRFS_I(inode)->root->fs_info,
                        "bad ordered accounting left %llu size %llu",
                        entry->bytes_left, to_dec);
        }
        entry->bytes_left -= to_dec;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /*
                 * Implicit memory barrier after test_and_set_bit
                 */
                if (waitqueue_active(&entry->wait))
                        wake_up(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 file_offset, u64 io_size, int uptodate)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;
        unsigned long flags;
        int ret;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irqsave(&tree->lock, flags);
        if (cached && *cached) {
                entry = *cached;
                goto have_entry;
        }

        node = tree_search(tree, file_offset);
        if (!node) {
                ret = 1;
                goto out;
        }

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
        if (!offset_in_entry(entry, file_offset)) {
                ret = 1;
                goto out;
        }

        if (io_size > entry->bytes_left) {
                btrfs_crit(BTRFS_I(inode)->root->fs_info,
                           "bad ordered accounting left %llu size %llu",
                       entry->bytes_left, io_size);
        }
        entry->bytes_left -= io_size;
        if (!uptodate)
                set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

        if (entry->bytes_left == 0) {
                ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
                /*
                 * Implicit memory barrier after test_and_set_bit
                 */
                if (waitqueue_active(&entry->wait))
                        wake_up(&entry->wait);
        } else {
                ret = 1;
        }
out:
        if (!ret && cached && entry) {
                *cached = entry;
                atomic_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
}

/* Needs to be called either under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct inode *inode,
                              struct list_head *logged_list,
                              const loff_t start,
                              const loff_t end)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_ordered_extent *ordered;
        struct rb_node *n;
        struct rb_node *prev;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        n = __tree_search(&tree->tree, end, &prev);
        if (!n)
                n = prev;
        for (; n; n = rb_prev(n)) {
                ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
                if (ordered->file_offset > end)
                        continue;
                if (entry_end(ordered) <= start)
                        break;
                if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;
                list_add(&ordered->log_list, logged_list);
                atomic_inc(&ordered->refs);
        }
        spin_unlock_irq(&tree->lock);
}

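/*
 * Drop the references taken by btrfs_get_logged_extents() and empty the
 * list.
 */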
void btrfs_put_logged_extents(struct list_head *logged_list)
{
        struct btrfs_ordered_extent *ordered;

        while (!list_empty(logged_list)) {
                ordered = list_first_entry(logged_list,
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                btrfs_put_ordered_extent(ordered);
        }
}

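/*
 * Splice the collected logged extents onto the log root's list for the
 * current log transid.
 */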
void btrfs_submit_logged_extents(struct list_head *logged_list,
                                 struct btrfs_root *log)
{
        int index = log->log_transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        list_splice_tail(logged_list, &log->logged_list[index]);
        spin_unlock_irq(&log->log_extents_lock[index]);
}

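/*
 * Wait for all logged ordered extents of the given log transid to finish
 * their IO, starting writeback first for any that have not completed yet.
 */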
void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;
        int index = transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        while (!list_empty(&log->logged_list[index])) {
                struct inode *inode;
                ordered = list_first_entry(&log->logged_list[index],
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                inode = ordered->inode;
                spin_unlock_irq(&log->log_extents_lock[index]);

                if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
                        u64 start = ordered->file_offset;
                        u64 end = ordered->file_offset + ordered->len - 1;

                        WARN_ON(!inode);
                        filemap_fdatawrite_range(inode->i_mapping, start, end);
                }
                wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                   &ordered->flags));

                /*
                 * In order to keep us from losing our ordered extent
                 * information when committing the transaction we have to make
                 * sure that any logged extents are completed when we go to
                 * commit the transaction.  To do this we simply increase the
                 * current transaction's pending_ordered counter and decrement
                 * it when the ordered extent completes.
                 */
                if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
                        struct btrfs_ordered_inode_tree *tree;

                        tree = &BTRFS_I(inode)->ordered_tree;
                        spin_lock_irq(&tree->lock);
                        if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
                                set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
                                atomic_inc(&trans->transaction->pending_ordered);
                        }
                        spin_unlock_irq(&tree->lock);
                }
                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);
}

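/*
 * Drop the ordered extents still tracked on the log root's list for the
 * given transid without waiting for them to complete.
 */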
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;
        int index = transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        while (!list_empty(&log->logged_list[index])) {
                ordered = list_first_entry(&log->logged_list[index],
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                spin_unlock_irq(&log->log_extents_lock[index]);
                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
        struct list_head *cur;
        struct btrfs_ordered_sum *sum;

        trace_btrfs_ordered_extent_put(entry->inode, entry);

        if (atomic_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->log_list));
                ASSERT(list_empty(&entry->trans_list));
                ASSERT(list_empty(&entry->root_extent_list));
                ASSERT(RB_EMPTY_NODE(&entry->rb_node));
                if (entry->inode)
                        btrfs_add_delayed_iput(entry->inode);
                while (!list_empty(&entry->list)) {
                        cur = entry->list.next;
                        sum = list_entry(cur, struct btrfs_ordered_sum, list);
                        list_del(&sum->list);
                        kfree(sum);
                }
                kmem_cache_free(btrfs_ordered_extent_cache, entry);
        }
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct rb_node *node;
        bool dec_pending_ordered = false;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
                dec_pending_ordered = true;
        spin_unlock_irq(&tree->lock);

        /*
         * The current running transaction is waiting on us; we need to let it
         * know that we're complete and wake it up.
         */
        if (dec_pending_ordered) {
                struct btrfs_transaction *trans;

                /*
                 * The checks for trans are just a formality, it should be set,
                 * but if it isn't we don't want to deref/assert under the spin
                 * lock, so be nice and check if trans is set, but ASSERT() so
                 * if it isn't set a developer will notice.
                 */
                spin_lock(&root->fs_info->trans_lock);
                trans = root->fs_info->running_transaction;
                if (trans)
                        atomic_inc(&trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                ASSERT(trans);
                if (trans) {
                        if (atomic_dec_and_test(&trans->pending_ordered))
                                wake_up(&trans->pending_wait);
                        btrfs_put_transaction(trans);
                }
        }

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&root->fs_info->ordered_root_lock);
                BUG_ON(list_empty(&root->ordered_root));
                list_del_init(&root->ordered_root);
                spin_unlock(&root->fs_info->ordered_root_lock);
        }
        spin_unlock(&root->ordered_extent_lock);
        wake_up(&entry->wait);
}

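/*
 * Work function used by btrfs_wait_ordered_extents(): start and wait for a
 * single ordered extent, then signal its completion.
 */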
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
        struct btrfs_ordered_extent *ordered;

        ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
        btrfs_start_ordered_extent(ordered->inode, ordered, 1);
        complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr)
{
        struct list_head splice, works;
        struct btrfs_ordered_extent *ordered, *next;
        int count = 0;

        INIT_LIST_HEAD(&splice);
        INIT_LIST_HEAD(&works);

        mutex_lock(&root->ordered_extent_mutex);
        spin_lock(&root->ordered_extent_lock);
        list_splice_init(&root->ordered_extents, &splice);
        while (!list_empty(&splice) && nr) {
                ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
                                           root_extent_list);
                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
                atomic_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);

                btrfs_init_work(&ordered->flush_work,
                                btrfs_flush_delalloc_helper,
                                btrfs_run_ordered_extent_work, NULL, NULL);
                list_add_tail(&ordered->work_list, &works);
                btrfs_queue_work(root->fs_info->flush_workers,
                                 &ordered->flush_work);

                cond_resched();
                spin_lock(&root->ordered_extent_lock);
                if (nr != -1)
                        nr--;
                count++;
        }
        list_splice_tail(&splice, &root->ordered_extents);
        spin_unlock(&root->ordered_extent_lock);

        list_for_each_entry_safe(ordered, next, &works, work_list) {
                list_del_init(&ordered->work_list);
                wait_for_completion(&ordered->completion);
                btrfs_put_ordered_extent(ordered);
                cond_resched();
        }
        mutex_unlock(&root->ordered_extent_mutex);

        return count;
}

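/*
 * Wait for ordered extents across all roots that currently have them,
 * limiting the total to nr extents (nr == -1 means no limit).
 */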
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_root *root;
        struct list_head splice;
        int done;

        INIT_LIST_HEAD(&splice);

        mutex_lock(&fs_info->ordered_operations_mutex);
        spin_lock(&fs_info->ordered_root_lock);
        list_splice_init(&fs_info->ordered_roots, &splice);
        while (!list_empty(&splice) && nr) {
                root = list_first_entry(&splice, struct btrfs_root,
                                        ordered_root);
                root = btrfs_grab_fs_root(root);
                BUG_ON(!root);
                list_move_tail(&root->ordered_root,
                               &fs_info->ordered_roots);
                spin_unlock(&fs_info->ordered_root_lock);

                done = btrfs_wait_ordered_extents(root, nr);
                btrfs_put_fs_root(root);

                spin_lock(&fs_info->ordered_root_lock);
                if (nr != -1) {
                        nr -= done;
                        WARN_ON(nr < 0);
                }
        }
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
                                       struct btrfs_ordered_extent *entry,
                                       int wait)
{
        u64 start = entry->file_offset;
        u64 end = start + entry->len - 1;

        trace_btrfs_ordered_extent_start(inode, entry);

        /*
         * pages in the range can be dirty, clean or writeback.  We
         * start IO on any dirty ones so the wait doesn't stall waiting
         * for the flusher thread to find them
         */
        if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
                filemap_fdatawrite_range(inode->i_mapping, start, end);
        if (wait) {
                wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
                                                 &entry->flags));
        }
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
        int ret = 0;
        int ret_wb = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;

        if (start + len < start) {
                orig_end = INT_LIMIT(loff_t);
        } else {
                orig_end = start + len - 1;
                if (orig_end > INT_LIMIT(loff_t))
                        orig_end = INT_LIMIT(loff_t);
        }

        /* start IO across the range first to instantiate any delalloc
         * extents
         */
        ret = btrfs_fdatawrite_range(inode, start, orig_end);
        if (ret)
                return ret;

        /*
         * If we have a writeback error don't return immediately. Wait first
         * for any ordered extents that haven't completed yet. This is to make
         * sure no one can dirty the same page ranges and call writepages()
         * before the ordered extents complete - to avoid failures (-EEXIST)
         * when adding the new ordered extents to the ordered tree.
         */
        ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

        end = orig_end;
        while (1) {
                ordered = btrfs_lookup_first_ordered_extent(inode, end);
                if (!ordered)
                        break;
                if (ordered->file_offset > orig_end) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                if (ordered->file_offset + ordered->len <= start) {
                        btrfs_put_ordered_extent(ordered);
                        break;
                }
                btrfs_start_ordered_extent(inode, ordered, 1);
                end = ordered->file_offset;
                if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
                        ret = -EIO;
                btrfs_put_ordered_extent(ordered);
                if (ret || end == 0 || end == start)
                        break;
                end--;
        }
        return ret_wb ? ret_wb : ret;
}

/*
 * find an ordered extent corresponding to file_offset.  return NULL if
 * nothing is found, otherwise take a reference on the extent and return it
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
                                                         u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
                atomic_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/* Since the DIO code tries to lock a wide area we need to look for any ordered
 * extents that exist in the range, rather than just the start of the range.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
                                                        u64 file_offset,
                                                        u64 len)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node) {
                node = tree_search(tree, file_offset + len);
                if (!node)
                        goto out;
        }

        while (1) {
                entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
                if (range_overlaps(entry, file_offset, len))
                        break;

                if (entry->file_offset >= file_offset + len) {
                        entry = NULL;
                        break;
                }
                entry = NULL;
                node = rb_next(node);
                if (!node)
                        break;
        }
out:
        if (entry)
                atomic_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
}

bool btrfs_have_ordered_extents_in_range(struct inode *inode,
                                         u64 file_offset,
                                         u64 len)
{
        struct btrfs_ordered_extent *oe;

        oe = btrfs_lookup_ordered_range(inode, file_offset, len);
        if (oe) {
                btrfs_put_ordered_extent(oe);
                return true;
        }
        return false;
}

/*
 * lookup and return any extent before 'file_offset'.  NULL is returned
 * if none is found
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
        struct btrfs_ordered_inode_tree *tree;
        struct rb_node *node;
        struct btrfs_ordered_extent *entry = NULL;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = tree_search(tree, file_offset);
        if (!node)
                goto out;

        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
        atomic_inc(&entry->refs);
out:
        spin_unlock_irq(&tree->lock);
        return entry;
}

/*
 * After an extent is done, call this to conditionally update the on disk
 * i_size.  i_size is updated to cover any fully written part of the file.
 */
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                                struct btrfs_ordered_extent *ordered)
{
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        u64 disk_i_size;
        u64 new_i_size;
        u64 i_size = i_size_read(inode);
        struct rb_node *node;
        struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
        int ret = 1;

        spin_lock_irq(&tree->lock);
        if (ordered) {
                offset = entry_end(ordered);
                if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags))
                        offset = min(offset,
                                     ordered->file_offset +
                                     ordered->truncated_len);
        } else {
                offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
        }
        disk_i_size = BTRFS_I(inode)->disk_i_size;

        /* truncate file */
        if (disk_i_size > i_size) {
                BTRFS_I(inode)->disk_i_size = i_size;
                ret = 0;
                goto out;
        }

        /*
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
        if (disk_i_size == i_size)
                goto out;

        /*
         * We still need to update disk_i_size if outstanding_isize is greater
         * than disk_i_size.
         */
        if (offset <= disk_i_size &&
            (!ordered || ordered->outstanding_isize <= disk_i_size))
                goto out;

        /*
         * walk backward from this ordered extent to disk_i_size.
         * if we find an ordered extent then we can't update disk i_size
         * yet
         */
        if (ordered) {
                node = rb_prev(&ordered->rb_node);
        } else {
                prev = tree_search(tree, offset);
                /*
                 * we insert file extents without involving ordered struct,
                 * so there should be no ordered struct covering this offset
                 */
                if (prev) {
                        test = rb_entry(prev, struct btrfs_ordered_extent,
                                        rb_node);
                        BUG_ON(offset_in_entry(test, offset));
                }
                node = prev;
        }
        for (; node; node = rb_prev(node)) {
                test = rb_entry(node, struct btrfs_ordered_extent, rb_node);

                /* We treat this entry as if it doesn't exist */
                if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
                        continue;
                if (test->file_offset + test->len <= disk_i_size)
                        break;
                if (test->file_offset >= i_size)
                        break;
                if (entry_end(test) > disk_i_size) {
                        /*
                         * we don't update disk_i_size now, so record this
                         * outstanding i_size, or we will not know the real
                         * i_size.
                         */
                        if (test->outstanding_isize < offset)
                                test->outstanding_isize = offset;
                        if (ordered &&
                            ordered->outstanding_isize >
                            test->outstanding_isize)
                                test->outstanding_isize =
                                                ordered->outstanding_isize;
                        goto out;
                }
        }
        new_i_size = min_t(u64, offset, i_size);

        /*
         * Some ordered extents may have completed before the current one, and
         * we hold the real i_size in ->outstanding_isize.
         */
        if (ordered && ordered->outstanding_isize > new_i_size)
                new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
        BTRFS_I(inode)->disk_i_size = new_i_size;
        ret = 0;
out:
        /*
         * We need to do this because we can't remove ordered extents until
         * after the i_disk_size has been updated and then the inode has been
         * updated to reflect the change, so we need to tell anybody who finds
         * this ordered extent that we've already done all the real work, we
         * just haven't completed all the other work.
         */
        if (ordered)
                set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
        spin_unlock_irq(&tree->lock);
        return ret;
}

/*
 * search the ordered extents for one corresponding to 'offset' and
 * try to find a checksum.  This is used because we allow pages to
 * be reclaimed before their checksum is actually put into the btree
 */
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                           u32 *sum, int len)
{
        struct btrfs_ordered_sum *ordered_sum;
        struct btrfs_ordered_extent *ordered;
        struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
        unsigned long num_sectors;
        unsigned long i;
        u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
        int index = 0;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        if (!ordered)
                return 0;

        spin_lock_irq(&tree->lock);
        list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
                if (disk_bytenr >= ordered_sum->bytenr &&
                    disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
                        i = (disk_bytenr - ordered_sum->bytenr) >>
                            inode->i_sb->s_blocksize_bits;
                        num_sectors = ordered_sum->len >>
                                      inode->i_sb->s_blocksize_bits;
                        num_sectors = min_t(int, len - index, num_sectors - i);
                        memcpy(sum + index, ordered_sum->sums + i,
                               num_sectors);

                        index += (int)num_sectors;
                        if (index == len)
                                goto out;
                        disk_bytenr += num_sectors * sectorsize;
                }
        }
out:
        spin_unlock_irq(&tree->lock);
        btrfs_put_ordered_extent(ordered);
        return index;
}

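/*
 * Create the slab cache for ordered extents; called at module init time.
 */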
int __init ordered_data_init(void)
{
        btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
                                     sizeof(struct btrfs_ordered_extent), 0,
                                     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                                     NULL);
        if (!btrfs_ordered_extent_cache)
                return -ENOMEM;

        return 0;
}

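/*
 * Destroy the ordered extent slab cache; called at module exit time.
 */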
void ordered_data_exit(void)
{
        if (btrfs_ordered_extent_cache)
                kmem_cache_destroy(btrfs_ordered_extent_cache);
}