[kvmfornfv.git] kernel/fs/ocfs2/aops.c (raw update to the linux-4.4.6-rt14 kernel sources)
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "dir.h"
#include "namei.h"
#include "sysfile.h"

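/*
 * Map a logical block within a (non-fast) symlink to disk. Symlink
 * data is never created through the page cache, so for a freshly
 * created inode the page-cache buffer may have to be populated from
 * the buffer-cache copy below.
 */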
static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
{
        int err = -EIO;
        int status;
        struct ocfs2_dinode *fe = NULL;
        struct buffer_head *bh = NULL;
        struct buffer_head *buffer_cache_bh = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        void *kaddr;

        trace_ocfs2_symlink_get_block(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)iblock, bh_result, create);

        BUG_ON(ocfs2_inode_is_fast_symlink(inode));

        if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
                mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
                     (unsigned long long)iblock);
                goto bail;
        }

        status = ocfs2_read_inode_block(inode, &bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

        if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
                                                    le32_to_cpu(fe->i_clusters))) {
                err = -ENOMEM;
                mlog(ML_ERROR, "block offset is outside the allocated size: "
                     "%llu\n", (unsigned long long)iblock);
                goto bail;
        }

        /* We don't use the page cache to create symlink data, so if
         * need be, copy it over from the buffer cache. */
        if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) {
                u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) +
                            iblock;
                buffer_cache_bh = sb_getblk(osb->sb, blkno);
                if (!buffer_cache_bh) {
                        err = -ENOMEM;
                        mlog(ML_ERROR, "couldn't getblock for symlink!\n");
                        goto bail;
                }

                /* we haven't locked out transactions, so a commit
                 * could've happened. Since we've got a reference on
                 * the bh, even if it commits while we're doing the
                 * copy, the data is still good. */
                if (buffer_jbd(buffer_cache_bh)
                    && ocfs2_inode_is_new(inode)) {
                        kaddr = kmap_atomic(bh_result->b_page);
                        if (!kaddr) {
                                mlog(ML_ERROR, "couldn't kmap!\n");
                                goto bail;
                        }
                        memcpy(kaddr + (bh_result->b_size * iblock),
                               buffer_cache_bh->b_data,
                               bh_result->b_size);
                        kunmap_atomic(kaddr);
                        set_buffer_uptodate(bh_result);
                }
                brelse(buffer_cache_bh);
        }

        map_bh(bh_result, inode->i_sb,
               le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock);

        err = 0;

bail:
        brelse(bh);

        return err;
}

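/*
 * ->get_block callback used by the buffered read and writeback paths.
 * ocfs2 never allocates blocks here; see the comment on BH_New below.
 */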
int ocfs2_get_block(struct inode *inode, sector_t iblock,
                    struct buffer_head *bh_result, int create)
{
        int err = 0;
        unsigned int ext_flags;
        u64 max_blocks = bh_result->b_size >> inode->i_blkbits;
        u64 p_blkno, count, past_eof;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno,
                              (unsigned long long)iblock, bh_result, create);

        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE)
                mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n",
                     inode, inode->i_ino);

        if (S_ISLNK(inode->i_mode)) {
                /* this always does I/O for some reason. */
                err = ocfs2_symlink_get_block(inode, iblock, bh_result, create);
                goto bail;
        }

        err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count,
                                          &ext_flags);
        if (err) {
                mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
                     "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
                     (unsigned long long)p_blkno);
                goto bail;
        }

        if (max_blocks < count)
                count = max_blocks;

        /*
         * ocfs2 never allocates in this function - the only time we
         * need to use BH_New is when we're extending i_size on a file
         * system which doesn't support holes, in which case BH_New
         * allows __block_write_begin() to zero.
         *
         * If we see this on a sparse file system, then a truncate has
         * raced us and removed the cluster. In this case, we clear
         * the buffers dirty and uptodate bits and let the buffer code
         * ignore it as a hole.
         */
        if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) {
                clear_buffer_dirty(bh_result);
                clear_buffer_uptodate(bh_result);
                goto bail;
        }

        /* Treat the unwritten extent as a hole for zeroing purposes. */
        if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
                map_bh(bh_result, inode->i_sb, p_blkno);

        bh_result->b_size = count << inode->i_blkbits;

        if (!ocfs2_sparse_alloc(osb)) {
                if (p_blkno == 0) {
                        err = -EIO;
                        mlog(ML_ERROR,
                             "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
                             (unsigned long long)iblock,
                             (unsigned long long)p_blkno,
                             (unsigned long long)OCFS2_I(inode)->ip_blkno);
                        mlog(ML_ERROR, "Size %llu, clusters %u\n",
                             (unsigned long long)i_size_read(inode),
                             OCFS2_I(inode)->ip_clusters);
                        dump_stack();
                        goto bail;
                }
        }

        past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

        trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno,
                                  (unsigned long long)past_eof);
        if (create && (iblock >= past_eof))
                set_buffer_new(bh_result);

bail:
        if (err < 0)
                err = -EIO;

        return err;
}

int ocfs2_read_inline_data(struct inode *inode, struct page *page,
                           struct buffer_head *di_bh)
{
        void *kaddr;
        loff_t size;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

        if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) {
                ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno);
                return -EROFS;
        }

        size = i_size_read(inode);

        if (size > PAGE_CACHE_SIZE ||
            size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
                ocfs2_error(inode->i_sb,
                            "Inode %llu with inline data has bad size: %Lu\n",
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            (unsigned long long)size);
                return -EROFS;
        }

        kaddr = kmap_atomic(page);
        if (size)
                memcpy(kaddr, di->id2.i_data.id_data, size);
        /* Clear the remaining part of the page */
        memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);

        SetPageUptodate(page);

        return 0;
}

static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
{
        int ret;
        struct buffer_head *di_bh = NULL;

        BUG_ON(!PageLocked(page));
        BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

        ret = ocfs2_read_inode_block(inode, &di_bh);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_read_inline_data(inode, page, di_bh);
out:
        unlock_page(page);

        brelse(di_bh);
        return ret;
}

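/*
 * ->readpage. Taking a cluster lock while holding a page lock can
 * deadlock against downconvert, so both the inode lock and
 * ip_alloc_sem are taken nonblocking here; AOP_TRUNCATED_PAGE makes
 * the VFS drop the page lock and retry the whole readpage.
 */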
static int ocfs2_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
        int ret, unlock = 1;

        trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
                             (page ? page->index : 0));

        ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
        if (ret != 0) {
                if (ret == AOP_TRUNCATED_PAGE)
                        unlock = 0;
                mlog_errno(ret);
                goto out;
        }

        if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
                /*
                 * Unlock the page and cycle ip_alloc_sem so that we don't
                 * busyloop waiting for ip_alloc_sem to unlock
                 */
                ret = AOP_TRUNCATED_PAGE;
                unlock_page(page);
                unlock = 0;
                down_read(&oi->ip_alloc_sem);
                up_read(&oi->ip_alloc_sem);
                goto out_inode_unlock;
        }

        /*
         * i_size might have just been updated as we grabbed the meta lock.  We
         * might now be discovering a truncate that hit on another node.
         * block_read_full_page->get_block freaks out if it is asked to read
         * beyond the end of a file, so we check here.  Callers
         * (generic_file_read, vm_ops->fault) are clever enough to check i_size
         * and notice that the page they just read isn't needed.
         *
         * XXX sys_readahead() seems to get that wrong?
         */
        if (start >= i_size_read(inode)) {
                zero_user(page, 0, PAGE_SIZE);
                SetPageUptodate(page);
                ret = 0;
                goto out_alloc;
        }

        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                ret = ocfs2_readpage_inline(inode, page);
        else
                ret = block_read_full_page(page, ocfs2_get_block);
        unlock = 0;

out_alloc:
        up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_inode_unlock:
        ocfs2_inode_unlock(inode, 0);
out:
        if (unlock)
                unlock_page(page);
        return ret;
}

/*
 * This is used only for read-ahead. Failures or difficult-to-handle
 * situations are safe to ignore.
 *
 * Right now, we don't bother with BH_Boundary - in-inode extent lists
 * are quite large (243 extents on 4k blocks), so most inodes don't
 * grow out to a tree. If need be, detecting boundary extents could
 * trivially be added in a future version of ocfs2_get_block().
 */
static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        int ret, err = -EIO;
        struct inode *inode = mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        loff_t start;
        struct page *last;

        /*
         * Use the nonblocking flag for the dlm code to avoid page
         * lock inversion, but don't bother with retrying.
         */
        ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
        if (ret)
                return err;

        if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
                ocfs2_inode_unlock(inode, 0);
                return err;
        }

        /*
         * Don't bother with inline-data. There isn't anything
         * to read-ahead in that case anyway...
         */
        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                goto out_unlock;

        /*
         * Check whether a remote node truncated this file - we just
         * drop out in that case as it's not worth handling here.
         */
        last = list_entry(pages->prev, struct page, lru);
        start = (loff_t)last->index << PAGE_CACHE_SHIFT;
        if (start >= i_size_read(inode))
                goto out_unlock;

        err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);

out_unlock:
        up_read(&oi->ip_alloc_sem);
        ocfs2_inode_unlock(inode, 0);

        return err;
}

/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing.  It can't block on any cluster locks
 * during block mapping.  It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
        trace_ocfs2_writepage(
                (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno,
                page->index);

        return block_write_full_page(page, ocfs2_get_block, wbc);
}

/* Taken from ext3. We don't necessarily need the full blown
 * functionality yet, but IMHO it's better to cut and paste the whole
 * thing so we can avoid introducing our own bugs (and easily pick up
 * their fixes when they happen) --Mark */
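/*
 * Apply @fn to every buffer_head of @head's page that overlaps the
 * byte range [@from, @to). If @partial is non-NULL, it is set when a
 * buffer outside that range is not uptodate, i.e. the page cannot be
 * marked uptodate as a whole.
 */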
int walk_page_buffers(  handle_t *handle,
                        struct buffer_head *head,
                        unsigned from,
                        unsigned to,
                        int *partial,
                        int (*fn)(      handle_t *handle,
                                        struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
                block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}

static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
        sector_t status;
        u64 p_blkno = 0;
        int err = 0;
        struct inode *inode = mapping->host;

        trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno,
                         (unsigned long long)block);

        /* We don't need to lock journal system files, since they aren't
         * accessed concurrently from multiple nodes.
         */
        if (!INODE_JOURNAL(inode)) {
                err = ocfs2_inode_lock(inode, NULL, 0);
                if (err) {
                        if (err != -ENOENT)
                                mlog_errno(err);
                        goto bail;
                }
                down_read(&OCFS2_I(inode)->ip_alloc_sem);
        }

        if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
                err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
                                                  NULL);

        if (!INODE_JOURNAL(inode)) {
                up_read(&OCFS2_I(inode)->ip_alloc_sem);
                ocfs2_inode_unlock(inode, 0);
        }

        if (err) {
                mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
                     (unsigned long long)block);
                mlog_errno(err);
                goto bail;
        }

bail:
        status = err ? 0 : p_blkno;

        return status;
}

/*
 * TODO: Make this into a generic get_blocks function.
 *
 * From do_direct_io in direct-io.c:
 *  "So what we do is to permit the ->get_blocks function to populate
 *   bh.b_size with the size of IO which is permitted at this offset and
 *   this i_blkbits."
 *
 * This function is called directly from get_more_blocks in direct-io.c.
 *
 * called like this: dio->get_blocks(dio->inode, fs_startblk,
 *                                      fs_count, map_bh, dio->rw == WRITE);
 */
static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
                                     struct buffer_head *bh_result, int create)
{
        int ret;
        u32 cpos = 0;
        int alloc_locked = 0;
        u64 p_blkno, inode_blocks, contig_blocks;
        unsigned int ext_flags;
        unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
        unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
        unsigned long len = bh_result->b_size;
        unsigned int clusters_to_alloc = 0, contig_clusters = 0;

        cpos = ocfs2_blocks_to_clusters(inode->i_sb, iblock);

        /* This function won't even be called if the request isn't all
         * nicely aligned and of the right size, so there's no need
         * for us to check any of that. */

        inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));

        down_read(&OCFS2_I(inode)->ip_alloc_sem);

        /* This figures out the size of the next contiguous block, and
         * our logical offset */
        ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
                                          &contig_blocks, &ext_flags);
        up_read(&OCFS2_I(inode)->ip_alloc_sem);

        if (ret) {
                mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
                     (unsigned long long)iblock);
                ret = -EIO;
                goto bail;
        }

        /* We should already CoW the refcounted extent in case of create. */
        BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED));

        /* allocate blocks if no p_blkno is found, and create == 1 */
        if (!p_blkno && create) {
                ret = ocfs2_inode_lock(inode, NULL, 1);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto bail;
                }

                alloc_locked = 1;

                down_write(&OCFS2_I(inode)->ip_alloc_sem);

                /* when filling a hole, the allocation can't be larger
                 * than the size of the hole */
                clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len);
                contig_clusters = ocfs2_clusters_for_blocks(inode->i_sb,
                                contig_blocks);
                if (clusters_to_alloc > contig_clusters)
                        clusters_to_alloc = contig_clusters;

                /* allocate extents and insert them into the extent tree */
                ret = ocfs2_extend_allocation(inode, cpos,
                                clusters_to_alloc, 0);
                if (ret < 0) {
                        up_write(&OCFS2_I(inode)->ip_alloc_sem);
                        mlog_errno(ret);
                        goto bail;
                }

                ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
                                &contig_blocks, &ext_flags);
                if (ret < 0) {
                        up_write(&OCFS2_I(inode)->ip_alloc_sem);
                        mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
                                        (unsigned long long)iblock);
                        ret = -EIO;
                        goto bail;
                }
                set_buffer_new(bh_result);
                up_write(&OCFS2_I(inode)->ip_alloc_sem);
        }

        /*
         * get_more_blocks() expects us to describe a hole by clearing
         * the mapped bit on bh_result().
         *
         * Consider an unwritten extent as a hole.
         */
        if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
                map_bh(bh_result, inode->i_sb, p_blkno);
        else
                clear_buffer_mapped(bh_result);

        /* make sure we don't map more than max_blocks blocks here as
           that's all the kernel will handle at this point. */
        if (max_blocks < contig_blocks)
                contig_blocks = max_blocks;
        bh_result->b_size = contig_blocks << blocksize_bits;
bail:
        if (alloc_locked)
                ocfs2_inode_unlock(inode, 1);
        return ret;
}

/*
 * ocfs2_dio_end_io is called by the dio core when a dio is finished.  We're
 * particularly interested in the aio/dio case.  We use the rw_lock DLM lock
 * to protect io on one node from truncation on another.
 */
static void ocfs2_dio_end_io(struct kiocb *iocb,
                             loff_t offset,
                             ssize_t bytes,
                             void *private)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        int level;

        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));

        if (ocfs2_iocb_is_unaligned_aio(iocb)) {
                ocfs2_iocb_clear_unaligned_aio(iocb);

                mutex_unlock(&OCFS2_I(inode)->ip_unaligned_aio);
        }

        /* Leave the rw unlock for later, to protect append direct io writes */
        if (offset + bytes <= i_size_read(inode)) {
                ocfs2_iocb_clear_rw_locked(iocb);

                level = ocfs2_iocb_rw_locked_level(iocb);
                ocfs2_rw_unlock(inode, level);
        }
}

static int ocfs2_releasepage(struct page *page, gfp_t wait)
{
        if (!page_has_buffers(page))
                return 0;
        return try_to_free_buffers(page);
}

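/*
 * Return 1 if the cluster backing @offset is allocated and written,
 * 0 if it is a hole or an unwritten extent, or a negative error.
 */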
static int ocfs2_is_overwrite(struct ocfs2_super *osb,
                struct inode *inode, loff_t offset)
{
        int ret = 0;
        u32 v_cpos = 0;
        u32 p_cpos = 0;
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);
        ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
                        &num_clusters, &ext_flags);
        if (ret < 0) {
                mlog_errno(ret);
                return ret;
        }

        if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN))
                return 1;

        return 0;
}

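/*
 * For an append direct write on a sparse file system: if the old
 * i_size ends partway through an allocated, written cluster, zero
 * the @zero_len bytes of that cluster beyond i_size directly on
 * disk, so the gap does not read back stale data.
 */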
static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
                struct inode *inode, loff_t offset,
                u64 zero_len, int cluster_align)
{
        u32 p_cpos = 0;
        u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode));
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;
        int ret = 0;

        if (offset <= i_size_read(inode) || cluster_align)
                return 0;

        ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters,
                        &ext_flags);
        if (ret < 0) {
                mlog_errno(ret);
                return ret;
        }

        if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                u64 s = i_size_read(inode);
                sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
                        (do_div(s, osb->s_clustersize) >> 9);

                ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
                                zero_len >> 9, GFP_NOFS, false);
                if (ret < 0)
                        mlog_errno(ret);
        }

        return ret;
}

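/*
 * Non-sparse counterpart of the above: extending the file means every
 * cluster between the old i_size and @offset must actually be
 * allocated, and the newly allocated range zeroed on disk. The last
 * cluster is left for the direct I/O itself to allocate.
 */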
static int ocfs2_direct_IO_extend_no_holes(struct ocfs2_super *osb,
                struct inode *inode, loff_t offset)
{
        u64 zero_start, zero_len, total_zero_len;
        u32 p_cpos = 0, clusters_to_add;
        u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, i_size_read(inode));
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;
        u32 size_div, offset_div;
        int ret = 0;

        {
                u64 o = offset;
                u64 s = i_size_read(inode);

                offset_div = do_div(o, osb->s_clustersize);
                size_div = do_div(s, osb->s_clustersize);
        }

        if (offset <= i_size_read(inode))
                return 0;

        clusters_to_add = ocfs2_bytes_to_clusters(inode->i_sb, offset) -
                ocfs2_bytes_to_clusters(inode->i_sb, i_size_read(inode));
        total_zero_len = offset - i_size_read(inode);
        if (clusters_to_add)
                total_zero_len -= offset_div;

        /* Allocate clusters to fill out holes, but this is only needed
         * when we add more than one cluster. Otherwise the cluster will
         * be allocated during direct IO */
        if (clusters_to_add > 1) {
                ret = ocfs2_extend_allocation(inode,
                                OCFS2_I(inode)->ip_clusters,
                                clusters_to_add - 1, 0);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        while (total_zero_len) {
                ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, &num_clusters,
                                &ext_flags);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                zero_start = ocfs2_clusters_to_bytes(osb->sb, p_cpos) +
                        size_div;
                zero_len = ocfs2_clusters_to_bytes(osb->sb, num_clusters) -
                        size_div;
                zero_len = min(total_zero_len, zero_len);

                if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                        ret = blkdev_issue_zeroout(osb->sb->s_bdev,
                                        zero_start >> 9, zero_len >> 9,
                                        GFP_NOFS, false);
                        if (ret < 0) {
                                mlog_errno(ret);
                                goto out;
                        }
                }

                total_zero_len -= zero_len;
                v_cpos += ocfs2_bytes_to_clusters(osb->sb, zero_len + size_div);

                /* Only the first iteration can be cluster-unaligned, so
                 * set size_div to 0 for the rest */
                size_div = 0;
        }

out:
        return ret;
}

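/*
 * Direct write path. In outline:
 *
 *  1) If the write extends i_size, put the inode in the orphan dir
 *     first so a crash mid-write can be cleaned up.
 *  2) For appends, zero (sparse) or allocate-and-zero (non-sparse)
 *     the cluster space between the old i_size and the write.
 *  3) Issue the actual I/O via __blockdev_direct_IO().
 *  4) On failure, truncate back; on success, zero any unaligned
 *     cluster head and pull the inode back out of the orphan dir.
 */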
static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
                struct iov_iter *iter,
                loff_t offset)
{
        ssize_t ret = 0;
        ssize_t written = 0;
        bool orphaned = false;
        int is_overwrite = 0;
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file)->i_mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *di_bh = NULL;
        size_t count = iter->count;
        journal_t *journal = osb->journal->j_journal;
        u64 zero_len_head, zero_len_tail;
        int cluster_align_head, cluster_align_tail;
        loff_t final_size = offset + count;
        int append_write = offset >= i_size_read(inode) ? 1 : 0;
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        {
                u64 o = offset;
                u64 s = i_size_read(inode);

                zero_len_head = do_div(o, 1 << osb->s_clustersize_bits);
                cluster_align_head = !zero_len_head;

                zero_len_tail = osb->s_clustersize -
                        do_div(s, osb->s_clustersize);
                if ((offset - i_size_read(inode)) < zero_len_tail)
                        zero_len_tail = offset - i_size_read(inode);
                cluster_align_tail = !zero_len_tail;
        }

        /*
         * when final_size > inode->i_size, inode->i_size will be
         * updated after direct write, so add the inode to orphan
         * dir first.
         */
        if (final_size > i_size_read(inode)) {
                ret = ocfs2_add_inode_to_orphan(osb, inode);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }
                orphaned = true;
        }

        if (append_write) {
                ret = ocfs2_inode_lock(inode, NULL, 1);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto clean_orphan;
                }

                /* zero out the tail of the previously allocated cluster
                 * that was not zeroed */
                if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
                        down_read(&OCFS2_I(inode)->ip_alloc_sem);
                        ret = ocfs2_direct_IO_zero_extend(osb, inode, offset,
                                        zero_len_tail, cluster_align_tail);
                        up_read(&OCFS2_I(inode)->ip_alloc_sem);
                } else {
                        down_write(&OCFS2_I(inode)->ip_alloc_sem);
                        ret = ocfs2_direct_IO_extend_no_holes(osb, inode,
                                        offset);
                        up_write(&OCFS2_I(inode)->ip_alloc_sem);
                }
                if (ret < 0) {
                        mlog_errno(ret);
                        ocfs2_inode_unlock(inode, 1);
                        goto clean_orphan;
                }

                is_overwrite = ocfs2_is_overwrite(osb, inode, offset);
                if (is_overwrite < 0) {
                        mlog_errno(is_overwrite);
                        ret = is_overwrite;
                        ocfs2_inode_unlock(inode, 1);
                        goto clean_orphan;
                }

                ocfs2_inode_unlock(inode, 1);
        }

        written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
                                       offset, ocfs2_direct_IO_get_blocks,
                                       ocfs2_dio_end_io, NULL, 0);
        /* overwrite aio may return -EIOCBQUEUED, and it is not an error */
        if ((written < 0) && (written != -EIOCBQUEUED)) {
                loff_t i_size = i_size_read(inode);

                if (offset + count > i_size) {
                        ret = ocfs2_inode_lock(inode, &di_bh, 1);
                        if (ret < 0) {
                                mlog_errno(ret);
                                goto clean_orphan;
                        }

                        if (i_size == i_size_read(inode)) {
                                ret = ocfs2_truncate_file(inode, di_bh,
                                                i_size);
                                if (ret < 0) {
                                        if (ret != -ENOSPC)
                                                mlog_errno(ret);

                                        ocfs2_inode_unlock(inode, 1);
                                        brelse(di_bh);
                                        di_bh = NULL;
                                        goto clean_orphan;
                                }
                        }

                        ocfs2_inode_unlock(inode, 1);
                        brelse(di_bh);
                        di_bh = NULL;

                        ret = jbd2_journal_force_commit(journal);
                        if (ret < 0)
                                mlog_errno(ret);
                }
        } else if (written > 0 && append_write && !is_overwrite &&
                        !cluster_align_head) {
                /* zero out the head of the newly allocated cluster */
                u32 p_cpos = 0;
                u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset);

                ret = ocfs2_inode_lock(inode, NULL, 0);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto clean_orphan;
                }

                ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos,
                                &num_clusters, &ext_flags);
                if (ret < 0) {
                        mlog_errno(ret);
                        ocfs2_inode_unlock(inode, 0);
                        goto clean_orphan;
                }

                BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));

                ret = blkdev_issue_zeroout(osb->sb->s_bdev,
                                (u64)p_cpos << (osb->s_clustersize_bits - 9),
                                zero_len_head >> 9, GFP_NOFS, false);
                if (ret < 0)
                        mlog_errno(ret);

                ocfs2_inode_unlock(inode, 0);
        }

clean_orphan:
        if (orphaned) {
                int tmp_ret;
                int update_isize = written > 0 ? 1 : 0;
                loff_t end = update_isize ? offset + written : 0;

                tmp_ret = ocfs2_inode_lock(inode, &di_bh, 1);
                if (tmp_ret < 0) {
                        ret = tmp_ret;
                        mlog_errno(ret);
                        goto out;
                }

                tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
                                update_isize, end);
                if (tmp_ret < 0) {
                        ocfs2_inode_unlock(inode, 1);
                        ret = tmp_ret;
                        mlog_errno(ret);
                        brelse(di_bh);
                        goto out;
                }

                ocfs2_inode_unlock(inode, 1);
                brelse(di_bh);

                tmp_ret = jbd2_journal_force_commit(journal);
                if (tmp_ret < 0) {
                        ret = tmp_ret;
                        mlog_errno(tmp_ret);
                }
        }

out:
        if (ret >= 0)
                ret = written;
        return ret;
}

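/*
 * ->direct_IO entry point. Returning 0 from here (without consuming
 * the iter) makes the generic code fall back to buffered I/O.
 */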
static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                               loff_t offset)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file)->i_mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int full_coherency = !(osb->s_mount_opt &
                        OCFS2_MOUNT_COHERENCY_BUFFERED);

        /*
         * Fall back to buffered I/O if we see an inode without
         * extents.
         */
        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                return 0;

        /* Fall back to buffered I/O if we are appending and
         * concurrent O_DIRECT writes are allowed.
         */
        if (i_size_read(inode) <= offset && !full_coherency)
                return 0;

        if (iov_iter_rw(iter) == READ)
                return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
                                            iter, offset,
                                            ocfs2_direct_IO_get_blocks,
                                            ocfs2_dio_end_io, NULL, 0);
        else
                return ocfs2_direct_IO_write(iocb, iter, offset);
}

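/*
 * Work out which byte range of a page belongs to cluster @cpos. This
 * only differs from [0, PAGE_CACHE_SIZE) when pages are larger than
 * clusters, e.g. 64K pages over 4K clusters: cpp is then 16, and
 * cluster 17 maps to bytes [4096, 8192) of its page.
 */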
static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
                                            u32 cpos,
                                            unsigned int *start,
                                            unsigned int *end)
{
        unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;

        if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
                unsigned int cpp;

                cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);

                cluster_start = cpos % cpp;
                cluster_start = cluster_start << osb->s_clustersize_bits;

                cluster_end = cluster_start + osb->s_clustersize;
        }

        BUG_ON(cluster_start > PAGE_SIZE);
        BUG_ON(cluster_end > PAGE_SIZE);

        if (start)
                *start = cluster_start;
        if (end)
                *end = cluster_end;
}

/*
 * 'from' and 'to' are the region in the page to avoid zeroing.
 *
 * If pagesize > clustersize, this function will avoid zeroing outside
 * of the cluster boundary.
 *
 * from == to == 0 is code for "zero the entire cluster region"
 */
static void ocfs2_clear_page_regions(struct page *page,
                                     struct ocfs2_super *osb, u32 cpos,
                                     unsigned from, unsigned to)
{
        void *kaddr;
        unsigned int cluster_start, cluster_end;

        ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);

        kaddr = kmap_atomic(page);

        if (from || to) {
                if (from > cluster_start)
                        memset(kaddr + cluster_start, 0, from - cluster_start);
                if (to < cluster_end)
                        memset(kaddr + to, 0, cluster_end - to);
        } else {
                memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
        }

        kunmap_atomic(kaddr);
}

/*
 * Nonsparse file systems fully allocate before we get to the write
 * code. This prevents ocfs2_write() from tagging the write as an
 * allocating one, which means ocfs2_map_page_blocks() might try to
 * read-in the blocks at the tail of our file. Avoid reading them by
 * testing i_size against each block offset.
 */
static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
                                 unsigned int block_start)
{
        u64 offset = page_offset(page) + block_start;

        if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                return 1;

        if (i_size_read(inode) > offset)
                return 1;

        return 0;
}

/*
 * Some of this taken from __block_write_begin(). We already have our
 * mapping by now though, and the entire write will be allocating or
 * it won't, so not much need to use BH_New.
 *
 * This will also skip zeroing, which is handled externally.
 */
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
                          struct inode *inode, unsigned int from,
                          unsigned int to, int new)
{
        int ret = 0;
        struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
        unsigned int block_end, block_start;
        unsigned int bsize = 1 << inode->i_blkbits;

        if (!page_has_buffers(page))
                create_empty_buffers(page, bsize, 0);

        head = page_buffers(page);
        for (bh = head, block_start = 0; bh != head || !block_start;
             bh = bh->b_this_page, block_start += bsize) {
                block_end = block_start + bsize;

                clear_buffer_new(bh);

                /*
                 * Ignore blocks outside of our i/o range -
                 * they may belong to unallocated clusters.
                 */
                if (block_start >= to || block_end <= from) {
                        if (PageUptodate(page))
                                set_buffer_uptodate(bh);
                        continue;
                }

                /*
                 * For an allocating write with cluster size >= page
                 * size, we always write the entire page.
                 */
                if (new)
                        set_buffer_new(bh);

                if (!buffer_mapped(bh)) {
                        map_bh(bh, inode->i_sb, *p_blkno);
                        unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
                }

                if (PageUptodate(page)) {
                        if (!buffer_uptodate(bh))
                                set_buffer_uptodate(bh);
                } else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                           !buffer_new(bh) &&
                           ocfs2_should_read_blk(inode, page, block_start) &&
                           (block_start < from || block_end > to)) {
                        ll_rw_block(READ, 1, &bh);
                        *wait_bh++ = bh;
                }

                *p_blkno = *p_blkno + 1;
        }

        /*
         * If we issued read requests - let them complete.
         */
        while (wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
                if (!buffer_uptodate(*wait_bh))
                        ret = -EIO;
        }

        if (ret == 0 || !new)
                return ret;

        /*
         * If we get -EIO above, zero out any newly allocated blocks
         * to avoid exposing stale data.
         */
        bh = head;
        block_start = 0;
        do {
                block_end = block_start + bsize;
                if (block_end <= from)
                        goto next_bh;
                if (block_start >= to)
                        break;

                zero_user(page, block_start, bh->b_size);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);

next_bh:
                block_start = block_end;
                bh = bh->b_this_page;
        } while (bh != head);

        return ret;
}

#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE)
#define OCFS2_MAX_CTXT_PAGES    1
#else
#define OCFS2_MAX_CTXT_PAGES    (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE)
#endif

#define OCFS2_MAX_CLUSTERS_PER_PAGE     (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE)

/*
 * Describe the state of a single cluster to be written to.
 */
struct ocfs2_write_cluster_desc {
        u32             c_cpos;
        u32             c_phys;
        /*
         * Give this a unique field because c_phys eventually gets
         * filled.
         */
        unsigned        c_new;
        unsigned        c_unwritten;
        unsigned        c_needs_zero;
};

struct ocfs2_write_ctxt {
        /* Logical cluster position / len of write */
        u32                             w_cpos;
        u32                             w_clen;

        /* First cluster allocated in a nonsparse extend */
        u32                             w_first_new_cpos;

        struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

        /*
         * This is true if page_size > cluster_size.
         *
         * It triggers a set of special cases during write which might
         * have to deal with allocating writes to partial pages.
         */
        unsigned int                    w_large_pages;

        /*
         * Pages involved in this write.
         *
         * w_target_page is the page being written to by the user.
         *
         * w_pages is an array of pages which always contains
         * w_target_page, and in the case of an allocating write with
         * page_size < cluster size, it will contain zero'd and mapped
         * pages adjacent to w_target_page which need to be written
         * out so that future reads from that region will get
         * zeros.
         */
        unsigned int                    w_num_pages;
        struct page                     *w_pages[OCFS2_MAX_CTXT_PAGES];
        struct page                     *w_target_page;

        /*
         * w_target_locked is used for the page_mkwrite path, indicating
         * that w_target_page must not be unlocked in
         * ocfs2_write_end_nolock.
         */
        unsigned int                    w_target_locked:1;

        /*
         * ocfs2_write_end() uses this to know what the real range to
         * write in the target should be.
         */
        unsigned int                    w_target_from;
        unsigned int                    w_target_to;

        /*
         * We could use journal_current_handle() but this is cleaner,
         * IMHO -Mark
         */
        handle_t                        *w_handle;

        struct buffer_head              *w_di_bh;

        struct ocfs2_cached_dealloc_ctxt w_dealloc;
};

void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
{
        int i;

        for(i = 0; i < num_pages; i++) {
                if (pages[i]) {
                        unlock_page(pages[i]);
                        mark_page_accessed(pages[i]);
                        page_cache_release(pages[i]);
                }
        }
}

static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
{
        int i;

        /*
         * w_target_locked is only set to true in the page_mkwrite() case.
         * The intent is to allow us to lock the target page from write_begin()
         * to write_end(). The caller must hold a ref on w_target_page.
         */
        if (wc->w_target_locked) {
                BUG_ON(!wc->w_target_page);
                for (i = 0; i < wc->w_num_pages; i++) {
                        if (wc->w_target_page == wc->w_pages[i]) {
                                wc->w_pages[i] = NULL;
                                break;
                        }
                }
                mark_page_accessed(wc->w_target_page);
                page_cache_release(wc->w_target_page);
        }
        ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
}

static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
{
        ocfs2_unlock_pages(wc);
        brelse(wc->w_di_bh);
        kfree(wc);
}

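/*
 * w_clen counts every cluster the byte range [pos, pos + len)
 * overlaps, including partial first and last clusters: a 100 byte
 * write at offset 4090 with 4K clusters gives w_cpos = 0, w_clen = 2.
 */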
1315 static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
1316                                   struct ocfs2_super *osb, loff_t pos,
1317                                   unsigned len, struct buffer_head *di_bh)
1318 {
1319         u32 cend;
1320         struct ocfs2_write_ctxt *wc;
1321
1322         wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS);
1323         if (!wc)
1324                 return -ENOMEM;
1325
1326         wc->w_cpos = pos >> osb->s_clustersize_bits;
1327         wc->w_first_new_cpos = UINT_MAX;
1328         cend = (pos + len - 1) >> osb->s_clustersize_bits;
1329         wc->w_clen = cend - wc->w_cpos + 1;
1330         get_bh(di_bh);
1331         wc->w_di_bh = di_bh;
1332
1333         if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
1334                 wc->w_large_pages = 1;
1335         else
1336                 wc->w_large_pages = 0;
1337
1338         ocfs2_init_dealloc_ctxt(&wc->w_dealloc);
1339
1340         *wcp = wc;
1341
1342         return 0;
1343 }
1344
1345 /*
1346  * If a page has any new buffers, zero them out here, and mark them uptodate
1347  * and dirty so they'll be written out (in order to prevent uninitialised
1348  * block data from leaking). And clear the new bit.
1349  */
1350 static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1351 {
1352         unsigned int block_start, block_end;
1353         struct buffer_head *head, *bh;
1354
1355         BUG_ON(!PageLocked(page));
1356         if (!page_has_buffers(page))
1357                 return;
1358
1359         bh = head = page_buffers(page);
1360         block_start = 0;
1361         do {
1362                 block_end = block_start + bh->b_size;
1363
1364                 if (buffer_new(bh)) {
1365                         if (block_end > from && block_start < to) {
1366                                 if (!PageUptodate(page)) {
1367                                         unsigned start, end;
1368
1369                                         start = max(from, block_start);
1370                                         end = min(to, block_end);
1371
1372                                         zero_user_segment(page, start, end);
1373                                         set_buffer_uptodate(bh);
1374                                 }
1375
1376                                 clear_buffer_new(bh);
1377                                 mark_buffer_dirty(bh);
1378                         }
1379                 }
1380
1381                 block_start = block_end;
1382                 bh = bh->b_this_page;
1383         } while (bh != head);
1384 }
1385
1386 /*
1387  * Only called when we have a failure during allocating write to write
1388  * zero's to the newly allocated region.
1389  */
1390 static void ocfs2_write_failure(struct inode *inode,
1391                                 struct ocfs2_write_ctxt *wc,
1392                                 loff_t user_pos, unsigned user_len)
1393 {
1394         int i;
1395         unsigned from = user_pos & (PAGE_CACHE_SIZE - 1),
1396                 to = user_pos + user_len;
1397         struct page *tmppage;
1398
1399         ocfs2_zero_new_buffers(wc->w_target_page, from, to);
1400
1401         for(i = 0; i < wc->w_num_pages; i++) {
1402                 tmppage = wc->w_pages[i];
1403
1404                 if (page_has_buffers(tmppage)) {
1405                         if (ocfs2_should_order_data(inode))
1406                                 ocfs2_jbd2_file_inode(wc->w_handle, inode);
1407
1408                         block_commit_write(tmppage, from, to);
1409                 }
1410         }
1411 }
1412
1413 static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
1414                                         struct ocfs2_write_ctxt *wc,
1415                                         struct page *page, u32 cpos,
1416                                         loff_t user_pos, unsigned user_len,
1417                                         int new)
1418 {
1419         int ret;
1420         unsigned int map_from = 0, map_to = 0;
1421         unsigned int cluster_start, cluster_end;
1422         unsigned int user_data_from = 0, user_data_to = 0;
1423
1424         ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos,
1425                                         &cluster_start, &cluster_end);
1426
1427         /* treat the write as new if the a hole/lseek spanned across
1428          * the page boundary.
1429          */
1430         new = new | ((i_size_read(inode) <= page_offset(page)) &&
1431                         (page_offset(page) <= user_pos));
1432
1433         if (page == wc->w_target_page) {
1434                 map_from = user_pos & (PAGE_CACHE_SIZE - 1);
1435                 map_to = map_from + user_len;
1436
1437                 if (new)
1438                         ret = ocfs2_map_page_blocks(page, p_blkno, inode,
1439                                                     cluster_start, cluster_end,
1440                                                     new);
1441                 else
1442                         ret = ocfs2_map_page_blocks(page, p_blkno, inode,
1443                                                     map_from, map_to, new);
1444                 if (ret) {
1445                         mlog_errno(ret);
1446                         goto out;
1447                 }
1448
1449                 user_data_from = map_from;
1450                 user_data_to = map_to;
1451                 if (new) {
1452                         map_from = cluster_start;
1453                         map_to = cluster_end;
1454                 }
1455         } else {
1456                 /*
1457                  * If we haven't allocated the new page yet, we
1458                  * shouldn't be writing it out without copying user
1459                  * data. This is likely a math error from the caller.
1460                  */
1461                 BUG_ON(!new);
1462
1463                 map_from = cluster_start;
1464                 map_to = cluster_end;
1465
1466                 ret = ocfs2_map_page_blocks(page, p_blkno, inode,
1467                                             cluster_start, cluster_end, new);
1468                 if (ret) {
1469                         mlog_errno(ret);
1470                         goto out;
1471                 }
1472         }
1473
1474         /*
1475          * Parts of newly allocated pages need to be zero'd.
1476          *
1477          * Above, we have also rewritten 'to' and 'from' - as far as
1478          * the rest of the function is concerned, the entire cluster
1479          * range inside of a page needs to be written.
1480          *
1481          * We can skip this if the page is up to date - it's already
1482          * been zero'd from being read in as a hole.
1483          */
1484         if (new && !PageUptodate(page))
1485                 ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
1486                                          cpos, user_data_from, user_data_to);
1487
1488         flush_dcache_page(page);
1489
1490 out:
1491         return ret;
1492 }
1493
1494 /*
1495  * This function will only grab one cluster's worth of pages.
1496  */
1497 static int ocfs2_grab_pages_for_write(struct address_space *mapping,
1498                                       struct ocfs2_write_ctxt *wc,
1499                                       u32 cpos, loff_t user_pos,
1500                                       unsigned user_len, int new,
1501                                       struct page *mmap_page)
1502 {
1503         int ret = 0, i;
1504         unsigned long start, target_index, end_index, index;
1505         struct inode *inode = mapping->host;
1506         loff_t last_byte;
1507
1508         target_index = user_pos >> PAGE_CACHE_SHIFT;
1509
1510         /*
1511          * Figure out how many pages we'll be manipulating here. For
1512          * a non-allocating write, we just change the one page.
1513          * Otherwise, we'll need a whole cluster's worth.  If we're
1514          * writing past i_size, we only need enough pages to cover the
1515          * last page of the write.
1516          */
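        /*
         * A sketch of the math, assuming 4KB pages and 32KB clusters:
         * an allocating write needs up to 8 pages (one cluster's
         * worth) starting at the first page of the cluster, but the
         * count is clamped below so we never grab pages past the end
         * of the write or i_size, whichever is greater.
         */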
1517         if (new) {
1518                 wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
1519                 start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
1520                 /*
1521                  * We need the index *past* the last page we could possibly
1522                  * touch.  This is the page past the end of the write or
1523                  * i_size, whichever is greater.
1524                  */
1525                 last_byte = max(user_pos + user_len, i_size_read(inode));
1526                 BUG_ON(last_byte < 1);
1527                 end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1;
1528                 if ((start + wc->w_num_pages) > end_index)
1529                         wc->w_num_pages = end_index - start;
1530         } else {
1531                 wc->w_num_pages = 1;
1532                 start = target_index;
1533         }
1534
1535         for(i = 0; i < wc->w_num_pages; i++) {
1536                 index = start + i;
1537
1538                 if (index == target_index && mmap_page) {
1539                         /*
1540                          * ocfs2_pagemkwrite() is a little different
1541                          * and wants us to directly use the page
1542                          * passed in.
1543                          */
1544                         lock_page(mmap_page);
1545
1546                         /* Exit and let the caller retry */
1547                         if (mmap_page->mapping != mapping) {
1548                                 WARN_ON(mmap_page->mapping);
1549                                 unlock_page(mmap_page);
1550                                 ret = -EAGAIN;
1551                                 goto out;
1552                         }
1553
1554                         page_cache_get(mmap_page);
1555                         wc->w_pages[i] = mmap_page;
1556                         wc->w_target_locked = true;
1557                 } else {
1558                         wc->w_pages[i] = find_or_create_page(mapping, index,
1559                                                              GFP_NOFS);
1560                         if (!wc->w_pages[i]) {
1561                                 ret = -ENOMEM;
1562                                 mlog_errno(ret);
1563                                 goto out;
1564                         }
1565                 }
1566                 wait_for_stable_page(wc->w_pages[i]);
1567
1568                 if (index == target_index)
1569                         wc->w_target_page = wc->w_pages[i];
1570         }
1571 out:
1572         if (ret)
1573                 wc->w_target_locked = false;
1574         return ret;
1575 }
1576
1577 /*
1578  * Prepare a single cluster for writing into the file.
1579  */
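/*
 * Roughly three cases are handled below: a brand new cluster
 * (phys == 0) is allocated via ocfs2_add_inode_data(), an unwritten
 * extent is marked written, and an already-written cluster needs no
 * extent change.  In every case the pages covering the cluster are
 * then prepared for the write.
 */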
1580 static int ocfs2_write_cluster(struct address_space *mapping,
1581                                u32 phys, unsigned int unwritten,
1582                                unsigned int should_zero,
1583                                struct ocfs2_alloc_context *data_ac,
1584                                struct ocfs2_alloc_context *meta_ac,
1585                                struct ocfs2_write_ctxt *wc, u32 cpos,
1586                                loff_t user_pos, unsigned user_len)
1587 {
1588         int ret, i, new;
1589         u64 v_blkno, p_blkno;
1590         struct inode *inode = mapping->host;
1591         struct ocfs2_extent_tree et;
1592
1593         new = phys == 0 ? 1 : 0;
1594         if (new) {
1595                 u32 tmp_pos;
1596
1597                 /*
1598                  * This is safe to call with the page locks - it won't take
1599                  * any additional semaphores or cluster locks.
1600                  */
1601                 tmp_pos = cpos;
1602                 ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode,
1603                                            &tmp_pos, 1, 0, wc->w_di_bh,
1604                                            wc->w_handle, data_ac,
1605                                            meta_ac, NULL);
1606                 /*
1607                  * This shouldn't happen because we must have already
1608                  * calculated the correct metadata allocation required. The
1609                  * internal tree allocation code should know how to increase
1610                  * transaction credits itself.
1611                  *
1612                  * If need be, we could handle -EAGAIN for a
1613                  * RESTART_TRANS here.
1614                  */
1615                 mlog_bug_on_msg(ret == -EAGAIN,
1616                                 "Inode %llu: EAGAIN return during allocation.\n",
1617                                 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1618                 if (ret < 0) {
1619                         mlog_errno(ret);
1620                         goto out;
1621                 }
1622         } else if (unwritten) {
1623                 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
1624                                               wc->w_di_bh);
1625                 ret = ocfs2_mark_extent_written(inode, &et,
1626                                                 wc->w_handle, cpos, 1, phys,
1627                                                 meta_ac, &wc->w_dealloc);
1628                 if (ret < 0) {
1629                         mlog_errno(ret);
1630                         goto out;
1631                 }
1632         }
1633
1634         if (should_zero)
1635                 v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, cpos);
1636         else
1637                 v_blkno = user_pos >> inode->i_sb->s_blocksize_bits;
1638
1639         /*
1640          * The only reason this should fail is due to an inability to
1641          * find the extent added.
1642          */
1643         ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
1644                                           NULL);
1645         if (ret < 0) {
1646                 mlog(ML_ERROR, "Get physical blkno failed for inode %llu, "
1647                             "at logical block %llu",
1648                             (unsigned long long)OCFS2_I(inode)->ip_blkno,
1649                             (unsigned long long)v_blkno);
1650                 goto out;
1651         }
1652
1653         BUG_ON(p_blkno == 0);
1654
1655         for(i = 0; i < wc->w_num_pages; i++) {
1656                 int tmpret;
1657
1658                 tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
1659                                                       wc->w_pages[i], cpos,
1660                                                       user_pos, user_len,
1661                                                       should_zero);
1662                 if (tmpret) {
1663                         mlog_errno(tmpret);
1664                         if (ret == 0)
1665                                 ret = tmpret;
1666                 }
1667         }
1668
1669         /*
1670          * We only have cleanup to do in case of allocating write.
1671          */
1672         if (ret && new)
1673                 ocfs2_write_failure(inode, wc, user_pos, user_len);
1674
1675 out:
1676
1677         return ret;
1678 }
1679
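/*
 * Walk the write descriptors, clamping the user's write so that no
 * single call covers more than one cluster, and hand each piece to
 * ocfs2_write_cluster().
 */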
1680 static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
1681                                        struct ocfs2_alloc_context *data_ac,
1682                                        struct ocfs2_alloc_context *meta_ac,
1683                                        struct ocfs2_write_ctxt *wc,
1684                                        loff_t pos, unsigned len)
1685 {
1686         int ret, i;
1687         loff_t cluster_off;
1688         unsigned int local_len = len;
1689         struct ocfs2_write_cluster_desc *desc;
1690         struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb);
1691
1692         for (i = 0; i < wc->w_clen; i++) {
1693                 desc = &wc->w_desc[i];
1694
1695                 /*
1696                  * We have to make sure that the total write passed in
1697                  * doesn't extend past a single cluster.
1698                  */
1699                 local_len = len;
1700                 cluster_off = pos & (osb->s_clustersize - 1);
1701                 if ((cluster_off + local_len) > osb->s_clustersize)
1702                         local_len = osb->s_clustersize - cluster_off;
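                /*
                 * Worked example, assuming 32KB clusters: a 10KB write
                 * starting 28KB into a cluster gets local_len clamped
                 * to 4KB here; the remaining 6KB is handled by the next
                 * descriptor on the following loop iteration.
                 */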
1703
1704                 ret = ocfs2_write_cluster(mapping, desc->c_phys,
1705                                           desc->c_unwritten,
1706                                           desc->c_needs_zero,
1707                                           data_ac, meta_ac,
1708                                           wc, desc->c_cpos, pos, local_len);
1709                 if (ret) {
1710                         mlog_errno(ret);
1711                         goto out;
1712                 }
1713
1714                 len -= local_len;
1715                 pos += local_len;
1716         }
1717
1718         ret = 0;
1719 out:
1720         return ret;
1721 }
1722
1723 /*
1724  * ocfs2_write_end() wants to know which parts of the target page it
1725  * should complete the write on. It's easiest to compute them ahead of
1726  * time when a more complete view of the write is available.
1727  */
1728 static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1729                                         struct ocfs2_write_ctxt *wc,
1730                                         loff_t pos, unsigned len, int alloc)
1731 {
1732         struct ocfs2_write_cluster_desc *desc;
1733
1734         wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1);
1735         wc->w_target_to = wc->w_target_from + len;
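        /*
         * For instance, assuming 4KB pages: a 100-byte write at
         * pos = 5000 yields w_target_from = 904 and w_target_to = 1004
         * within the target page.
         */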
1736
1737         if (alloc == 0)
1738                 return;
1739
1740         /*
1741          * Allocating write - we may have different boundaries based
1742          * on page size and cluster size.
1743          *
1744          * NOTE: We can no longer compute one value from the other as
1745          * the actual write length and user provided length may be
1746          * different.
1747          */
1748
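        /*
         * A sketch of the two cases, assuming w_large_pages means the
         * page size exceeds the cluster size: with, say, 64KB pages and
         * 4KB clusters, only the boundaries of the first and last
         * cluster in the range matter; with 4KB pages and 32KB clusters
         * an allocating write always covers whole pages, hence the
         * 0..PAGE_CACHE_SIZE range below.
         */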
1749         if (wc->w_large_pages) {
1750                 /*
1751                  * We only care about the 1st and last cluster within
1752                  * our range and whether they should be zero'd or not. Either
1753                  * value may be extended out to the start/end of a
1754                  * newly allocated cluster.
1755                  */
1756                 desc = &wc->w_desc[0];
1757                 if (desc->c_needs_zero)
1758                         ocfs2_figure_cluster_boundaries(osb,
1759                                                         desc->c_cpos,
1760                                                         &wc->w_target_from,
1761                                                         NULL);
1762
1763                 desc = &wc->w_desc[wc->w_clen - 1];
1764                 if (desc->c_needs_zero)
1765                         ocfs2_figure_cluster_boundaries(osb,
1766                                                         desc->c_cpos,
1767                                                         NULL,
1768                                                         &wc->w_target_to);
1769         } else {
1770                 wc->w_target_from = 0;
1771                 wc->w_target_to = PAGE_CACHE_SIZE;
1772         }
1773 }
1774
1775 /*
1776  * Populate each single-cluster write descriptor in the write context
1777  * with information about the i/o to be done.
1778  *
1779  * Returns the number of clusters that will have to be allocated, as
1780  * well as a worst case estimate of the number of extent records that
1781  * would have to be created during a write to an unwritten region.
1782  */
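/*
 * For example, a two-cluster write where the first cluster is a hole
 * and the second lies in an unwritten extent would come back with
 * *clusters_to_alloc = 1 and *extents_to_split = 2 (the worst-case
 * split estimate for that unwritten region).
 */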
1783 static int ocfs2_populate_write_desc(struct inode *inode,
1784                                      struct ocfs2_write_ctxt *wc,
1785                                      unsigned int *clusters_to_alloc,
1786                                      unsigned int *extents_to_split)
1787 {
1788         int ret;
1789         struct ocfs2_write_cluster_desc *desc;
1790         unsigned int num_clusters = 0;
1791         unsigned int ext_flags = 0;
1792         u32 phys = 0;
1793         int i;
1794
1795         *clusters_to_alloc = 0;
1796         *extents_to_split = 0;
1797
1798         for (i = 0; i < wc->w_clen; i++) {
1799                 desc = &wc->w_desc[i];
1800                 desc->c_cpos = wc->w_cpos + i;
1801
1802                 if (num_clusters == 0) {
1803                         /*
1804                          * Need to look up the next extent record.
1805                          */
1806                         ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys,
1807                                                  &num_clusters, &ext_flags);
1808                         if (ret) {
1809                                 mlog_errno(ret);
1810                                 goto out;
1811                         }
1812
1813                         /* We should have already CoWed the refcounted extent. */
1814                         BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
1815
1816                         /*
1817                          * Assume worst case - that we're writing in
1818                          * the middle of the extent.
1819                          *
1820                          * We can assume that the write proceeds from
1821                          * left to right, in which case the extent
1822                          * insert code is smart enough to coalesce the
1823                          * next splits into the previous records created.
1824                          */
1825                         if (ext_flags & OCFS2_EXT_UNWRITTEN)
1826                                 *extents_to_split = *extents_to_split + 2;
1827                 } else if (phys) {
1828                         /*
1829                          * Only increment phys if it doesn't describe
1830                          * a hole.
1831                          */
1832                         phys++;
1833                 }
1834
1835                 /*
1836                  * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
1837                  * file that got extended.  w_first_new_cpos tells us
1838                  * where the newly allocated clusters are so we can
1839                  * zero them.
1840                  */
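                /*
                 * E.g. if a non-sparse file just grew from 3 to 5
                 * clusters, w_first_new_cpos is 3, so the descriptors
                 * for clusters 3 and 4 get c_needs_zero set here.
                 */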
1841                 if (desc->c_cpos >= wc->w_first_new_cpos) {
1842                         BUG_ON(phys == 0);
1843                         desc->c_needs_zero = 1;
1844                 }
1845
1846                 desc->c_phys = phys;
1847                 if (phys == 0) {
1848                         desc->c_new = 1;
1849                         desc->c_needs_zero = 1;
1850                         *clusters_to_alloc = *clusters_to_alloc + 1;
1851                 }
1852
1853                 if (ext_flags & OCFS2_EXT_UNWRITTEN) {
1854                         desc->c_unwritten = 1;
1855                         desc->c_needs_zero = 1;
1856                 }
1857
1858                 num_clusters--;
1859         }
1860
1861         ret = 0;
1862 out:
1863         return ret;
1864 }
1865
1866 static int ocfs2_write_begin_inline(struct address_space *mapping,
1867                                     struct inode *inode,
1868                                     struct ocfs2_write_ctxt *wc)
1869 {
1870         int ret;
1871         struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1872         struct page *page;
1873         handle_t *handle;
1874         struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1875
1876         handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1877         if (IS_ERR(handle)) {
1878                 ret = PTR_ERR(handle);
1879                 mlog_errno(ret);
1880                 goto out;
1881         }
1882
1883         page = find_or_create_page(mapping, 0, GFP_NOFS);
1884         if (!page) {
1885                 ocfs2_commit_trans(osb, handle);
1886                 ret = -ENOMEM;
1887                 mlog_errno(ret);
1888                 goto out;
1889         }
1890         /*
1891          * If we don't set w_num_pages then this page won't get unlocked
1892          * and freed on cleanup of the write context.
1893          */
1894         wc->w_pages[0] = wc->w_target_page = page;
1895         wc->w_num_pages = 1;
1896
1897         ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
1898                                       OCFS2_JOURNAL_ACCESS_WRITE);
1899         if (ret) {
1900                 ocfs2_commit_trans(osb, handle);
1901
1902                 mlog_errno(ret);
1903                 goto out;
1904         }
1905
1906         if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1907                 ocfs2_set_inode_data_inline(inode, di);
1908
1909         if (!PageUptodate(page)) {
1910                 ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
1911                 if (ret) {
1912                         ocfs2_commit_trans(osb, handle);
1913
1914                         goto out;
1915                 }
1916         }
1917
1918         wc->w_handle = handle;
1919 out:
1920         return ret;
1921 }
1922
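/*
 * Return 1 if a file of new_size bytes still fits in the dinode's
 * inline data area (id_count bytes), 0 otherwise.
 */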
1923 int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
1924 {
1925         struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1926
1927         if (new_size <= le16_to_cpu(di->id2.i_data.id_count))
1928                 return 1;
1929         return 0;
1930 }
1931
1932 static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
1933                                           struct inode *inode, loff_t pos,
1934                                           unsigned len, struct page *mmap_page,
1935                                           struct ocfs2_write_ctxt *wc)
1936 {
1937         int ret, written = 0;
1938         loff_t end = pos + len;
1939         struct ocfs2_inode_info *oi = OCFS2_I(inode);
1940         struct ocfs2_dinode *di = NULL;
1941
1942         trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno,
1943                                              len, (unsigned long long)pos,
1944                                              oi->ip_dyn_features);
1945
1946         /*
1947          * Handle inodes which already have inline data 1st.
1948          */
1949         if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1950                 if (mmap_page == NULL &&
1951                     ocfs2_size_fits_inline_data(wc->w_di_bh, end))
1952                         goto do_inline_write;
1953
1954                 /*
1955                  * The write won't fit - we have to give this inode an
1956                  * inline extent list now.
1957                  */
1958                 ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh);
1959                 if (ret)
1960                         mlog_errno(ret);
1961                 goto out;
1962         }
1963
1964         /*
1965          * Check whether the inode can accept inline data.
1966          */
1967         if (oi->ip_clusters != 0 || i_size_read(inode) != 0)
1968                 return 0;
1969
1970         /*
1971          * Check whether the write can fit.
1972          */
1973         di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1974         if (mmap_page ||
1975             end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
1976                 return 0;
1977
1978 do_inline_write:
1979         ret = ocfs2_write_begin_inline(mapping, inode, wc);
1980         if (ret) {
1981                 mlog_errno(ret);
1982                 goto out;
1983         }
1984
1985         /*
1986          * This signals to the caller that the data can be written
1987          * inline.
1988          */
1989         written = 1;
1990 out:
1991         return written ? written : ret;
1992 }
1993
1994 /*
1995  * This function only does anything for file systems which can't
1996  * handle sparse files.
1997  *
1998  * What we want to do here is fill in any hole between the current end
1999  * of allocation and the end of our write. That way the rest of the
2000  * write path can treat it as a non-allocating write, which has no
2001  * special case code for sparse/nonsparse files.
2002  */
2003 static int ocfs2_expand_nonsparse_inode(struct inode *inode,
2004                                         struct buffer_head *di_bh,
2005                                         loff_t pos, unsigned len,
2006                                         struct ocfs2_write_ctxt *wc)
2007 {
2008         int ret;
2009         loff_t newsize = pos + len;
2010
2011         BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
2012
2013         if (newsize <= i_size_read(inode))
2014                 return 0;
2015
2016         ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos);
2017         if (ret)
2018                 mlog_errno(ret);
2019
2020         wc->w_first_new_cpos =
2021                 ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
2022
2023         return ret;
2024 }
2025
2026 static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
2027                            loff_t pos)
2028 {
2029         int ret = 0;
2030
2031         BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)));
2032         if (pos > i_size_read(inode))
2033                 ret = ocfs2_zero_extend(inode, di_bh, pos);
2034
2035         return ret;
2036 }
2037
2038 /*
2039  * Try to flush the truncate log if we can free enough clusters from it.
2040  * As for the return value, "< 0" means error, "0" means no space, and
2041  * "1" means we have freed enough space for the caller to retry the allocation.
2042  */
2043 static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
2044                                           unsigned int needed)
2045 {
2046         tid_t target;
2047         int ret = 0;
2048         unsigned int truncated_clusters;
2049
2050         mutex_lock(&osb->osb_tl_inode->i_mutex);
2051         truncated_clusters = osb->truncated_clusters;
2052         mutex_unlock(&osb->osb_tl_inode->i_mutex);
2053
2054         /*
2055          * Check whether we can succeed in allocating if we free
2056          * the truncate log.
2057          */
2058         if (truncated_clusters < needed)
2059                 goto out;
2060
2061         ret = ocfs2_flush_truncate_log(osb);
2062         if (ret) {
2063                 mlog_errno(ret);
2064                 goto out;
2065         }
2066
2067         if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
2068                 jbd2_log_wait_commit(osb->journal->j_journal, target);
2069                 ret = 1;
2070         }
2071 out:
2072         return ret;
2073 }
2074
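/*
 * Core of write_begin: build the write context, do any needed CoW and
 * cluster/metadata reservation, start a transaction, then grab and
 * prepare the page range.  The caller is expected to hold the inode
 * cluster lock and ip_alloc_sem (see ocfs2_write_begin()); mmap_page
 * is only passed in on the page_mkwrite() path.
 */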
2075 int ocfs2_write_begin_nolock(struct file *filp,
2076                              struct address_space *mapping,
2077                              loff_t pos, unsigned len, unsigned flags,
2078                              struct page **pagep, void **fsdata,
2079                              struct buffer_head *di_bh, struct page *mmap_page)
2080 {
2081         int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
2082         unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
2083         struct ocfs2_write_ctxt *wc;
2084         struct inode *inode = mapping->host;
2085         struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2086         struct ocfs2_dinode *di;
2087         struct ocfs2_alloc_context *data_ac = NULL;
2088         struct ocfs2_alloc_context *meta_ac = NULL;
2089         handle_t *handle;
2090         struct ocfs2_extent_tree et;
2091         int try_free = 1, ret1;
2092
2093 try_again:
2094         ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
2095         if (ret) {
2096                 mlog_errno(ret);
2097                 return ret;
2098         }
2099
2100         if (ocfs2_supports_inline_data(osb)) {
2101                 ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
2102                                                      mmap_page, wc);
2103                 if (ret == 1) {
2104                         ret = 0;
2105                         goto success;
2106                 }
2107                 if (ret < 0) {
2108                         mlog_errno(ret);
2109                         goto out;
2110                 }
2111         }
2112
2113         if (ocfs2_sparse_alloc(osb))
2114                 ret = ocfs2_zero_tail(inode, di_bh, pos);
2115         else
2116                 ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len,
2117                                                    wc);
2118         if (ret) {
2119                 mlog_errno(ret);
2120                 goto out;
2121         }
2122
2123         ret = ocfs2_check_range_for_refcount(inode, pos, len);
2124         if (ret < 0) {
2125                 mlog_errno(ret);
2126                 goto out;
2127         } else if (ret == 1) {
2128                 clusters_need = wc->w_clen;
2129                 ret = ocfs2_refcount_cow(inode, di_bh,
2130                                          wc->w_cpos, wc->w_clen, UINT_MAX);
2131                 if (ret) {
2132                         mlog_errno(ret);
2133                         goto out;
2134                 }
2135         }
2136
2137         ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc,
2138                                         &extents_to_split);
2139         if (ret) {
2140                 mlog_errno(ret);
2141                 goto out;
2142         }
2143         clusters_need += clusters_to_alloc;
2144
2145         di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
2146
2147         trace_ocfs2_write_begin_nolock(
2148                         (unsigned long long)OCFS2_I(inode)->ip_blkno,
2149                         (long long)i_size_read(inode),
2150                         le32_to_cpu(di->i_clusters),
2151                         pos, len, flags, mmap_page,
2152                         clusters_to_alloc, extents_to_split);
2153
2154         /*
2155          * We set w_target_from, w_target_to here so that
2156          * ocfs2_write_end() knows which range in the target page to
2157          * write out. An allocation requires that we write the entire
2158          * cluster range.
2159          */
2160         if (clusters_to_alloc || extents_to_split) {
2161                 /*
2162                  * XXX: We are stretching the limits of
2163                  * ocfs2_lock_allocators(). It greatly over-estimates
2164                  * the work to be done.
2165                  */
2166                 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode),
2167                                               wc->w_di_bh);
2168                 ret = ocfs2_lock_allocators(inode, &et,
2169                                             clusters_to_alloc, extents_to_split,
2170                                             &data_ac, &meta_ac);
2171                 if (ret) {
2172                         mlog_errno(ret);
2173                         goto out;
2174                 }
2175
2176                 if (data_ac)
2177                         data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
2178
2179                 credits = ocfs2_calc_extend_credits(inode->i_sb,
2180                                                     &di->id2.i_list);
2181
2182         }
2183
2184         /*
2185          * We have to zero sparse allocated clusters, unwritten extent clusters,
2186          * and non-sparse clusters we just extended.  For non-sparse writes,
2187          * we know zeros will only be needed in the first and/or last cluster.
2188          */
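        /*
         * In other words, cluster_of_pages answers "does this write
         * have to touch a full cluster's worth of pages, rather than
         * just the pages spanned by the user's bytes?"
         */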
2189         if (clusters_to_alloc || extents_to_split ||
2190             (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
2191                             wc->w_desc[wc->w_clen - 1].c_needs_zero)))
2192                 cluster_of_pages = 1;
2193         else
2194                 cluster_of_pages = 0;
2195
2196         ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
2197
2198         handle = ocfs2_start_trans(osb, credits);
2199         if (IS_ERR(handle)) {
2200                 ret = PTR_ERR(handle);
2201                 mlog_errno(ret);
2202                 goto out;
2203         }
2204
2205         wc->w_handle = handle;
2206
2207         if (clusters_to_alloc) {
2208                 ret = dquot_alloc_space_nodirty(inode,
2209                         ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
2210                 if (ret)
2211                         goto out_commit;
2212         }
2213
2214         ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
2215                                       OCFS2_JOURNAL_ACCESS_WRITE);
2216         if (ret) {
2217                 mlog_errno(ret);
2218                 goto out_quota;
2219         }
2220
2221         /*
2222          * Fill our page array first. That way we've grabbed enough so
2223          * that we can zero and flush if we error after adding the
2224          * extent.
2225          */
2226         ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
2227                                          cluster_of_pages, mmap_page);
2228         if (ret && ret != -EAGAIN) {
2229                 mlog_errno(ret);
2230                 goto out_quota;
2231         }
2232
2233         /*
2234          * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
2235          * the target page. In this case, we exit with no error and no target
2236          * page. This will trigger the caller, page_mkwrite(), to re-try
2237          * the operation.
2238          */
2239         if (ret == -EAGAIN) {
2240                 BUG_ON(wc->w_target_page);
2241                 ret = 0;
2242                 goto out_quota;
2243         }
2244
2245         ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos,
2246                                           len);
2247         if (ret) {
2248                 mlog_errno(ret);
2249                 goto out_quota;
2250         }
2251
2252         if (data_ac)
2253                 ocfs2_free_alloc_context(data_ac);
2254         if (meta_ac)
2255                 ocfs2_free_alloc_context(meta_ac);
2256
2257 success:
2258         *pagep = wc->w_target_page;
2259         *fsdata = wc;
2260         return 0;
2261 out_quota:
2262         if (clusters_to_alloc)
2263                 dquot_free_space(inode,
2264                           ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc));
2265 out_commit:
2266         ocfs2_commit_trans(osb, handle);
2267
2268 out:
2269         ocfs2_free_write_ctxt(wc);
2270
2271         if (data_ac) {
2272                 ocfs2_free_alloc_context(data_ac);
2273                 data_ac = NULL;
2274         }
2275         if (meta_ac) {
2276                 ocfs2_free_alloc_context(meta_ac);
2277                 meta_ac = NULL;
2278         }
2279
2280         if (ret == -ENOSPC && try_free) {
2281                 /*
2282                  * Try to free some truncate log so that we can have enough
2283                  * clusters to allocate.
2284                  */
2285                 try_free = 0;
2286
2287                 ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
2288                 if (ret1 == 1)
2289                         goto try_again;
2290
2291                 if (ret1 < 0)
2292                         mlog_errno(ret1);
2293         }
2294
2295         return ret;
2296 }
2297
2298 static int ocfs2_write_begin(struct file *file, struct address_space *mapping,
2299                              loff_t pos, unsigned len, unsigned flags,
2300                              struct page **pagep, void **fsdata)
2301 {
2302         int ret;
2303         struct buffer_head *di_bh = NULL;
2304         struct inode *inode = mapping->host;
2305
2306         ret = ocfs2_inode_lock(inode, &di_bh, 1);
2307         if (ret) {
2308                 mlog_errno(ret);
2309                 return ret;
2310         }
2311
2312         /*
2313          * Take alloc sem here to prevent concurrent lookups. That way
2314          * the mapping, zeroing and tree manipulation within
2315          * ocfs2_write() will be safe against ->readpage(). This
2316          * should also serve to lock out allocation from a shared
2317          * writeable region.
2318          */
2319         down_write(&OCFS2_I(inode)->ip_alloc_sem);
2320
2321         ret = ocfs2_write_begin_nolock(file, mapping, pos, len, flags, pagep,
2322                                        fsdata, di_bh, NULL);
2323         if (ret) {
2324                 mlog_errno(ret);
2325                 goto out_fail;
2326         }
2327
2328         brelse(di_bh);
2329
2330         return 0;
2331
2332 out_fail:
2333         up_write(&OCFS2_I(inode)->ip_alloc_sem);
2334
2335         brelse(di_bh);
2336         ocfs2_inode_unlock(inode, 1);
2337
2338         return ret;
2339 }
2340
2341 static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
2342                                    unsigned len, unsigned *copied,
2343                                    struct ocfs2_dinode *di,
2344                                    struct ocfs2_write_ctxt *wc)
2345 {
2346         void *kaddr;
2347
2348         if (unlikely(*copied < len)) {
2349                 if (!PageUptodate(wc->w_target_page)) {
2350                         *copied = 0;
2351                         return;
2352                 }
2353         }
2354
2355         kaddr = kmap_atomic(wc->w_target_page);
2356         memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
2357         kunmap_atomic(kaddr);
2358
2359         trace_ocfs2_write_end_inline(
2360              (unsigned long long)OCFS2_I(inode)->ip_blkno,
2361              (unsigned long long)pos, *copied,
2362              le16_to_cpu(di->id2.i_data.id_count),
2363              le16_to_cpu(di->i_dyn_features));
2364 }
2365
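/*
 * Commit the write: zero any new buffers the user didn't copy into,
 * flush the pages through the journal, then update i_size and the
 * dinode timestamps.  Returns the number of bytes copied, or a
 * negative error if the journal access fails.
 */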
2366 int ocfs2_write_end_nolock(struct address_space *mapping,
2367                            loff_t pos, unsigned len, unsigned copied,
2368                            struct page *page, void *fsdata)
2369 {
2370         int i, ret;
2371         unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1);
2372         struct inode *inode = mapping->host;
2373         struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2374         struct ocfs2_write_ctxt *wc = fsdata;
2375         struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
2376         handle_t *handle = wc->w_handle;
2377         struct page *tmppage;
2378
2379         ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
2380                         OCFS2_JOURNAL_ACCESS_WRITE);
2381         if (ret) {
2382                 copied = ret;
2383                 mlog_errno(ret);
2384                 goto out;
2385         }
2386
2387         if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
2388                 ocfs2_write_end_inline(inode, pos, len, &copied, di, wc);
2389                 goto out_write_size;
2390         }
2391
2392         if (unlikely(copied < len)) {
2393                 if (!PageUptodate(wc->w_target_page))
2394                         copied = 0;
2395
2396                 ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
2397                                        start+len);
2398         }
2399         flush_dcache_page(wc->w_target_page);
2400
2401         for(i = 0; i < wc->w_num_pages; i++) {
2402                 tmppage = wc->w_pages[i];
2403
2404                 if (tmppage == wc->w_target_page) {
2405                         from = wc->w_target_from;
2406                         to = wc->w_target_to;
2407
2408                         BUG_ON(from > PAGE_CACHE_SIZE ||
2409                                to > PAGE_CACHE_SIZE ||
2410                                to < from);
2411                 } else {
2412                         /*
2413                          * Pages adjacent to the target (if any) imply
2414                          * a hole-filling write in which case we want
2415                          * to flush their entire range.
2416                          */
2417                         from = 0;
2418                         to = PAGE_CACHE_SIZE;
2419                 }
2420
2421                 if (page_has_buffers(tmppage)) {
2422                         if (ocfs2_should_order_data(inode))
2423                                 ocfs2_jbd2_file_inode(wc->w_handle, inode);
2424                         block_commit_write(tmppage, from, to);
2425                 }
2426         }
2427
2428 out_write_size:
2429         pos += copied;
2430         if (pos > i_size_read(inode)) {
2431                 i_size_write(inode, pos);
2432                 mark_inode_dirty(inode);
2433         }
2434         inode->i_blocks = ocfs2_inode_sector_count(inode);
2435         di->i_size = cpu_to_le64((u64)i_size_read(inode));
2436         inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2437         di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
2438         di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
2439         ocfs2_update_inode_fsync_trans(handle, inode, 1);
2440         ocfs2_journal_dirty(handle, wc->w_di_bh);
2441
2442 out:
2443         /* Unlock pages before dealloc, since it needs to acquire the
2444          * j_trans_barrier lock; otherwise we could deadlock, as the journal
2445          * commit thread holds that lock and will ask for the page lock when
2446          * flushing the data.  Put it here to preserve the unlock order.
2447          */
2448         ocfs2_unlock_pages(wc);
2449
2450         ocfs2_commit_trans(osb, handle);
2451
2452         ocfs2_run_deallocs(osb, &wc->w_dealloc);
2453
2454         brelse(wc->w_di_bh);
2455         kfree(wc);
2456
2457         return copied;
2458 }
2459
2460 static int ocfs2_write_end(struct file *file, struct address_space *mapping,
2461                            loff_t pos, unsigned len, unsigned copied,
2462                            struct page *page, void *fsdata)
2463 {
2464         int ret;
2465         struct inode *inode = mapping->host;
2466
2467         ret = ocfs2_write_end_nolock(mapping, pos, len, copied, page, fsdata);
2468
2469         up_write(&OCFS2_I(inode)->ip_alloc_sem);
2470         ocfs2_inode_unlock(inode, 1);
2471
2472         return ret;
2473 }
2474
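/*
 * Note that several entries here are generic block-layer helpers
 * (block_invalidatepage, buffer_migrate_page, block_is_partially_uptodate,
 * generic_error_remove_page); the remaining hooks are the ocfs2-specific
 * implementations defined in this file.
 */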
2475 const struct address_space_operations ocfs2_aops = {
2476         .readpage               = ocfs2_readpage,
2477         .readpages              = ocfs2_readpages,
2478         .writepage              = ocfs2_writepage,
2479         .write_begin            = ocfs2_write_begin,
2480         .write_end              = ocfs2_write_end,
2481         .bmap                   = ocfs2_bmap,
2482         .direct_IO              = ocfs2_direct_IO,
2483         .invalidatepage         = block_invalidatepage,
2484         .releasepage            = ocfs2_releasepage,
2485         .migratepage            = buffer_migrate_page,
2486         .is_partially_uptodate  = block_is_partially_uptodate,
2487         .error_remove_page      = generic_error_remove_page,
2488 };