Upgrade to 4.4.50-rt62
kvmfornfv.git: kernel/fs/ext4/file.c
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

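/*
 * Wait for all pending conversions of unwritten extents on this inode
 * to complete, i.e. until i_unwritten drops to zero. Used to drain
 * in-flight AIO before an unaligned direct write is allowed to
 * proceed.
 */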
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If two AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

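/*
 * Write path for regular files. Unaligned direct AIO (and direct AIO
 * to O_APPEND files) is serialized on a per-inode mutex, writes to
 * bitmap-format (non-extent) files are capped at s_bitmap_maxbytes,
 * and for direct IO we detect the pure-overwrite case up front so the
 * lower direct IO path can relax its locking.
 */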
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(iocb->ki_filp);
        struct mutex *aio_mutex = NULL;
        struct blk_plug plug;
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int overwrite = 0;
        ssize_t ret;

        /*
         * Unaligned direct AIO must be serialized; see the comment above.
         * In the case of O_APPEND, assume that we must always serialize.
         */
        if (o_direct &&
            ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            (iocb->ki_flags & IOCB_APPEND ||
             ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
                aio_mutex = ext4_aio_mutex(inode);
                mutex_lock(aio_mutex);
                ext4_unwritten_wait(inode);
        }

        mutex_lock(&inode->i_mutex);
        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
                        ret = -EFBIG;
                        goto out;
                }
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }

        iocb->private = &overwrite;
        if (o_direct) {
                size_t length = iov_iter_count(from);
                loff_t pos = iocb->ki_pos;
                blk_start_plug(&plug);

                /* check whether we do a DIO overwrite or not */
                if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
                    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
                        struct ext4_map_blocks map;
                        unsigned int blkbits = inode->i_blkbits;
                        int err, len;

                        map.m_lblk = pos >> blkbits;
                        map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
                                - map.m_lblk;
                        len = map.m_len;

                        err = ext4_map_blocks(NULL, inode, &map, 0);
                        /*
                         * 'err == len' means that all of the blocks have
                         * been preallocated, whether or not they are
                         * initialized.  To exclude unwritten extents, we
                         * also need to check m_flags.  There are two
                         * conditions that indicate an initialized extent:
                         * 1) if we hit the extent cache, the
                         * EXT4_MAP_MAPPED flag is returned; 2) if we do a
                         * real lookup, no flags are returned.  So we need
                         * to check for both of these conditions.
                         */
                        if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
                                overwrite = 1;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        mutex_unlock(&inode->i_mutex);

        if (ret > 0) {
                ssize_t err;

                err = generic_write_sync(file, iocb->ki_pos - ret, ret);
                if (err < 0)
                        ret = err;
        }
        if (o_direct)
                blk_finish_plug(&plug);

        if (aio_mutex)
                mutex_unlock(aio_mutex);
        return ret;

out:
        mutex_unlock(&inode->i_mutex);
        if (aio_mutex)
                mutex_unlock(aio_mutex);
        return ret;
}

#ifdef CONFIG_FS_DAX
static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate)
{
        struct inode *inode = bh->b_assoc_map->host;
        /* XXX: breaks on 32-bit > 16TB. Is that even supported? */
        loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits;
        int err;
        if (!uptodate)
                return;
        WARN_ON(!buffer_unwritten(bh));
        err = ext4_convert_unwritten_extents(NULL, inode, offset, bh->b_size);
}

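/*
 * Page fault handler for DAX files. For a write fault we start a
 * journal handle before calling into the generic DAX code (which may
 * allocate blocks) and hold i_mmap_sem shared to serialize against
 * truncate and hole punching.
 */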
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int result;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = vmf->flags & FAULT_FLAG_WRITE;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                                EXT4_DATA_TRANS_BLOCKS(sb));
        } else
                down_read(&EXT4_I(inode)->i_mmap_sem);

        if (IS_ERR(handle))
                result = VM_FAULT_SIGBUS;
        else
                result = __dax_fault(vma, vmf, ext4_get_block_dax,
                                                ext4_end_io_unwritten);

        if (write) {
                if (!IS_ERR(handle))
                        ext4_journal_stop(handle);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else
                up_read(&EXT4_I(inode)->i_mmap_sem);

        return result;
}

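/*
 * Huge-page (PMD) fault handler for DAX files. Same locking scheme as
 * ext4_dax_fault(), but the journal handle is sized with
 * ext4_chunk_trans_blocks() for a PMD's worth of pages.
 */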
static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                                                pmd_t *pmd, unsigned int flags)
{
        int result;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = flags & FAULT_FLAG_WRITE;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                ext4_chunk_trans_blocks(inode,
                                                        PMD_SIZE / PAGE_SIZE));
        } else
                down_read(&EXT4_I(inode)->i_mmap_sem);

        if (IS_ERR(handle))
                result = VM_FAULT_SIGBUS;
        else
                result = __dax_pmd_fault(vma, addr, pmd, flags,
                                ext4_get_block_dax, ext4_end_io_unwritten);

        if (write) {
                if (!IS_ERR(handle))
                        ext4_journal_stop(handle);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else
                up_read(&EXT4_I(inode)->i_mmap_sem);

        return result;
}

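/*
 * mkwrite handler for DAX files: called when a read-only mapping of a
 * DAX page is written to. Runs under i_mmap_sem (shared) to protect
 * against truncate while the fault is handled.
 */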
static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int err;
        struct inode *inode = file_inode(vma->vm_file);

        sb_start_pagefault(inode->i_sb);
        file_update_time(vma->vm_file);
        down_read(&EXT4_I(inode)->i_mmap_sem);
        err = __dax_mkwrite(vma, vmf, ext4_get_block_dax,
                            ext4_end_io_unwritten);
        up_read(&EXT4_I(inode)->i_mmap_sem);
        sb_end_pagefault(inode->i_sb);

        return err;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings. As in the
 * ext4_dax_mkwrite() handler, we check for races against truncate.
 * Note that, since we cycle through i_mmap_sem, we are sure that any
 * hole punching that began before we were called has finished by now,
 * so if it covered part of the file we are working on, our pte will
 * have been unmapped and the pte_same() check in wp_pfn_shared() will
 * fail. The fault is then retried and things work out as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        int ret = VM_FAULT_NOPAGE;
        loff_t size;

        sb_start_pagefault(sb);
        file_update_time(vma->vm_file);
        down_read(&EXT4_I(inode)->i_mmap_sem);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                ret = VM_FAULT_SIGBUS;
        up_read(&EXT4_I(inode)->i_mmap_sem);
        sb_end_pagefault(sb);

        return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .pmd_fault      = ext4_dax_pmd_fault,
        .page_mkwrite   = ext4_dax_mkwrite,
        .pfn_mkwrite    = ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

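/*
 * Set up a file mapping. DAX inodes get the DAX vm_ops and are marked
 * VM_MIXEDMAP | VM_HUGEPAGE; everything else gets the regular
 * page-cache vm_ops. Encrypted inodes cannot be mapped until the
 * encryption key is available.
 */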
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;

        if (ext4_encrypted_inode(inode)) {
                int err = ext4_get_encryption_info(inode);
                if (err)
                        return 0;
                if (ext4_encryption_info(inode) == NULL)
                        return -ENOKEY;
        }
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

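/*
 * Called at every open. On the first open after a read-write mount,
 * the mount point is sampled and recorded in the superblock's
 * s_last_mounted field. Encrypted inodes require the encryption key,
 * and opens for writing attach the jbd2 inode if the journal is
 * present.
 */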
static int ext4_file_open(struct inode * inode, struct file * filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;
        int ret;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        if (ext4_encrypted_inode(inode)) {
                ret = ext4_get_encryption_info(inode);
                if (ret)
                        return -EACCES;
                if (ext4_encryption_info(inode) == NULL)
                        return -ENOKEY;
        }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }
        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space() because we can
 * implement SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped
 * files in the same function.  Once the extent status tree has been
 * fully implemented, it will track all extent status for a file and we
 * can use it directly to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we have to look
 * up the page cache to check whether there is any data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, the
 * extent is treated as data or as a hole depending on whether its
 * pages are in the page cache.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     struct ext4_map_blocks *map,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first pass through the loop, or
                         * the offset is not yet beyond the end offset,
                         * there is a hole at this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first pass through the loop and the
                 * offset is smaller than the first page offset, there is
                 * a hole at this offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If we have not yet passed the end of the given
                         * range but this page lies beyond it, there is a
                         * hole before the page.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        lastoff = page_offset(page);
                        bh = head = page_buffers(page);
                        do {
                                if (buffer_uptodate(bh) ||
                                    buffer_unwritten(bh)) {
                                        if (whence == SEEK_DATA)
                                                found = 1;
                                } else {
                                        if (whence == SEEK_HOLE)
                                                found = 1;
                                }
                                if (found) {
                                        *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                        unlock_page(page);
                                        goto out;
                                }
                                lastoff += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * If fewer pages were found than we asked for, the rest
                 * of the range must be a hole.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

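        /*
         * Walk the block mappings from the start block towards EOF and
         * stop at the first block that counts as data: a written extent,
         * a delayed-allocation extent, or an unwritten extent whose
         * pages are present in the page cache.
         */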
        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is a delayed extent at this offset, it counts
                 * as data.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether its
                 * pages are in the page cache.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                                              &map, &dataoff);
                        if (unwritten)
                                break;
                }

                last++;
                dataoff = (loff_t)last << blkbits;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

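        /*
         * Walk the block mappings from the start block towards EOF,
         * skipping everything that counts as data (written extents,
         * delayed-allocation extents, and unwritten extents backed by
         * the page cache); the first block that remains is a hole.
         */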
        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        last += ret;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is a delayed extent at this offset, skip over
                 * it.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is an unwritten extent at this offset, it is
                 * treated as data or as a hole depending on whether its
                 * pages are in the page cache.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                                              &map, &holeoff);
                        if (!unwritten) {
                                last += ret;
                                holeoff = (loff_t)last << blkbits;
                                continue;
                        }
                }

                /* found a hole */
                break;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes
 * values by calling generic_file_llseek_size() with the appropriate
 * maxbytes value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};