/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
        int for_gc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

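/**
 * nilfs_inode_add_blocks - update block count of an inode
 * @inode: inode whose block count is adjusted
 * @n: number of newly allocated blocks
 *
 * Adds @n blocks to the byte count of @inode and to the block counter
 * of the NILFS root object the inode belongs to, if any.
 */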
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic64_add(n, &root->blocks_count);
}

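/**
 * nilfs_inode_sub_blocks - update block count of an inode
 * @inode: inode whose block count is adjusted
 * @n: number of freed blocks
 *
 * Subtracts @n blocks from the byte count of @inode and from the block
 * counter of the NILFS root object the inode belongs to, if any.
 */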
void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 blknum = 0;
        int err = 0, ret;
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty_sync(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0);
                /* The disk block number must be changed to a proper value */
        } else if (ret == -ENOENT) {
                /*
                 * Not found is not an error (e.g. a hole); the buffer must
                 * be returned without the mapped state flag.
                 */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

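/**
 * nilfs_writepages() - implement writepages() method of nilfs_aops {}
 * address_space_operations.
 * @mapping: address_space to be written back
 * @wbc: writeback control
 *
 * Dirty pages of a read-only mount are discarded; otherwise a data-sync
 * segment is constructed only for WB_SYNC_ALL writeback.
 */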
static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

        if (inode->i_sb->s_flags & MS_RDONLY) {
                nilfs_clear_dirty_pages(mapping, false);
                return -EROFS;
        }

        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}

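/**
 * nilfs_writepage() - implement writepage() method of nilfs_aops {}
 * address_space_operations.
 * @page: page to be written out
 * @wbc: writeback control
 *
 * The page is redirtied and left to the segment constructor; for
 * WB_SYNC_ALL writeback a whole segment is constructed synchronously.
 */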
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        if (inode->i_sb->s_flags & MS_RDONLY) {
                /*
                 * This means that the filesystem was remounted read-only
                 * because of an error or metadata corruption, but dirty
                 * pages are still being flushed in the background.  So,
                 * here we simply discard this dirty page.
                 */
                nilfs_clear_dirty_page(page, false);
                unlock_page(page);
                return -EROFS;
        }

        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}

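/**
 * nilfs_set_page_dirty() - implement set_page_dirty() method of nilfs_aops {}
 * address_space_operations.
 * @page: page to be marked dirty
 *
 * Marks the page dirty and registers the number of newly dirtied
 * blocks with the segment constructor via nilfs_set_file_dirty().
 */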
static int nilfs_set_page_dirty(struct page *page)
{
        struct inode *inode = page->mapping->host;
        int ret = __set_page_dirty_nobuffers(page);

        if (page_has_buffers(page)) {
                unsigned nr_dirty = 0;
                struct buffer_head *bh, *head;

                /*
                 * This page is locked by callers, and no other thread
                 * concurrently marks its buffers dirty since they are
                 * only dirtied through routines in fs/buffer.c, where
                 * the call sites of mark_buffer_dirty() are protected
                 * by the page lock.
                 */
                bh = head = page_buffers(page);
                do {
                        /* Do not mark hole blocks dirty */
                        if (buffer_dirty(bh) || !buffer_mapped(bh))
                                continue;

                        set_buffer_dirty(bh);
                        nr_dirty++;
                } while (bh = bh->b_this_page, bh != head);

                if (nr_dirty)
                        nilfs_set_file_dirty(inode, nr_dirty);
        } else if (ret) {
                unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);

                nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
}

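/**
 * nilfs_write_failed - clean up after a failed or short write
 * @mapping: address_space of the file written to
 * @to: end offset of the write that failed
 *
 * Truncates page cache and blocks instantiated beyond i_size by the
 * failed write.
 */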
void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                nilfs_truncate(inode);
        }
}

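/**
 * nilfs_write_begin() - implement write_begin() method of nilfs_aops {}
 * address_space_operations.
 *
 * Begins a NILFS transaction before preparing the page so that the
 * following write_end() can commit the change atomically.
 */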
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        err = block_write_begin(mapping, pos, len, flags, pagep,
                                nilfs_get_block);
        if (unlikely(err)) {
                nilfs_write_failed(mapping, pos + len);
                nilfs_transaction_abort(inode->i_sb);
        }
        return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}

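/**
 * nilfs_direct_IO() - implement direct_IO() method of nilfs_aops {}
 * address_space_operations.
 * @iocb: kernel I/O control block
 * @iter: source or destination of the I/O
 * @offset: file offset of the I/O
 *
 * Direct writes are not supported and return zero so that they fall
 * back to buffered writes; reads go through blockdev_direct_IO().
 */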
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file->f_mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t size;

        if (iov_iter_rw(iter) == WRITE)
                return 0;

        /* Needs synchronization with the cleaner */
        size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);

        /*
         * In case of error, an extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
        if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;

                if (end > isize)
                        nilfs_write_failed(mapping, end);
        }

        return size;
}

const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .readpage               = nilfs_readpage,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
        .readpages              = nilfs_readpages,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        /* .releasepage         = nilfs_releasepage, */
        .invalidatepage         = block_invalidatepage,
        .direct_IO              = nilfs_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
};

static int nilfs_insert_inode_locked(struct inode *inode,
                                     struct nilfs_root *root,
                                     unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

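/**
 * nilfs_new_inode - allocate a new inode on a NILFS filesystem
 * @dir: parent directory of the new inode
 * @mode: file type and access mode of the new inode
 *
 * Allocates an entry in the ifile of the checkpoint @dir belongs to,
 * initializes the in-core inode, and inserts it into the inode hash.
 *
 * Return: pointer to the new inode on success, or an ERR_PTR() value
 * on failure.
 */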
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
        struct super_block *sb = dir->i_sb;
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct inode *inode;
        struct nilfs_inode_info *ii;
        struct nilfs_root *root;
        int err = -ENOMEM;
        ino_t ino;

        inode = new_inode(sb);
        if (unlikely(!inode))
                goto failed;

        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
        ii->i_state = 1 << NILFS_I_NEW;
        ii->i_root = root;

        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* reference count of i_bh inherits from nilfs_mdt_read_block() */

        atomic64_inc(&root->inodes_count);
        inode_init_owner(inode, dir, mode);
        inode->i_ino = ino;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
                        goto failed_after_creation;

                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }

        ii->i_flags = nilfs_mask_flags(
                mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

        /* ii->i_file_acl = 0; */
        /* ii->i_dir_acl = 0; */
        ii->i_dir_start_lookup = 0;
        nilfs_set_inode_flags(inode);
        spin_lock(&nilfs->ns_next_gen_lock);
        inode->i_generation = nilfs->ns_next_generation++;
        spin_unlock(&nilfs->ns_next_gen_lock);
        if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
                err = -EIO;
                goto failed_after_creation;
        }

        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
                goto failed_after_creation; /* never occurs. When supporting
                                    nilfs_init_acl(), proper cancellation of
                                    above jobs should be considered */

        return inode;

 failed_after_creation:
        clear_nlink(inode);
        unlock_new_inode(inode);
        iput(inode);  /* raw_inode will be deleted through
                         nilfs_evict_inode() */
        goto failed;

 failed_ifile_create_inode:
        make_bad_inode(inode);
        iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
                         called */
 failed:
        return ERR_PTR(err);
}

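/**
 * nilfs_set_inode_flags - propagate NILFS inode flags to VFS flags
 * @inode: inode whose i_flags are updated
 *
 * Translates the FS_*_FL flags stored in the NILFS inode into the
 * corresponding S_* flags of the VFS inode.
 */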
void nilfs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = NILFS_I(inode)->i_flags;
        unsigned int new_fl = 0;

        if (flags & FS_SYNC_FL)
                new_fl |= S_SYNC;
        if (flags & FS_APPEND_FL)
                new_fl |= S_APPEND;
        if (flags & FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;
        if (flags & FS_NOATIME_FL)
                new_fl |= S_NOATIME;
        if (flags & FS_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
        inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
                        S_NOATIME | S_DIRSYNC);
}

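/**
 * nilfs_read_inode_common - fill in-core inode from an on-disk inode
 * @inode: in-core inode to be filled
 * @raw_inode: on-disk inode
 *
 * Copies the common fields of @raw_inode into @inode and reads the
 * bmap of regular files, directories, and symlinks.
 *
 * Return: 0 on success, -ESTALE if the inode was deleted, or another
 * negative error code on failure.
 */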
int nilfs_read_inode_common(struct inode *inode,
                            struct nilfs_inode *raw_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
        i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
        inode->i_size = le64_to_cpu(raw_inode->i_size);
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        if (inode->i_nlink == 0)
                return -ESTALE; /* this inode is deleted */

        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
        ii->i_dir_start_lookup = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);

        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)) {
                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
                if (err < 0)
                        return err;
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }
        return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
                              struct nilfs_root *root, unsigned long ino,
                              struct inode *inode)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct buffer_head *bh;
        struct nilfs_inode *raw_inode;
        int err;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
        if (unlikely(err))
                goto bad_inode;

        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

        err = nilfs_read_inode_common(inode, raw_inode);
        if (err)
                goto failed_unmap;

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &nilfs_dir_inode_operations;
                inode->i_fop = &nilfs_dir_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &nilfs_symlink_inode_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else {
                inode->i_op = &nilfs_special_inode_operations;
                init_special_inode(
                        inode, inode->i_mode,
                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
        }
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        nilfs_set_inode_flags(inode);
        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
        return 0;

 failed_unmap:
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);

 bad_inode:
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;
        struct nilfs_inode_info *ii;

        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
                return 0;

        ii = NILFS_I(inode);
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;

        return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;

        inode->i_ino = args->ino;
        if (args->for_gc) {
                NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
                NILFS_I(inode)->i_cno = args->cno;
                NILFS_I(inode)->i_root = NULL;
        } else {
                if (args->root && args->ino == NILFS_ROOT_INO)
                        nilfs_get_root(args->root);
                NILFS_I(inode)->i_root = args->root;
        }
        return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

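/**
 * nilfs_iget - look up or read an inode of a mounted checkpoint
 * @sb: super block instance
 * @root: NILFS root object of the checkpoint
 * @ino: inode number
 *
 * Returns the inode cached for (@root, @ino) or reads it from the
 * ifile of @root if it is not in the inode cache.
 *
 * Return: pointer to the inode on success, or an ERR_PTR() value on
 * failure.
 */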
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = nilfs_iget_locked(sb, root, ino);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = __nilfs_read_inode(sb, root, ino, inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

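/**
 * nilfs_iget_for_gc - obtain a dedicated inode for garbage collection
 * @sb: super block instance
 * @ino: inode number
 * @cno: checkpoint number the GC inode belongs to
 *
 * GC inodes are distinguished from regular inodes by their checkpoint
 * number, so that blocks of different checkpoints of the same file can
 * be cached simultaneously during cleaning.
 */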
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
        };
        struct inode *inode;
        int err;

        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = nilfs_init_gcinode(inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

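/**
 * nilfs_write_inode_common - write in-core inode to an on-disk inode
 * @inode: in-core inode to be written out
 * @raw_inode: on-disk inode buffer
 * @has_bmap: whether the bmap should be written as well
 *
 * Copies the common fields of @inode into @raw_inode; the unused tail
 * of root metadata file entries is zero-filled for the super root.
 */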
void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
        raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);

        if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

                /* zero-fill unused portion in the case of super root block */
                raw_inode->i_xattr = 0;
                raw_inode->i_pad = 0;
                memset((void *)raw_inode + sizeof(*raw_inode), 0,
                       nilfs->ns_inode_size - sizeof(*raw_inode));
        }

        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_device_code =
                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
        /*
         * When extending the inode, nilfs->ns_inode_size should be checked
         * for substitutions of appended fields.
         */
}

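/**
 * nilfs_update_inode - reflect an in-core inode into its ifile block
 * @inode: inode to be written out
 * @ibh: buffer head of the ifile block containing the on-disk inode
 * @flags: dirty flags; I_DIRTY_DATASYNC forces the inode itself to be
 *      written in the next segment construction
 */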
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
        ino_t ino = inode->i_ino;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *ifile = ii->i_root->ifile;
        struct nilfs_inode *raw_inode;

        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
        if (flags & I_DIRTY_DATASYNC)
                set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

        nilfs_write_inode_common(inode, raw_inode, 0);
        /*
         * XXX: call with has_bmap = 0 is a workaround to avoid
         * deadlock of bmap.  This delays update of i_bmap to just
         * before writing.
         */
        nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */

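/**
 * nilfs_truncate_bmap - truncate the bmap of an inode in chunks
 * @ii: nilfs inode information
 * @from: first block offset to be truncated
 *
 * Repeatedly cuts off at most NILFS_MAX_TRUNCATE_BLOCKS blocks from
 * the tail of the bmap so that lock pressure can be relaxed between
 * the steps.
 */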
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                                unsigned long from)
{
        __u64 b;
        int ret;

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
repeat:
        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
        if (ret == -ENOENT)
                return;
        else if (ret < 0)
                goto failed;

        if (b < from)
                return;

        b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
        ret = nilfs_bmap_truncate(ii->i_bmap, b);
        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
        if (!ret || (ret == -ENOMEM &&
                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
                goto repeat;

failed:
        nilfs_warning(ii->vfs_inode.i_sb, __func__,
                      "failed to truncate bmap (ino=%lu, err=%d)",
                      ii->vfs_inode.i_ino, ret);
}

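/**
 * nilfs_truncate - truncate blocks outside i_size
 * @inode: inode to be truncated
 *
 * Zeroes the partial block at the new end of file, truncates the bmap
 * beyond the new size, and marks the inode dirty within a transaction.
 */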
void nilfs_truncate(struct inode *inode)
{
        unsigned long blkoff;
        unsigned int blocksize;
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        blocksize = sb->s_blocksize;
        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

        nilfs_truncate_bmap(ii, blkoff);

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);

        nilfs_mark_inode_dirty(inode);
        nilfs_set_file_dirty(inode, 0);
        nilfs_transaction_commit(sb);
        /*
         * May construct a logical segment and may fail in sync mode.
         * But truncate has no return value.
         */
}

static void nilfs_clear_inode(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        /*
         * Free resources allocated in nilfs_read_inode(), here.
         */
        BUG_ON(!list_empty(&ii->i_dirty));
        brelse(ii->i_bh);
        ii->i_bh = NULL;

        if (mdi && mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);

        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);

        nilfs_btnode_cache_clear(&ii->i_btnode_cache);

        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
}

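/**
 * nilfs_evict_inode - implement evict_inode() method of nilfs super
 * operations.
 * @inode: inode being evicted
 *
 * Unlinked inodes have their blocks truncated and their ifile entry
 * deleted within a transaction; other inodes just release their
 * in-core resources.
 */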
void nilfs_evict_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int ret;

        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
                nilfs_clear_inode(inode);
                return;
        }
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        truncate_inode_pages_final(&inode->i_data);

        /* TODO: some of the following operations may fail.  */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
        clear_inode(inode);

        ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
        if (!ret)
                atomic64_dec(&ii->i_root->inodes_count);

        nilfs_clear_inode(inode);

        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        nilfs_transaction_commit(sb);
        /*
         * May construct a logical segment and may fail in sync mode.
         * But delete_inode has no return value.
         */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = d_inode(dentry);
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                inode_dio_wait(inode);
                truncate_setsize(inode, iattr->ia_size);
                nilfs_truncate(inode);
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);

        if (iattr->ia_valid & ATTR_MODE) {
                err = nilfs_acl_chmod(inode);
                if (unlikely(err))
                        goto out_err;
        }

        return nilfs_transaction_commit(sb);

out_err:
        nilfs_transaction_abort(sb);
        return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        if ((mask & MAY_WRITE) && root &&
            root->cno != NILFS_CPTREE_CURRENT_CNO)
                return -EROFS; /* snapshot is not writable */

        return generic_permission(inode, mask);
}

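/**
 * nilfs_load_inode_block - load and pin the ifile block of an inode
 * @inode: inode whose ifile block is requested
 * @pbh: pointer to store the buffer head of the block
 *
 * Caches the buffer head in ii->i_bh on first use and returns it with
 * an extra reference; callers must release it with brelse().
 */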
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        spin_lock(&nilfs->ns_inode_lock);
        if (ii->i_bh == NULL) {
                spin_unlock(&nilfs->ns_inode_lock);
                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                                                  inode->i_ino, pbh);
                if (unlikely(err))
                        return err;
                spin_lock(&nilfs->ns_inode_lock);
                if (ii->i_bh == NULL)
                        ii->i_bh = *pbh;
                else {
                        brelse(*pbh);
                        *pbh = ii->i_bh;
                }
        } else
                *pbh = ii->i_bh;

        get_bh(*pbh);
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        int ret = 0;

        if (!list_empty(&ii->i_dirty)) {
                spin_lock(&nilfs->ns_inode_lock);
                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
                        test_bit(NILFS_I_BUSY, &ii->i_state);
                spin_unlock(&nilfs->ns_inode_lock);
        }
        return ret;
}

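/**
 * nilfs_set_file_dirty - register a dirty file with the segment constructor
 * @inode: inode of the dirty file
 * @nr_dirty: number of newly dirtied blocks
 *
 * Accumulates @nr_dirty into the dirty block counter and, if the inode
 * is not yet queued, moves it onto the list of dirty files holding an
 * inode reference.
 */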
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

        atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
                return 0;

        spin_lock(&nilfs->ns_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                /*
                 * Because this routine may race with nilfs_dispose_list(),
                 * we have to check NILFS_I_QUEUED here, too.
                 */
                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
                        /*
                         * This will happen when somebody is freeing
                         * this inode.
                         */
                        nilfs_warning(inode->i_sb, __func__,
                                      "cannot get inode (ino=%lu)\n",
                                      inode->i_ino);
                        spin_unlock(&nilfs->ns_inode_lock);
                        return -EINVAL; /*
                                         * NILFS_I_DIRTY may remain for
                                         * freeing inode.
                                         */
                }
                list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
        struct buffer_head *ibh;
        int err;

        err = nilfs_load_inode_block(inode, &ibh);
        if (unlikely(err)) {
                nilfs_warning(inode->i_sb, __func__,
                              "failed to reget inode block.\n");
                return err;
        }
        nilfs_update_inode(inode, ibh, flags);
        mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: dirty state flags (I_DIRTY_SYNC, I_DIRTY_DATASYNC, etc.)
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
        struct nilfs_transaction_info ti;
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        if (is_bad_inode(inode)) {
                nilfs_warning(inode->i_sb, __func__,
                              "tried to mark bad_inode dirty. ignored.\n");
                dump_stack();
                return;
        }
        if (mdi) {
                nilfs_mdt_mark_dirty(inode);
                return;
        }
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        __nilfs_mark_inode_dirty(inode, flags);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
}

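/**
 * nilfs_fiemap - implement the fiemap() inode operation
 * @inode: inode of the target file
 * @fieinfo: fiemap extent information
 * @start: start offset of the range to be mapped
 * @len: length of the range to be mapped
 *
 * Walks the bmap and the list of uncommitted (delayed allocation)
 * extents to report the extents of the file to user space.
 */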
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 __u64 start, __u64 len)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 logical = 0, phys = 0, size = 0;
        __u32 flags = 0;
        loff_t isize;
        sector_t blkoff, end_blkoff;
        sector_t delalloc_blkoff;
        unsigned long delalloc_blklen;
        unsigned int blkbits = inode->i_blkbits;
        int ret, n;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);

        blkoff = start >> blkbits;
        end_blkoff = (start + len - 1) >> blkbits;

        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
                                                        &delalloc_blkoff);

        do {
                __u64 blkphy;
                unsigned int maxblocks;

                if (delalloc_blklen && blkoff == delalloc_blkoff) {
                        if (size) {
                                /* End of the current extent */
                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                        }
                        if (blkoff > end_blkoff)
                                break;

                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
                        logical = blkoff << blkbits;
                        phys = 0;
                        size = delalloc_blklen << blkbits;

                        blkoff = delalloc_blkoff + delalloc_blklen;
                        delalloc_blklen = nilfs_find_uncommitted_extent(
                                inode, blkoff, &delalloc_blkoff);
                        continue;
                }

                /*
                 * Limit the number of blocks that we look up so as
                 * not to get into the next delayed allocation extent.
                 */
                maxblocks = INT_MAX;
                if (delalloc_blklen)
                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
                                          maxblocks);
                blkphy = 0;

                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
                n = nilfs_bmap_lookup_contig(
                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

                if (n < 0) {
                        int past_eof;

                        if (unlikely(n != -ENOENT))
                                break; /* error */

                        /* HOLE */
                        blkoff++;
                        past_eof = ((blkoff << blkbits) >= isize);

                        if (size) {
                                /* End of the current extent */

                                if (past_eof)
                                        flags |= FIEMAP_EXTENT_LAST;

                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                                size = 0;
                        }
                        if (blkoff > end_blkoff || past_eof)
                                break;
                } else {
                        if (size) {
                                if (phys && blkphy << blkbits == phys + size) {
                                        /* The current extent goes on */
                                        size += n << blkbits;
                                } else {
                                        /* Terminate the current extent */
                                        ret = fiemap_fill_next_extent(
                                                fieinfo, logical, phys, size,
                                                flags);
                                        if (ret || blkoff > end_blkoff)
                                                break;

                                        /* Start another extent */
                                        flags = FIEMAP_EXTENT_MERGED;
                                        logical = blkoff << blkbits;
                                        phys = blkphy << blkbits;
                                        size = n << blkbits;
                                }
                        } else {
                                /* Start a new extent */
                                flags = FIEMAP_EXTENT_MERGED;
                                logical = blkoff << blkbits;
                                phys = blkphy << blkbits;
                                size = n << blkbits;
                        }
                        blkoff += n;
                }
                cond_resched();
        } while (true);

        /* If ret is 1 then we just hit the end of the extent array */
        if (ret == 1)
                ret = 0;

        mutex_unlock(&inode->i_mutex);
        return ret;
}