2 * aops.c - NTFS kernel address space operations and page cache handling.
4 * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
5 * Copyright (c) 2002 Richard Russon
7 * This program/include file is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as published
9 * by the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program/include file is distributed in the hope that it will be
13 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program (in the main directory of the Linux-NTFS
19 * distribution in the file COPYING); if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/errno.h>
25 #include <linux/gfp.h>
27 #include <linux/pagemap.h>
28 #include <linux/swap.h>
29 #include <linux/buffer_head.h>
30 #include <linux/writeback.h>
31 #include <linux/bit_spinlock.h>
43 * ntfs_end_buffer_async_read - async io completion for reading attributes
44 * @bh: buffer head on which io is completed
45 * @uptodate: whether @bh is now uptodate or not
47 * Asynchronous I/O completion handler for reading pages belonging to the
48 * attribute address space of an inode. The inodes can either be files or
49 * directories or they can be fake inodes describing some attribute.
51 * If NInoMstProtected(), perform the post read mst fixups when all IO on the
52 * page has been completed and mark the page uptodate or set the error bit on
53 * the page. To determine the size of the records that need fixing up, we
54 * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
55 * record size, and index_block_size_bits to the log(base 2) of the ntfs record size.
58 static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
61 struct buffer_head *first, *tmp;
65 int page_uptodate = 1;
68 vi = page->mapping->host;
71 if (likely(uptodate)) {
73 s64 file_ofs, init_size;
75 set_buffer_uptodate(bh);
77 file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
79 read_lock_irqsave(&ni->size_lock, flags);
80 init_size = ni->initialized_size;
81 i_size = i_size_read(vi);
82 read_unlock_irqrestore(&ni->size_lock, flags);
83 if (unlikely(init_size > i_size)) {
84 /* Race with shrinking truncate. */
87 /* Check for the current buffer head overflowing. */
88 if (unlikely(file_ofs + bh->b_size > init_size)) {
93 if (file_ofs < init_size)
94 ofs = init_size - file_ofs;
95 local_irq_save(flags);
96 kaddr = kmap_atomic(page);
97 memset(kaddr + bh_offset(bh) + ofs, 0,
99 flush_dcache_page(page);
100 kunmap_atomic(kaddr);
101 local_irq_restore(flags);
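/*
 * Illustrative example (an assumption, not taken from the original code):
 * with 4KiB pages and 512-byte blocks, suppose initialized_size ends 256
 * bytes into this buffer.  Then file_ofs < init_size, ofs becomes
 * init_size - file_ofs = 256, and the memset above clears the remaining
 * 256 bytes of the buffer, so only the never-initialized tail is zeroed
 * while the valid head read from disk is preserved.
 */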
104 clear_buffer_uptodate(bh);
106 ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
107 "0x%llx.", (unsigned long long)bh->b_blocknr);
109 first = page_buffers(page);
110 flags = bh_uptodate_lock_irqsave(first);
111 clear_buffer_async_read(bh);
115 if (!buffer_uptodate(tmp))
117 if (buffer_async_read(tmp)) {
118 if (likely(buffer_locked(tmp)))
120 /* Async buffers must be locked. */
123 tmp = tmp->b_this_page;
125 bh_uptodate_unlock_irqrestore(first, flags);
127 * If none of the buffers had errors then we can set the page uptodate,
128 * but we first have to perform the post read mst fixups, if the
129 * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
130 * Note we ignore fixup errors as those are detected when
131 * map_mft_record() is called which gives us per record granularity
132 * rather than per page granularity.
134 if (!NInoMstProtected(ni)) {
135 if (likely(page_uptodate && !PageError(page)))
136 SetPageUptodate(page);
139 unsigned int i, recs;
142 rec_size = ni->itype.index.block_size;
143 recs = PAGE_CACHE_SIZE / rec_size;
144 /* Should have been verified before we got here... */
146 local_irq_save_nort(flags);
147 kaddr = kmap_atomic(page);
148 for (i = 0; i < recs; i++)
149 post_read_mst_fixup((NTFS_RECORD*)(kaddr +
150 i * rec_size), rec_size);
151 kunmap_atomic(kaddr);
152 local_irq_restore_nort(flags);
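/*
 * Worked example (illustrative assumption): for $MFT, whose mft records
 * are typically 1KiB, rec_size is 1024, so a 4KiB page holds
 * recs = PAGE_CACHE_SIZE / rec_size = 4 records and the loop above runs
 * post_read_mst_fixup() once on each of them.
 */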
153 flush_dcache_page(page);
154 if (likely(page_uptodate && !PageError(page)))
155 SetPageUptodate(page);
160 bh_uptodate_unlock_irqrestore(first, flags);
164 * ntfs_read_block - fill a @page of an address space with data
165 * @page: page cache page to fill with data
167 * Fill the page @page of the address space belonging to the @page->mapping->host inode.
168 * We read each buffer asynchronously and when all buffers are read in, our io
169 * completion handler ntfs_end_buffer_async_read(), if required, automatically
170 * applies the mst fixups to the page before finally marking it uptodate and
173 * We only enforce allocated_size limit because i_size is checked for in
174 * generic_file_read().
176 * Return 0 on success and -errno on error.
178 * Contains an adapted version of fs/buffer.c::block_read_full_page().
180 static int ntfs_read_block(struct page *page)
190 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
191 sector_t iblock, lblock, zblock;
193 unsigned int blocksize, vcn_ofs;
195 unsigned char blocksize_bits;
197 vi = page->mapping->host;
201 /* $MFT/$DATA must have its complete runlist in memory at all times. */
202 BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
204 blocksize = vol->sb->s_blocksize;
205 blocksize_bits = vol->sb->s_blocksize_bits;
207 if (!page_has_buffers(page)) {
208 create_empty_buffers(page, blocksize, 0);
209 if (unlikely(!page_has_buffers(page))) {
214 bh = head = page_buffers(page);
218 * We may be racing with truncate. To avoid some of the problems we
219 * now take a snapshot of the various sizes and use those for the whole
220 * of the function. In case of an extending truncate it just means we
221 * may leave some buffers unmapped which are now allocated. This is
222 * not a problem since these buffers will just get mapped when a write
223 * occurs. In case of a shrinking truncate, we will detect this later
224 * on due to the runlist being incomplete and if the page is being
225 * fully truncated, truncate will throw it away as soon as we unlock
226 * it so no need to worry what we do with it.
228 iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
229 read_lock_irqsave(&ni->size_lock, flags);
230 lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
231 init_size = ni->initialized_size;
232 i_size = i_size_read(vi);
233 read_unlock_irqrestore(&ni->size_lock, flags);
234 if (unlikely(init_size > i_size)) {
235 /* Race with shrinking truncate. */
238 zblock = (init_size + blocksize - 1) >> blocksize_bits;
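/*
 * Worked example (illustrative assumption): with 512-byte blocks, an
 * allocated_size of 0x3200 gives lblock = 0x19 (the first block beyond
 * the allocation) and an initialized_size of 0x3000 gives zblock = 0x18;
 * buffers at or beyond zblock are zeroed below instead of being read
 * from disk.
 */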
240 /* Loop through all the buffers in the page. */
246 if (unlikely(buffer_uptodate(bh)))
248 if (unlikely(buffer_mapped(bh))) {
252 bh->b_bdev = vol->sb->s_bdev;
253 /* Is the block within the allowed limits? */
254 if (iblock < lblock) {
255 bool is_retry = false;
257 /* Convert iblock into corresponding vcn and offset. */
258 vcn = (VCN)iblock << blocksize_bits >>
259 vol->cluster_size_bits;
260 vcn_ofs = ((VCN)iblock << blocksize_bits) &
261 vol->cluster_size_mask;
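/*
 * Worked example (illustrative assumption): with 4KiB clusters and
 * 512-byte blocks, iblock 9 corresponds to byte offset 0x1200 in the
 * attribute, i.e. vcn = 1 and vcn_ofs = 0x200 within that cluster.
 */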
264 down_read(&ni->runlist.lock);
267 if (likely(rl != NULL)) {
268 /* Seek to element containing target vcn. */
269 while (rl->length && rl[1].vcn <= vcn)
271 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
273 lcn = LCN_RL_NOT_MAPPED;
274 /* Successful remap. */
276 /* Setup buffer head to correct block. */
277 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
278 + vcn_ofs) >> blocksize_bits;
279 set_buffer_mapped(bh);
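/*
 * Worked example (illustrative assumption): continuing the numbers
 * above, if the runlist maps vcn 1 to lcn 0x1000 on a volume with 4KiB
 * clusters and 512-byte blocks, then b_blocknr = ((0x1000 << 12) +
 * 0x200) >> 9 = 0x8001, i.e. the second 512-byte device block of that
 * cluster.
 */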
280 /* Only read initialized data blocks. */
281 if (iblock < zblock) {
285 /* Fully non-initialized data block, zero it. */
288 /* It is a hole, need to zero it. */
291 /* If first try and runlist unmapped, map and retry. */
292 if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
295 * Attempt to map runlist, dropping lock for the duration.
298 up_read(&ni->runlist.lock);
299 err = ntfs_map_runlist(ni, vcn);
301 goto lock_retry_remap;
304 up_read(&ni->runlist.lock);
306 * If buffer is outside the runlist, treat it as a
307 * hole. This can happen due to concurrent truncate
310 if (err == -ENOENT || lcn == LCN_ENOENT) {
314 /* Hard error, zero out region. */
319 ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
320 "attribute type 0x%x, vcn 0x%llx, "
321 "offset 0x%x because its location on "
322 "disk could not be determined%s "
323 "(error code %i).", ni->mft_no,
324 ni->type, (unsigned long long)vcn,
325 vcn_ofs, is_retry ? " even after "
326 "retrying" : "", err);
329 * Either iblock was outside lblock limits or
330 * ntfs_rl_vcn_to_lcn() returned error. Just zero that portion
331 * of the page and set the buffer uptodate.
334 bh->b_blocknr = -1UL;
335 clear_buffer_mapped(bh);
337 zero_user(page, i * blocksize, blocksize);
339 set_buffer_uptodate(bh);
340 } while (i++, iblock++, (bh = bh->b_this_page) != head);
342 /* Release the lock if we took it. */
344 up_read(&ni->runlist.lock);
346 /* Check we have at least one buffer ready for i/o. */
348 struct buffer_head *tbh;
350 /* Lock the buffers. */
351 for (i = 0; i < nr; i++) {
354 tbh->b_end_io = ntfs_end_buffer_async_read;
355 set_buffer_async_read(tbh);
357 /* Finally, start i/o on the buffers. */
358 for (i = 0; i < nr; i++) {
360 if (likely(!buffer_uptodate(tbh)))
361 submit_bh(READ, tbh);
363 ntfs_end_buffer_async_read(tbh, 1);
367 /* No i/o was scheduled on any of the buffers. */
368 if (likely(!PageError(page)))
369 SetPageUptodate(page);
370 else /* Signal synchronous i/o error. */
377 * ntfs_readpage - fill a @page of a @file with data from the device
378 * @file: open file to which the page @page belongs or NULL
379 * @page: page cache page to fill with data
381 * For non-resident attributes, ntfs_readpage() fills the @page of the open
382 * file @file by calling the ntfs version of the generic block_read_full_page()
383 * function, ntfs_read_block(), which in turn creates and reads in the buffers
384 * associated with the page asynchronously.
386 * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
387 * data from the mft record (which at this stage is most likely in memory) and
388 * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
389 * even if the mft record is not cached at this point in time, we need to wait
390 * for it to be read in before we can do the copy.
392 * Return 0 on success and -errno on error.
394 static int ntfs_readpage(struct file *file, struct page *page)
398 ntfs_inode *ni, *base_ni;
400 ntfs_attr_search_ctx *ctx;
407 BUG_ON(!PageLocked(page));
408 vi = page->mapping->host;
409 i_size = i_size_read(vi);
410 /* Is the page fully outside i_size? (truncate in progress) */
411 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
413 zero_user(page, 0, PAGE_CACHE_SIZE);
414 ntfs_debug("Read outside i_size - truncated?");
418 * This can potentially happen because we clear PageUptodate() during
419 * ntfs_writepage() of MstProtected() attributes.
421 if (PageUptodate(page)) {
427 * Only $DATA attributes can be encrypted and only unnamed $DATA
428 * attributes can be compressed. Index root can have the flags set but
429 * this means to create compressed/encrypted files, not that the
430 * attribute is compressed/encrypted. Note we need to check for
431 * AT_INDEX_ALLOCATION since this is the type of both directory and
434 if (ni->type != AT_INDEX_ALLOCATION) {
435 /* If attribute is encrypted, deny access, just like NT4. */
436 if (NInoEncrypted(ni)) {
437 BUG_ON(ni->type != AT_DATA);
441 /* Compressed data streams are handled in compress.c. */
442 if (NInoNonResident(ni) && NInoCompressed(ni)) {
443 BUG_ON(ni->type != AT_DATA);
444 BUG_ON(ni->name_len);
445 return ntfs_read_compressed_block(page);
448 /* NInoNonResident() == NInoIndexAllocPresent() */
449 if (NInoNonResident(ni)) {
450 /* Normal, non-resident data stream. */
451 return ntfs_read_block(page);
454 * Attribute is resident, implying it is not compressed or encrypted.
455 * This also means the attribute is smaller than an mft record and
456 * hence smaller than a page, so can simply zero out any pages with
457 * index above 0. Note the attribute can actually be marked compressed
458 * but if it is resident the actual data is not compressed so we are
459 * ok to ignore the compressed flag here.
461 if (unlikely(page->index > 0)) {
462 zero_user(page, 0, PAGE_CACHE_SIZE);
468 base_ni = ni->ext.base_ntfs_ino;
469 /* Map, pin, and lock the mft record. */
470 mrec = map_mft_record(base_ni);
476 * If a parallel write made the attribute non-resident, drop the mft
477 * record and retry the readpage.
479 if (unlikely(NInoNonResident(ni))) {
480 unmap_mft_record(base_ni);
483 ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
484 if (unlikely(!ctx)) {
488 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
489 CASE_SENSITIVE, 0, NULL, 0, ctx);
491 goto put_unm_err_out;
492 attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
493 read_lock_irqsave(&ni->size_lock, flags);
494 if (unlikely(attr_len > ni->initialized_size))
495 attr_len = ni->initialized_size;
496 i_size = i_size_read(vi);
497 read_unlock_irqrestore(&ni->size_lock, flags);
498 if (unlikely(attr_len > i_size)) {
499 /* Race with shrinking truncate. */
502 addr = kmap_atomic(page);
503 /* Copy the data to the page. */
504 memcpy(addr, (u8*)ctx->attr +
505 le16_to_cpu(ctx->attr->data.resident.value_offset),
507 /* Zero the remainder of the page. */
508 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
509 flush_dcache_page(page);
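/*
 * Illustrative example (an assumption): for a 200-byte resident unnamed
 * $DATA attribute, attr_len is 200, so bytes 0..199 of the page are
 * copied straight from the attribute value in the mft record and bytes
 * 200..PAGE_CACHE_SIZE-1 are zeroed by the memset above.
 */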
512 ntfs_attr_put_search_ctx(ctx);
514 unmap_mft_record(base_ni);
516 SetPageUptodate(page);
525 * ntfs_write_block - write a @page to the backing store
526 * @page: page cache page to write out
527 * @wbc: writeback control structure
529 * This function is for writing pages belonging to non-resident, non-mst
530 * protected attributes to their backing store.
532 * For a page with buffers, map and write the dirty buffers asynchronously
533 * under page writeback. For a page without buffers, create buffers for the
534 * page, then proceed as above.
536 * If a page doesn't have buffers the page dirty state is definitive. If a page
537 * does have buffers, the page dirty state is just a hint, and the buffer dirty
538 * state is definitive. (A hint which has rules: dirty buffers against a clean
539 * page is illegal. Other combinations are legal and need to be handled. In
540 * particular, a dirty page containing clean buffers is one such case.)
542 * Return 0 on success and -errno on error.
544 * Based on ntfs_read_block() and __block_write_full_page().
546 static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
550 s64 initialized_size;
552 sector_t block, dblock, iblock;
557 struct buffer_head *bh, *head;
559 unsigned int blocksize, vcn_ofs;
561 bool need_end_writeback;
562 unsigned char blocksize_bits;
564 vi = page->mapping->host;
568 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
569 "0x%lx.", ni->mft_no, ni->type, page->index);
571 BUG_ON(!NInoNonResident(ni));
572 BUG_ON(NInoMstProtected(ni));
573 blocksize = vol->sb->s_blocksize;
574 blocksize_bits = vol->sb->s_blocksize_bits;
575 if (!page_has_buffers(page)) {
576 BUG_ON(!PageUptodate(page));
577 create_empty_buffers(page, blocksize,
578 (1 << BH_Uptodate) | (1 << BH_Dirty));
579 if (unlikely(!page_has_buffers(page))) {
580 ntfs_warning(vol->sb, "Error allocating page "
581 "buffers. Redirtying page so we try "
584 * Put the page back on mapping->dirty_pages, but leave
585 * its buffers' dirty state as-is.
587 redirty_page_for_writepage(wbc, page);
592 bh = head = page_buffers(page);
595 /* NOTE: Different naming scheme to ntfs_read_block()! */
597 /* The first block in the page. */
598 block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
600 read_lock_irqsave(&ni->size_lock, flags);
601 i_size = i_size_read(vi);
602 initialized_size = ni->initialized_size;
603 read_unlock_irqrestore(&ni->size_lock, flags);
605 /* The first out of bounds block for the data size. */
606 dblock = (i_size + blocksize - 1) >> blocksize_bits;
608 /* The last (fully or partially) initialized block. */
609 iblock = initialized_size >> blocksize_bits;
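/*
 * Worked example (illustrative assumption): with 512-byte blocks, an
 * i_size of 0x2345 gives dblock = 0x12 (the first block wholly beyond
 * the data) and an initialized_size of 0x2100 gives iblock = 0x10, the
 * last, partially initialized block.
 */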
612 * Be very careful. We have no exclusion from __set_page_dirty_buffers
613 * here, and the (potentially unmapped) buffers may become dirty at
614 * any time. If a buffer becomes dirty here after we've inspected it
615 * then we just miss that fact, and the page stays dirty.
617 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
618 * handle that here by just cleaning them.
622 * Loop through all the buffers in the page, mapping all the dirty
623 * buffers to disk addresses and handling any aliases from the
624 * underlying block device's mapping.
629 bool is_retry = false;
631 if (unlikely(block >= dblock)) {
633 * Mapped buffers outside i_size will occur, because
634 * this page can be outside i_size when there is a
635 * truncate in progress. The contents of such buffers
636 * were zeroed by ntfs_writepage().
638 * FIXME: What about the small race window where
639 * ntfs_writepage() has not done any clearing because
640 * the page was within i_size but before we get here,
641 * vmtruncate() modifies i_size?
643 clear_buffer_dirty(bh);
644 set_buffer_uptodate(bh);
648 /* Clean buffers are not written out, so no need to map them. */
649 if (!buffer_dirty(bh))
652 /* Make sure we have enough initialized size. */
653 if (unlikely((block >= iblock) &&
654 (initialized_size < i_size))) {
656 * If this page is fully outside initialized size, zero
657 * out all pages between the current initialized size
658 * and the current page. Just use ntfs_readpage() to do
659 * the zeroing transparently.
661 if (block > iblock) {
664 // - read_cache_page()
665 // Again for each page do:
666 // - wait_on_page_locked()
667 // - Check (PageUptodate(page) &&
669 // Update initialized size in the attribute and
671 // Again, for each page do:
672 // __set_page_dirty_buffers();
673 // page_cache_release()
674 // We don't need to wait on the writes.
678 * The current page straddles initialized size. Zero
679 * all non-uptodate buffers and set them uptodate (and
680 * dirty?). Note, there aren't any non-uptodate buffers
681 * if the page is uptodate.
682 * FIXME: For an uptodate page, the buffers may need to
683 * be written out because they were not initialized on
686 if (!PageUptodate(page)) {
688 // Zero any non-uptodate buffers up to i_size.
689 // Set them uptodate and dirty.
692 // Update initialized size in the attribute and in the
693 // inode (up to i_size).
695 // FIXME: This is inefficient. Try to batch the two
696 // size changes to happen in one go.
697 ntfs_error(vol->sb, "Writing beyond initialized size "
698 "is not supported yet. Sorry.");
701 // Do NOT set_buffer_new() BUT DO clear buffer range
702 // outside write request range.
703 // set_buffer_uptodate() on complete buffers as well as
704 // set_buffer_dirty().
707 /* No need to map buffers that are already mapped. */
708 if (buffer_mapped(bh))
711 /* Unmapped, dirty buffer. Need to map it. */
712 bh->b_bdev = vol->sb->s_bdev;
714 /* Convert block into corresponding vcn and offset. */
715 vcn = (VCN)block << blocksize_bits;
716 vcn_ofs = vcn & vol->cluster_size_mask;
717 vcn >>= vol->cluster_size_bits;
720 down_read(&ni->runlist.lock);
723 if (likely(rl != NULL)) {
724 /* Seek to element containing target vcn. */
725 while (rl->length && rl[1].vcn <= vcn)
727 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
729 lcn = LCN_RL_NOT_MAPPED;
730 /* Successful remap. */
732 /* Setup buffer head to point to correct block. */
733 bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
734 vcn_ofs) >> blocksize_bits;
735 set_buffer_mapped(bh);
738 /* It is a hole, need to instantiate it. */
739 if (lcn == LCN_HOLE) {
741 unsigned long *bpos, *bend;
743 /* Check if the buffer is zero. */
744 kaddr = kmap_atomic(page);
745 bpos = (unsigned long *)(kaddr + bh_offset(bh));
746 bend = (unsigned long *)((u8*)bpos + blocksize);
750 } while (likely(++bpos < bend));
751 kunmap_atomic(kaddr);
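/*
 * Illustrative note (assumption about the elided loop body): the scan
 * above walks the buffer one unsigned long at a time, e.g. a 512-byte
 * buffer is checked as 64 words on a 64-bit machine; if every word is
 * zero the buffer can stay a hole and its dirty bit is simply cleared
 * below instead of a cluster being allocated for it.
 */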
754 * Buffer is zero and sparse, no need to write
758 clear_buffer_dirty(bh);
761 // TODO: Instantiate the hole.
762 // clear_buffer_new(bh);
763 // unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
764 ntfs_error(vol->sb, "Writing into sparse regions is "
765 "not supported yet. Sorry.");
769 /* If first try and runlist unmapped, map and retry. */
770 if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
773 * Attempt to map runlist, dropping lock for the duration.
776 up_read(&ni->runlist.lock);
777 err = ntfs_map_runlist(ni, vcn);
779 goto lock_retry_remap;
782 up_read(&ni->runlist.lock);
784 * If buffer is outside the runlist, truncate has cut it out
785 * of the runlist. Just clean and clear the buffer and set it
786 * uptodate so it can get discarded by the VM.
788 if (err == -ENOENT || lcn == LCN_ENOENT) {
790 clear_buffer_dirty(bh);
791 zero_user(page, bh_offset(bh), blocksize);
792 set_buffer_uptodate(bh);
796 /* Failed to map the buffer, even after retrying. */
800 ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
801 "attribute type 0x%x, vcn 0x%llx, offset 0x%x "
802 "because its location on disk could not be "
803 "determined%s (error code %i).", ni->mft_no,
804 ni->type, (unsigned long long)vcn,
805 vcn_ofs, is_retry ? " even after "
806 "retrying" : "", err);
808 } while (block++, (bh = bh->b_this_page) != head);
810 /* Release the lock if we took it. */
812 up_read(&ni->runlist.lock);
814 /* For the error case, need to reset bh to the beginning. */
817 /* Just an optimization, so ->readpage() is not called later. */
818 if (unlikely(!PageUptodate(page))) {
821 if (!buffer_uptodate(bh)) {
826 } while ((bh = bh->b_this_page) != head);
828 SetPageUptodate(page);
831 /* Setup all mapped, dirty buffers for async write i/o. */
833 if (buffer_mapped(bh) && buffer_dirty(bh)) {
835 if (test_clear_buffer_dirty(bh)) {
836 BUG_ON(!buffer_uptodate(bh));
837 mark_buffer_async_write(bh);
840 } else if (unlikely(err)) {
842 * For the error case. The buffer may have been set
843 * dirty during attachment to a dirty page.
846 clear_buffer_dirty(bh);
848 } while ((bh = bh->b_this_page) != head);
851 // TODO: Remove the -EOPNOTSUPP check later on...
852 if (unlikely(err == -EOPNOTSUPP))
854 else if (err == -ENOMEM) {
855 ntfs_warning(vol->sb, "Error allocating memory. "
856 "Redirtying page so we try again "
859 * Put the page back on mapping->dirty_pages, but
860 * leave its buffer's dirty state as-is.
862 redirty_page_for_writepage(wbc, page);
868 BUG_ON(PageWriteback(page));
869 set_page_writeback(page); /* Keeps try_to_free_buffers() away. */
871 /* Submit the prepared buffers for i/o. */
872 need_end_writeback = true;
874 struct buffer_head *next = bh->b_this_page;
875 if (buffer_async_write(bh)) {
876 submit_bh(WRITE, bh);
877 need_end_writeback = false;
880 } while (bh != head);
883 /* If no i/o was started, need to end_page_writeback(). */
884 if (unlikely(need_end_writeback))
885 end_page_writeback(page);
892 * ntfs_write_mst_block - write a @page to the backing store
893 * @page: page cache page to write out
894 * @wbc: writeback control structure
896 * This function is for writing pages belonging to non-resident, mst protected
897 * attributes to their backing store. The only supported attributes are index
898 * allocation and $MFT/$DATA. Both directory inodes and index inodes are
899 * supported for the index allocation case.
901 * The page must remain locked for the duration of the write because we apply
902 * the mst fixups, write, and then undo the fixups, so if we were to unlock the
903 * page before undoing the fixups, any other user of the page will see the
904 * page contents as corrupt.
906 * We clear the page uptodate flag for the duration of the function to ensure
907 * exclusion for the $MFT/$DATA case against someone mapping an mft record we
908 * are about to apply the mst fixups to.
910 * Return 0 on success and -errno on error.
912 * Based on ntfs_write_block(), ntfs_mft_writepage(), and
913 * write_mft_record_nolock().
915 static int ntfs_write_mst_block(struct page *page,
916 struct writeback_control *wbc)
918 sector_t block, dblock, rec_block;
919 struct inode *vi = page->mapping->host;
920 ntfs_inode *ni = NTFS_I(vi);
921 ntfs_volume *vol = ni->vol;
923 unsigned int rec_size = ni->itype.index.block_size;
924 ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
925 struct buffer_head *bh, *head, *tbh, *rec_start_bh;
926 struct buffer_head *bhs[MAX_BUF_PER_PAGE];
928 int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
929 unsigned bh_size, rec_size_bits;
930 bool sync, is_mft, page_is_dirty, rec_is_dirty;
931 unsigned char bh_size_bits;
933 ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
934 "0x%lx.", vi->i_ino, ni->type, page->index);
935 BUG_ON(!NInoNonResident(ni));
936 BUG_ON(!NInoMstProtected(ni));
937 is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
939 * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
940 * in its page cache were to be marked dirty. However this should
941 * never happen with the current driver and, since we do not
942 * handle this case here, we do want to BUG(), at least for now.
944 BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
945 (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
946 bh_size = vol->sb->s_blocksize;
947 bh_size_bits = vol->sb->s_blocksize_bits;
948 max_bhs = PAGE_CACHE_SIZE / bh_size;
950 BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
952 /* Were we called for sync purposes? */
953 sync = (wbc->sync_mode == WB_SYNC_ALL);
955 /* Make sure we have mapped buffers. */
956 bh = head = page_buffers(page);
959 rec_size_bits = ni->itype.index.block_size_bits;
960 BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
961 bhs_per_rec = rec_size >> bh_size_bits;
962 BUG_ON(!bhs_per_rec);
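/*
 * Worked example (illustrative assumption): for $MFT with 1KiB mft
 * records and 512-byte buffers, bhs_per_rec = 2, so every second buffer
 * in the page starts a new ntfs record; with 4KiB pages, max_bhs = 8
 * and the page holds four records.
 */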
964 /* The first block in the page. */
965 rec_block = block = (sector_t)page->index <<
966 (PAGE_CACHE_SHIFT - bh_size_bits);
968 /* The first out of bounds block for the data size. */
969 dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
972 err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
973 page_is_dirty = rec_is_dirty = false;
976 bool is_retry = false;
978 if (likely(block < rec_block)) {
979 if (unlikely(block >= dblock)) {
980 clear_buffer_dirty(bh);
981 set_buffer_uptodate(bh);
985 * This block is not the first one in the record. We
986 * ignore the buffer's dirty state because we could
987 * have raced with a parallel mark_ntfs_record_dirty().
991 if (unlikely(err2)) {
993 clear_buffer_dirty(bh);
996 } else /* if (block == rec_block) */ {
997 BUG_ON(block > rec_block);
998 /* This block is the first one in the record. */
999 rec_block += bhs_per_rec;
1001 if (unlikely(block >= dblock)) {
1002 clear_buffer_dirty(bh);
1005 if (!buffer_dirty(bh)) {
1006 /* Clean records are not written out. */
1007 rec_is_dirty = false;
1010 rec_is_dirty = true;
1013 /* Need to map the buffer if it is not mapped already. */
1014 if (unlikely(!buffer_mapped(bh))) {
1017 unsigned int vcn_ofs;
1019 bh->b_bdev = vol->sb->s_bdev;
1020 /* Obtain the vcn and offset of the current block. */
1021 vcn = (VCN)block << bh_size_bits;
1022 vcn_ofs = vcn & vol->cluster_size_mask;
1023 vcn >>= vol->cluster_size_bits;
1026 down_read(&ni->runlist.lock);
1027 rl = ni->runlist.rl;
1029 if (likely(rl != NULL)) {
1030 /* Seek to element containing target vcn. */
1031 while (rl->length && rl[1].vcn <= vcn)
1033 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
1035 lcn = LCN_RL_NOT_MAPPED;
1036 /* Successful remap. */
1037 if (likely(lcn >= 0)) {
1038 /* Setup buffer head to correct block. */
1039 bh->b_blocknr = ((lcn <<
1040 vol->cluster_size_bits) +
1041 vcn_ofs) >> bh_size_bits;
1042 set_buffer_mapped(bh);
1045 * Remap failed. Retry to map the runlist once
1046 * unless we are working on $MFT which always
1047 * has the whole of its runlist in memory.
1049 if (!is_mft && !is_retry &&
1050 lcn == LCN_RL_NOT_MAPPED) {
1053 * Attempt to map runlist, dropping
1054 * lock for the duration.
1056 up_read(&ni->runlist.lock);
1057 err2 = ntfs_map_runlist(ni, vcn);
1059 goto lock_retry_remap;
1060 if (err2 == -ENOMEM)
1061 page_is_dirty = true;
1066 up_read(&ni->runlist.lock);
1068 /* Hard error. Abort writing this record. */
1069 if (!err || err == -ENOMEM)
1072 ntfs_error(vol->sb, "Cannot write ntfs record "
1073 "0x%llx (inode 0x%lx, "
1074 "attribute type 0x%x) because "
1075 "its location on disk could "
1076 "not be determined (error "
1080 vol->mft_record_size_bits,
1081 ni->mft_no, ni->type,
1084 * If this is not the first buffer, remove the
1085 * buffers in this record from the list of
1086 * buffers to write and clear their dirty bit
1087 * unless the error is -ENOMEM.
1089 if (rec_start_bh != bh) {
1090 while (bhs[--nr_bhs] != rec_start_bh)
1092 if (err2 != -ENOMEM) {
1096 } while ((rec_start_bh =
1105 BUG_ON(!buffer_uptodate(bh));
1106 BUG_ON(nr_bhs >= max_bhs);
1108 } while (block++, (bh = bh->b_this_page) != head);
1110 up_read(&ni->runlist.lock);
1111 /* If there were no dirty buffers, we are done. */
1114 /* Map the page so we can access its contents. */
1116 /* Clear the page uptodate flag whilst the mst fixups are applied. */
1117 BUG_ON(!PageUptodate(page));
1118 ClearPageUptodate(page);
1119 for (i = 0; i < nr_bhs; i++) {
1122 /* Skip buffers which are not at the beginning of records. */
1123 if (i % bhs_per_rec)
1126 ofs = bh_offset(tbh);
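/*
 * Worked example (illustrative assumption): with 4KiB pages and 1KiB
 * mft records, a record starting at ofs 0x400 of the page at index 2
 * lies at byte offset 0x2400 of $MFT/$DATA, so the mft record number
 * computed below comes out as 9 once the offset is shifted down by the
 * mft record size bits.
 */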
1129 unsigned long mft_no;
1131 /* Get the mft record number. */
1132 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
1134 /* Check whether to write this mft record. */
1136 if (!ntfs_may_write_mft_record(vol, mft_no,
1137 (MFT_RECORD*)(kaddr + ofs), &tni)) {
1139 * The record should not be written. This
1140 * means we need to redirty the page before
1143 page_is_dirty = true;
1145 * Remove the buffers in this mft record from
1146 * the list of buffers to write.
1150 } while (++i % bhs_per_rec);
1154 * The record should be written. If a locked ntfs
1155 * inode was returned, add it to the array of locked
1159 locked_nis[nr_locked_nis++] = tni;
1161 /* Apply the mst protection fixups. */
1162 err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
1164 if (unlikely(err2)) {
1165 if (!err || err == -ENOMEM)
1167 ntfs_error(vol->sb, "Failed to apply mst fixups "
1168 "(inode 0x%lx, attribute type 0x%x, "
1169 "page index 0x%lx, page offset 0x%x)!"
1170 " Unmount and run chkdsk.", vi->i_ino,
1171 ni->type, page->index, ofs);
1173 * Mark all the buffers in this record clean as we do
1174 * not want to write corrupt data to disk.
1177 clear_buffer_dirty(bhs[i]);
1179 } while (++i % bhs_per_rec);
1184 /* If no records are to be written out, we are done. */
1187 flush_dcache_page(page);
1188 /* Lock buffers and start synchronous write i/o on them. */
1189 for (i = 0; i < nr_bhs; i++) {
1193 if (!trylock_buffer(tbh))
1195 /* The buffer dirty state is now irrelevant, just clean it. */
1196 clear_buffer_dirty(tbh);
1197 BUG_ON(!buffer_uptodate(tbh));
1198 BUG_ON(!buffer_mapped(tbh));
1200 tbh->b_end_io = end_buffer_write_sync;
1201 submit_bh(WRITE, tbh);
1203 /* Synchronize the mft mirror now if not @sync. */
1204 if (is_mft && !sync)
1207 /* Wait on i/o completion of buffers. */
1208 for (i = 0; i < nr_bhs; i++) {
1212 wait_on_buffer(tbh);
1213 if (unlikely(!buffer_uptodate(tbh))) {
1214 ntfs_error(vol->sb, "I/O error while writing ntfs "
1215 "record buffer (inode 0x%lx, "
1216 "attribute type 0x%x, page index "
1217 "0x%lx, page offset 0x%lx)! Unmount "
1218 "and run chkdsk.", vi->i_ino, ni->type,
1219 page->index, bh_offset(tbh));
1220 if (!err || err == -ENOMEM)
1223 * Set the buffer uptodate so the page and buffer
1224 * states do not become out of sync.
1226 set_buffer_uptodate(tbh);
1229 /* If @sync, now synchronize the mft mirror. */
1230 if (is_mft && sync) {
1232 for (i = 0; i < nr_bhs; i++) {
1233 unsigned long mft_no;
1237 * Skip buffers which are not at the beginning of
1240 if (i % bhs_per_rec)
1243 /* Skip removed buffers (and hence records). */
1246 ofs = bh_offset(tbh);
1247 /* Get the mft record number. */
1248 mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
1250 if (mft_no < vol->mftmirr_size)
1251 ntfs_sync_mft_mirror(vol, mft_no,
1252 (MFT_RECORD*)(kaddr + ofs),
1258 /* Remove the mst protection fixups again. */
1259 for (i = 0; i < nr_bhs; i++) {
1260 if (!(i % bhs_per_rec)) {
1264 post_write_mst_fixup((NTFS_RECORD*)(kaddr +
1268 flush_dcache_page(page);
1270 /* Unlock any locked inodes. */
1271 while (nr_locked_nis-- > 0) {
1272 ntfs_inode *tni, *base_tni;
1274 tni = locked_nis[nr_locked_nis];
1275 /* Get the base inode. */
1276 mutex_lock(&tni->extent_lock);
1277 if (tni->nr_extents >= 0)
1280 base_tni = tni->ext.base_ntfs_ino;
1283 mutex_unlock(&tni->extent_lock);
1284 ntfs_debug("Unlocking %s inode 0x%lx.",
1285 tni == base_tni ? "base" : "extent",
1287 mutex_unlock(&tni->mrec_lock);
1288 atomic_dec(&tni->count);
1289 iput(VFS_I(base_tni));
1291 SetPageUptodate(page);
1294 if (unlikely(err && err != -ENOMEM)) {
1296 * Set page error if there is only one ntfs record in the page.
1297 * Otherwise we would lose per-record granularity.
1299 if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
1303 if (page_is_dirty) {
1304 ntfs_debug("Page still contains one or more dirty ntfs "
1305 "records. Redirtying the page starting at "
1306 "record 0x%lx.", page->index <<
1307 (PAGE_CACHE_SHIFT - rec_size_bits));
1308 redirty_page_for_writepage(wbc, page);
1312 * Keep the VM happy. This must be done otherwise the
1313 * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
1314 * the page is clean.
1316 BUG_ON(PageWriteback(page));
1317 set_page_writeback(page);
1319 end_page_writeback(page);
1322 ntfs_debug("Done.");
1327 * ntfs_writepage - write a @page to the backing store
1328 * @page: page cache page to write out
1329 * @wbc: writeback control structure
1331 * This is called from the VM when it wants to have a dirty ntfs page cache
1332 * page cleaned. The VM has already locked the page and marked it clean.
1334 * For non-resident attributes, ntfs_writepage() writes the @page by calling
1335 * the ntfs version of the generic block_write_full_page() function,
1336 * ntfs_write_block(), which in turn if necessary creates and writes the
1337 * buffers associated with the page asynchronously.
1339 * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
1340 * the data to the mft record (which at this stage is most likely in memory).
1341 * The mft record is then marked dirty and written out asynchronously via the
1342 * vfs inode dirty code path for the inode the mft record belongs to or via the
1343 * vm page dirty code path for the page the mft record is in.
1345 * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
1347 * Return 0 on success and -errno on error.
1349 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
1352 struct inode *vi = page->mapping->host;
1353 ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
1355 ntfs_attr_search_ctx *ctx = NULL;
1356 MFT_RECORD *m = NULL;
1361 BUG_ON(!PageLocked(page));
1362 i_size = i_size_read(vi);
1363 /* Is the page fully outside i_size? (truncate in progress) */
1364 if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
1365 PAGE_CACHE_SHIFT)) {
1367 * The page may have dirty, unmapped buffers. Make them
1368 * freeable here, so the page does not leak.
1370 block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
1372 ntfs_debug("Write outside i_size - truncated?");
1376 * Only $DATA attributes can be encrypted and only unnamed $DATA
1377 * attributes can be compressed. Index root can have the flags set but
1378 * this means to create compressed/encrypted files, not that the
1379 * attribute is compressed/encrypted. Note we need to check for
1380 * AT_INDEX_ALLOCATION since this is the type of both directory and
1383 if (ni->type != AT_INDEX_ALLOCATION) {
1384 /* If file is encrypted, deny access, just like NT4. */
1385 if (NInoEncrypted(ni)) {
1387 BUG_ON(ni->type != AT_DATA);
1388 ntfs_debug("Denying write access to encrypted file.");
1391 /* Compressed data streams are handled in compress.c. */
1392 if (NInoNonResident(ni) && NInoCompressed(ni)) {
1393 BUG_ON(ni->type != AT_DATA);
1394 BUG_ON(ni->name_len);
1395 // TODO: Implement and replace this with
1396 // return ntfs_write_compressed_block(page);
1398 ntfs_error(vi->i_sb, "Writing to compressed files is "
1399 "not supported yet. Sorry.");
1402 // TODO: Implement and remove this check.
1403 if (NInoNonResident(ni) && NInoSparse(ni)) {
1405 ntfs_error(vi->i_sb, "Writing to sparse files is not "
1406 "supported yet. Sorry.");
1410 /* NInoNonResident() == NInoIndexAllocPresent() */
1411 if (NInoNonResident(ni)) {
1412 /* We have to zero every time due to mmap-at-end-of-file. */
1413 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
1414 /* The page straddles i_size. */
1415 unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
1416 zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
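/*
 * Illustrative example (an assumption): with 4KiB pages and an i_size
 * of 0x1234, the page at index 1 straddles i_size, ofs = 0x234, and
 * bytes 0x234..0xFFF of the page are zeroed here so that stale data
 * written through an mmap() beyond the end of file never reaches disk.
 */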
1418 /* Handle mst protected attributes. */
1419 if (NInoMstProtected(ni))
1420 return ntfs_write_mst_block(page, wbc);
1421 /* Normal, non-resident data stream. */
1422 return ntfs_write_block(page, wbc);
1425 * Attribute is resident, implying it is not compressed, encrypted, or
1426 * mst protected. This also means the attribute is smaller than an mft
1427 * record and hence smaller than a page, so can simply return error on
1428 * any pages with index above 0. Note the attribute can actually be
1429 * marked compressed but if it is resident the actual data is not
1430 * compressed so we are ok to ignore the compressed flag here.
1432 BUG_ON(page_has_buffers(page));
1433 BUG_ON(!PageUptodate(page));
1434 if (unlikely(page->index > 0)) {
1435 ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. "
1436 "Aborting write.", page->index);
1437 BUG_ON(PageWriteback(page));
1438 set_page_writeback(page);
1440 end_page_writeback(page);
1446 base_ni = ni->ext.base_ntfs_ino;
1447 /* Map, pin, and lock the mft record. */
1448 m = map_mft_record(base_ni);
1456 * If a parallel write made the attribute non-resident, drop the mft
1457 * record and retry the writepage.
1459 if (unlikely(NInoNonResident(ni))) {
1460 unmap_mft_record(base_ni);
1461 goto retry_writepage;
1463 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1464 if (unlikely(!ctx)) {
1468 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1469 CASE_SENSITIVE, 0, NULL, 0, ctx);
1473 * Keep the VM happy. This must be done otherwise the radix-tree tag
1474 * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
1476 BUG_ON(PageWriteback(page));
1477 set_page_writeback(page);
1479 attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
1480 i_size = i_size_read(vi);
1481 if (unlikely(attr_len > i_size)) {
1482 /* Race with shrinking truncate or a failed truncate. */
1485 * If the truncate failed, fix it up now. If a concurrent
1486 * truncate, we do its job, so it does not have to do anything.
1488 err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
1490 /* Shrinking cannot fail. */
1493 addr = kmap_atomic(page);
1494 /* Copy the data from the page to the mft record. */
1495 memcpy((u8*)ctx->attr +
1496 le16_to_cpu(ctx->attr->data.resident.value_offset),
1498 /* Zero out of bounds area in the page cache page. */
1499 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
1500 kunmap_atomic(addr);
1501 flush_dcache_page(page);
1502 flush_dcache_mft_record_page(ctx->ntfs_ino);
1503 /* We are done with the page. */
1504 end_page_writeback(page);
1505 /* Finally, mark the mft record dirty, so it gets written back. */
1506 mark_mft_record_dirty(ctx->ntfs_ino);
1507 ntfs_attr_put_search_ctx(ctx);
1508 unmap_mft_record(base_ni);
1511 if (err == -ENOMEM) {
1512 ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
1513 "page so we try again later.");
1515 * Put the page back on mapping->dirty_pages, but leave its
1516 * buffers' dirty state as-is.
1518 redirty_page_for_writepage(wbc, page);
1521 ntfs_error(vi->i_sb, "Resident attribute write failed with "
1524 NVolSetErrors(ni->vol);
1528 ntfs_attr_put_search_ctx(ctx);
1530 unmap_mft_record(base_ni);
1534 #endif /* NTFS_RW */
1537 * ntfs_bmap - map logical file block to physical device block
1538 * @mapping: address space mapping to which the block to be mapped belongs
1539 * @block: logical block to map to its physical device block
1541 * For regular, non-resident files (i.e. not compressed and not encrypted), map
1542 * the logical @block belonging to the file described by the address space
1543 * mapping @mapping to its physical device block.
1545 * The size of the block is equal to the @s_blocksize field of the super block
1546 * of the mounted file system which is guaranteed to be smaller than or equal
1547 * to the cluster size thus the block is guaranteed to fit entirely inside the
1548 * cluster which means we do not need to care how many contiguous bytes are
1549 * available after the beginning of the block.
1551 * Return the physical device block if the mapping succeeded or 0 if the block
1552 * is sparse or there was an error.
1554 * Note: This is a problem if someone tries to run bmap() on $Boot system file
1555 * as that really is in block zero but there is nothing we can do. bmap() is
1556 * just broken in that respect (just like it cannot distinguish sparse from
1557 * not available or error).
1559 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
1564 unsigned long blocksize, flags;
1565 ntfs_inode *ni = NTFS_I(mapping->host);
1566 ntfs_volume *vol = ni->vol;
1568 unsigned char blocksize_bits, cluster_size_shift;
1570 ntfs_debug("Entering for mft_no 0x%lx, logical block 0x%llx.",
1571 ni->mft_no, (unsigned long long)block);
1572 if (ni->type != AT_DATA || !NInoNonResident(ni) || NInoEncrypted(ni)) {
1573 ntfs_error(vol->sb, "BMAP does not make sense for %s "
1574 "attributes, returning 0.",
1575 (ni->type != AT_DATA) ? "non-data" :
1576 (!NInoNonResident(ni) ? "resident" :
1580 /* None of these can happen. */
1581 BUG_ON(NInoCompressed(ni));
1582 BUG_ON(NInoMstProtected(ni));
1583 blocksize = vol->sb->s_blocksize;
1584 blocksize_bits = vol->sb->s_blocksize_bits;
1585 ofs = (s64)block << blocksize_bits;
1586 read_lock_irqsave(&ni->size_lock, flags);
1587 size = ni->initialized_size;
1588 i_size = i_size_read(VFS_I(ni));
1589 read_unlock_irqrestore(&ni->size_lock, flags);
1591 * If the offset is outside the initialized size or the block straddles
1592 * the initialized size then pretend it is a hole unless the
1593 * initialized size equals the file size.
1595 if (unlikely(ofs >= size || (ofs + blocksize > size && size < i_size)))
1597 cluster_size_shift = vol->cluster_size_bits;
1598 down_read(&ni->runlist.lock);
1599 lcn = ntfs_attr_vcn_to_lcn_nolock(ni, ofs >> cluster_size_shift, false);
1600 up_read(&ni->runlist.lock);
1601 if (unlikely(lcn < LCN_HOLE)) {
1603 * Step down to an integer to avoid gcc doing a long long
1604 * comparison in the switch when we know @lcn is between
1605 * LCN_HOLE and LCN_EIO (i.e. -1 to -5).
1607 * Otherwise older gcc (at least on some architectures) will
1608 * try to use __cmpdi2() which is of course not available in the kernel.
1614 * If the offset is out of bounds then pretend it is a
1619 ntfs_error(vol->sb, "Not enough memory to complete "
1620 "mapping for inode 0x%lx. "
1621 "Returning 0.", ni->mft_no);
1624 ntfs_error(vol->sb, "Failed to complete mapping for "
1625 "inode 0x%lx. Run chkdsk. "
1626 "Returning 0.", ni->mft_no);
1634 ntfs_debug("Done (returning hole).");
1638 * The block is really allocated and fulfils all our criteria.
1639 * Convert the cluster to units of block size and return the result.
1641 delta = ofs & vol->cluster_size_mask;
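/*
 * Worked example (illustrative assumption): with 4KiB clusters and
 * 512-byte blocks, a logical block at byte offset 0x600 into a cluster
 * that maps to lcn 0x100 yields delta = 0x600 and, in the conversion
 * below, a physical block of ((0x100 << 12) + 0x600) >> 9 = 0x803.
 */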
1642 if (unlikely(sizeof(block) < sizeof(lcn))) {
1643 block = lcn = ((lcn << cluster_size_shift) + delta) >>
1645 /* If the block number was truncated return 0. */
1646 if (unlikely(block != lcn)) {
1647 ntfs_error(vol->sb, "Physical block 0x%llx is too "
1648 "large to be returned, returning 0.",
1653 block = ((lcn << cluster_size_shift) + delta) >>
1655 ntfs_debug("Done (returning block 0x%llx).", (unsigned long long)lcn);
1660 * ntfs_normal_aops - address space operations for normal inodes and attributes
1662 * Note these are not used for compressed or mst protected inodes and attributes.
1665 const struct address_space_operations ntfs_normal_aops = {
1666 .readpage = ntfs_readpage,
1668 .writepage = ntfs_writepage,
1669 .set_page_dirty = __set_page_dirty_buffers,
1670 #endif /* NTFS_RW */
1672 .migratepage = buffer_migrate_page,
1673 .is_partially_uptodate = block_is_partially_uptodate,
1674 .error_remove_page = generic_error_remove_page,
1678 * ntfs_compressed_aops - address space operations for compressed inodes
1680 const struct address_space_operations ntfs_compressed_aops = {
1681 .readpage = ntfs_readpage,
1683 .writepage = ntfs_writepage,
1684 .set_page_dirty = __set_page_dirty_buffers,
1685 #endif /* NTFS_RW */
1686 .migratepage = buffer_migrate_page,
1687 .is_partially_uptodate = block_is_partially_uptodate,
1688 .error_remove_page = generic_error_remove_page,
1692 * ntfs_mst_aops - general address space operations for mst protected inodes
1695 const struct address_space_operations ntfs_mst_aops = {
1696 .readpage = ntfs_readpage, /* Fill page with data. */
1698 .writepage = ntfs_writepage, /* Write dirty page to disk. */
1699 .set_page_dirty = __set_page_dirty_nobuffers, /* Set the page dirty
1700 without touching the buffers
1701 belonging to the page. */
1702 #endif /* NTFS_RW */
1703 .migratepage = buffer_migrate_page,
1704 .is_partially_uptodate = block_is_partially_uptodate,
1705 .error_remove_page = generic_error_remove_page,
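/*
 * The three tables above differ mainly in how dirtying is handled:
 * ntfs_normal_aops and ntfs_compressed_aops dirty a page through its
 * buffers via __set_page_dirty_buffers(), while ntfs_mst_aops uses
 * __set_page_dirty_nobuffers() so that mark_ntfs_record_dirty() below
 * can decide per buffer, and hence per ntfs record, what is dirty.
 */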
1711 * mark_ntfs_record_dirty - mark an ntfs record dirty
1712 * @page: page containing the ntfs record to mark dirty
1713 * @ofs: byte offset within @page at which the ntfs record begins
1715 * Set the buffers and the page in which the ntfs record is located dirty.
1717 * The latter also marks the vfs inode the ntfs record belongs to dirty
1718 * (I_DIRTY_PAGES only).
1720 * If the page does not have buffers, we create them and set them uptodate.
1721 * The page may not be locked which is why we need to handle the buffers under
1722 * the mapping->private_lock. Once the buffers are marked dirty we no longer
1723 * need the lock since try_to_free_buffers() does not free dirty buffers.
1725 void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
1726 struct address_space *mapping = page->mapping;
1727 ntfs_inode *ni = NTFS_I(mapping->host);
1728 struct buffer_head *bh, *head, *buffers_to_free = NULL;
1729 unsigned int end, bh_size, bh_ofs;
1731 BUG_ON(!PageUptodate(page));
1732 end = ofs + ni->itype.index.block_size;
1733 bh_size = VFS_I(ni)->i_sb->s_blocksize;
1734 spin_lock(&mapping->private_lock);
1735 if (unlikely(!page_has_buffers(page))) {
1736 spin_unlock(&mapping->private_lock);
1737 bh = head = alloc_page_buffers(page, bh_size, 1);
1738 spin_lock(&mapping->private_lock);
1739 if (likely(!page_has_buffers(page))) {
1740 struct buffer_head *tail;
1743 set_buffer_uptodate(bh);
1745 bh = bh->b_this_page;
1747 tail->b_this_page = head;
1748 attach_page_buffers(page, head);
1750 buffers_to_free = bh;
1752 bh = head = page_buffers(page);
1755 bh_ofs = bh_offset(bh);
1756 if (bh_ofs + bh_size <= ofs)
1758 if (unlikely(bh_ofs >= end))
1760 set_buffer_dirty(bh);
1761 } while ((bh = bh->b_this_page) != head);
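/*
 * Worked example (illustrative assumption): for a 1KiB index record at
 * ofs 0x800 in a page with 512-byte buffers, end is 0xC00 and only the
 * buffers at offsets 0x800 and 0xA00 are set dirty by the loop above;
 * buffers before ofs and at or after end are skipped.
 */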
1762 spin_unlock(&mapping->private_lock);
1763 __set_page_dirty_nobuffers(page);
1764 if (unlikely(buffers_to_free)) {
1766 bh = buffers_to_free->b_this_page;
1767 free_buffer_head(buffers_to_free);
1768 buffers_to_free = bh;
1769 } while (buffers_to_free);
1773 #endif /* NTFS_RW */