These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/fs/mpage.c b/kernel/fs/mpage.c
index 3e79220..1480d3a 100644
--- a/kernel/fs/mpage.c
+++ b/kernel/fs/mpage.c
  * status of that page is hard.  See end_buffer_async_read() for the details.
  * There is no point in duplicating all that complexity.
  */
-static void mpage_end_io(struct bio *bio, int err)
+static void mpage_end_io(struct bio *bio)
 {
        struct bio_vec *bv;
        int i;
 
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
-               page_endio(page, bio_data_dir(bio), err);
+               page_endio(page, bio_data_dir(bio), bio->bi_error);
        }
 
        bio_put(bio);
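Note: in this kernel the completion status lives in the bio itself (bio->bi_error) and bi_end_io callbacks take only the bio, which is why mpage_end_io() loses its err argument. A minimal sketch of the producer side under that convention (example_complete() is an illustrative name, not part of this patch):

static void example_complete(struct bio *bio, int error)
{
        bio->bi_error = error;  /* record the error in the bio itself...  */
        bio_endio(bio);         /* ...then complete it; no error argument */
}
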
@@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                sector_t *last_block_in_bio, struct buffer_head *map_bh,
-               unsigned long *first_logical_block, get_block_t get_block)
+               unsigned long *first_logical_block, get_block_t get_block,
+               gfp_t gfp)
 {
        struct inode *inode = page->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
@@ -277,8 +278,7 @@ alloc_new:
                                goto out;
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-                               min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
-                               GFP_KERNEL);
+                               min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
                if (bio == NULL)
                        goto confused;
        }
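The bio above is no longer sized with bio_get_nr_vecs(): the block layer now splits oversized bios itself, so readers simply cap the request at BIO_MAX_PAGES, and the hard-coded GFP_KERNEL gives way to the gfp passed in by the caller. A sketch of the equivalent open-coded allocation, reusing the bdev, nr_pages, blocks[] and gfp visible in this function:

        /* cap at BIO_MAX_PAGES; the block layer splits the bio further
         * if the device cannot take it in one request */
        bio = bio_alloc(gfp, min_t(int, nr_pages, BIO_MAX_PAGES));
        if (bio) {
                bio->bi_bdev = bdev;
                bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
        }
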
@@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
+       gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
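mapping_gfp_constraint() intersects the caller's mask with the mapping's own gfp mask, so a filesystem that restricts its page-cache allocations (for instance to GFP_NOFS to avoid recursing into itself) keeps that restriction across readahead. The helper is roughly:

static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                                           gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}
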
@@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
                prefetchw(&page->flags);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping,
-                                       page->index, GFP_KERNEL)) {
+                                       page->index,
+                                       gfp)) {
                        bio = do_mpage_readpage(bio, page,
                                        nr_pages - page_idx,
                                        &last_block_in_bio, &map_bh,
                                        &first_logical_block,
-                                       get_block);
+                                       get_block, gfp);
                }
                page_cache_release(page);
        }
@@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block)
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
+       gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
        bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-                       &map_bh, &first_logical_block, get_block);
+                       &map_bh, &first_logical_block, get_block, gfp);
        if (bio)
                mpage_bio_submit(READ, bio);
        return 0;
@@ -482,6 +485,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);
        int ret = 0;
+       int wr = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
 
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
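The wr value introduced here submits data-integrity writeback (WB_SYNC_ALL) as WRITE_SYNC, so those bios are treated as synchronous by the block layer instead of queueing behind background I/O; ordinary background writeback keeps plain WRITE. An illustrative writeback_control for an integrity caller such as the fsync path (the field values are an assumption, not taken from this patch):

        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_ALL,  /* wait for every page */
                .nr_to_write    = LONG_MAX,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };
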
@@ -590,7 +594,7 @@ page_is_mapped:
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
        if (bio && mpd->last_block_in_bio != blocks[0] - 1)
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
 
 alloc_new:
        if (bio == NULL) {
@@ -602,9 +606,11 @@ alloc_new:
                        }
                }
                bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-                               bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
+                               BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
                if (bio == NULL)
                        goto confused;
+
+               wbc_init_bio(wbc, bio);
        }
 
        /*
@@ -612,9 +618,10 @@ alloc_new:
         * the confused fail path above (OOM) will be very confused when
         * it finds all bh marked clean (i.e. it will not write anything)
         */
+       wbc_account_io(wbc, page, PAGE_SIZE);
        length = first_unmapped << blkbits;
        if (bio_add_page(bio, page, length, 0) < length) {
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
                goto alloc_new;
        }
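wbc_account_io() charges every page added to the bio against the cgroup on whose behalf the writeback runs, pairing with the wbc_init_bio() call made right after the bio is allocated; both are no-ops without CONFIG_CGROUP_WRITEBACK. With cgroup writeback enabled, the association half of the pair is roughly:

static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
        if (wbc->wb)
                bio_associate_blkcg(bio, wbc->wb->blkcg_css);
}
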
 
@@ -624,7 +631,7 @@ alloc_new:
        set_page_writeback(page);
        unlock_page(page);
        if (boundary || (first_unmapped != blocks_per_page)) {
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
                if (boundary_block) {
                        write_boundary_block(boundary_bdev,
                                        boundary_block, 1 << blkbits);
@@ -636,7 +643,7 @@ alloc_new:
 
 confused:
        if (bio)
-               bio = mpage_bio_submit(WRITE, bio);
+               bio = mpage_bio_submit(wr, bio);
 
        if (mpd->use_writepage) {
                ret = mapping->a_ops->writepage(page, wbc);
@@ -692,8 +699,11 @@ mpage_writepages(struct address_space *mapping,
                };
 
                ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
-               if (mpd.bio)
-                       mpage_bio_submit(WRITE, mpd.bio);
+               if (mpd.bio) {
+                       int wr = (wbc->sync_mode == WB_SYNC_ALL ?
+                                 WRITE_SYNC : WRITE);
+                       mpage_bio_submit(wr, mpd.bio);
+               }
        }
        blk_finish_plug(&plug);
        return ret;
@@ -710,8 +720,11 @@ int mpage_writepage(struct page *page, get_block_t get_block,
                .use_writepage = 0,
        };
        int ret = __mpage_writepage(page, wbc, &mpd);
-       if (mpd.bio)
-               mpage_bio_submit(WRITE, mpd.bio);
+       if (mpd.bio) {
+               int wr = (wbc->sync_mode == WB_SYNC_ALL ?
+                         WRITE_SYNC : WRITE);
+               mpage_bio_submit(wr, mpd.bio);
+       }
        return ret;
 }
 EXPORT_SYMBOL(mpage_writepage);
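
For reference, these helpers are consumed through a filesystem's address_space_operations, and none of the callers need to change for the gfp or sync-mode handling above. A minimal sketch of such a wiring (the example_* names and example_get_block are hypothetical):

static int example_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, example_get_block);
}

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
        return mpage_writepage(page, example_get_block, wbc);
}

static const struct address_space_operations example_aops = {
        .readpage       = example_readpage,
        .writepage      = example_writepage,
};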