Upgrade to 4.4.50-rt62

diff --git a/kernel/fs/fuse/file.c b/kernel/fs/fuse/file.c
index 5ef05b5..8821c38 100644
@@ -96,17 +96,17 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
                         * Drop the release request when client does not
                         * implement 'open'
                         */
-                       req->background = 0;
+                       __clear_bit(FR_BACKGROUND, &req->flags);
                        iput(req->misc.release.inode);
                        fuse_put_request(ff->fc, req);
                } else if (sync) {
-                       req->background = 0;
+                       __clear_bit(FR_BACKGROUND, &req->flags);
                        fuse_request_send(ff->fc, req);
                        iput(req->misc.release.inode);
                        fuse_put_request(ff->fc, req);
                } else {
                        req->end = fuse_release_end;
-                       req->background = 1;
+                       __set_bit(FR_BACKGROUND, &req->flags);
                        fuse_request_send_background(ff->fc, req);
                }
                kfree(ff);
@@ -299,8 +299,8 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
 {
        WARN_ON(atomic_read(&ff->count) > 1);
        fuse_prepare_release(ff, flags, FUSE_RELEASE);
-       ff->reserved_req->force = 1;
-       ff->reserved_req->background = 0;
+       __set_bit(FR_FORCE, &ff->reserved_req->flags);
+       __clear_bit(FR_BACKGROUND, &ff->reserved_req->flags);
        fuse_request_send(ff->fc, ff->reserved_req);
        fuse_put_request(ff->fc, ff->reserved_req);
        kfree(ff);
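
The two hunks above are part of a wider conversion in this release: the separate req->background and req->force booleans (and, in a later hunk, the req->state value) are folded into a single req->flags word manipulated with the kernel bitops. A minimal sketch of that pattern, assuming only the three flag names this diff actually uses; the real enum in fuse_i.h has more members:

    #include <linux/bitops.h>

    /* Sketch: flag bits standing in for the old boolean/state fields. */
    enum sketch_req_flag {
            FR_FORCE,       /* was req->force */
            FR_BACKGROUND,  /* was req->background */
            FR_PENDING,     /* was req->state == FUSE_REQ_INIT/PENDING */
    };

    struct sketch_req {
            unsigned long flags;
    };

    static void sketch_mark_background(struct sketch_req *req)
    {
            /* __set_bit()/__clear_bit() are the non-atomic variants,
             * fine while the request is not yet visible to other CPUs. */
            __set_bit(FR_BACKGROUND, &req->flags);
    }

    static bool sketch_is_pending(const struct sketch_req *req)
    {
            return test_bit(FR_PENDING, &req->flags);
    }
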
@@ -417,6 +417,15 @@ static int fuse_flush(struct file *file, fl_owner_t id)
        fuse_sync_writes(inode);
        mutex_unlock(&inode->i_mutex);
 
+       if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
+           test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
+               err = -ENOSPC;
+       if (test_bit(AS_EIO, &file->f_mapping->flags) &&
+           test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
+               err = -EIO;
+       if (err)
+               return err;
+
        req = fuse_get_req_nofail_nopages(fc, file);
        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
@@ -426,7 +435,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
-       req->force = 1;
+       __set_bit(FR_FORCE, &req->flags);
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
@@ -462,6 +471,21 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
                goto out;
 
        fuse_sync_writes(inode);
+
+       /*
+        * Due to implementation of fuse writeback
+        * filemap_write_and_wait_range() does not catch errors.
+        * We have to do this directly after fuse_sync_writes()
+        */
+       if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
+           test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
+               err = -ENOSPC;
+       if (test_bit(AS_EIO, &file->f_mapping->flags) &&
+           test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
+               err = -EIO;
+       if (err)
+               goto out;
+
        err = sync_inode_metadata(inode, 1);
        if (err)
                goto out;
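
Both fuse_flush() (earlier hunk) and fuse_fsync_common() here now pick up writeback errors by hand because, as the new comment explains, fuse's writeback implementation means filemap_write_and_wait_range() will not report them. The repeated test_bit()/test_and_clear_bit() pair can be read as one small helper; a sketch, with the helper name chosen purely for illustration (the diff itself keeps the checks open-coded at both call sites):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /*
     * Hypothetical helper: translate the latched address_space error bits
     * into an errno and clear them so the error is reported only once.
     * As in the hunks above, -EIO overrides -ENOSPC.
     */
    static int sketch_check_mapping_error(struct address_space *mapping)
    {
            int err = 0;

            if (test_bit(AS_ENOSPC, &mapping->flags) &&
                test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                    err = -ENOSPC;
            if (test_bit(AS_EIO, &mapping->flags) &&
                test_and_clear_bit(AS_EIO, &mapping->flags))
                    err = -EIO;
            return err;
    }
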
@@ -516,18 +540,23 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
        req->out.args[0].size = count;
 }
 
-static void fuse_release_user_pages(struct fuse_req *req, int write)
+static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
 {
        unsigned i;
 
        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
-               if (write)
+               if (should_dirty)
                        set_page_dirty_lock(page);
                put_page(page);
        }
 }
 
+static void fuse_io_release(struct kref *kref)
+{
+       kfree(container_of(kref, struct fuse_io_priv, refcnt));
+}
+
 static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
 {
        if (io->err)
@@ -585,8 +614,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
                }
 
                io->iocb->ki_complete(io->iocb, res, 0);
-               kfree(io);
        }
+
+       kref_put(&io->refcnt, fuse_io_release);
 }
 
 static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
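
fuse_io_priv moves from an unconditional kfree() in fuse_aio_complete() to reference counting: fuse_io_release() is the kref release callback and the completion path now only drops a reference, so whoever still holds one (an in-flight request, or the synchronous waiter in the later fuse_direct_IO() hunks) keeps the object alive. The kref idiom in isolation, with illustrative names rather than the real fuse_io_priv layout:

    #include <linux/completion.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct io_ctx {
            struct kref refcnt;             /* last put frees the object */
            struct completion *done;        /* optional waiter, see below */
            ssize_t res;                    /* result for the waiter */
    };

    static void io_ctx_release(struct kref *kref)
    {
            kfree(container_of(kref, struct io_ctx, refcnt));
    }

    static struct io_ctx *io_ctx_alloc(gfp_t gfp)
    {
            struct io_ctx *io = kzalloc(sizeof(*io), gfp);

            if (io)
                    kref_init(&io->refcnt);  /* count = 1, owned by submitter */
            return io;
    }

    /* Each queued request takes a reference ... */
    static void io_ctx_get(struct io_ctx *io)
    {
            kref_get(&io->refcnt);
    }

    /* ... and drops it when it completes; the last put frees io. */
    static void io_ctx_put(struct io_ctx *io)
    {
            kref_put(&io->refcnt, io_ctx_release);
    }
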
@@ -613,6 +643,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
                size_t num_bytes, struct fuse_io_priv *io)
 {
        spin_lock(&io->lock);
+       kref_get(&io->refcnt);
        io->size += num_bytes;
        io->reqs++;
        spin_unlock(&io->lock);
@@ -691,7 +722,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
 
 static int fuse_do_readpage(struct file *file, struct page *page)
 {
-       struct fuse_io_priv io = { .async = 0, .file = file };
+       struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
@@ -984,7 +1015,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
        size_t res;
        unsigned offset;
        unsigned i;
-       struct fuse_io_priv io = { .async = 0, .file = file };
+       struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
 
        for (i = 0; i < req->num_pages; i++)
                fuse_wait_on_page_writeback(inode, req->pages[i]->index);
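
The open-coded synchronous initializer { .async = 0, .file = file } is replaced throughout by FUSE_IO_PRIV_SYNC(). The macro lives in fuse_i.h and is not part of this diff; given the new refcnt field it plausibly expands to a designated initializer along these lines (a guess for orientation only, assuming struct kref wraps a single atomic_t as in v4.4, not the verbatim definition):

    /*
     * Sketch of what FUSE_IO_PRIV_SYNC(f) presumably provides: a stack
     * fuse_io_priv set up for synchronous I/O with its reference count
     * preset to one, so the final kref_put() balances it.
     */
    #define SKETCH_IO_PRIV_SYNC(f)                  \
    {                                               \
            .refcnt = { ATOMIC_INIT(1) },           \
            .async  = 0,                            \
            .file   = (f),                          \
    }
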
@@ -1049,6 +1080,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
                flush_dcache_page(page);
 
+               iov_iter_advance(ii, tmp);
                if (!tmp) {
                        unlock_page(page);
                        page_cache_release(page);
@@ -1061,7 +1093,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                req->page_descs[req->num_pages].length = tmp;
                req->num_pages++;
 
-               iov_iter_advance(ii, tmp);
                count += tmp;
                pos += tmp;
                offset += tmp;
@@ -1169,7 +1200,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (err <= 0)
                goto out;
 
-       err = file_remove_suid(file);
+       err = file_remove_privs(file);
        if (err)
                goto out;
 
@@ -1300,6 +1331,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                       loff_t *ppos, int flags)
 {
        int write = flags & FUSE_DIO_WRITE;
+       bool should_dirty = !write && iter_is_iovec(iter);
        int cuse = flags & FUSE_DIO_CUSE;
        struct file *file = io->file;
        struct inode *inode = file->f_mapping->host;
@@ -1344,7 +1376,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
                        nres = fuse_send_read(req, io, pos, nbytes, owner);
 
                if (!io->async)
-                       fuse_release_user_pages(req, !write);
+                       fuse_release_user_pages(req, should_dirty);
                if (req->out.h.error) {
                        if (!res)
                                res = req->out.h.error;
@@ -1398,7 +1430,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
 
 static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
+       struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
        return __fuse_direct_read(&io, to, &iocb->ki_pos);
 }
 
@@ -1406,7 +1438,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
-       struct fuse_io_priv io = { .async = 0, .file = file };
+       struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
        ssize_t res;
 
        if (is_bad_inode(inode))
@@ -1445,9 +1477,9 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 
        list_del(&req->writepages_entry);
        for (i = 0; i < req->num_pages; i++) {
-               dec_bdi_stat(bdi, BDI_WRITEBACK);
+               dec_wb_stat(&bdi->wb, WB_WRITEBACK);
                dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
-               bdi_writeout_inc(bdi);
+               wb_writeout_inc(&bdi->wb);
        }
        wake_up(&fi->page_waitq);
 }
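
The writeback accounting helpers changed names when the per-bdi counters moved into the embedded struct bdi_writeback: dec_bdi_stat(bdi, BDI_WRITEBACK) becomes dec_wb_stat(&bdi->wb, WB_WRITEBACK) and bdi_writeout_inc(bdi) becomes wb_writeout_inc(&bdi->wb), with the matching inc_wb_stat() calls in the later hunks. The pairing as this file uses it, in sketch form:

    #include <linux/backing-dev.h>

    /* Bump WB_WRITEBACK while fuse's temporary page is under writeback ... */
    static void sketch_writeback_start(struct backing_dev_info *bdi)
    {
            inc_wb_stat(&bdi->wb, WB_WRITEBACK);
    }

    /* ... and undo it, counting a completed writeout, when it finishes. */
    static void sketch_writeback_end(struct backing_dev_info *bdi)
    {
            dec_wb_stat(&bdi->wb, WB_WRITEBACK);
            wb_writeout_inc(&bdi->wb);
    }
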
@@ -1611,7 +1643,8 @@ static int fuse_writepage_locked(struct page *page)
        if (!req)
                goto err;
 
-       req->background = 1; /* writeback always goes to bg_queue */
+       /* writeback always goes to bg_queue */
+       __set_bit(FR_BACKGROUND, &req->flags);
        tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (!tmp_page)
                goto err_free;
@@ -1634,7 +1667,7 @@ static int fuse_writepage_locked(struct page *page)
        req->end = fuse_writepage_end;
        req->inode = inode;
 
-       inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
+       inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
        inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
 
        spin_lock(&fc->lock);
@@ -1742,16 +1775,15 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
                }
        }
 
-       if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
-                                       old_req->state == FUSE_REQ_PENDING)) {
+       if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) {
                struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);
 
                copy_highpage(old_req->pages[0], page);
                spin_unlock(&fc->lock);
 
-               dec_bdi_stat(bdi, BDI_WRITEBACK);
+               dec_wb_stat(&bdi->wb, WB_WRITEBACK);
                dec_zone_page_state(page, NR_WRITEBACK_TEMP);
-               bdi_writeout_inc(bdi);
+               wb_writeout_inc(&bdi->wb);
                fuse_writepage_free(fc, new_req);
                fuse_request_free(new_req);
                goto out;
@@ -1830,7 +1862,7 @@ static int fuse_writepages_fill(struct page *page,
                req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
                req->misc.write.next = NULL;
                req->in.argpages = 1;
-               req->background = 1;
+               __set_bit(FR_BACKGROUND, &req->flags);
                req->num_pages = 0;
                req->end = fuse_writepage_end;
                req->inode = inode;
@@ -1848,7 +1880,7 @@ static int fuse_writepages_fill(struct page *page,
        req->page_descs[req->num_pages].offset = 0;
        req->page_descs[req->num_pages].length = PAGE_SIZE;
 
-       inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
+       inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
        inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
 
        err = 0;
@@ -1965,6 +1997,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = page->mapping->host;
 
+       /* Haven't copied anything?  Skip zeroing, size extending, dirtying. */
+       if (!copied)
+               goto unlock;
+
        if (!PageUptodate(page)) {
                /* Zero any unwritten bytes at the end of the page */
                size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK;
@@ -1975,6 +2011,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
 
        fuse_write_update_size(inode, pos + copied);
        set_page_dirty(page);
+
+unlock:
        unlock_page(page);
        page_cache_release(page);
 
@@ -2189,7 +2227,7 @@ static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
        int err;
 
        if (fc->no_flock) {
-               err = flock_lock_file_wait(file, fl);
+               err = locks_lock_file_wait(file, fl);
        } else {
                struct fuse_file *ff = file->private_data;
 
@@ -2786,6 +2824,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
        loff_t i_size;
        size_t count = iov_iter_count(iter);
        struct fuse_io_priv *io;
+       bool is_sync = is_sync_kiocb(iocb);
 
        pos = offset;
        inode = file->f_mapping->host;
@@ -2806,6 +2845,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
        if (!io)
                return -ENOMEM;
        spin_lock_init(&io->lock);
+       kref_init(&io->refcnt);
        io->reqs = 1;
        io->bytes = -1;
        io->size = 0;
@@ -2825,12 +2865,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
         * to wait on real async I/O requests, so we must submit this request
         * synchronously.
         */
-       if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
+       if (!is_sync && (offset + count > i_size) &&
            iov_iter_rw(iter) == WRITE)
                io->async = false;
 
-       if (io->async && is_sync_kiocb(iocb))
+       if (io->async && is_sync) {
+               /*
+                * Additional reference to keep io around after
+                * calling fuse_aio_complete()
+                */
+               kref_get(&io->refcnt);
                io->done = &wait;
+       }
 
        if (iov_iter_rw(iter) == WRITE) {
                ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
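
When the kiocb is synchronous but the FUSE requests are still sent asynchronously, the submitter now takes one extra reference before installing io->done, so the fuse_io_priv outlives the complete() issued from fuse_aio_complete() and fuse_get_res_by_io() can still read it; the final kref_put() in the next hunk replaces the old unconditional kfree(io). The waiting side of that pattern, continuing the io_ctx sketch from the earlier hunk (io_result() is a hypothetical accessor standing in for fuse_get_res_by_io()):

    #include <linux/completion.h>

    /* Hypothetical accessor standing in for fuse_get_res_by_io(). */
    static ssize_t io_result(const struct io_ctx *io)
    {
            return io->res;
    }

    static ssize_t sketch_submit_and_wait(struct io_ctx *io)
    {
            DECLARE_COMPLETION_ONSTACK(wait);
            ssize_t ret;

            kref_get(&io->refcnt);          /* extra ref for the waiter */
            io->done = &wait;

            /* ... queue the async requests; the completion path signals
             * complete(io->done) and drops only its own references ... */

            wait_for_completion(&wait);
            ret = io_result(io);            /* still safe: waiter holds a ref */
            kref_put(&io->refcnt, io_ctx_release);
            return ret;
    }
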
@@ -2843,14 +2889,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
 
                /* we have a non-extending, async request, so return */
-               if (!is_sync_kiocb(iocb))
+               if (!is_sync)
                        return -EIOCBQUEUED;
 
                wait_for_completion(&wait);
                ret = fuse_get_res_by_io(io);
        }
 
-       kfree(io);
+       kref_put(&io->refcnt, fuse_io_release);
 
        if (iov_iter_rw(iter) == WRITE) {
                if (ret > 0)