These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
diff --git a/kernel/drivers/md/bcache/request.c b/kernel/drivers/md/bcache/request.c
index ab43fad..25fa844 100644
--- a/kernel/drivers/md/bcache/request.c
+++ b/kernel/drivers/md/bcache/request.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/hash.h>
 #include <linux/random.h>
+#include <linux/backing-dev.h>
 
 #include <trace/events/bcache.h>
 
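The new include most likely follows the header split that separated backing-dev-defs.h out of backing-dev.h: blkdev.h stopped dragging in the full bdi API, so files that call bdi helpers must include linux/backing-dev.h themselves. A minimal sketch (not taken from request.c; backing_write_congested() is a hypothetical helper) of what the include makes available:

    #include <linux/backing-dev.h>
    #include <linux/blkdev.h>

    /* Hypothetical probe: is the backing device's write path congested? */
    static bool backing_write_congested(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            /* in 4.4 the bdi is still embedded in the request queue */
            return bdi_write_congested(&q->backing_dev_info);
    }
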
@@ -87,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl)
        if (journal_ref)
                atomic_dec_bug(journal_ref);
 
-       if (!op->insert_data_done)
+       if (!op->insert_data_done) {
                continue_at(cl, bch_data_insert_start, op->wq);
+               return;
+       }
 
        bch_keylist_free(&op->insert_keys);
        closure_return(cl);
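The explicit return here (and in the hunks below) tracks the 4.3 commit "bcache: don't embed 'return' statements in closure macros": continue_at() and its relatives used to return on the caller's behalf, and now fall through instead, so any call that is not the last statement of a function needs its own return. Roughly, paraphrased from drivers/md/bcache/closure.h:

    /* Before: the macro quietly ended the calling function */
    #define continue_at(_cl, _fn, _wq)                          \
    do {                                                        \
            set_closure_fn(_cl, _fn, _wq);                      \
            closure_sub(_cl, CLOSURE_RUNNING + 1);              \
            return;                 /* hidden control flow */   \
    } while (0)

    /* After: no embedded return; callers spell it out */
    #define continue_at(_cl, _fn, _wq)                          \
    do {                                                        \
            set_closure_fn(_cl, _fn, _wq);                      \
            closure_sub(_cl, CLOSURE_RUNNING + 1);              \
    } while (0)
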
@@ -170,22 +173,22 @@ static void bch_data_insert_error(struct closure *cl)
        bch_data_insert_keys(cl);
 }
 
-static void bch_data_insert_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio)
 {
        struct closure *cl = bio->bi_private;
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-       if (error) {
+       if (bio->bi_error) {
                /* TODO: We could try to recover from this. */
                if (op->writeback)
-                       op->error = error;
+                       op->error = bio->bi_error;
                else if (!op->replace)
                        set_closure_fn(cl, bch_data_insert_error, op->wq);
                else
                        set_closure_fn(cl, NULL, NULL);
        }
 
-       bch_bbio_endio(op->c, bio, error, "writing data to cache");
+       bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
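The dropped error argument follows 4.3's "block: add a bi_error field to struct bio" (4246a0b63bd8): completion status now travels in bio->bi_error rather than as a parameter to ->bi_end_io(). A minimal sketch of the new handler shape, with req_ctx as a hypothetical per-request context:

    #include <linux/bio.h>

    struct req_ctx {                /* hypothetical, for illustration */
            int error;
    };

    static void sketch_endio(struct bio *bio)       /* no int error arg */
    {
            struct req_ctx *ctx = bio->bi_private;

            if (bio->bi_error)
                    ctx->error = bio->bi_error;     /* status off the bio */

            bio_put(bio);
    }
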
@@ -215,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl)
                /* 1 for the device pointer and 1 for the chksum */
                if (bch_keylist_realloc(&op->insert_keys,
                                        3 + (op->csum ? 1 : 0),
-                                       op->c))
+                                       op->c)) {
                        continue_at(cl, bch_data_insert_keys, op->wq);
+                       return;
+               }
 
                k = op->insert_keys.top;
                bkey_init(k);
@@ -254,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl)
 
        op->insert_data_done = true;
        continue_at(cl, bch_data_insert_keys, op->wq);
+       return;
 err:
        /* bch_alloc_sectors() blocks if s->writeback = true */
        BUG_ON(op->writeback);
@@ -471,7 +477,7 @@ struct search {
        struct data_insert_op   iop;
 };
 
-static void bch_cache_read_endio(struct bio *bio, int error)
+static void bch_cache_read_endio(struct bio *bio)
 {
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct closure *cl = bio->bi_private;
@@ -484,15 +490,15 @@ static void bch_cache_read_endio(struct bio *bio, int error)
         * from the backing device.
         */
 
-       if (error)
-               s->iop.error = error;
+       if (bio->bi_error)
+               s->iop.error = bio->bi_error;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(s->iop.c, &b->key, 0)) {
                atomic_long_inc(&s->iop.c->cache_read_races);
                s->iop.error = -EINTR;
        }
 
-       bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+       bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
 }
 
 /*
@@ -575,21 +581,23 @@ static void cache_lookup(struct closure *cl)
        ret = bch_btree_map_keys(&s->op, s->iop.c,
                                 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
                                 cache_lookup_fn, MAP_END_KEY);
-       if (ret == -EAGAIN)
+       if (ret == -EAGAIN) {
                continue_at(cl, cache_lookup, bcache_wq);
+               return;
+       }
 
        closure_return(cl);
 }
 
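With the hidden return gone from the closure macros, a missing return here would be a real bug: continue_at() would requeue the closure, and falling through to closure_return() would then drop its refcount a second time. A condensed sketch of the corrected retry shape (do_lookup() is a hypothetical stand-in for the bch_btree_map_keys() call):

    static void lookup_sketch(struct closure *cl)
    {
            int ret = do_lookup(cl);        /* may return -EAGAIN */

            if (ret == -EAGAIN) {
                    /* requeue ourselves and stop here */
                    continue_at(cl, lookup_sketch, bcache_wq);
                    return;
            }

            closure_return(cl);
    }
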
 /* Common code for the make_request functions */
 
-static void request_endio(struct bio *bio, int error)
+static void request_endio(struct bio *bio)
 {
        struct closure *cl = bio->bi_private;
 
-       if (error) {
+       if (bio->bi_error) {
                struct search *s = container_of(cl, struct search, cl);
-               s->iop.error = error;
+               s->iop.error = bio->bi_error;
                /* Only cache read errors are recoverable */
                s->recoverable = false;
        }
@@ -605,7 +613,8 @@ static void bio_complete(struct search *s)
                                    &s->d->disk->part0, s->start_time);
 
                trace_bcache_request_end(s->d, s->orig_bio);
-               bio_endio(s->orig_bio, s->iop.error);
+               s->orig_bio->bi_error = s->iop.error;
+               bio_endio(s->orig_bio);
                s->orig_bio = NULL;
        }
 }
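This is the submit side of the same bi_error convention: since bio_endio() no longer takes an error argument, the recorded status has to be copied into the original bio before completing it. As a sketch:

    /* Sketch: complete an original bio with a recorded status (0 = ok) */
    static void complete_orig(struct bio *orig_bio, int error)
    {
            orig_bio->bi_error = error;
            bio_endio(orig_bio);
    }
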
@@ -619,7 +628,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
        bio->bi_end_io          = request_endio;
        bio->bi_private         = &s->cl;
 
-       atomic_set(&bio->bi_cnt, 3);
+       bio_cnt_set(bio, 3);
 }
 
 static void search_free(struct closure *cl)
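bio_cnt_set() came in with the rename of bi_cnt to __bi_cnt ("bio: skip atomic bio count in bio_put() for BIO_REFFED bios"): the refcount is no longer meant to be poked directly, because any count other than one must also set BIO_REFFED so that bio_put() knows it has to do the atomic decrement. Roughly, paraphrased from include/linux/bio.h in 4.4:

    static inline void bio_cnt_set(struct bio *bio, unsigned int count)
    {
            if (count != 1) {
                    bio_set_flag(bio, BIO_REFFED);
                    smp_mb__before_atomic();
            }
            atomic_set(&bio->__bi_cnt, count);
    }
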
@@ -710,7 +719,7 @@ static void cached_dev_read_error(struct closure *cl)
 
                /* XXX: invalidate cache */
 
-               closure_bio_submit(bio, cl, s->d);
+               closure_bio_submit(bio, cl);
        }
 
        continue_at(cl, cached_dev_cache_miss_done, NULL);
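closure_bio_submit() lost its third argument when bcache's private bio splitting was removed ("bcache: remove driver private bio splitting code"): once the block layer learned to split arbitrarily sized bios itself, the per-device bio_split_hook went away and the helper reduced to roughly this (paraphrased from the bcache headers):

    static inline void closure_bio_submit(struct bio *bio, struct closure *cl)
    {
            closure_get(cl);        /* hold the closure across the I/O */
            generic_make_request(bio);
    }
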
@@ -833,7 +842,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
        s->cache_miss   = miss;
        s->iop.bio      = cache_bio;
        bio_get(cache_bio);
-       closure_bio_submit(cache_bio, &s->cl, s->d);
+       closure_bio_submit(cache_bio, &s->cl);
 
        return ret;
 out_put:
@@ -841,7 +850,7 @@ out_put:
 out_submit:
        miss->bi_end_io         = request_endio;
        miss->bi_private        = &s->cl;
-       closure_bio_submit(miss, &s->cl, s->d);
+       closure_bio_submit(miss, &s->cl);
        return ret;
 }
 
@@ -906,7 +915,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 
                if (!(bio->bi_rw & REQ_DISCARD) ||
                    blk_queue_discard(bdev_get_queue(dc->bdev)))
-                       closure_bio_submit(bio, cl, s->d);
+                       closure_bio_submit(bio, cl);
        } else if (s->iop.writeback) {
                bch_writeback_add(dc);
                s->iop.bio = bio;
@@ -921,12 +930,12 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                        flush->bi_end_io = request_endio;
                        flush->bi_private = cl;
 
-                       closure_bio_submit(flush, cl, s->d);
+                       closure_bio_submit(flush, cl);
                }
        } else {
                s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
-               closure_bio_submit(bio, cl, s->d);
+               closure_bio_submit(bio, cl);
        }
 
        closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
@@ -942,14 +951,15 @@ static void cached_dev_nodata(struct closure *cl)
                bch_journal_meta(s->iop.c, cl);
 
        /* If it's a flush, we send the flush to the backing device too */
-       closure_bio_submit(bio, cl, s->d);
+       closure_bio_submit(bio, cl);
 
        continue_at(cl, cached_dev_bio_complete, NULL);
 }
 
 /* Cached devices - read & write stuff */
 
-static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t cached_dev_make_request(struct request_queue *q,
+                                       struct bio *bio)
 {
        struct search *s;
        struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
@@ -984,10 +994,12 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
        } else {
                if ((bio->bi_rw & REQ_DISCARD) &&
                    !blk_queue_discard(bdev_get_queue(dc->bdev)))
-                       bio_endio(bio, 0);
+                       bio_endio(bio);
                else
-                       bch_generic_make_request(bio, &d->bio_split_hook);
+                       generic_make_request(bio);
        }
+
+       return BLK_QC_T_NONE;
 }
 
 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
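The new return type comes from 4.4's "block: change ->make_request_fn() and users to return a queue cookie" (dece16353ef4): make_request functions now hand back a blk_qc_t that blk_poll() can use to poll for completion. Drivers that cannot poll, bcache included, return BLK_QC_T_NONE on every exit path; the switch from bch_generic_make_request() to plain generic_make_request() above likewise reflects splitting becoming the block layer's job. The minimal shape of such a function (dispatch_bio() is a hypothetical stand-in for the driver's work):

    #include <linux/blkdev.h>
    #include <linux/blk_types.h>

    static blk_qc_t sketch_make_request(struct request_queue *q,
                                        struct bio *bio)
    {
            dispatch_bio(q, bio);   /* hypothetical dispatch */

            return BLK_QC_T_NONE;   /* no cookie: this driver can't poll */
    }
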
@@ -1061,7 +1073,8 @@ static void flash_dev_nodata(struct closure *cl)
        continue_at(cl, search_free, NULL);
 }
 
-static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t flash_dev_make_request(struct request_queue *q,
+                                            struct bio *bio)
 {
        struct search *s;
        struct closure *cl;
@@ -1084,6 +1097,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
                continue_at_nobarrier(&s->cl,
                                      flash_dev_nodata,
                                      bcache_wq);
+               return BLK_QC_T_NONE;
        } else if (rw) {
                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
                                        &KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1099,6 +1113,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
        }
 
        continue_at(cl, search_free, NULL);
+       return BLK_QC_T_NONE;
 }
 
 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,