/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

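/*
 * Moving GC copies live data out of mostly-empty buckets so those
 * buckets can be reclaimed. bch_moving_gc() selects the victim buckets
 * and flags them with GC_MOVE; read_moving() then rescans the keyspace,
 * reads each key that still points into a flagged bucket, and rewrites
 * it elsewhere through the normal data insert path.
 *
 * A moving_io tracks one key in flight: the closure ties the read, the
 * write and the final cleanup together. The bbio must be the last
 * member, because its inline bio_vecs are allocated immediately after
 * the struct (see read_moving()).
 */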
struct moving_io {
        struct closure          cl;
        struct keybuf_key       *w;
        struct data_insert_op   op;
        struct bbio             bio;
};

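/*
 * Keybuf predicate: a key is worth moving if any of its pointers lands
 * in a bucket that bch_moving_gc() marked with GC_MOVE.
 */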
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
        struct cache_set *c = container_of(buf, struct cache_set,
                                           moving_gc_keys);
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                if (ptr_available(c, k, i) &&
                    GC_MOVE(PTR_BUCKET(c, k, i)))
                        return true;

        return false;
}

/* Moving GC - IO loop */

static void moving_io_destructor(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        kfree(io);
}

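/*
 * Final stage of a move: free the data pages, note a collision if the
 * compare-and-swap insert lost the race, drop the key from the keybuf
 * and release one moving_in_flight slot.
 */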
static void write_moving_finish(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment_all(bv, bio, i)
                __free_page(bv->bv_page);

        if (io->op.replace_collision)
                trace_bcache_gc_copy_collision(&io->w->key);

        bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

        up(&io->op.c->moving_in_flight);

        closure_return_with_destructor(cl, moving_io_destructor);
}

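/*
 * Read completion: record any IO error so write_moving() skips the
 * write. A clean key whose pointer went stale while the read was in
 * flight no longer needs moving, so that case aborts with -EINTR.
 */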
static void read_moving_endio(struct bio *bio, int error)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct moving_io *io = container_of(bio->bi_private,
                                            struct moving_io, cl);

        if (error)
                io->op.error = error;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(io->op.c, &b->key, 0))
                io->op.error = -EINTR;

        bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}

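/*
 * (Re)initialize the embedded bio; this runs once before the read and
 * again before the write, since the first IO consumes the bio's state.
 * bch_bio_map() with a NULL base only fills in the segment lengths and
 * counts; the pages themselves are attached by bio_alloc_pages() in
 * read_moving().
 */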
static void moving_init(struct moving_io *io)
{
        struct bio *bio = &io->bio.bio;

        bio_init(bio);
        bio_get(bio);
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

        bio->bi_iter.bi_size    = KEY_SIZE(&io->w->key) << 9;
        bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
                                               PAGE_SECTORS);
        bio->bi_private         = &io->cl;
        bio->bi_io_vec          = bio->bi_inline_vecs;
        bch_bio_map(bio, NULL);
}

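/*
 * Write stage: rewrite the data we just read. Setting op->replace makes
 * bch_data_insert() behave as a compare-and-swap: the new key is only
 * inserted if replace_key (the original key) is still present, so a
 * foreground write that raced with us simply wins and the copy is
 * dropped as a collision.
 */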
static void write_moving(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct data_insert_op *op = &io->op;

        if (!op->error) {
                moving_init(io);

                io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
                op->write_prio          = 1;
                op->bio                 = &io->bio.bio;

                op->writeback           = KEY_DIRTY(&io->w->key);
                op->csum                = KEY_CSUM(&io->w->key);

                bkey_copy(&op->replace_key, &io->w->key);
                op->replace             = true;

                closure_call(&op->cl, bch_data_insert, NULL, cl);
        }

        continue_at(cl, write_moving_finish, op->wq);
}

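/* Issue the read for one key, then continue to the write stage. */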
static void read_moving_submit(struct closure *cl)
{
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct bio *bio = &io->bio.bio;

        bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

        continue_at(cl, write_moving, io->op.wq);
}

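/*
 * Main loop: refill moving_gc_keys with keys matching moving_pred() and
 * kick off a read/write pipeline for each one. The moving_in_flight
 * semaphore bounds how many moves may be outstanding at once, and the
 * on-stack closure lets us wait for all of them before returning.
 */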
static void read_moving(struct cache_set *c)
{
        struct keybuf_key *w;
        struct moving_io *io;
        struct bio *bio;
        struct closure cl;

        closure_init_stack(&cl);

        /* XXX: if we error, background writeback could stall indefinitely */

        while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
                w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
                                           &MAX_KEY, moving_pred);
                if (!w)
                        break;

                if (ptr_stale(c, &w->key, 0)) {
                        bch_keybuf_del(&c->moving_gc_keys, w);
                        continue;
                }

                /* Over-allocate: inline bio_vecs for the key's data follow */
                io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
                             * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
                             GFP_KERNEL);
                if (!io)
                        goto err;

                w->private      = io;
                io->w           = w;
                io->op.inode    = KEY_INODE(&w->key);
                io->op.c        = c;
                io->op.wq       = c->moving_gc_wq;

                moving_init(io);
                bio = &io->bio.bio;

                bio->bi_rw      = READ;
                bio->bi_end_io  = read_moving_endio;

                if (bio_alloc_pages(bio, GFP_KERNEL))
                        goto err;

                trace_bcache_gc_copy(&w->key);

                down(&c->moving_in_flight);
                closure_call(&io->cl, read_moving_submit, NULL, &cl);
        }

        /* Out-of-line error path, reachable only via the gotos above */
        if (0) {
err:            if (!IS_ERR_OR_NULL(w->private))
                        kfree(w->private);

                bch_keybuf_del(&c->moving_gc_keys, w);
        }

        closure_sync(&cl);
}

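/*
 * Heap comparator on live sectors. With this less-than comparator the
 * bcache heap macros keep the largest element on top, so heap_peek()
 * returns the candidate bucket with the most live data, i.e. the most
 * expensive one to move.
 */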
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
        return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
        struct bucket *b;
        return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}

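/*
 * Pick the buckets to evacuate. For each cache, build a heap of the
 * buckets with the least live data, skipping metadata, empty, full and
 * pinned buckets. The total to be moved is then trimmed until it fits
 * in the RESERVE_MOVINGGC free buckets, and the survivors are flagged
 * with GC_MOVE for moving_pred() to find.
 */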
void bch_moving_gc(struct cache_set *c)
{
        struct cache *ca;
        struct bucket *b;
        unsigned i;

        if (!c->copy_gc_enabled)
                return;

        mutex_lock(&c->bucket_lock);

        for_each_cache(ca, c, i) {
                unsigned sectors_to_move = 0;
                unsigned reserve_sectors = ca->sb.bucket_size *
                        fifo_used(&ca->free[RESERVE_MOVINGGC]);

                ca->heap.used = 0;

                for_each_bucket(b, ca) {
                        if (GC_MARK(b) == GC_MARK_METADATA ||
                            !GC_SECTORS_USED(b) ||
                            GC_SECTORS_USED(b) == ca->sb.bucket_size ||
                            atomic_read(&b->pin))
                                continue;

                        if (!heap_full(&ca->heap)) {
                                sectors_to_move += GC_SECTORS_USED(b);
                                heap_add(&ca->heap, b, bucket_cmp);
                        } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
                                /* b beats the current worst candidate */
                                sectors_to_move -= bucket_heap_top(ca);
                                sectors_to_move += GC_SECTORS_USED(b);

                                ca->heap.data[0] = b;
                                heap_sift(&ca->heap, 0, bucket_cmp);
                        }
                }

                /* Drop the most expensive buckets until we fit the reserve */
                while (sectors_to_move > reserve_sectors) {
                        heap_pop(&ca->heap, b, bucket_cmp);
                        sectors_to_move -= GC_SECTORS_USED(b);
                }

                while (heap_pop(&ca->heap, b, bucket_cmp))
                        SET_GC_MOVE(b, 1);
        }

        mutex_unlock(&c->bucket_lock);

        c->moving_gc_keys.last_scanned = ZERO_KEY;

        read_moving(c);
}

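/* The semaphore caps moving GC at 64 keys in flight; see read_moving(). */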
void bch_moving_init_cache_set(struct cache_set *c)
{
        bch_keybuf_init(&c->moving_gc_keys);
        sema_init(&c->moving_in_flight, 64);
}