Kernel bump from 4.1.3-rt to 4.1.7-rt.
[kvmfornfv.git] / kernel / drivers / md / dm-thin.c
1 /*
2  * Copyright (C) 2011-2012 Red Hat UK.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-thin-metadata.h"
8 #include "dm-bio-prison.h"
9 #include "dm.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/jiffies.h>
15 #include <linux/log2.h>
16 #include <linux/list.h>
17 #include <linux/rculist.h>
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/sort.h>
23 #include <linux/rbtree.h>
24
25 #define DM_MSG_PREFIX   "thin"
26
27 /*
28  * Tunable constants
29  */
30 #define ENDIO_HOOK_POOL_SIZE 1024
31 #define MAPPING_POOL_SIZE 1024
32 #define COMMIT_PERIOD HZ
33 #define NO_SPACE_TIMEOUT_SECS 60
34
35 static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
36
37 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
38                 "A percentage of time allocated for copy on write");
39
40 /*
41  * The block size of the device holding pool data must be
42  * between 64KB and 1GB.
43  */
44 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
45 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
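/*
 * Worked example, assuming the usual 512-byte sectors (SECTOR_SHIFT == 9):
 *   DATA_DEV_BLOCK_SIZE_MIN_SECTORS = (64 * 1024) >> 9          = 128 sectors (64KB)
 *   DATA_DEV_BLOCK_SIZE_MAX_SECTORS = (1024 * 1024 * 1024) >> 9 = 2097152 sectors (1GB)
 */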
46
47 /*
48  * Device id is restricted to 24 bits.
49  */
50 #define MAX_DEV_ID ((1 << 24) - 1)
51
52 /*
53  * How do we handle breaking sharing of data blocks?
54  * =================================================
55  *
56  * We use a standard copy-on-write btree to store the mappings for the
57  * devices (note I'm talking about copy-on-write of the metadata here, not
58  * the data).  When you take an internal snapshot you clone the root node
59  * of the origin btree.  After this there is no concept of an origin or a
60  * snapshot.  They are just two device trees that happen to point to the
61  * same data blocks.
62  *
63  * When we get a write in we decide if it's to a shared data block using
64  * some timestamp magic.  If it is, we have to break sharing.
65  *
66  * Let's say we write to a shared block in what was the origin.  The
67  * steps are:
68  *
69  * i) plug further io to this physical block. (see bio_prison code).
70  *
71  * ii) quiesce any read io to that shared data block.  Obviously
72  * including all devices that share this block.  (see dm_deferred_set code)
73  *
74  * iii) copy the data block to a newly allocated block.  This step can be
75  * skipped if the io covers the whole block. (schedule_copy).
76  *
77  * iv) insert the new mapping into the origin's btree
78  * (process_prepared_mapping).  This act of inserting breaks some
79  * sharing of btree nodes between the two devices.  Breaking sharing only
80  * affects the btree of that specific device.  Btrees for the other
81  * devices that share the block never change.  The btree for the origin
82  * device as it was after the last commit is untouched, i.e. we're using
83  * persistent data structures in the functional programming sense.
84  *
85  * v) unplug io to this physical block, including the io that triggered
86  * the breaking of sharing.
87  *
88  * Steps (ii) and (iii) occur in parallel.
89  *
90  * The metadata _doesn't_ need to be committed before the io continues.  We
91  * get away with this because the io is always written to a _new_ block.
92  * If there's a crash, then:
93  *
94  * - The origin mapping will point to the old origin block (the shared
95  * one).  This will contain the data as it was before the io that triggered
96  * the breaking of sharing came in.
97  *
98  * - The snap mapping still points to the old block.  As it would after
99  * the commit.
100  *
101  * The downside of this scheme is the timestamp magic isn't perfect, and
102  * will continue to think that the data block in the snapshot device is shared
103  * even after the write to the origin has broken sharing.  I suspect data
104  * blocks will typically be shared by many different devices, so we're
105  * breaking sharing n + 1 times, rather than n, where n is the number of
106  * devices that reference this data block.  At the moment I think the
107  * benefits far, far outweigh the disadvantages.
108  */
109
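/*
 * A rough guide mapping the steps above onto the code in this file (the
 * functions named here appear further down):
 *
 *	(i)   plug     -> bio_detain() / the bio prison cell
 *	(ii)  quiesce  -> dm_deferred_set_add_work(pool->shared_read_ds, ...)
 *	(iii) copy     -> schedule_copy() / schedule_internal_copy()
 *	(iv)  insert   -> process_prepared_mapping() -> dm_thin_insert_block()
 *	(v)   unplug   -> inc_remap_and_issue_cell() / cell_defer_no_holder()
 */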
110 /*----------------------------------------------------------------*/
111
112 /*
113  * Key building.
114  */
115 static void build_data_key(struct dm_thin_device *td,
116                            dm_block_t b, struct dm_cell_key *key)
117 {
118         key->virtual = 0;
119         key->dev = dm_thin_dev_id(td);
120         key->block_begin = b;
121         key->block_end = b + 1ULL;
122 }
123
124 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
125                               struct dm_cell_key *key)
126 {
127         key->virtual = 1;
128         key->dev = dm_thin_dev_id(td);
129         key->block_begin = b;
130         key->block_end = b + 1ULL;
131 }
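/*
 * Illustrative sketch (not compiled in): building both key types for the
 * same thin device, mirroring the calls made in process_bio() and
 * process_shared_bio() below.  Assumes tc, bio and lookup_result are valid.
 *
 *	struct dm_cell_key vkey, dkey;
 *
 *	build_virtual_key(tc->td, get_bio_block(tc, bio), &vkey); // thin (virtual) block
 *	build_data_key(tc->td, lookup_result.block, &dkey);       // pool data block
 */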
132
133 /*----------------------------------------------------------------*/
134
135 #define THROTTLE_THRESHOLD (1 * HZ)
136
137 struct throttle {
138         struct rw_semaphore lock;
139         unsigned long threshold;
140         bool throttle_applied;
141 };
142
143 static void throttle_init(struct throttle *t)
144 {
145         init_rwsem(&t->lock);
146         t->throttle_applied = false;
147 }
148
149 static void throttle_work_start(struct throttle *t)
150 {
151         t->threshold = jiffies + THROTTLE_THRESHOLD;
152 }
153
154 static void throttle_work_update(struct throttle *t)
155 {
156         if (!t->throttle_applied && jiffies > t->threshold) {
157                 down_write(&t->lock);
158                 t->throttle_applied = true;
159         }
160 }
161
162 static void throttle_work_complete(struct throttle *t)
163 {
164         if (t->throttle_applied) {
165                 t->throttle_applied = false;
166                 up_write(&t->lock);
167         }
168 }
169
170 static void throttle_lock(struct throttle *t)
171 {
172         down_read(&t->lock);
173 }
174
175 static void throttle_unlock(struct throttle *t)
176 {
177         up_read(&t->lock);
178 }
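/*
 * A minimal usage sketch of the throttle (the worker and bio-deferral
 * functions that actually drive it appear further down in the full source):
 *
 *	// worker thread, one pass of deferred work
 *	throttle_work_start(&pool->throttle);
 *	... do some work ...
 *	throttle_work_update(&pool->throttle);	// past THROTTLE_THRESHOLD? take write lock
 *	... do more work ...
 *	throttle_work_complete(&pool->throttle);	// drop the write lock if it was taken
 *
 *	// bio submission path
 *	throttle_lock(&pool->throttle);		// read lock; blocks while the worker
 *	... defer the bio ...			// holds the write lock
 *	throttle_unlock(&pool->throttle);
 */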
179
180 /*----------------------------------------------------------------*/
181
182 /*
183  * A pool device ties together a metadata device and a data device.  It
184  * also provides the interface for creating and destroying internal
185  * devices.
186  */
187 struct dm_thin_new_mapping;
188
189 /*
190  * The pool runs in 4 modes, ordered by increasing degradation so that modes can be compared numerically.
191  */
192 enum pool_mode {
193         PM_WRITE,               /* metadata may be changed */
194         PM_OUT_OF_DATA_SPACE,   /* metadata may be changed, though data may not be allocated */
195         PM_READ_ONLY,           /* metadata may not be changed */
196         PM_FAIL,                /* all I/O fails */
197 };
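/*
 * Because the modes are ordered by severity, code can compare them
 * numerically rather than listing modes, e.g. commit() below does:
 *
 *	if (get_pool_mode(pool) >= PM_READ_ONLY)
 *		return -EINVAL;	// no metadata changes in read-only/fail mode
 */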
198
199 struct pool_features {
200         enum pool_mode mode;
201
202         bool zero_new_blocks:1;
203         bool discard_enabled:1;
204         bool discard_passdown:1;
205         bool error_if_no_space:1;
206 };
207
208 struct thin_c;
209 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
210 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
211 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
212
213 #define CELL_SORT_ARRAY_SIZE 8192
214
215 struct pool {
216         struct list_head list;
217         struct dm_target *ti;   /* Only set if a pool target is bound */
218
219         struct mapped_device *pool_md;
220         struct block_device *md_dev;
221         struct dm_pool_metadata *pmd;
222
223         dm_block_t low_water_blocks;
224         uint32_t sectors_per_block;
225         int sectors_per_block_shift;
226
227         struct pool_features pf;
228         bool low_water_triggered:1;     /* A dm event has been sent */
229         bool suspended:1;
230
231         struct dm_bio_prison *prison;
232         struct dm_kcopyd_client *copier;
233
234         struct workqueue_struct *wq;
235         struct throttle throttle;
236         struct work_struct worker;
237         struct delayed_work waker;
238         struct delayed_work no_space_timeout;
239
240         unsigned long last_commit_jiffies;
241         unsigned ref_count;
242
243         spinlock_t lock;
244         struct bio_list deferred_flush_bios;
245         struct list_head prepared_mappings;
246         struct list_head prepared_discards;
247         struct list_head active_thins;
248
249         struct dm_deferred_set *shared_read_ds;
250         struct dm_deferred_set *all_io_ds;
251
252         struct dm_thin_new_mapping *next_mapping;
253         mempool_t *mapping_pool;
254
255         process_bio_fn process_bio;
256         process_bio_fn process_discard;
257
258         process_cell_fn process_cell;
259         process_cell_fn process_discard_cell;
260
261         process_mapping_fn process_prepared_mapping;
262         process_mapping_fn process_prepared_discard;
263
264         struct dm_bio_prison_cell **cell_sort_array;
265 };
266
267 static enum pool_mode get_pool_mode(struct pool *pool);
268 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
269
270 /*
271  * Target context for a pool.
272  */
273 struct pool_c {
274         struct dm_target *ti;
275         struct pool *pool;
276         struct dm_dev *data_dev;
277         struct dm_dev *metadata_dev;
278         struct dm_target_callbacks callbacks;
279
280         dm_block_t low_water_blocks;
281         struct pool_features requested_pf; /* Features requested during table load */
282         struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
283 };
284
285 /*
286  * Target context for a thin.
287  */
288 struct thin_c {
289         struct list_head list;
290         struct dm_dev *pool_dev;
291         struct dm_dev *origin_dev;
292         sector_t origin_size;
293         dm_thin_id dev_id;
294
295         struct pool *pool;
296         struct dm_thin_device *td;
297         struct mapped_device *thin_md;
298
299         bool requeue_mode:1;
300         spinlock_t lock;
301         struct list_head deferred_cells;
302         struct bio_list deferred_bio_list;
303         struct bio_list retry_on_resume_list;
304         struct rb_root sort_bio_list; /* sorted list of deferred bios */
305
306         /*
307          * Ensures the thin is not destroyed until the worker has finished
308          * iterating the active_thins list.
309          */
310         atomic_t refcount;
311         struct completion can_destroy;
312 };
313
314 /*----------------------------------------------------------------*/
315
316 /*
317  * wake_worker() is used when new work is queued and when pool_resume is
318  * ready to continue deferred IO processing.
319  */
320 static void wake_worker(struct pool *pool)
321 {
322         queue_work(pool->wq, &pool->worker);
323 }
324
325 /*----------------------------------------------------------------*/
326
327 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
328                       struct dm_bio_prison_cell **cell_result)
329 {
330         int r;
331         struct dm_bio_prison_cell *cell_prealloc;
332
333         /*
334          * Allocate a cell from the prison's mempool.
335          * This might block but it can't fail.
336          */
337         cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
338
339         r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
340         if (r)
341                 /*
342                  * We reused an old cell; we can get rid of
343                  * the new one.
344                  */
345                 dm_bio_prison_free_cell(pool->prison, cell_prealloc);
346
347         return r;
348 }
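/*
 * Typical caller pattern (see process_bio() and process_discard_bio()):
 *
 *	build_virtual_key(tc->td, block, &key);
 *	if (bio_detain(pool, &key, bio, &cell))
 *		return;	// cell already held; the bio now waits inside it
 *	process_cell(tc, cell);
 */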
349
350 static void cell_release(struct pool *pool,
351                          struct dm_bio_prison_cell *cell,
352                          struct bio_list *bios)
353 {
354         dm_cell_release(pool->prison, cell, bios);
355         dm_bio_prison_free_cell(pool->prison, cell);
356 }
357
358 static void cell_visit_release(struct pool *pool,
359                                void (*fn)(void *, struct dm_bio_prison_cell *),
360                                void *context,
361                                struct dm_bio_prison_cell *cell)
362 {
363         dm_cell_visit_release(pool->prison, fn, context, cell);
364         dm_bio_prison_free_cell(pool->prison, cell);
365 }
366
367 static void cell_release_no_holder(struct pool *pool,
368                                    struct dm_bio_prison_cell *cell,
369                                    struct bio_list *bios)
370 {
371         dm_cell_release_no_holder(pool->prison, cell, bios);
372         dm_bio_prison_free_cell(pool->prison, cell);
373 }
374
375 static void cell_error_with_code(struct pool *pool,
376                                  struct dm_bio_prison_cell *cell, int error_code)
377 {
378         dm_cell_error(pool->prison, cell, error_code);
379         dm_bio_prison_free_cell(pool->prison, cell);
380 }
381
382 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
383 {
384         cell_error_with_code(pool, cell, -EIO);
385 }
386
387 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
388 {
389         cell_error_with_code(pool, cell, 0);
390 }
391
392 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
393 {
394         cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
395 }
396
397 /*----------------------------------------------------------------*/
398
399 /*
400  * A global list of pools that uses a struct mapped_device as a key.
401  */
402 static struct dm_thin_pool_table {
403         struct mutex mutex;
404         struct list_head pools;
405 } dm_thin_pool_table;
406
407 static void pool_table_init(void)
408 {
409         mutex_init(&dm_thin_pool_table.mutex);
410         INIT_LIST_HEAD(&dm_thin_pool_table.pools);
411 }
412
413 static void __pool_table_insert(struct pool *pool)
414 {
415         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
416         list_add(&pool->list, &dm_thin_pool_table.pools);
417 }
418
419 static void __pool_table_remove(struct pool *pool)
420 {
421         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
422         list_del(&pool->list);
423 }
424
425 static struct pool *__pool_table_lookup(struct mapped_device *md)
426 {
427         struct pool *pool = NULL, *tmp;
428
429         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
430
431         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
432                 if (tmp->pool_md == md) {
433                         pool = tmp;
434                         break;
435                 }
436         }
437
438         return pool;
439 }
440
441 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
442 {
443         struct pool *pool = NULL, *tmp;
444
445         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
446
447         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
448                 if (tmp->md_dev == md_dev) {
449                         pool = tmp;
450                         break;
451                 }
452         }
453
454         return pool;
455 }
456
457 /*----------------------------------------------------------------*/
458
459 struct dm_thin_endio_hook {
460         struct thin_c *tc;
461         struct dm_deferred_entry *shared_read_entry;
462         struct dm_deferred_entry *all_io_entry;
463         struct dm_thin_new_mapping *overwrite_mapping;
464         struct rb_node rb_node;
465 };
466
467 static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
468 {
469         bio_list_merge(bios, master);
470         bio_list_init(master);
471 }
472
473 static void error_bio_list(struct bio_list *bios, int error)
474 {
475         struct bio *bio;
476
477         while ((bio = bio_list_pop(bios)))
478                 bio_endio(bio, error);
479 }
480
481 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
482 {
483         struct bio_list bios;
484         unsigned long flags;
485
486         bio_list_init(&bios);
487
488         spin_lock_irqsave(&tc->lock, flags);
489         __merge_bio_list(&bios, master);
490         spin_unlock_irqrestore(&tc->lock, flags);
491
492         error_bio_list(&bios, error);
493 }
494
495 static void requeue_deferred_cells(struct thin_c *tc)
496 {
497         struct pool *pool = tc->pool;
498         unsigned long flags;
499         struct list_head cells;
500         struct dm_bio_prison_cell *cell, *tmp;
501
502         INIT_LIST_HEAD(&cells);
503
504         spin_lock_irqsave(&tc->lock, flags);
505         list_splice_init(&tc->deferred_cells, &cells);
506         spin_unlock_irqrestore(&tc->lock, flags);
507
508         list_for_each_entry_safe(cell, tmp, &cells, user_list)
509                 cell_requeue(pool, cell);
510 }
511
512 static void requeue_io(struct thin_c *tc)
513 {
514         struct bio_list bios;
515         unsigned long flags;
516
517         bio_list_init(&bios);
518
519         spin_lock_irqsave(&tc->lock, flags);
520         __merge_bio_list(&bios, &tc->deferred_bio_list);
521         __merge_bio_list(&bios, &tc->retry_on_resume_list);
522         spin_unlock_irqrestore(&tc->lock, flags);
523
524         error_bio_list(&bios, DM_ENDIO_REQUEUE);
525         requeue_deferred_cells(tc);
526 }
527
528 static void error_retry_list(struct pool *pool)
529 {
530         struct thin_c *tc;
531
532         rcu_read_lock();
533         list_for_each_entry_rcu(tc, &pool->active_thins, list)
534                 error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
535         rcu_read_unlock();
536 }
537
538 /*
539  * This section of code contains the logic for processing a thin device's IO.
540  * Much of the code depends on pool object resources (lists, workqueues, etc)
541  * but most is exclusively called from the thin target rather than the thin-pool
542  * target.
543  */
544
545 static bool block_size_is_power_of_two(struct pool *pool)
546 {
547         return pool->sectors_per_block_shift >= 0;
548 }
549
550 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
551 {
552         struct pool *pool = tc->pool;
553         sector_t block_nr = bio->bi_iter.bi_sector;
554
555         if (block_size_is_power_of_two(pool))
556                 block_nr >>= pool->sectors_per_block_shift;
557         else
558                 (void) sector_div(block_nr, pool->sectors_per_block);
559
560         return block_nr;
561 }
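/*
 * Worked example: with 64KB blocks, sectors_per_block == 128 and
 * sectors_per_block_shift == 7, so a bio starting at sector 1000 maps to
 * thin block 1000 >> 7 == 7.  Non-power-of-two block sizes take the
 * sector_div() path instead.
 */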
562
563 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
564 {
565         struct pool *pool = tc->pool;
566         sector_t bi_sector = bio->bi_iter.bi_sector;
567
568         bio->bi_bdev = tc->pool_dev->bdev;
569         if (block_size_is_power_of_two(pool))
570                 bio->bi_iter.bi_sector =
571                         (block << pool->sectors_per_block_shift) |
572                         (bi_sector & (pool->sectors_per_block - 1));
573         else
574                 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
575                                  sector_div(bi_sector, pool->sectors_per_block);
576 }
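/*
 * Worked example: with sectors_per_block == 128 (shift 7), a bio at
 * sector 1000 carries offset 1000 & 127 == 104 within its block; remapped
 * to data block 42 it is issued at pool sector (42 << 7) | 104 == 5480.
 */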
577
578 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
579 {
580         bio->bi_bdev = tc->origin_dev->bdev;
581 }
582
583 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
584 {
585         return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
586                 dm_thin_changed_this_transaction(tc->td);
587 }
588
589 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
590 {
591         struct dm_thin_endio_hook *h;
592
593         if (bio->bi_rw & REQ_DISCARD)
594                 return;
595
596         h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
597         h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
598 }
599
600 static void issue(struct thin_c *tc, struct bio *bio)
601 {
602         struct pool *pool = tc->pool;
603         unsigned long flags;
604
605         if (!bio_triggers_commit(tc, bio)) {
606                 generic_make_request(bio);
607                 return;
608         }
609
610         /*
611          * Complete bio with an error if earlier I/O caused changes to
612          * the metadata that can't be committed, e.g. due to I/O errors
613          * on the metadata device.
614          */
615         if (dm_thin_aborted_changes(tc->td)) {
616                 bio_io_error(bio);
617                 return;
618         }
619
620         /*
621          * Batch together any bios that trigger commits and then issue a
622          * single commit for them in process_deferred_bios().
623          */
624         spin_lock_irqsave(&pool->lock, flags);
625         bio_list_add(&pool->deferred_flush_bios, bio);
626         spin_unlock_irqrestore(&pool->lock, flags);
627 }
628
629 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
630 {
631         remap_to_origin(tc, bio);
632         issue(tc, bio);
633 }
634
635 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
636                             dm_block_t block)
637 {
638         remap(tc, bio, block);
639         issue(tc, bio);
640 }
641
642 /*----------------------------------------------------------------*/
643
644 /*
645  * Bio endio functions.
646  */
647 struct dm_thin_new_mapping {
648         struct list_head list;
649
650         bool pass_discard:1;
651         bool definitely_not_shared:1;
652
653         /*
654          * Track quiescing, copying and zeroing preparation actions.  When this
655          * counter hits zero the block is prepared and can be inserted into the
656          * btree.
657          */
658         atomic_t prepare_actions;
659
660         int err;
661         struct thin_c *tc;
662         dm_block_t virt_block;
663         dm_block_t data_block;
664         struct dm_bio_prison_cell *cell, *cell2;
665
666         /*
667          * If the bio covers the whole area of a block then we can avoid
668          * zeroing or copying.  Instead this bio is hooked.  The bio will
669          * still be in the cell, so care has to be taken to avoid issuing
670          * the bio twice.
671          */
672         struct bio *bio;
673         bio_end_io_t *saved_bi_end_io;
674 };
675
676 static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
677 {
678         struct pool *pool = m->tc->pool;
679
680         if (atomic_dec_and_test(&m->prepare_actions)) {
681                 list_add_tail(&m->list, &pool->prepared_mappings);
682                 wake_worker(pool);
683         }
684 }
685
686 static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
687 {
688         unsigned long flags;
689         struct pool *pool = m->tc->pool;
690
691         spin_lock_irqsave(&pool->lock, flags);
692         __complete_mapping_preparation(m);
693         spin_unlock_irqrestore(&pool->lock, flags);
694 }
695
696 static void copy_complete(int read_err, unsigned long write_err, void *context)
697 {
698         struct dm_thin_new_mapping *m = context;
699
700         m->err = read_err || write_err ? -EIO : 0;
701         complete_mapping_preparation(m);
702 }
703
704 static void overwrite_endio(struct bio *bio, int err)
705 {
706         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
707         struct dm_thin_new_mapping *m = h->overwrite_mapping;
708
709         m->err = err;
710         complete_mapping_preparation(m);
711 }
712
713 /*----------------------------------------------------------------*/
714
715 /*
716  * Workqueue.
717  */
718
719 /*
720  * Prepared mapping jobs.
721  */
722
723 /*
724  * This sends the bios in the cell, except the original holder, back
725  * to the deferred_bios list.
726  */
727 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
728 {
729         struct pool *pool = tc->pool;
730         unsigned long flags;
731
732         spin_lock_irqsave(&tc->lock, flags);
733         cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
734         spin_unlock_irqrestore(&tc->lock, flags);
735
736         wake_worker(pool);
737 }
738
739 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
740
741 struct remap_info {
742         struct thin_c *tc;
743         struct bio_list defer_bios;
744         struct bio_list issue_bios;
745 };
746
747 static void __inc_remap_and_issue_cell(void *context,
748                                        struct dm_bio_prison_cell *cell)
749 {
750         struct remap_info *info = context;
751         struct bio *bio;
752
753         while ((bio = bio_list_pop(&cell->bios))) {
754                 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
755                         bio_list_add(&info->defer_bios, bio);
756                 else {
757                         inc_all_io_entry(info->tc->pool, bio);
758
759                         /*
760                          * We can't issue the bios with the bio prison lock
761                          * held, so we add them to a list to issue on
762                          * return from this function.
763                          */
764                         bio_list_add(&info->issue_bios, bio);
765                 }
766         }
767 }
768
769 static void inc_remap_and_issue_cell(struct thin_c *tc,
770                                      struct dm_bio_prison_cell *cell,
771                                      dm_block_t block)
772 {
773         struct bio *bio;
774         struct remap_info info;
775
776         info.tc = tc;
777         bio_list_init(&info.defer_bios);
778         bio_list_init(&info.issue_bios);
779
780         /*
781          * We have to be careful to inc any bios we're about to issue
782          * before the cell is released, and avoid a race with new bios
783          * being added to the cell.
784          */
785         cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
786                            &info, cell);
787
788         while ((bio = bio_list_pop(&info.defer_bios)))
789                 thin_defer_bio(tc, bio);
790
791         while ((bio = bio_list_pop(&info.issue_bios)))
792                 remap_and_issue(info.tc, bio, block);
793 }
794
795 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
796 {
797         if (m->bio) {
798                 m->bio->bi_end_io = m->saved_bi_end_io;
799                 atomic_inc(&m->bio->bi_remaining);
800         }
801         cell_error(m->tc->pool, m->cell);
802         list_del(&m->list);
803         mempool_free(m, m->tc->pool->mapping_pool);
804 }
805
806 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
807 {
808         struct thin_c *tc = m->tc;
809         struct pool *pool = tc->pool;
810         struct bio *bio;
811         int r;
812
813         bio = m->bio;
814         if (bio) {
815                 bio->bi_end_io = m->saved_bi_end_io;
816                 atomic_inc(&bio->bi_remaining);
817         }
818
819         if (m->err) {
820                 cell_error(pool, m->cell);
821                 goto out;
822         }
823
824         /*
825          * Commit the prepared block into the mapping btree.
826          * Any I/O for this block arriving after this point will get
827          * remapped to it directly.
828          */
829         r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
830         if (r) {
831                 metadata_operation_failed(pool, "dm_thin_insert_block", r);
832                 cell_error(pool, m->cell);
833                 goto out;
834         }
835
836         /*
837          * Release any bios held while the block was being provisioned.
838          * If we are processing a write bio that completely covers the block,
839          * we already processed it so we can ignore it now when processing
840          * the bios in the cell.
841          */
842         if (bio) {
843                 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
844                 bio_endio(bio, 0);
845         } else {
846                 inc_all_io_entry(tc->pool, m->cell->holder);
847                 remap_and_issue(tc, m->cell->holder, m->data_block);
848                 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
849         }
850
851 out:
852         list_del(&m->list);
853         mempool_free(m, pool->mapping_pool);
854 }
855
856 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
857 {
858         struct thin_c *tc = m->tc;
859
860         bio_io_error(m->bio);
861         cell_defer_no_holder(tc, m->cell);
862         cell_defer_no_holder(tc, m->cell2);
863         mempool_free(m, tc->pool->mapping_pool);
864 }
865
866 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
867 {
868         struct thin_c *tc = m->tc;
869
870         inc_all_io_entry(tc->pool, m->bio);
871         cell_defer_no_holder(tc, m->cell);
872         cell_defer_no_holder(tc, m->cell2);
873
874         if (m->pass_discard)
875                 if (m->definitely_not_shared)
876                         remap_and_issue(tc, m->bio, m->data_block);
877                 else {
878                         bool used = false;
879                         if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
880                                 bio_endio(m->bio, 0);
881                         else
882                                 remap_and_issue(tc, m->bio, m->data_block);
883                 }
884         else
885                 bio_endio(m->bio, 0);
886
887         mempool_free(m, tc->pool->mapping_pool);
888 }
889
890 static void process_prepared_discard(struct dm_thin_new_mapping *m)
891 {
892         int r;
893         struct thin_c *tc = m->tc;
894
895         r = dm_thin_remove_block(tc->td, m->virt_block);
896         if (r)
897                 DMERR_LIMIT("dm_thin_remove_block() failed");
898
899         process_prepared_discard_passdown(m);
900 }
901
902 static void process_prepared(struct pool *pool, struct list_head *head,
903                              process_mapping_fn *fn)
904 {
905         unsigned long flags;
906         struct list_head maps;
907         struct dm_thin_new_mapping *m, *tmp;
908
909         INIT_LIST_HEAD(&maps);
910         spin_lock_irqsave(&pool->lock, flags);
911         list_splice_init(head, &maps);
912         spin_unlock_irqrestore(&pool->lock, flags);
913
914         list_for_each_entry_safe(m, tmp, &maps, list)
915                 (*fn)(m);
916 }
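/*
 * Hedged sketch of how the worker is expected to consume these lists
 * (do_worker() lives further down in the full source):
 *
 *	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
 *	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
 */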
917
918 /*
919  * Deferred bio jobs.
920  */
921 static int io_overlaps_block(struct pool *pool, struct bio *bio)
922 {
923         return bio->bi_iter.bi_size ==
924                 (pool->sectors_per_block << SECTOR_SHIFT);
925 }
926
927 static int io_overwrites_block(struct pool *pool, struct bio *bio)
928 {
929         return (bio_data_dir(bio) == WRITE) &&
930                 io_overlaps_block(pool, bio);
931 }
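/*
 * Example: with 128-sector blocks a full block is 128 << SECTOR_SHIFT ==
 * 65536 bytes, so only a 64KB WRITE bio covering exactly one block counts
 * as an overwrite, letting schedule_copy()/schedule_zero() skip the copy
 * or zero step.
 */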
932
933 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
934                                bio_end_io_t *fn)
935 {
936         *save = bio->bi_end_io;
937         bio->bi_end_io = fn;
938 }
939
940 static int ensure_next_mapping(struct pool *pool)
941 {
942         if (pool->next_mapping)
943                 return 0;
944
945         pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
946
947         return pool->next_mapping ? 0 : -ENOMEM;
948 }
949
950 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
951 {
952         struct dm_thin_new_mapping *m = pool->next_mapping;
953
954         BUG_ON(!pool->next_mapping);
955
956         memset(m, 0, sizeof(struct dm_thin_new_mapping));
957         INIT_LIST_HEAD(&m->list);
958         m->bio = NULL;
959
960         pool->next_mapping = NULL;
961
962         return m;
963 }
964
965 static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
966                     sector_t begin, sector_t end)
967 {
968         int r;
969         struct dm_io_region to;
970
971         to.bdev = tc->pool_dev->bdev;
972         to.sector = begin;
973         to.count = end - begin;
974
975         r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
976         if (r < 0) {
977                 DMERR_LIMIT("dm_kcopyd_zero() failed");
978                 copy_complete(1, 1, m);
979         }
980 }
981
982 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
983                                       dm_block_t data_block,
984                                       struct dm_thin_new_mapping *m)
985 {
986         struct pool *pool = tc->pool;
987         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
988
989         h->overwrite_mapping = m;
990         m->bio = bio;
991         save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
992         inc_all_io_entry(pool, bio);
993         remap_and_issue(tc, bio, data_block);
994 }
995
996 /*
997  * A partial copy also needs to zero the uncopied region.
998  */
999 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1000                           struct dm_dev *origin, dm_block_t data_origin,
1001                           dm_block_t data_dest,
1002                           struct dm_bio_prison_cell *cell, struct bio *bio,
1003                           sector_t len)
1004 {
1005         int r;
1006         struct pool *pool = tc->pool;
1007         struct dm_thin_new_mapping *m = get_next_mapping(pool);
1008
1009         m->tc = tc;
1010         m->virt_block = virt_block;
1011         m->data_block = data_dest;
1012         m->cell = cell;
1013
1014         /*
1015          * quiesce action + copy action + an extra reference held for the
1016          * duration of this function (we may need to inc later for a
1017          * partial zero).
1018          */
1019         atomic_set(&m->prepare_actions, 3);
1020
1021         if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
1022                 complete_mapping_preparation(m); /* already quiesced */
1023
1024         /*
1025          * IO to pool_dev remaps to the pool target's data_dev.
1026          *
1027          * If the whole block of data is being overwritten, we can issue the
1028          * bio immediately. Otherwise we use kcopyd to clone the data first.
1029          */
1030         if (io_overwrites_block(pool, bio))
1031                 remap_and_issue_overwrite(tc, bio, data_dest, m);
1032         else {
1033                 struct dm_io_region from, to;
1034
1035                 from.bdev = origin->bdev;
1036                 from.sector = data_origin * pool->sectors_per_block;
1037                 from.count = len;
1038
1039                 to.bdev = tc->pool_dev->bdev;
1040                 to.sector = data_dest * pool->sectors_per_block;
1041                 to.count = len;
1042
1043                 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
1044                                    0, copy_complete, m);
1045                 if (r < 0) {
1046                         DMERR_LIMIT("dm_kcopyd_copy() failed");
1047                         copy_complete(1, 1, m);
1048
1049                         /*
1050                          * We allow the zero to be issued, to simplify the
1051                          * error path.  Otherwise we'd need to start
1052                          * worrying about decrementing the prepare_actions
1053                          * counter.
1054                          */
1055                 }
1056
1057                 /*
1058                  * Do we need to zero a tail region?
1059                  */
1060                 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
1061                         atomic_inc(&m->prepare_actions);
1062                         ll_zero(tc, m,
1063                                 data_dest * pool->sectors_per_block + len,
1064                                 (data_dest + 1) * pool->sectors_per_block);
1065                 }
1066         }
1067
1068         complete_mapping_preparation(m); /* drop our ref */
1069 }
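/*
 * Example of the tail zero above: with 128-sector blocks, a partial copy of
 * len == 64 sectors into data block 42 asks ll_zero() to clear sectors
 * [42 * 128 + 64, 43 * 128), i.e. [5440, 5504).
 */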
1070
1071 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1072                                    dm_block_t data_origin, dm_block_t data_dest,
1073                                    struct dm_bio_prison_cell *cell, struct bio *bio)
1074 {
1075         schedule_copy(tc, virt_block, tc->pool_dev,
1076                       data_origin, data_dest, cell, bio,
1077                       tc->pool->sectors_per_block);
1078 }
1079
1080 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1081                           dm_block_t data_block, struct dm_bio_prison_cell *cell,
1082                           struct bio *bio)
1083 {
1084         struct pool *pool = tc->pool;
1085         struct dm_thin_new_mapping *m = get_next_mapping(pool);
1086
1087         atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
1088         m->tc = tc;
1089         m->virt_block = virt_block;
1090         m->data_block = data_block;
1091         m->cell = cell;
1092
1093         /*
1094          * If the whole block of data is being overwritten or we are not
1095          * zeroing pre-existing data, we can issue the bio immediately.
1096          * Otherwise we use kcopyd to zero the data first.
1097          */
1098         if (!pool->pf.zero_new_blocks)
1099                 process_prepared_mapping(m);
1100
1101         else if (io_overwrites_block(pool, bio))
1102                 remap_and_issue_overwrite(tc, bio, data_block, m);
1103
1104         else
1105                 ll_zero(tc, m,
1106                         data_block * pool->sectors_per_block,
1107                         (data_block + 1) * pool->sectors_per_block);
1108 }
1109
1110 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1111                                    dm_block_t data_dest,
1112                                    struct dm_bio_prison_cell *cell, struct bio *bio)
1113 {
1114         struct pool *pool = tc->pool;
1115         sector_t virt_block_begin = virt_block * pool->sectors_per_block;
1116         sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
1117
1118         if (virt_block_end <= tc->origin_size)
1119                 schedule_copy(tc, virt_block, tc->origin_dev,
1120                               virt_block, data_dest, cell, bio,
1121                               pool->sectors_per_block);
1122
1123         else if (virt_block_begin < tc->origin_size)
1124                 schedule_copy(tc, virt_block, tc->origin_dev,
1125                               virt_block, data_dest, cell, bio,
1126                               tc->origin_size - virt_block_begin);
1127
1128         else
1129                 schedule_zero(tc, virt_block, data_dest, cell, bio);
1130 }
1131
1132 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1133
1134 static void check_for_space(struct pool *pool)
1135 {
1136         int r;
1137         dm_block_t nr_free;
1138
1139         if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
1140                 return;
1141
1142         r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
1143         if (r)
1144                 return;
1145
1146         if (nr_free)
1147                 set_pool_mode(pool, PM_WRITE);
1148 }
1149
1150 /*
1151  * A non-zero return indicates read_only or fail_io mode.
1152  * Many callers don't care about the return value.
1153  */
1154 static int commit(struct pool *pool)
1155 {
1156         int r;
1157
1158         if (get_pool_mode(pool) >= PM_READ_ONLY)
1159                 return -EINVAL;
1160
1161         r = dm_pool_commit_metadata(pool->pmd);
1162         if (r)
1163                 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
1164         else
1165                 check_for_space(pool);
1166
1167         return r;
1168 }
1169
1170 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
1171 {
1172         unsigned long flags;
1173
1174         if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1175                 DMWARN("%s: reached low water mark for data device: sending event.",
1176                        dm_device_name(pool->pool_md));
1177                 spin_lock_irqsave(&pool->lock, flags);
1178                 pool->low_water_triggered = true;
1179                 spin_unlock_irqrestore(&pool->lock, flags);
1180                 dm_table_event(pool->ti->table);
1181         }
1182 }
1183
1184 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1185 {
1186         int r;
1187         dm_block_t free_blocks;
1188         struct pool *pool = tc->pool;
1189
1190         if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
1191                 return -EINVAL;
1192
1193         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1194         if (r) {
1195                 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
1196                 return r;
1197         }
1198
1199         check_low_water_mark(pool, free_blocks);
1200
1201         if (!free_blocks) {
1202                 /*
1203                  * Try to commit to see if that will free up some
1204                  * more space.
1205                  */
1206                 r = commit(pool);
1207                 if (r)
1208                         return r;
1209
1210                 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1211                 if (r) {
1212                         metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
1213                         return r;
1214                 }
1215
1216                 if (!free_blocks) {
1217                         set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
1218                         return -ENOSPC;
1219                 }
1220         }
1221
1222         r = dm_pool_alloc_data_block(pool->pmd, result);
1223         if (r) {
1224                 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
1225                 return r;
1226         }
1227
1228         return 0;
1229 }
1230
1231 /*
1232  * If we have run out of space, queue bios until the device is
1233  * resumed, presumably after having been reloaded with more space.
1234  */
1235 static void retry_on_resume(struct bio *bio)
1236 {
1237         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1238         struct thin_c *tc = h->tc;
1239         unsigned long flags;
1240
1241         spin_lock_irqsave(&tc->lock, flags);
1242         bio_list_add(&tc->retry_on_resume_list, bio);
1243         spin_unlock_irqrestore(&tc->lock, flags);
1244 }
1245
1246 static int should_error_unserviceable_bio(struct pool *pool)
1247 {
1248         enum pool_mode m = get_pool_mode(pool);
1249
1250         switch (m) {
1251         case PM_WRITE:
1252                 /* Shouldn't get here */
1253                 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
1254                 return -EIO;
1255
1256         case PM_OUT_OF_DATA_SPACE:
1257                 return pool->pf.error_if_no_space ? -ENOSPC : 0;
1258
1259         case PM_READ_ONLY:
1260         case PM_FAIL:
1261                 return -EIO;
1262         default:
1263                 /* Shouldn't get here */
1264                 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
1265                 return -EIO;
1266         }
1267 }
1268
1269 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1270 {
1271         int error = should_error_unserviceable_bio(pool);
1272
1273         if (error)
1274                 bio_endio(bio, error);
1275         else
1276                 retry_on_resume(bio);
1277 }
1278
1279 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
1280 {
1281         struct bio *bio;
1282         struct bio_list bios;
1283         int error;
1284
1285         error = should_error_unserviceable_bio(pool);
1286         if (error) {
1287                 cell_error_with_code(pool, cell, error);
1288                 return;
1289         }
1290
1291         bio_list_init(&bios);
1292         cell_release(pool, cell, &bios);
1293
1294         while ((bio = bio_list_pop(&bios)))
1295                 retry_on_resume(bio);
1296 }
1297
1298 static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1299 {
1300         int r;
1301         struct bio *bio = cell->holder;
1302         struct pool *pool = tc->pool;
1303         struct dm_bio_prison_cell *cell2;
1304         struct dm_cell_key key2;
1305         dm_block_t block = get_bio_block(tc, bio);
1306         struct dm_thin_lookup_result lookup_result;
1307         struct dm_thin_new_mapping *m;
1308
1309         if (tc->requeue_mode) {
1310                 cell_requeue(pool, cell);
1311                 return;
1312         }
1313
1314         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1315         switch (r) {
1316         case 0:
1317                 /*
1318                  * Check nobody is fiddling with this pool block.  This can
1319                  * happen if someone's in the process of breaking sharing
1320                  * on this block.
1321                  */
1322                 build_data_key(tc->td, lookup_result.block, &key2);
1323                 if (bio_detain(tc->pool, &key2, bio, &cell2)) {
1324                         cell_defer_no_holder(tc, cell);
1325                         break;
1326                 }
1327
1328                 if (io_overlaps_block(pool, bio)) {
1329                         /*
1330                          * IO may still be going to the destination block.  We must
1331                          * quiesce before we can do the removal.
1332                          */
1333                         m = get_next_mapping(pool);
1334                         m->tc = tc;
1335                         m->pass_discard = pool->pf.discard_passdown;
1336                         m->definitely_not_shared = !lookup_result.shared;
1337                         m->virt_block = block;
1338                         m->data_block = lookup_result.block;
1339                         m->cell = cell;
1340                         m->cell2 = cell2;
1341                         m->bio = bio;
1342
1343                         if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
1344                                 pool->process_prepared_discard(m);
1345
1346                 } else {
1347                         inc_all_io_entry(pool, bio);
1348                         cell_defer_no_holder(tc, cell);
1349                         cell_defer_no_holder(tc, cell2);
1350
1351                         /*
1352                          * The DM core makes sure that the discard doesn't span
1353                          * a block boundary.  So a discard covering only part of a
1354                          * block can simply be passed down (or completed) here.
1355                          */
1356                         if ((!lookup_result.shared) && pool->pf.discard_passdown)
1357                                 remap_and_issue(tc, bio, lookup_result.block);
1358                         else
1359                                 bio_endio(bio, 0);
1360                 }
1361                 break;
1362
1363         case -ENODATA:
1364                 /*
1365                  * It isn't provisioned, just forget it.
1366                  */
1367                 cell_defer_no_holder(tc, cell);
1368                 bio_endio(bio, 0);
1369                 break;
1370
1371         default:
1372                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1373                             __func__, r);
1374                 cell_defer_no_holder(tc, cell);
1375                 bio_io_error(bio);
1376                 break;
1377         }
1378 }
1379
1380 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1381 {
1382         struct dm_bio_prison_cell *cell;
1383         struct dm_cell_key key;
1384         dm_block_t block = get_bio_block(tc, bio);
1385
1386         build_virtual_key(tc->td, block, &key);
1387         if (bio_detain(tc->pool, &key, bio, &cell))
1388                 return;
1389
1390         process_discard_cell(tc, cell);
1391 }
1392
1393 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1394                           struct dm_cell_key *key,
1395                           struct dm_thin_lookup_result *lookup_result,
1396                           struct dm_bio_prison_cell *cell)
1397 {
1398         int r;
1399         dm_block_t data_block;
1400         struct pool *pool = tc->pool;
1401
1402         r = alloc_data_block(tc, &data_block);
1403         switch (r) {
1404         case 0:
1405                 schedule_internal_copy(tc, block, lookup_result->block,
1406                                        data_block, cell, bio);
1407                 break;
1408
1409         case -ENOSPC:
1410                 retry_bios_on_resume(pool, cell);
1411                 break;
1412
1413         default:
1414                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1415                             __func__, r);
1416                 cell_error(pool, cell);
1417                 break;
1418         }
1419 }
1420
1421 static void __remap_and_issue_shared_cell(void *context,
1422                                           struct dm_bio_prison_cell *cell)
1423 {
1424         struct remap_info *info = context;
1425         struct bio *bio;
1426
1427         while ((bio = bio_list_pop(&cell->bios))) {
1428                 if ((bio_data_dir(bio) == WRITE) ||
1429                     (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
1430                         bio_list_add(&info->defer_bios, bio);
1431                 else {
1432                         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1433
1434                         h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
1435                         inc_all_io_entry(info->tc->pool, bio);
1436                         bio_list_add(&info->issue_bios, bio);
1437                 }
1438         }
1439 }
1440
1441 static void remap_and_issue_shared_cell(struct thin_c *tc,
1442                                         struct dm_bio_prison_cell *cell,
1443                                         dm_block_t block)
1444 {
1445         struct bio *bio;
1446         struct remap_info info;
1447
1448         info.tc = tc;
1449         bio_list_init(&info.defer_bios);
1450         bio_list_init(&info.issue_bios);
1451
1452         cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
1453                            &info, cell);
1454
1455         while ((bio = bio_list_pop(&info.defer_bios)))
1456                 thin_defer_bio(tc, bio);
1457
1458         while ((bio = bio_list_pop(&info.issue_bios)))
1459                 remap_and_issue(tc, bio, block);
1460 }
1461
1462 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1463                                dm_block_t block,
1464                                struct dm_thin_lookup_result *lookup_result,
1465                                struct dm_bio_prison_cell *virt_cell)
1466 {
1467         struct dm_bio_prison_cell *data_cell;
1468         struct pool *pool = tc->pool;
1469         struct dm_cell_key key;
1470
1471         /*
1472          * If cell is already occupied, then sharing is already in the process
1473          * of being broken so we have nothing further to do here.
1474          */
1475         build_data_key(tc->td, lookup_result->block, &key);
1476         if (bio_detain(pool, &key, bio, &data_cell)) {
1477                 cell_defer_no_holder(tc, virt_cell);
1478                 return;
1479         }
1480
1481         if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1482                 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1483                 cell_defer_no_holder(tc, virt_cell);
1484         } else {
1485                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1486
1487                 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1488                 inc_all_io_entry(pool, bio);
1489                 remap_and_issue(tc, bio, lookup_result->block);
1490
1491                 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
1492                 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
1493         }
1494 }
1495
1496 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1497                             struct dm_bio_prison_cell *cell)
1498 {
1499         int r;
1500         dm_block_t data_block;
1501         struct pool *pool = tc->pool;
1502
1503         /*
1504          * Remap empty bios (flushes) immediately, without provisioning.
1505          */
1506         if (!bio->bi_iter.bi_size) {
1507                 inc_all_io_entry(pool, bio);
1508                 cell_defer_no_holder(tc, cell);
1509
1510                 remap_and_issue(tc, bio, 0);
1511                 return;
1512         }
1513
1514         /*
1515          * Fill read bios with zeroes and complete them immediately.
1516          */
1517         if (bio_data_dir(bio) == READ) {
1518                 zero_fill_bio(bio);
1519                 cell_defer_no_holder(tc, cell);
1520                 bio_endio(bio, 0);
1521                 return;
1522         }
1523
1524         r = alloc_data_block(tc, &data_block);
1525         switch (r) {
1526         case 0:
1527                 if (tc->origin_dev)
1528                         schedule_external_copy(tc, block, data_block, cell, bio);
1529                 else
1530                         schedule_zero(tc, block, data_block, cell, bio);
1531                 break;
1532
1533         case -ENOSPC:
1534                 retry_bios_on_resume(pool, cell);
1535                 break;
1536
1537         default:
1538                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1539                             __func__, r);
1540                 cell_error(pool, cell);
1541                 break;
1542         }
1543 }
1544
1545 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1546 {
1547         int r;
1548         struct pool *pool = tc->pool;
1549         struct bio *bio = cell->holder;
1550         dm_block_t block = get_bio_block(tc, bio);
1551         struct dm_thin_lookup_result lookup_result;
1552
1553         if (tc->requeue_mode) {
1554                 cell_requeue(pool, cell);
1555                 return;
1556         }
1557
1558         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1559         switch (r) {
1560         case 0:
1561                 if (lookup_result.shared)
1562                         process_shared_bio(tc, bio, block, &lookup_result, cell);
1563                 else {
1564                         inc_all_io_entry(pool, bio);
1565                         remap_and_issue(tc, bio, lookup_result.block);
1566                         inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1567                 }
1568                 break;
1569
1570         case -ENODATA:
1571                 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1572                         inc_all_io_entry(pool, bio);
1573                         cell_defer_no_holder(tc, cell);
1574
1575                         if (bio_end_sector(bio) <= tc->origin_size)
1576                                 remap_to_origin_and_issue(tc, bio);
1577
1578                         else if (bio->bi_iter.bi_sector < tc->origin_size) {
1579                                 zero_fill_bio(bio);
1580                                 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1581                                 remap_to_origin_and_issue(tc, bio);
1582
1583                         } else {
1584                                 zero_fill_bio(bio);
1585                                 bio_endio(bio, 0);
1586                         }
1587                 } else
1588                         provision_block(tc, bio, block, cell);
1589                 break;
1590
1591         default:
1592                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1593                             __func__, r);
1594                 cell_defer_no_holder(tc, cell);
1595                 bio_io_error(bio);
1596                 break;
1597         }
1598 }
1599
1600 static void process_bio(struct thin_c *tc, struct bio *bio)
1601 {
1602         struct pool *pool = tc->pool;
1603         dm_block_t block = get_bio_block(tc, bio);
1604         struct dm_bio_prison_cell *cell;
1605         struct dm_cell_key key;
1606
1607         /*
1608          * If cell is already occupied, then the block is already
1609          * being provisioned so we have nothing further to do here.
1610          */
1611         build_virtual_key(tc->td, block, &key);
1612         if (bio_detain(pool, &key, bio, &cell))
1613                 return;
1614
1615         process_cell(tc, cell);
1616 }
1617
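/*
 * Common handling for pools that cannot allocate new blocks (read-only
 * and out-of-data-space modes).  Reads of mapped blocks are remapped as
 * usual, reads of unmapped blocks are served from the origin device or
 * zero-filled, and writes that would need allocation or breaking of
 * sharing are handed to handle_unserviceable_bio().
 */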
1618 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
1619                                     struct dm_bio_prison_cell *cell)
1620 {
1621         int r;
1622         int rw = bio_data_dir(bio);
1623         dm_block_t block = get_bio_block(tc, bio);
1624         struct dm_thin_lookup_result lookup_result;
1625
1626         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1627         switch (r) {
1628         case 0:
1629                 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
1630                         handle_unserviceable_bio(tc->pool, bio);
1631                         if (cell)
1632                                 cell_defer_no_holder(tc, cell);
1633                 } else {
1634                         inc_all_io_entry(tc->pool, bio);
1635                         remap_and_issue(tc, bio, lookup_result.block);
1636                         if (cell)
1637                                 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1638                 }
1639                 break;
1640
1641         case -ENODATA:
1642                 if (cell)
1643                         cell_defer_no_holder(tc, cell);
1644                 if (rw != READ) {
1645                         handle_unserviceable_bio(tc->pool, bio);
1646                         break;
1647                 }
1648
1649                 if (tc->origin_dev) {
1650                         inc_all_io_entry(tc->pool, bio);
1651                         remap_to_origin_and_issue(tc, bio);
1652                         break;
1653                 }
1654
1655                 zero_fill_bio(bio);
1656                 bio_endio(bio, 0);
1657                 break;
1658
1659         default:
1660                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1661                             __func__, r);
1662                 if (cell)
1663                         cell_defer_no_holder(tc, cell);
1664                 bio_io_error(bio);
1665                 break;
1666         }
1667 }
1668
1669 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1670 {
1671         __process_bio_read_only(tc, bio, NULL);
1672 }
1673
1674 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1675 {
1676         __process_bio_read_only(tc, cell->holder, cell);
1677 }
1678
1679 static void process_bio_success(struct thin_c *tc, struct bio *bio)
1680 {
1681         bio_endio(bio, 0);
1682 }
1683
1684 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1685 {
1686         bio_io_error(bio);
1687 }
1688
1689 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1690 {
1691         cell_success(tc->pool, cell);
1692 }
1693
1694 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1695 {
1696         cell_error(tc->pool, cell);
1697 }
1698
1699 /*
1700  * FIXME: should we also commit due to size of transaction, measured in
1701  * metadata blocks?
1702  */
1703 static int need_commit_due_to_time(struct pool *pool)
1704 {
1705         return !time_in_range(jiffies, pool->last_commit_jiffies,
1706                               pool->last_commit_jiffies + COMMIT_PERIOD);
1707 }
1708
1709 #define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
1710 #define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
1711
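/*
 * Insert a deferred bio into the per-thin rb-tree, keyed by its start
 * sector, using the rb_node embedded in its dm_thin_endio_hook.
 */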
1712 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
1713 {
1714         struct rb_node **rbp, *parent;
1715         struct dm_thin_endio_hook *pbd;
1716         sector_t bi_sector = bio->bi_iter.bi_sector;
1717
1718         rbp = &tc->sort_bio_list.rb_node;
1719         parent = NULL;
1720         while (*rbp) {
1721                 parent = *rbp;
1722                 pbd = thin_pbd(parent);
1723
1724                 if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
1725                         rbp = &(*rbp)->rb_left;
1726                 else
1727                         rbp = &(*rbp)->rb_right;
1728         }
1729
1730         pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1731         rb_link_node(&pbd->rb_node, parent, rbp);
1732         rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
1733 }
1734
1735 static void __extract_sorted_bios(struct thin_c *tc)
1736 {
1737         struct rb_node *node;
1738         struct dm_thin_endio_hook *pbd;
1739         struct bio *bio;
1740
1741         for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
1742                 pbd = thin_pbd(node);
1743                 bio = thin_bio(pbd);
1744
1745                 bio_list_add(&tc->deferred_bio_list, bio);
1746                 rb_erase(&pbd->rb_node, &tc->sort_bio_list);
1747         }
1748
1749         WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
1750 }
1751
1752 static void __sort_thin_deferred_bios(struct thin_c *tc)
1753 {
1754         struct bio *bio;
1755         struct bio_list bios;
1756
1757         bio_list_init(&bios);
1758         bio_list_merge(&bios, &tc->deferred_bio_list);
1759         bio_list_init(&tc->deferred_bio_list);
1760
1761         /* Sort deferred_bio_list using rb-tree */
1762         while ((bio = bio_list_pop(&bios)))
1763                 __thin_bio_rb_add(tc, bio);
1764
1765         /*
1766          * Transfer the sorted bios in sort_bio_list back to
1767          * deferred_bio_list to allow lockless submission of
1768          * all bios.
1769          */
1770         __extract_sorted_bios(tc);
1771 }
1772
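/*
 * Drain a thin device's deferred_bio_list: the bios are sorted by sector
 * and then processed under a block plug.  If the pool runs out of mapping
 * structs the remainder is pushed back onto the deferred list; every 128
 * bios the throttle is updated and metadata prefetches are issued.  In
 * requeue mode the whole list is completed with DM_ENDIO_REQUEUE instead.
 */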
1773 static void process_thin_deferred_bios(struct thin_c *tc)
1774 {
1775         struct pool *pool = tc->pool;
1776         unsigned long flags;
1777         struct bio *bio;
1778         struct bio_list bios;
1779         struct blk_plug plug;
1780         unsigned count = 0;
1781
1782         if (tc->requeue_mode) {
1783                 error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
1784                 return;
1785         }
1786
1787         bio_list_init(&bios);
1788
1789         spin_lock_irqsave(&tc->lock, flags);
1790
1791         if (bio_list_empty(&tc->deferred_bio_list)) {
1792                 spin_unlock_irqrestore(&tc->lock, flags);
1793                 return;
1794         }
1795
1796         __sort_thin_deferred_bios(tc);
1797
1798         bio_list_merge(&bios, &tc->deferred_bio_list);
1799         bio_list_init(&tc->deferred_bio_list);
1800
1801         spin_unlock_irqrestore(&tc->lock, flags);
1802
1803         blk_start_plug(&plug);
1804         while ((bio = bio_list_pop(&bios))) {
1805                 /*
1806                  * If we've got no free new_mapping structs, and processing
1807                  * this bio might require one, we pause until there are some
1808                  * prepared mappings to process.
1809                  */
1810                 if (ensure_next_mapping(pool)) {
1811                         spin_lock_irqsave(&tc->lock, flags);
1812                         bio_list_add(&tc->deferred_bio_list, bio);
1813                         bio_list_merge(&tc->deferred_bio_list, &bios);
1814                         spin_unlock_irqrestore(&tc->lock, flags);
1815                         break;
1816                 }
1817
1818                 if (bio->bi_rw & REQ_DISCARD)
1819                         pool->process_discard(tc, bio);
1820                 else
1821                         pool->process_bio(tc, bio);
1822
1823                 if ((count++ & 127) == 0) {
1824                         throttle_work_update(&pool->throttle);
1825                         dm_pool_issue_prefetches(pool->pmd);
1826                 }
1827         }
1828         blk_finish_plug(&plug);
1829 }
1830
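/*
 * Deferred cells are processed in sector order: sort_cells() pulls up to
 * CELL_SORT_ARRAY_SIZE cells off the list into pool->cell_sort_array and
 * sorts them by the start sector of their holder bio (cmp_cells).
 */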
1831 static int cmp_cells(const void *lhs, const void *rhs)
1832 {
1833         struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
1834         struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
1835
1836         BUG_ON(!lhs_cell->holder);
1837         BUG_ON(!rhs_cell->holder);
1838
1839         if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
1840                 return -1;
1841
1842         if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
1843                 return 1;
1844
1845         return 0;
1846 }
1847
1848 static unsigned sort_cells(struct pool *pool, struct list_head *cells)
1849 {
1850         unsigned count = 0;
1851         struct dm_bio_prison_cell *cell, *tmp;
1852
1853         list_for_each_entry_safe(cell, tmp, cells, user_list) {
1854                 if (count >= CELL_SORT_ARRAY_SIZE)
1855                         break;
1856
1857                 pool->cell_sort_array[count++] = cell;
1858                 list_del(&cell->user_list);
1859         }
1860
1861         sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
1862
1863         return count;
1864 }
1865
1866 static void process_thin_deferred_cells(struct thin_c *tc)
1867 {
1868         struct pool *pool = tc->pool;
1869         unsigned long flags;
1870         struct list_head cells;
1871         struct dm_bio_prison_cell *cell;
1872         unsigned i, j, count;
1873
1874         INIT_LIST_HEAD(&cells);
1875
1876         spin_lock_irqsave(&tc->lock, flags);
1877         list_splice_init(&tc->deferred_cells, &cells);
1878         spin_unlock_irqrestore(&tc->lock, flags);
1879
1880         if (list_empty(&cells))
1881                 return;
1882
1883         do {
1884                 count = sort_cells(tc->pool, &cells);
1885
1886                 for (i = 0; i < count; i++) {
1887                         cell = pool->cell_sort_array[i];
1888                         BUG_ON(!cell->holder);
1889
1890                         /*
1891                          * If we've got no free new_mapping structs, and processing
1892                          * this bio might require one, we pause until there are some
1893                          * prepared mappings to process.
1894                          */
1895                         if (ensure_next_mapping(pool)) {
1896                                 for (j = i; j < count; j++)
1897                                         list_add(&pool->cell_sort_array[j]->user_list, &cells);
1898
1899                                 spin_lock_irqsave(&tc->lock, flags);
1900                                 list_splice(&cells, &tc->deferred_cells);
1901                                 spin_unlock_irqrestore(&tc->lock, flags);
1902                                 return;
1903                         }
1904
1905                         if (cell->holder->bi_rw & REQ_DISCARD)
1906                                 pool->process_discard_cell(tc, cell);
1907                         else
1908                                 pool->process_cell(tc, cell);
1909                 }
1910         } while (!list_empty(&cells));
1911 }
1912
1913 static void thin_get(struct thin_c *tc);
1914 static void thin_put(struct thin_c *tc);
1915
1916 /*
1917  * We can't hold rcu_read_lock() around code that can block.  So we
1918  * find a thin with the rcu lock held; bump a refcount; then drop
1919  * the lock.
1920  */
1921 static struct thin_c *get_first_thin(struct pool *pool)
1922 {
1923         struct thin_c *tc = NULL;
1924
1925         rcu_read_lock();
1926         if (!list_empty(&pool->active_thins)) {
1927                 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
1928                 thin_get(tc);
1929         }
1930         rcu_read_unlock();
1931
1932         return tc;
1933 }
1934
1935 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
1936 {
1937         struct thin_c *old_tc = tc;
1938
1939         rcu_read_lock();
1940         list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
1941                 thin_get(tc);
1942                 thin_put(old_tc);
1943                 rcu_read_unlock();
1944                 return tc;
1945         }
1946         thin_put(old_tc);
1947         rcu_read_unlock();
1948
1949         return NULL;
1950 }
1951
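/*
 * Worker-side entry point: handle the deferred cells and bios of every
 * active thin device, then commit the metadata so that any deferred flush
 * bios can be issued safely.  The commit also happens periodically
 * (COMMIT_PERIOD) while the transaction has uncommitted changes.  If the
 * commit fails the flush bios are errored rather than issued.
 */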
1952 static void process_deferred_bios(struct pool *pool)
1953 {
1954         unsigned long flags;
1955         struct bio *bio;
1956         struct bio_list bios;
1957         struct thin_c *tc;
1958
1959         tc = get_first_thin(pool);
1960         while (tc) {
1961                 process_thin_deferred_cells(tc);
1962                 process_thin_deferred_bios(tc);
1963                 tc = get_next_thin(pool, tc);
1964         }
1965
1966         /*
1967          * If there are any deferred flush bios, we must commit
1968          * the metadata before issuing them.
1969          */
1970         bio_list_init(&bios);
1971         spin_lock_irqsave(&pool->lock, flags);
1972         bio_list_merge(&bios, &pool->deferred_flush_bios);
1973         bio_list_init(&pool->deferred_flush_bios);
1974         spin_unlock_irqrestore(&pool->lock, flags);
1975
1976         if (bio_list_empty(&bios) &&
1977             !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
1978                 return;
1979
1980         if (commit(pool)) {
1981                 while ((bio = bio_list_pop(&bios)))
1982                         bio_io_error(bio);
1983                 return;
1984         }
1985         pool->last_commit_jiffies = jiffies;
1986
1987         while ((bio = bio_list_pop(&bios)))
1988                 generic_make_request(bio);
1989 }
1990
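/*
 * The pool's workqueue function: issue metadata prefetches, complete any
 * prepared mappings and discards, then process the deferred bios, nudging
 * the IO throttle between each stage.
 */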
1991 static void do_worker(struct work_struct *ws)
1992 {
1993         struct pool *pool = container_of(ws, struct pool, worker);
1994
1995         throttle_work_start(&pool->throttle);
1996         dm_pool_issue_prefetches(pool->pmd);
1997         throttle_work_update(&pool->throttle);
1998         process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1999         throttle_work_update(&pool->throttle);
2000         process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
2001         throttle_work_update(&pool->throttle);
2002         process_deferred_bios(pool);
2003         throttle_work_complete(&pool->throttle);
2004 }
2005
2006 /*
2007  * We want to commit periodically so that not too much
2008  * unwritten data builds up.
2009  */
2010 static void do_waker(struct work_struct *ws)
2011 {
2012         struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
2013         wake_worker(pool);
2014         queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2015 }
2016
2017 /*
2018  * We're holding onto IO to allow userland time to react.  After the
2019  * timeout either the pool will have been resized (and thus back in
2020  * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
2021  */
2022 static void do_no_space_timeout(struct work_struct *ws)
2023 {
2024         struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2025                                          no_space_timeout);
2026
2027         if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
2028                 set_pool_mode(pool, PM_READ_ONLY);
2029 }
2030
2031 /*----------------------------------------------------------------*/
2032
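/*
 * pool_work is a unit of work run synchronously on the pool's workqueue:
 * pool_work_wait() initialises the on-stack work, queues it and blocks
 * until the work function signals pool_work_complete().
 */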
2033 struct pool_work {
2034         struct work_struct worker;
2035         struct completion complete;
2036 };
2037
2038 static struct pool_work *to_pool_work(struct work_struct *ws)
2039 {
2040         return container_of(ws, struct pool_work, worker);
2041 }
2042
2043 static void pool_work_complete(struct pool_work *pw)
2044 {
2045         complete(&pw->complete);
2046 }
2047
2048 static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2049                            void (*fn)(struct work_struct *))
2050 {
2051         INIT_WORK_ONSTACK(&pw->worker, fn);
2052         init_completion(&pw->complete);
2053         queue_work(pool->wq, &pw->worker);
2054         wait_for_completion(&pw->complete);
2055 }
2056
2057 /*----------------------------------------------------------------*/
2058
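/*
 * A pool_work that toggles a thin device's requeue_mode from the worker
 * thread: do_noflush_start() sets the flag and requeues outstanding IO,
 * do_noflush_stop() clears it again.  noflush_work() runs either one
 * synchronously.
 */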
2059 struct noflush_work {
2060         struct pool_work pw;
2061         struct thin_c *tc;
2062 };
2063
2064 static struct noflush_work *to_noflush(struct work_struct *ws)
2065 {
2066         return container_of(to_pool_work(ws), struct noflush_work, pw);
2067 }
2068
2069 static void do_noflush_start(struct work_struct *ws)
2070 {
2071         struct noflush_work *w = to_noflush(ws);
2072         w->tc->requeue_mode = true;
2073         requeue_io(w->tc);
2074         pool_work_complete(&w->pw);
2075 }
2076
2077 static void do_noflush_stop(struct work_struct *ws)
2078 {
2079         struct noflush_work *w = to_noflush(ws);
2080         w->tc->requeue_mode = false;
2081         pool_work_complete(&w->pw);
2082 }
2083
2084 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2085 {
2086         struct noflush_work w;
2087
2088         w.tc = tc;
2089         pool_work_wait(&w.pw, tc->pool, fn);
2090 }
2091
2092 /*----------------------------------------------------------------*/
2093
2094 static enum pool_mode get_pool_mode(struct pool *pool)
2095 {
2096         return pool->pf.mode;
2097 }
2098
2099 static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2100 {
2101         dm_table_event(pool->ti->table);
2102         DMINFO("%s: switching pool to %s mode",
2103                dm_device_name(pool->pool_md), new_mode);
2104 }
2105
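/*
 * Switch the pool's mode and install the process_* handlers that match it.
 * A transition to PM_WRITE is refused while the metadata needs_check flag
 * is set, a pool in PM_FAIL never leaves that mode, and entering
 * PM_OUT_OF_DATA_SPACE arms the no-space timeout (unless error_if_no_space
 * is set or the timeout is disabled).
 */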
2106 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2107 {
2108         struct pool_c *pt = pool->ti->private;
2109         bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2110         enum pool_mode old_mode = get_pool_mode(pool);
2111         unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
2112
2113         /*
2114          * Never allow the pool to transition to PM_WRITE mode if user
2115          * intervention is required to verify metadata and data consistency.
2116          */
2117         if (new_mode == PM_WRITE && needs_check) {
2118                 DMERR("%s: unable to switch pool to write mode until repaired.",
2119                       dm_device_name(pool->pool_md));
2120                 if (old_mode != new_mode)
2121                         new_mode = old_mode;
2122                 else
2123                         new_mode = PM_READ_ONLY;
2124         }
2125         /*
2126          * If we were in PM_FAIL mode, rollback of metadata failed.  We're
2127          * not going to recover without a thin_repair.  So we never let the
2128          * pool move out of the old mode.
2129          */
2130         if (old_mode == PM_FAIL)
2131                 new_mode = old_mode;
2132
2133         switch (new_mode) {
2134         case PM_FAIL:
2135                 if (old_mode != new_mode)
2136                         notify_of_pool_mode_change(pool, "failure");
2137                 dm_pool_metadata_read_only(pool->pmd);
2138                 pool->process_bio = process_bio_fail;
2139                 pool->process_discard = process_bio_fail;
2140                 pool->process_cell = process_cell_fail;
2141                 pool->process_discard_cell = process_cell_fail;
2142                 pool->process_prepared_mapping = process_prepared_mapping_fail;
2143                 pool->process_prepared_discard = process_prepared_discard_fail;
2144
2145                 error_retry_list(pool);
2146                 break;
2147
2148         case PM_READ_ONLY:
2149                 if (old_mode != new_mode)
2150                         notify_of_pool_mode_change(pool, "read-only");
2151                 dm_pool_metadata_read_only(pool->pmd);
2152                 pool->process_bio = process_bio_read_only;
2153                 pool->process_discard = process_bio_success;
2154                 pool->process_cell = process_cell_read_only;
2155                 pool->process_discard_cell = process_cell_success;
2156                 pool->process_prepared_mapping = process_prepared_mapping_fail;
2157                 pool->process_prepared_discard = process_prepared_discard_passdown;
2158
2159                 error_retry_list(pool);
2160                 break;
2161
2162         case PM_OUT_OF_DATA_SPACE:
2163                 /*
2164                  * Ideally we'd never hit this state; the low water mark
2165                  * would trigger userland to extend the pool before we
2166                  * completely run out of data space.  However, many small
2167                  * IOs to unprovisioned space can consume data space at an
2168                  * alarming rate.  Adjust your low water mark if you're
2169                  * frequently seeing this mode.
2170                  */
2171                 if (old_mode != new_mode)
2172                         notify_of_pool_mode_change(pool, "out-of-data-space");
2173                 pool->process_bio = process_bio_read_only;
2174                 pool->process_discard = process_discard_bio;
2175                 pool->process_cell = process_cell_read_only;
2176                 pool->process_discard_cell = process_discard_cell;
2177                 pool->process_prepared_mapping = process_prepared_mapping;
2178                 pool->process_prepared_discard = process_prepared_discard;
2179
2180                 if (!pool->pf.error_if_no_space && no_space_timeout)
2181                         queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
2182                 break;
2183
2184         case PM_WRITE:
2185                 if (old_mode != new_mode)
2186                         notify_of_pool_mode_change(pool, "write");
2187                 dm_pool_metadata_read_write(pool->pmd);
2188                 pool->process_bio = process_bio;
2189                 pool->process_discard = process_discard_bio;
2190                 pool->process_cell = process_cell;
2191                 pool->process_discard_cell = process_discard_cell;
2192                 pool->process_prepared_mapping = process_prepared_mapping;
2193                 pool->process_prepared_discard = process_prepared_discard;
2194                 break;
2195         }
2196
2197         pool->pf.mode = new_mode;
2198         /*
2199          * The pool mode may have changed, sync it so bind_control_target()
2200          * doesn't cause an unexpected mode transition on resume.
2201          */
2202         pt->adjusted_pf.mode = new_mode;
2203 }
2204
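/*
 * After a metadata operation fails: abort the current transaction and set
 * the needs_check flag; if either step itself fails the pool is moved to
 * PM_FAIL.  metadata_operation_failed() wraps this and then drops the pool
 * to read-only mode.
 */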
2205 static void abort_transaction(struct pool *pool)
2206 {
2207         const char *dev_name = dm_device_name(pool->pool_md);
2208
2209         DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2210         if (dm_pool_abort_metadata(pool->pmd)) {
2211                 DMERR("%s: failed to abort metadata transaction", dev_name);
2212                 set_pool_mode(pool, PM_FAIL);
2213         }
2214
2215         if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2216                 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2217                 set_pool_mode(pool, PM_FAIL);
2218         }
2219 }
2220
2221 static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2222 {
2223         DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2224                     dm_device_name(pool->pool_md), op, r);
2225
2226         abort_transaction(pool);
2227         set_pool_mode(pool, PM_READ_ONLY);
2228 }
2229
2230 /*----------------------------------------------------------------*/
2231
2232 /*
2233  * Mapping functions.
2234  */
2235
2236 /*
2237  * Called only while mapping a thin bio to hand it over to the workqueue.
2238  */
2239 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2240 {
2241         unsigned long flags;
2242         struct pool *pool = tc->pool;
2243
2244         spin_lock_irqsave(&tc->lock, flags);
2245         bio_list_add(&tc->deferred_bio_list, bio);
2246         spin_unlock_irqrestore(&tc->lock, flags);
2247
2248         wake_worker(pool);
2249 }
2250
2251 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2252 {
2253         struct pool *pool = tc->pool;
2254
2255         throttle_lock(&pool->throttle);
2256         thin_defer_bio(tc, bio);
2257         throttle_unlock(&pool->throttle);
2258 }
2259
2260 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2261 {
2262         unsigned long flags;
2263         struct pool *pool = tc->pool;
2264
2265         throttle_lock(&pool->throttle);
2266         spin_lock_irqsave(&tc->lock, flags);
2267         list_add_tail(&cell->user_list, &tc->deferred_cells);
2268         spin_unlock_irqrestore(&tc->lock, flags);
2269         throttle_unlock(&pool->throttle);
2270
2271         wake_worker(pool);
2272 }
2273
2274 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2275 {
2276         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2277
2278         h->tc = tc;
2279         h->shared_read_entry = NULL;
2280         h->all_io_entry = NULL;
2281         h->overwrite_mapping = NULL;
2282 }
2283
2284 /*
2285  * Non-blocking function called from the thin target's map function.
2286  */
2287 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2288 {
2289         int r;
2290         struct thin_c *tc = ti->private;
2291         dm_block_t block = get_bio_block(tc, bio);
2292         struct dm_thin_device *td = tc->td;
2293         struct dm_thin_lookup_result result;
2294         struct dm_bio_prison_cell *virt_cell, *data_cell;
2295         struct dm_cell_key key;
2296
2297         thin_hook_bio(tc, bio);
2298
2299         if (tc->requeue_mode) {
2300                 bio_endio(bio, DM_ENDIO_REQUEUE);
2301                 return DM_MAPIO_SUBMITTED;
2302         }
2303
2304         if (get_pool_mode(tc->pool) == PM_FAIL) {
2305                 bio_io_error(bio);
2306                 return DM_MAPIO_SUBMITTED;
2307         }
2308
2309         if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
2310                 thin_defer_bio_with_throttle(tc, bio);
2311                 return DM_MAPIO_SUBMITTED;
2312         }
2313
2314         /*
2315          * We must hold the virtual cell before doing the lookup, otherwise
2316          * there's a race with discard.
2317          */
2318         build_virtual_key(tc->td, block, &key);
2319         if (bio_detain(tc->pool, &key, bio, &virt_cell))
2320                 return DM_MAPIO_SUBMITTED;
2321
2322         r = dm_thin_find_block(td, block, 0, &result);
2323
2324         /*
2325          * Note that we defer readahead too.
2326          */
2327         switch (r) {
2328         case 0:
2329                 if (unlikely(result.shared)) {
2330                         /*
2331                          * We have a race condition here between the
2332                          * result.shared value returned by the lookup and
2333                          * snapshot creation, which may cause new
2334                          * sharing.
2335                          *
2336                          * To avoid this always quiesce the origin before
2337                          * taking the snap.  You want to do this anyway to
2338                          * ensure a consistent application view
2339                          * (i.e. lockfs).
2340                          *
2341                          * More distant ancestors are irrelevant. The
2342                          * shared flag will be set in their case.
2343                          */
2344                         thin_defer_cell(tc, virt_cell);
2345                         return DM_MAPIO_SUBMITTED;
2346                 }
2347
2348                 build_data_key(tc->td, result.block, &key);
2349                 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2350                         cell_defer_no_holder(tc, virt_cell);
2351                         return DM_MAPIO_SUBMITTED;
2352                 }
2353
2354                 inc_all_io_entry(tc->pool, bio);
2355                 cell_defer_no_holder(tc, data_cell);
2356                 cell_defer_no_holder(tc, virt_cell);
2357
2358                 remap(tc, bio, result.block);
2359                 return DM_MAPIO_REMAPPED;
2360
2361         case -ENODATA:
2362         case -EWOULDBLOCK:
2363                 thin_defer_cell(tc, virt_cell);
2364                 return DM_MAPIO_SUBMITTED;
2365
2366         default:
2367                 /*
2368                  * Must always call bio_io_error on failure.
2369                  * dm_thin_find_block can fail with -EINVAL if the
2370                  * pool is switched to fail-io mode.
2371                  */
2372                 bio_io_error(bio);
2373                 cell_defer_no_holder(tc, virt_cell);
2374                 return DM_MAPIO_SUBMITTED;
2375         }
2376 }
2377
2378 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2379 {
2380         struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
2381         struct request_queue *q;
2382
2383         if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2384                 return 1;
2385
2386         q = bdev_get_queue(pt->data_dev->bdev);
2387         return bdi_congested(&q->backing_dev_info, bdi_bits);
2388 }
2389
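/*
 * Move every active thin device's retry_on_resume_list back onto its
 * deferred list so the bios are reprocessed once the pool is resumed.
 */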
2390 static void requeue_bios(struct pool *pool)
2391 {
2392         unsigned long flags;
2393         struct thin_c *tc;
2394
2395         rcu_read_lock();
2396         list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2397                 spin_lock_irqsave(&tc->lock, flags);
2398                 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2399                 bio_list_init(&tc->retry_on_resume_list);
2400                 spin_unlock_irqrestore(&tc->lock, flags);
2401         }
2402         rcu_read_unlock();
2403 }
2404
2405 /*----------------------------------------------------------------
2406  * Binding of control targets to a pool object
2407  *--------------------------------------------------------------*/
2408 static bool data_dev_supports_discard(struct pool_c *pt)
2409 {
2410         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2411
2412         return q && blk_queue_discard(q);
2413 }
2414
2415 static bool is_factor(sector_t block_size, uint32_t n)
2416 {
2417         return !sector_div(block_size, n);
2418 }
2419
2420 /*
2421  * If discard_passdown was enabled verify that the data device
2422  * supports discards.  Disable discard_passdown if not.
2423  */
2424 static void disable_passdown_if_not_supported(struct pool_c *pt)
2425 {
2426         struct pool *pool = pt->pool;
2427         struct block_device *data_bdev = pt->data_dev->bdev;
2428         struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
2429         sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
2430         const char *reason = NULL;
2431         char buf[BDEVNAME_SIZE];
2432
2433         if (!pt->adjusted_pf.discard_passdown)
2434                 return;
2435
2436         if (!data_dev_supports_discard(pt))
2437                 reason = "discard unsupported";
2438
2439         else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2440                 reason = "max discard sectors smaller than a block";
2441
2442         else if (data_limits->discard_granularity > block_size)
2443                 reason = "discard granularity larger than a block";
2444
2445         else if (!is_factor(block_size, data_limits->discard_granularity))
2446                 reason = "discard granularity not a factor of block size";
2447
2448         if (reason) {
2449                 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2450                 pt->adjusted_pf.discard_passdown = false;
2451         }
2452 }
2453
2454 static int bind_control_target(struct pool *pool, struct dm_target *ti)
2455 {
2456         struct pool_c *pt = ti->private;
2457
2458         /*
2459          * We want to make sure that a pool in PM_FAIL mode is never upgraded.
2460          */
2461         enum pool_mode old_mode = get_pool_mode(pool);
2462         enum pool_mode new_mode = pt->adjusted_pf.mode;
2463
2464         /*
2465          * Don't change the pool's mode until set_pool_mode() below.
2466          * Otherwise the pool's process_* function pointers may
2467          * not match the desired pool mode.
2468          */
2469         pt->adjusted_pf.mode = old_mode;
2470
2471         pool->ti = ti;
2472         pool->pf = pt->adjusted_pf;
2473         pool->low_water_blocks = pt->low_water_blocks;
2474
2475         set_pool_mode(pool, new_mode);
2476
2477         return 0;
2478 }
2479
2480 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2481 {
2482         if (pool->ti == ti)
2483                 pool->ti = NULL;
2484 }
2485
2486 /*----------------------------------------------------------------
2487  * Pool creation
2488  *--------------------------------------------------------------*/
2489 /* Initialize pool features. */
2490 static void pool_features_init(struct pool_features *pf)
2491 {
2492         pf->mode = PM_WRITE;
2493         pf->zero_new_blocks = true;
2494         pf->discard_enabled = true;
2495         pf->discard_passdown = true;
2496         pf->error_if_no_space = false;
2497 }
2498
2499 static void __pool_destroy(struct pool *pool)
2500 {
2501         __pool_table_remove(pool);
2502
2503         vfree(pool->cell_sort_array);
2504         if (dm_pool_metadata_close(pool->pmd) < 0)
2505                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2506
2507         dm_bio_prison_destroy(pool->prison);
2508         dm_kcopyd_client_destroy(pool->copier);
2509
2510         if (pool->wq)
2511                 destroy_workqueue(pool->wq);
2512
2513         if (pool->next_mapping)
2514                 mempool_free(pool->next_mapping, pool->mapping_pool);
2515         mempool_destroy(pool->mapping_pool);
2516         dm_deferred_set_destroy(pool->shared_read_ds);
2517         dm_deferred_set_destroy(pool->all_io_ds);
2518         kfree(pool);
2519 }
2520
2521 static struct kmem_cache *_new_mapping_cache;
2522
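/*
 * Allocate and initialise a pool object: open the metadata device, then
 * create the bio prison, kcopyd client, ordered workqueue, deferred sets,
 * mapping mempool and cell sort array before inserting the pool into the
 * pool table.  On any failure the steps already taken are unwound in
 * reverse order.
 */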
2523 static struct pool *pool_create(struct mapped_device *pool_md,
2524                                 struct block_device *metadata_dev,
2525                                 unsigned long block_size,
2526                                 int read_only, char **error)
2527 {
2528         int r;
2529         void *err_p;
2530         struct pool *pool;
2531         struct dm_pool_metadata *pmd;
2532         bool format_device = read_only ? false : true;
2533
2534         pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
2535         if (IS_ERR(pmd)) {
2536                 *error = "Error creating metadata object";
2537                 return (struct pool *)pmd;
2538         }
2539
2540         pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2541         if (!pool) {
2542                 *error = "Error allocating memory for pool";
2543                 err_p = ERR_PTR(-ENOMEM);
2544                 goto bad_pool;
2545         }
2546
2547         pool->pmd = pmd;
2548         pool->sectors_per_block = block_size;
2549         if (block_size & (block_size - 1))
2550                 pool->sectors_per_block_shift = -1;
2551         else
2552                 pool->sectors_per_block_shift = __ffs(block_size);
2553         pool->low_water_blocks = 0;
2554         pool_features_init(&pool->pf);
2555         pool->prison = dm_bio_prison_create();
2556         if (!pool->prison) {
2557                 *error = "Error creating pool's bio prison";
2558                 err_p = ERR_PTR(-ENOMEM);
2559                 goto bad_prison;
2560         }
2561
2562         pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2563         if (IS_ERR(pool->copier)) {
2564                 r = PTR_ERR(pool->copier);
2565                 *error = "Error creating pool's kcopyd client";
2566                 err_p = ERR_PTR(r);
2567                 goto bad_kcopyd_client;
2568         }
2569
2570         /*
2571          * Create singlethreaded workqueue that will service all devices
2572          * that use this metadata.
2573          */
2574         pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2575         if (!pool->wq) {
2576                 *error = "Error creating pool's workqueue";
2577                 err_p = ERR_PTR(-ENOMEM);
2578                 goto bad_wq;
2579         }
2580
2581         throttle_init(&pool->throttle);
2582         INIT_WORK(&pool->worker, do_worker);
2583         INIT_DELAYED_WORK(&pool->waker, do_waker);
2584         INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2585         spin_lock_init(&pool->lock);
2586         bio_list_init(&pool->deferred_flush_bios);
2587         INIT_LIST_HEAD(&pool->prepared_mappings);
2588         INIT_LIST_HEAD(&pool->prepared_discards);
2589         INIT_LIST_HEAD(&pool->active_thins);
2590         pool->low_water_triggered = false;
2591         pool->suspended = true;
2592
2593         pool->shared_read_ds = dm_deferred_set_create();
2594         if (!pool->shared_read_ds) {
2595                 *error = "Error creating pool's shared read deferred set";
2596                 err_p = ERR_PTR(-ENOMEM);
2597                 goto bad_shared_read_ds;
2598         }
2599
2600         pool->all_io_ds = dm_deferred_set_create();
2601         if (!pool->all_io_ds) {
2602                 *error = "Error creating pool's all io deferred set";
2603                 err_p = ERR_PTR(-ENOMEM);
2604                 goto bad_all_io_ds;
2605         }
2606
2607         pool->next_mapping = NULL;
2608         pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2609                                                       _new_mapping_cache);
2610         if (!pool->mapping_pool) {
2611                 *error = "Error creating pool's mapping mempool";
2612                 err_p = ERR_PTR(-ENOMEM);
2613                 goto bad_mapping_pool;
2614         }
2615
2616         pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
2617         if (!pool->cell_sort_array) {
2618                 *error = "Error allocating cell sort array";
2619                 err_p = ERR_PTR(-ENOMEM);
2620                 goto bad_sort_array;
2621         }
2622
2623         pool->ref_count = 1;
2624         pool->last_commit_jiffies = jiffies;
2625         pool->pool_md = pool_md;
2626         pool->md_dev = metadata_dev;
2627         __pool_table_insert(pool);
2628
2629         return pool;
2630
2631 bad_sort_array:
2632         mempool_destroy(pool->mapping_pool);
2633 bad_mapping_pool:
2634         dm_deferred_set_destroy(pool->all_io_ds);
2635 bad_all_io_ds:
2636         dm_deferred_set_destroy(pool->shared_read_ds);
2637 bad_shared_read_ds:
2638         destroy_workqueue(pool->wq);
2639 bad_wq:
2640         dm_kcopyd_client_destroy(pool->copier);
2641 bad_kcopyd_client:
2642         dm_bio_prison_destroy(pool->prison);
2643 bad_prison:
2644         kfree(pool);
2645 bad_pool:
2646         if (dm_pool_metadata_close(pmd))
2647                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2648
2649         return err_p;
2650 }
2651
2652 static void __pool_inc(struct pool *pool)
2653 {
2654         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2655         pool->ref_count++;
2656 }
2657
2658 static void __pool_dec(struct pool *pool)
2659 {
2660         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2661         BUG_ON(!pool->ref_count);
2662         if (!--pool->ref_count)
2663                 __pool_destroy(pool);
2664 }
2665
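/*
 * Find an existing pool by metadata device or pool device and take a
 * reference, or create a new one if neither lookup succeeds.  Reusing a
 * metadata device with a different pool device (or vice versa) is
 * rejected with an error.
 */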
2666 static struct pool *__pool_find(struct mapped_device *pool_md,
2667                                 struct block_device *metadata_dev,
2668                                 unsigned long block_size, int read_only,
2669                                 char **error, int *created)
2670 {
2671         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2672
2673         if (pool) {
2674                 if (pool->pool_md != pool_md) {
2675                         *error = "metadata device already in use by a pool";
2676                         return ERR_PTR(-EBUSY);
2677                 }
2678                 __pool_inc(pool);
2679
2680         } else {
2681                 pool = __pool_table_lookup(pool_md);
2682                 if (pool) {
2683                         if (pool->md_dev != metadata_dev) {
2684                                 *error = "different pool cannot replace a pool";
2685                                 return ERR_PTR(-EINVAL);
2686                         }
2687                         __pool_inc(pool);
2688
2689                 } else {
2690                         pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
2691                         *created = 1;
2692                 }
2693         }
2694
2695         return pool;
2696 }
2697
2698 /*----------------------------------------------------------------
2699  * Pool target methods
2700  *--------------------------------------------------------------*/
2701 static void pool_dtr(struct dm_target *ti)
2702 {
2703         struct pool_c *pt = ti->private;
2704
2705         mutex_lock(&dm_thin_pool_table.mutex);
2706
2707         unbind_control_target(pt->pool, ti);
2708         __pool_dec(pt->pool);
2709         dm_put_device(ti, pt->metadata_dev);
2710         dm_put_device(ti, pt->data_dev);
2711         kfree(pt);
2712
2713         mutex_unlock(&dm_thin_pool_table.mutex);
2714 }
2715
2716 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2717                                struct dm_target *ti)
2718 {
2719         int r;
2720         unsigned argc;
2721         const char *arg_name;
2722
2723         static struct dm_arg _args[] = {
2724                 {0, 4, "Invalid number of pool feature arguments"},
2725         };
2726
2727         /*
2728          * No feature arguments supplied.
2729          */
2730         if (!as->argc)
2731                 return 0;
2732
2733         r = dm_read_arg_group(_args, as, &argc, &ti->error);
2734         if (r)
2735                 return -EINVAL;
2736
2737         while (argc && !r) {
2738                 arg_name = dm_shift_arg(as);
2739                 argc--;
2740
2741                 if (!strcasecmp(arg_name, "skip_block_zeroing"))
2742                         pf->zero_new_blocks = false;
2743
2744                 else if (!strcasecmp(arg_name, "ignore_discard"))
2745                         pf->discard_enabled = false;
2746
2747                 else if (!strcasecmp(arg_name, "no_discard_passdown"))
2748                         pf->discard_passdown = false;
2749
2750                 else if (!strcasecmp(arg_name, "read_only"))
2751                         pf->mode = PM_READ_ONLY;
2752
2753                 else if (!strcasecmp(arg_name, "error_if_no_space"))
2754                         pf->error_if_no_space = true;
2755
2756                 else {
2757                         ti->error = "Unrecognised pool feature requested";
2758                         r = -EINVAL;
2759                         break;
2760                 }
2761         }
2762
2763         return r;
2764 }
2765
2766 static void metadata_low_callback(void *context)
2767 {
2768         struct pool *pool = context;
2769
2770         DMWARN("%s: reached low water mark for metadata device: sending event.",
2771                dm_device_name(pool->pool_md));
2772
2773         dm_table_event(pool->ti->table);
2774 }
2775
2776 static sector_t get_dev_size(struct block_device *bdev)
2777 {
2778         return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2779 }
2780
2781 static void warn_if_metadata_device_too_big(struct block_device *bdev)
2782 {
2783         sector_t metadata_dev_size = get_dev_size(bdev);
2784         char buffer[BDEVNAME_SIZE];
2785
2786         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
2787                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2788                        bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
2789 }
2790
2791 static sector_t get_metadata_dev_size(struct block_device *bdev)
2792 {
2793         sector_t metadata_dev_size = get_dev_size(bdev);
2794
2795         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2796                 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
2797
2798         return metadata_dev_size;
2799 }
2800
2801 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2802 {
2803         sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2804
2805         sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
2806
2807         return metadata_dev_size;
2808 }
2809
2810 /*
2811  * When a metadata threshold is crossed a dm event is triggered, and
2812  * userland should respond by growing the metadata device.  We could let
2813  * userland set the threshold, like we do with the data threshold, but I'm
2814  * not sure they know enough to do this well.
2815  */
2816 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2817 {
2818         /*
2819          * 4M is ample for all ops with the possible exception of thin
2820          * device deletion which is harmless if it fails (just retry the
2821          * delete after you've grown the device).
2822          */
2823         dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2824         return min((dm_block_t)1024ULL /* 4M */, quarter);
2825 }
2826
2827 /*
2828  * thin-pool <metadata dev> <data dev>
2829  *           <data block size (sectors)>
2830  *           <low water mark (blocks)>
2831  *           [<#feature args> [<arg>]*]
2832  *
2833  * Optional feature arguments are:
2834  *           skip_block_zeroing: skip the zeroing of newly-provisioned blocks
2835  *           ignore_discard: disable discard
2836  *           no_discard_passdown: don't pass discards down to the data device
2837  *           read_only: don't allow any changes to be made to the pool metadata
2838  *           error_if_no_space: error IOs, instead of queueing, if no space
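 *
 * For example, a 1GiB (2097152 sector) pool built from 64KiB (128 sector)
 * blocks, with a low water mark of 32768 blocks and block zeroing
 * disabled, could be loaded with a table line like the following
 * (device names are purely illustrative):
 *
 *   0 2097152 thin-pool /dev/mapper/meta /dev/mapper/data 128 32768 1 skip_block_zeroing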
2839  */
2840 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2841 {
2842         int r, pool_created = 0;
2843         struct pool_c *pt;
2844         struct pool *pool;
2845         struct pool_features pf;
2846         struct dm_arg_set as;
2847         struct dm_dev *data_dev;
2848         unsigned long block_size;
2849         dm_block_t low_water_blocks;
2850         struct dm_dev *metadata_dev;
2851         fmode_t metadata_mode;
2852
2853         /*
2854          * FIXME Remove validation from scope of lock.
2855          */
2856         mutex_lock(&dm_thin_pool_table.mutex);
2857
2858         if (argc < 4) {
2859                 ti->error = "Invalid argument count";
2860                 r = -EINVAL;
2861                 goto out_unlock;
2862         }
2863
2864         as.argc = argc;
2865         as.argv = argv;
2866
2867         /*
2868          * Set default pool features.
2869          */
2870         pool_features_init(&pf);
2871
2872         dm_consume_args(&as, 4);
2873         r = parse_pool_features(&as, &pf, ti);
2874         if (r)
2875                 goto out_unlock;
2876
2877         metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2878         r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
2879         if (r) {
2880                 ti->error = "Error opening metadata block device";
2881                 goto out_unlock;
2882         }
2883         warn_if_metadata_device_too_big(metadata_dev->bdev);
2884
2885         r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2886         if (r) {
2887                 ti->error = "Error getting data device";
2888                 goto out_metadata;
2889         }
2890
2891         if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2892             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2893             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2894             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2895                 ti->error = "Invalid block size";
2896                 r = -EINVAL;
2897                 goto out;
2898         }
2899
2900         if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2901                 ti->error = "Invalid low water mark";
2902                 r = -EINVAL;
2903                 goto out;
2904         }
2905
2906         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2907         if (!pt) {
2908                 r = -ENOMEM;
2909                 goto out;
2910         }
2911
2912         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2913                            block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
2914         if (IS_ERR(pool)) {
2915                 r = PTR_ERR(pool);
2916                 goto out_free_pt;
2917         }
2918
2919         /*
2920          * 'pool_created' reflects whether this is the first table load.
2921          * Top level discard support is not allowed to be changed after
2922          * initial load.  This would require a pool reload to trigger thin
2923          * device changes.
2924          */
2925         if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2926                 ti->error = "Discard support cannot be disabled once enabled";
2927                 r = -EINVAL;
2928                 goto out_flags_changed;
2929         }
2930
2931         pt->pool = pool;
2932         pt->ti = ti;
2933         pt->metadata_dev = metadata_dev;
2934         pt->data_dev = data_dev;
2935         pt->low_water_blocks = low_water_blocks;
2936         pt->adjusted_pf = pt->requested_pf = pf;
2937         ti->num_flush_bios = 1;
2938
2939         /*
2940          * Only need to enable discards if the pool should pass
2941          * them down to the data device.  The thin device's discard
2942          * processing will cause mappings to be removed from the btree.
2943          */
2944         ti->discard_zeroes_data_unsupported = true;
2945         if (pf.discard_enabled && pf.discard_passdown) {
2946                 ti->num_discard_bios = 1;
2947
2948                 /*
2949                  * Setting 'discards_supported' circumvents the normal
2950                  * stacking of discard limits (this keeps the pool and
2951                  * thin devices' discard limits consistent).
2952                  */
2953                 ti->discards_supported = true;
2954         }
2955         ti->private = pt;
2956
2957         r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2958                                                 calc_metadata_threshold(pt),
2959                                                 metadata_low_callback,
2960                                                 pool);
2961         if (r)
2962                 goto out_free_pt;
2963
2964         pt->callbacks.congested_fn = pool_is_congested;
2965         dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2966
2967         mutex_unlock(&dm_thin_pool_table.mutex);
2968
2969         return 0;
2970
2971 out_flags_changed:
2972         __pool_dec(pool);
2973 out_free_pt:
2974         kfree(pt);
2975 out:
2976         dm_put_device(ti, data_dev);
2977 out_metadata:
2978         dm_put_device(ti, metadata_dev);
2979 out_unlock:
2980         mutex_unlock(&dm_thin_pool_table.mutex);
2981
2982         return r;
2983 }
2984
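/*
 * Map function for the pool target itself: simply redirect the bio to the
 * data device.  All thin-specific remapping happens in the thin target.
 */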
2985 static int pool_map(struct dm_target *ti, struct bio *bio)
2986 {
2987         int r;
2988         struct pool_c *pt = ti->private;
2989         struct pool *pool = pt->pool;
2990         unsigned long flags;
2991
2992         /*
2993          * As this is a singleton target, ti->begin is always zero.
2994          */
2995         spin_lock_irqsave(&pool->lock, flags);
2996         bio->bi_bdev = pt->data_dev->bdev;
2997         r = DM_MAPIO_REMAPPED;
2998         spin_unlock_irqrestore(&pool->lock, flags);
2999
3000         return r;
3001 }
3002
3003 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
3004 {
3005         int r;
3006         struct pool_c *pt = ti->private;
3007         struct pool *pool = pt->pool;
3008         sector_t data_size = ti->len;
3009         dm_block_t sb_data_size;
3010
3011         *need_commit = false;
3012
3013         (void) sector_div(data_size, pool->sectors_per_block);
3014
3015         r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
3016         if (r) {
3017                 DMERR("%s: failed to retrieve data device size",
3018                       dm_device_name(pool->pool_md));
3019                 return r;
3020         }
3021
3022         if (data_size < sb_data_size) {
3023                 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3024                       dm_device_name(pool->pool_md),
3025                       (unsigned long long)data_size, sb_data_size);
3026                 return -EINVAL;
3027
3028         } else if (data_size > sb_data_size) {
3029                 if (dm_pool_metadata_needs_check(pool->pmd)) {
3030                         DMERR("%s: unable to grow the data device until repaired.",
3031                               dm_device_name(pool->pool_md));
3032                         return 0;
3033                 }
3034
3035                 if (sb_data_size)
3036                         DMINFO("%s: growing the data device from %llu to %llu blocks",
3037                                dm_device_name(pool->pool_md),
3038                                sb_data_size, (unsigned long long)data_size);
3039                 r = dm_pool_resize_data_dev(pool->pmd, data_size);
3040                 if (r) {
3041                         metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
3042                         return r;
3043                 }
3044
3045                 *need_commit = true;
3046         }
3047
3048         return 0;
3049 }
3050
3051 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3052 {
3053         int r;
3054         struct pool_c *pt = ti->private;
3055         struct pool *pool = pt->pool;
3056         dm_block_t metadata_dev_size, sb_metadata_dev_size;
3057
3058         *need_commit = false;
3059
3060         metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
3061
3062         r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3063         if (r) {
3064                 DMERR("%s: failed to retrieve metadata device size",
3065                       dm_device_name(pool->pool_md));
3066                 return r;
3067         }
3068
3069         if (metadata_dev_size < sb_metadata_dev_size) {
3070                 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3071                       dm_device_name(pool->pool_md),
3072                       metadata_dev_size, sb_metadata_dev_size);
3073                 return -EINVAL;
3074
3075         } else if (metadata_dev_size > sb_metadata_dev_size) {
3076                 if (dm_pool_metadata_needs_check(pool->pmd)) {
3077                         DMERR("%s: unable to grow the metadata device until repaired.",
3078                               dm_device_name(pool->pool_md));
3079                         return 0;
3080                 }
3081
3082                 warn_if_metadata_device_too_big(pool->md_dev);
3083                 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3084                        dm_device_name(pool->pool_md),
3085                        sb_metadata_dev_size, metadata_dev_size);
3086                 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3087                 if (r) {
3088                         metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
3089                         return r;
3090                 }
3091
3092                 *need_commit = true;
3093         }
3094
3095         return 0;
3096 }
3097
3098 /*
3099  * Retrieves the number of blocks of the data device from
3100  * the superblock and compares it to the actual device size,
3101  * thus resizing the data device in case it has grown.
3102  *
3103  * This copes both with opening preallocated data devices in the ctr
3104  * followed by a resume,
3105  * -and-
3106  * with calling the resume method on its own after userspace has
3107  * grown the data device in reaction to a table event.
3108  */
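/*
 * Illustrative userspace flow for growing the pool (device names, sizes and
 * table values are examples only): after enlarging the underlying data
 * device, reload the pool table with the new length and resume, e.g.
 *
 *   dmsetup suspend my_pool
 *   dmsetup reload my_pool --table "0 <new_length> thin-pool <meta_dev> <data_dev> 128 32768"
 *   dmsetup resume my_pool
 *
 * The resume lands here and maybe_resize_data_dev() grows the pool to match
 * the new table length.
 */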
3109 static int pool_preresume(struct dm_target *ti)
3110 {
3111         int r;
3112         bool need_commit1, need_commit2;
3113         struct pool_c *pt = ti->private;
3114         struct pool *pool = pt->pool;
3115
3116         /*
3117          * Take control of the pool object.
3118          */
3119         r = bind_control_target(pool, ti);
3120         if (r)
3121                 return r;
3122
3123         r = maybe_resize_data_dev(ti, &need_commit1);
3124         if (r)
3125                 return r;
3126
3127         r = maybe_resize_metadata_dev(ti, &need_commit2);
3128         if (r)
3129                 return r;
3130
3131         if (need_commit1 || need_commit2)
3132                 (void) commit(pool);
3133
3134         return 0;
3135 }
3136
3137 static void pool_suspend_active_thins(struct pool *pool)
3138 {
3139         struct thin_c *tc;
3140
3141         /* Suspend all active thin devices */
3142         tc = get_first_thin(pool);
3143         while (tc) {
3144                 dm_internal_suspend_noflush(tc->thin_md);
3145                 tc = get_next_thin(pool, tc);
3146         }
3147 }
3148
3149 static void pool_resume_active_thins(struct pool *pool)
3150 {
3151         struct thin_c *tc;
3152
3153         /* Resume all active thin devices */
3154         tc = get_first_thin(pool);
3155         while (tc) {
3156                 dm_internal_resume(tc->thin_md);
3157                 tc = get_next_thin(pool, tc);
3158         }
3159 }
3160
3161 static void pool_resume(struct dm_target *ti)
3162 {
3163         struct pool_c *pt = ti->private;
3164         struct pool *pool = pt->pool;
3165         unsigned long flags;
3166
3167         /*
3168          * Must requeue active_thins' bios and then resume
3169          * active_thins _before_ clearing 'suspend' flag.
3170          */
3171         requeue_bios(pool);
3172         pool_resume_active_thins(pool);
3173
3174         spin_lock_irqsave(&pool->lock, flags);
3175         pool->low_water_triggered = false;
3176         pool->suspended = false;
3177         spin_unlock_irqrestore(&pool->lock, flags);
3178
3179         do_waker(&pool->waker.work);
3180 }
3181
3182 static void pool_presuspend(struct dm_target *ti)
3183 {
3184         struct pool_c *pt = ti->private;
3185         struct pool *pool = pt->pool;
3186         unsigned long flags;
3187
3188         spin_lock_irqsave(&pool->lock, flags);
3189         pool->suspended = true;
3190         spin_unlock_irqrestore(&pool->lock, flags);
3191
3192         pool_suspend_active_thins(pool);
3193 }
3194
3195 static void pool_presuspend_undo(struct dm_target *ti)
3196 {
3197         struct pool_c *pt = ti->private;
3198         struct pool *pool = pt->pool;
3199         unsigned long flags;
3200
3201         pool_resume_active_thins(pool);
3202
3203         spin_lock_irqsave(&pool->lock, flags);
3204         pool->suspended = false;
3205         spin_unlock_irqrestore(&pool->lock, flags);
3206 }
3207
3208 static void pool_postsuspend(struct dm_target *ti)
3209 {
3210         struct pool_c *pt = ti->private;
3211         struct pool *pool = pt->pool;
3212
3213         cancel_delayed_work(&pool->waker);
3214         cancel_delayed_work(&pool->no_space_timeout);
3215         flush_workqueue(pool->wq);
3216         (void) commit(pool);
3217 }
3218
3219 static int check_arg_count(unsigned argc, unsigned args_required)
3220 {
3221         if (argc != args_required) {
3222                 DMWARN("Message received with %u arguments instead of %u.",
3223                        argc, args_required);
3224                 return -EINVAL;
3225         }
3226
3227         return 0;
3228 }
3229
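/*
 * Parse a decimal device id from @arg.  Ids above MAX_DEV_ID (24 bits), or
 * strings that aren't plain decimal numbers, are rejected.  The kstrtoull()
 * cast relies on dm_thin_id being a 64-bit type.
 */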
3230 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3231 {
3232         if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3233             *dev_id <= MAX_DEV_ID)
3234                 return 0;
3235
3236         if (warning)
3237                 DMWARN("Message received with invalid device id: %s", arg);
3238
3239         return -EINVAL;
3240 }
3241
3242 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3243 {
3244         dm_thin_id dev_id;
3245         int r;
3246
3247         r = check_arg_count(argc, 2);
3248         if (r)
3249                 return r;
3250
3251         r = read_dev_id(argv[1], &dev_id, 1);
3252         if (r)
3253                 return r;
3254
3255         r = dm_pool_create_thin(pool->pmd, dev_id);
3256         if (r) {
3257                 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3258                        argv[1]);
3259                 return r;
3260         }
3261
3262         return 0;
3263 }
3264
3265 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3266 {
3267         dm_thin_id dev_id;
3268         dm_thin_id origin_dev_id;
3269         int r;
3270
3271         r = check_arg_count(argc, 3);
3272         if (r)
3273                 return r;
3274
3275         r = read_dev_id(argv[1], &dev_id, 1);
3276         if (r)
3277                 return r;
3278
3279         r = read_dev_id(argv[2], &origin_dev_id, 1);
3280         if (r)
3281                 return r;
3282
3283         r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
3284         if (r) {
3285                 DMWARN("Creation of new snapshot %s of device %s failed.",
3286                        argv[1], argv[2]);
3287                 return r;
3288         }
3289
3290         return 0;
3291 }
3292
3293 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
3294 {
3295         dm_thin_id dev_id;
3296         int r;
3297
3298         r = check_arg_count(argc, 2);
3299         if (r)
3300                 return r;
3301
3302         r = read_dev_id(argv[1], &dev_id, 1);
3303         if (r)
3304                 return r;
3305
3306         r = dm_pool_delete_thin_device(pool->pmd, dev_id);
3307         if (r)
3308                 DMWARN("Deletion of thin device %s failed.", argv[1]);
3309
3310         return r;
3311 }
3312
3313 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
3314 {
3315         dm_thin_id old_id, new_id;
3316         int r;
3317
3318         r = check_arg_count(argc, 3);
3319         if (r)
3320                 return r;
3321
3322         if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
3323                 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
3324                 return -EINVAL;
3325         }
3326
3327         if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
3328                 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
3329                 return -EINVAL;
3330         }
3331
3332         r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
3333         if (r) {
3334                 DMWARN("Failed to change transaction id from %s to %s.",
3335                        argv[1], argv[2]);
3336                 return r;
3337         }
3338
3339         return 0;
3340 }
3341
3342 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3343 {
3344         int r;
3345
3346         r = check_arg_count(argc, 1);
3347         if (r)
3348                 return r;
3349
3350         (void) commit(pool);
3351
3352         r = dm_pool_reserve_metadata_snap(pool->pmd);
3353         if (r)
3354                 DMWARN("reserve_metadata_snap message failed.");
3355
3356         return r;
3357 }
3358
3359 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3360 {
3361         int r;
3362
3363         r = check_arg_count(argc, 1);
3364         if (r)
3365                 return r;
3366
3367         r = dm_pool_release_metadata_snap(pool->pmd);
3368         if (r)
3369                 DMWARN("release_metadata_snap message failed.");
3370
3371         return r;
3372 }
3373
3374 /*
3375  * Messages supported:
3376  *   create_thin        <dev_id>
3377  *   create_snap        <dev_id> <origin_id>
3378  *   delete             <dev_id>
3379  *   set_transaction_id <current_trans_id> <new_trans_id>
3380  *   reserve_metadata_snap
3381  *   release_metadata_snap
3382  */
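/*
 * For illustration, these messages are normally sent from userspace with
 * "dmsetup message" (the pool name and ids below are examples only):
 *
 *   dmsetup message my_pool 0 create_thin 0
 *   dmsetup message my_pool 0 create_snap 1 0
 *   dmsetup message my_pool 0 delete 1
 */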
3383 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
3384 {
3385         int r = -EINVAL;
3386         struct pool_c *pt = ti->private;
3387         struct pool *pool = pt->pool;
3388
3389         if (get_pool_mode(pool) >= PM_READ_ONLY) {
3390                 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3391                       dm_device_name(pool->pool_md));
3392                 return -EINVAL;
3393         }
3394
3395         if (!strcasecmp(argv[0], "create_thin"))
3396                 r = process_create_thin_mesg(argc, argv, pool);
3397
3398         else if (!strcasecmp(argv[0], "create_snap"))
3399                 r = process_create_snap_mesg(argc, argv, pool);
3400
3401         else if (!strcasecmp(argv[0], "delete"))
3402                 r = process_delete_mesg(argc, argv, pool);
3403
3404         else if (!strcasecmp(argv[0], "set_transaction_id"))
3405                 r = process_set_transaction_id_mesg(argc, argv, pool);
3406
3407         else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
3408                 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
3409
3410         else if (!strcasecmp(argv[0], "release_metadata_snap"))
3411                 r = process_release_metadata_snap_mesg(argc, argv, pool);
3412
3413         else
3414                 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
3415
3416         if (!r)
3417                 (void) commit(pool);
3418
3419         return r;
3420 }
3421
3422 static void emit_flags(struct pool_features *pf, char *result,
3423                        unsigned sz, unsigned maxlen)
3424 {
3425         unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
3426                 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
3427                 pf->error_if_no_space;
3428         DMEMIT("%u ", count);
3429
3430         if (!pf->zero_new_blocks)
3431                 DMEMIT("skip_block_zeroing ");
3432
3433         if (!pf->discard_enabled)
3434                 DMEMIT("ignore_discard ");
3435
3436         if (!pf->discard_passdown)
3437                 DMEMIT("no_discard_passdown ");
3438
3439         if (pf->mode == PM_READ_ONLY)
3440                 DMEMIT("read_only ");
3441
3442         if (pf->error_if_no_space)
3443                 DMEMIT("error_if_no_space ");
3444 }
3445
3446 /*
3447  * Status line is:
3448  *    <transaction id> <used metadata sectors>/<total metadata sectors>
3449  *    <used data sectors>/<total data sectors> <held metadata root>
3450  */
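/*
 * The INFO output below additionally reports the pool mode, discard
 * behaviour and the out-of-space policy.  Illustrative line (values are
 * examples only):
 *
 *   0 841/24576 10240/2621440 - rw discard_passdown queue_if_no_space
 */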
3451 static void pool_status(struct dm_target *ti, status_type_t type,
3452                         unsigned status_flags, char *result, unsigned maxlen)
3453 {
3454         int r;
3455         unsigned sz = 0;
3456         uint64_t transaction_id;
3457         dm_block_t nr_free_blocks_data;
3458         dm_block_t nr_free_blocks_metadata;
3459         dm_block_t nr_blocks_data;
3460         dm_block_t nr_blocks_metadata;
3461         dm_block_t held_root;
3462         char buf[BDEVNAME_SIZE];
3463         char buf2[BDEVNAME_SIZE];
3464         struct pool_c *pt = ti->private;
3465         struct pool *pool = pt->pool;
3466
3467         switch (type) {
3468         case STATUSTYPE_INFO:
3469                 if (get_pool_mode(pool) == PM_FAIL) {
3470                         DMEMIT("Fail");
3471                         break;
3472                 }
3473
3474                 /* Commit to ensure statistics aren't out-of-date */
3475                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
3476                         (void) commit(pool);
3477
3478                 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
3479                 if (r) {
3480                         DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
3481                               dm_device_name(pool->pool_md), r);
3482                         goto err;
3483                 }
3484
3485                 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
3486                 if (r) {
3487                         DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
3488                               dm_device_name(pool->pool_md), r);
3489                         goto err;
3490                 }
3491
3492                 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
3493                 if (r) {
3494                         DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
3495                               dm_device_name(pool->pool_md), r);
3496                         goto err;
3497                 }
3498
3499                 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
3500                 if (r) {
3501                         DMERR("%s: dm_pool_get_free_block_count returned %d",
3502                               dm_device_name(pool->pool_md), r);
3503                         goto err;
3504                 }
3505
3506                 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
3507                 if (r) {
3508                         DMERR("%s: dm_pool_get_data_dev_size returned %d",
3509                               dm_device_name(pool->pool_md), r);
3510                         goto err;
3511                 }
3512
3513                 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
3514                 if (r) {
3515                         DMERR("%s: dm_pool_get_metadata_snap returned %d",
3516                               dm_device_name(pool->pool_md), r);
3517                         goto err;
3518                 }
3519
3520                 DMEMIT("%llu %llu/%llu %llu/%llu ",
3521                        (unsigned long long)transaction_id,
3522                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3523                        (unsigned long long)nr_blocks_metadata,
3524                        (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3525                        (unsigned long long)nr_blocks_data);
3526
3527                 if (held_root)
3528                         DMEMIT("%llu ", (unsigned long long)held_root);
3529                 else
3530                         DMEMIT("- ");
3531
3532                 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
3533                         DMEMIT("out_of_data_space ");
3534                 else if (pool->pf.mode == PM_READ_ONLY)
3535                         DMEMIT("ro ");
3536                 else
3537                         DMEMIT("rw ");
3538
3539                 if (!pool->pf.discard_enabled)
3540                         DMEMIT("ignore_discard ");
3541                 else if (pool->pf.discard_passdown)
3542                         DMEMIT("discard_passdown ");
3543                 else
3544                         DMEMIT("no_discard_passdown ");
3545
3546                 if (pool->pf.error_if_no_space)
3547                         DMEMIT("error_if_no_space ");
3548                 else
3549                         DMEMIT("queue_if_no_space ");
3550
3551                 break;
3552
3553         case STATUSTYPE_TABLE:
3554                 DMEMIT("%s %s %lu %llu ",
3555                        format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3556                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3557                        (unsigned long)pool->sectors_per_block,
3558                        (unsigned long long)pt->low_water_blocks);
3559                 emit_flags(&pt->requested_pf, result, sz, maxlen);
3560                 break;
3561         }
3562         return;
3563
3564 err:
3565         DMEMIT("Error");
3566 }
3567
3568 static int pool_iterate_devices(struct dm_target *ti,
3569                                 iterate_devices_callout_fn fn, void *data)
3570 {
3571         struct pool_c *pt = ti->private;
3572
3573         return fn(ti, pt->data_dev, 0, ti->len, data);
3574 }
3575
3576 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3577                       struct bio_vec *biovec, int max_size)
3578 {
3579         struct pool_c *pt = ti->private;
3580         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
3581
3582         if (!q->merge_bvec_fn)
3583                 return max_size;
3584
3585         bvm->bi_bdev = pt->data_dev->bdev;
3586
3587         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3588 }
3589
3590 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
3591 {
3592         struct pool *pool = pt->pool;
3593         struct queue_limits *data_limits;
3594
3595         limits->max_discard_sectors = pool->sectors_per_block;
3596
3597         /*
3598          * discard_granularity is just a hint, and not enforced.
3599          */
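        /*
         * Worked example (illustrative values): with a 64KiB pool block,
         * sectors_per_block is 128, so without passdown the granularity set
         * below is 128 << SECTOR_SHIFT = 65536 bytes, i.e. one pool block.
         */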
3600         if (pt->adjusted_pf.discard_passdown) {
3601                 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
3602                 limits->discard_granularity = max(data_limits->discard_granularity,
3603                                                   pool->sectors_per_block << SECTOR_SHIFT);
3604         } else
3605                 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
3606 }
3607
3608 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3609 {
3610         struct pool_c *pt = ti->private;
3611         struct pool *pool = pt->pool;
3612         sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3613
3614         /*
3615          * If max_sectors is smaller than pool->sectors_per_block, adjust it
3616          * to the highest possible power-of-2 factor of pool->sectors_per_block.
3617          * This is especially beneficial when the pool's data device is a RAID
3618          * device whose full stripe width matches pool->sectors_per_block:
3619          * even though partial RAID stripe-sized IOs will be issued to a
3620          * single RAID stripe, when aggregated they end on a full RAID stripe
3621          * boundary, which avoids cascading further partial stripe writes.
3622          */
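        /*
         * Worked example of the loop below (illustrative numbers): with
         * sectors_per_block = 384 and max_sectors = 256, 256 does not divide
         * 384, so it is decremented to 255 and then rounded down to 128,
         * which does divide 384 (is_factor() tests divisibility), and the
         * loop terminates.
         */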
3623         if (limits->max_sectors < pool->sectors_per_block) {
3624                 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
3625                         if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
3626                                 limits->max_sectors--;
3627                         limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
3628                 }
3629         }
3630
3631         /*
3632          * If the system-determined stacked limits are compatible with the
3633          * pool's blocksize (io_opt is a factor) do not override them.
3634          */
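        /*
         * For example (illustrative values): with io_opt = 1MiB (2048
         * sectors) and sectors_per_block = 128, 2048 is a multiple of 128,
         * so the stacked limits are left alone; otherwise io_opt is reset to
         * the pool's block size (and io_min to a compatible value) below.
         */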
3635         if (io_opt_sectors < pool->sectors_per_block ||
3636             !is_factor(io_opt_sectors, pool->sectors_per_block)) {
3637                 if (is_factor(pool->sectors_per_block, limits->max_sectors))
3638                         blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
3639                 else
3640                         blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
3641                 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3642         }
3643
3644         /*
3645          * pt->adjusted_pf is a staging area for the actual features to use.
3646          * They get transferred to the live pool in bind_control_target()
3647          * called from pool_preresume().
3648          */
3649         if (!pt->adjusted_pf.discard_enabled) {
3650                 /*
3651                  * Must explicitly disallow stacking discard limits, otherwise the
3652                  * block layer will stack them if the pool's data device has support.
3653                  * QUEUE_FLAG_DISCARD wouldn't be set, but there is no way for the
3654                  * user to see that, so make sure to set all discard limits to 0.
3655                  */
3656                 limits->discard_granularity = 0;
3657                 return;
3658         }
3659
3660         disable_passdown_if_not_supported(pt);
3661
3662         set_discard_limits(pt, limits);
3663 }
3664
3665 static struct target_type pool_target = {
3666         .name = "thin-pool",
3667         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3668                     DM_TARGET_IMMUTABLE,
3669         .version = {1, 14, 0},
3670         .module = THIS_MODULE,
3671         .ctr = pool_ctr,
3672         .dtr = pool_dtr,
3673         .map = pool_map,
3674         .presuspend = pool_presuspend,
3675         .presuspend_undo = pool_presuspend_undo,
3676         .postsuspend = pool_postsuspend,
3677         .preresume = pool_preresume,
3678         .resume = pool_resume,
3679         .message = pool_message,
3680         .status = pool_status,
3681         .merge = pool_merge,
3682         .iterate_devices = pool_iterate_devices,
3683         .io_hints = pool_io_hints,
3684 };
3685
3686 /*----------------------------------------------------------------
3687  * Thin target methods
3688  *--------------------------------------------------------------*/
3689 static void thin_get(struct thin_c *tc)
3690 {
3691         atomic_inc(&tc->refcount);
3692 }
3693
3694 static void thin_put(struct thin_c *tc)
3695 {
3696         if (atomic_dec_and_test(&tc->refcount))
3697                 complete(&tc->can_destroy);
3698 }
3699
3700 static void thin_dtr(struct dm_target *ti)
3701 {
3702         struct thin_c *tc = ti->private;
3703         unsigned long flags;
3704
3705         spin_lock_irqsave(&tc->pool->lock, flags);
3706         list_del_rcu(&tc->list);
3707         spin_unlock_irqrestore(&tc->pool->lock, flags);
3708         synchronize_rcu();
3709
3710         thin_put(tc);
3711         wait_for_completion(&tc->can_destroy);
3712
3713         mutex_lock(&dm_thin_pool_table.mutex);
3714
3715         __pool_dec(tc->pool);
3716         dm_pool_close_thin_device(tc->td);
3717         dm_put_device(ti, tc->pool_dev);
3718         if (tc->origin_dev)
3719                 dm_put_device(ti, tc->origin_dev);
3720         kfree(tc);
3721
3722         mutex_unlock(&dm_thin_pool_table.mutex);
3723 }
3724
3725 /*
3726  * Thin target parameters:
3727  *
3728  * <pool_dev> <dev_id> [origin_dev]
3729  *
3730  * pool_dev: the path to the pool (e.g., /dev/mapper/my_pool)
3731  * dev_id: the internal device identifier
3732  * origin_dev: a device external to the pool that should act as the origin
3733  *
3734  * If the pool device has discards disabled, they get disabled for the thin
3735  * device as well.
3736  */
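/*
 * Illustrative table lines (device names, lengths and ids are examples):
 *
 *   0 2097152 thin /dev/mapper/my_pool 0
 *   0 2097152 thin /dev/mapper/my_pool 1 /dev/sdb1
 *
 * The second form attaches /dev/sdb1 as an external, read-only origin.
 */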
3737 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3738 {
3739         int r;
3740         struct thin_c *tc;
3741         struct dm_dev *pool_dev, *origin_dev;
3742         struct mapped_device *pool_md;
3743         unsigned long flags;
3744
3745         mutex_lock(&dm_thin_pool_table.mutex);
3746
3747         if (argc != 2 && argc != 3) {
3748                 ti->error = "Invalid argument count";
3749                 r = -EINVAL;
3750                 goto out_unlock;
3751         }
3752
3753         tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
3754         if (!tc) {
3755                 ti->error = "Out of memory";
3756                 r = -ENOMEM;
3757                 goto out_unlock;
3758         }
3759         tc->thin_md = dm_table_get_md(ti->table);
3760         spin_lock_init(&tc->lock);
3761         INIT_LIST_HEAD(&tc->deferred_cells);
3762         bio_list_init(&tc->deferred_bio_list);
3763         bio_list_init(&tc->retry_on_resume_list);
3764         tc->sort_bio_list = RB_ROOT;
3765
3766         if (argc == 3) {
3767                 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
3768                 if (r) {
3769                         ti->error = "Error opening origin device";
3770                         goto bad_origin_dev;
3771                 }
3772                 tc->origin_dev = origin_dev;
3773         }
3774
3775         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
3776         if (r) {
3777                 ti->error = "Error opening pool device";
3778                 goto bad_pool_dev;
3779         }
3780         tc->pool_dev = pool_dev;
3781
3782         if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
3783                 ti->error = "Invalid device id";
3784                 r = -EINVAL;
3785                 goto bad_common;
3786         }
3787
3788         pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
3789         if (!pool_md) {
3790                 ti->error = "Couldn't get pool mapped device";
3791                 r = -EINVAL;
3792                 goto bad_common;
3793         }
3794
3795         tc->pool = __pool_table_lookup(pool_md);
3796         if (!tc->pool) {
3797                 ti->error = "Couldn't find pool object";
3798                 r = -EINVAL;
3799                 goto bad_pool_lookup;
3800         }
3801         __pool_inc(tc->pool);
3802
3803         if (get_pool_mode(tc->pool) == PM_FAIL) {
3804                 ti->error = "Couldn't open thin device, pool is in fail mode";
3805                 r = -EINVAL;
3806                 goto bad_pool;
3807         }
3808
3809         r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
3810         if (r) {
3811                 ti->error = "Couldn't open thin internal device";
3812                 goto bad_pool;
3813         }
3814
3815         r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
3816         if (r)
3817                 goto bad;
3818
3819         ti->num_flush_bios = 1;
3820         ti->flush_supported = true;
3821         ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
3822
3823         /* In case the pool supports discards, pass them on. */
3824         ti->discard_zeroes_data_unsupported = true;
3825         if (tc->pool->pf.discard_enabled) {
3826                 ti->discards_supported = true;
3827                 ti->num_discard_bios = 1;
3828                 /* Discard bios must be split on a block boundary */
3829                 ti->split_discard_bios = true;
3830         }
3831
3832         mutex_unlock(&dm_thin_pool_table.mutex);
3833
3834         spin_lock_irqsave(&tc->pool->lock, flags);
3835         if (tc->pool->suspended) {
3836                 spin_unlock_irqrestore(&tc->pool->lock, flags);
3837                 mutex_lock(&dm_thin_pool_table.mutex); /* reacquire for __pool_dec */
3838                 ti->error = "Unable to activate thin device while pool is suspended";
3839                 r = -EINVAL;
3840                 goto bad;
3841         }
3842         atomic_set(&tc->refcount, 1);
3843         init_completion(&tc->can_destroy);
3844         list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
3845         spin_unlock_irqrestore(&tc->pool->lock, flags);
3846         /*
3847          * This synchronize_rcu() call is needed here, otherwise we risk a
3848          * wake_worker() call finding no bios to process (because the newly
3849          * added tc isn't yet visible).  So this reduces latency, since we
3850          * aren't then dependent on the periodic commit to wake_worker().
3851          */
3852         synchronize_rcu();
3853
3854         dm_put(pool_md);
3855
3856         return 0;
3857
3858 bad:
3859         dm_pool_close_thin_device(tc->td);
3860 bad_pool:
3861         __pool_dec(tc->pool);
3862 bad_pool_lookup:
3863         dm_put(pool_md);
3864 bad_common:
3865         dm_put_device(ti, tc->pool_dev);
3866 bad_pool_dev:
3867         if (tc->origin_dev)
3868                 dm_put_device(ti, tc->origin_dev);
3869 bad_origin_dev:
3870         kfree(tc);
3871 out_unlock:
3872         mutex_unlock(&dm_thin_pool_table.mutex);
3873
3874         return r;
3875 }
3876
3877 static int thin_map(struct dm_target *ti, struct bio *bio)
3878 {
3879         bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
3880
3881         return thin_bio_map(ti, bio);
3882 }
3883
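/*
 * End-of-io hook: release the deferred-set entries taken when the bio was
 * mapped.  Any mappings that were waiting on this io are moved along via
 * __complete_mapping_preparation(), and quiesced discards are queued for
 * the worker thread.
 */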
3884 static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
3885 {
3886         unsigned long flags;
3887         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
3888         struct list_head work;
3889         struct dm_thin_new_mapping *m, *tmp;
3890         struct pool *pool = h->tc->pool;
3891
3892         if (h->shared_read_entry) {
3893                 INIT_LIST_HEAD(&work);
3894                 dm_deferred_entry_dec(h->shared_read_entry, &work);
3895
3896                 spin_lock_irqsave(&pool->lock, flags);
3897                 list_for_each_entry_safe(m, tmp, &work, list) {
3898                         list_del(&m->list);
3899                         __complete_mapping_preparation(m);
3900                 }
3901                 spin_unlock_irqrestore(&pool->lock, flags);
3902         }
3903
3904         if (h->all_io_entry) {
3905                 INIT_LIST_HEAD(&work);
3906                 dm_deferred_entry_dec(h->all_io_entry, &work);
3907                 if (!list_empty(&work)) {
3908                         spin_lock_irqsave(&pool->lock, flags);
3909                         list_for_each_entry_safe(m, tmp, &work, list)
3910                                 list_add_tail(&m->list, &pool->prepared_discards);
3911                         spin_unlock_irqrestore(&pool->lock, flags);
3912                         wake_worker(pool);
3913                 }
3914         }
3915
3916         return 0;
3917 }
3918
3919 static void thin_presuspend(struct dm_target *ti)
3920 {
3921         struct thin_c *tc = ti->private;
3922
3923         if (dm_noflush_suspending(ti))
3924                 noflush_work(tc, do_noflush_start);
3925 }
3926
3927 static void thin_postsuspend(struct dm_target *ti)
3928 {
3929         struct thin_c *tc = ti->private;
3930
3931         /*
3932          * The dm_noflush_suspending flag has been cleared by now, so
3933          * unfortunately we must always run this.
3934          */
3935         noflush_work(tc, do_noflush_stop);
3936 }
3937
3938 static int thin_preresume(struct dm_target *ti)
3939 {
3940         struct thin_c *tc = ti->private;
3941
3942         if (tc->origin_dev)
3943                 tc->origin_size = get_dev_size(tc->origin_dev->bdev);
3944
3945         return 0;
3946 }
3947
3948 /*
3949  * <nr mapped sectors> <highest mapped sector>
3950  */
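/*
 * Illustrative INFO output (example values): a thin device with 8192 mapped
 * 64KiB blocks whose highest mapped block is 16383 reports
 * "1048576 2097151".
 */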
3951 static void thin_status(struct dm_target *ti, status_type_t type,
3952                         unsigned status_flags, char *result, unsigned maxlen)
3953 {
3954         int r;
3955         ssize_t sz = 0;
3956         dm_block_t mapped, highest;
3957         char buf[BDEVNAME_SIZE];
3958         struct thin_c *tc = ti->private;
3959
3960         if (get_pool_mode(tc->pool) == PM_FAIL) {
3961                 DMEMIT("Fail");
3962                 return;
3963         }
3964
3965         if (!tc->td)
3966                 DMEMIT("-");
3967         else {
3968                 switch (type) {
3969                 case STATUSTYPE_INFO:
3970                         r = dm_thin_get_mapped_count(tc->td, &mapped);
3971                         if (r) {
3972                                 DMERR("dm_thin_get_mapped_count returned %d", r);
3973                                 goto err;
3974                         }
3975
3976                         r = dm_thin_get_highest_mapped_block(tc->td, &highest);
3977                         if (r < 0) {
3978                                 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
3979                                 goto err;
3980                         }
3981
3982                         DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
3983                         if (r)
3984                                 DMEMIT("%llu", ((highest + 1) *
3985                                                 tc->pool->sectors_per_block) - 1);
3986                         else
3987                                 DMEMIT("-");
3988                         break;
3989
3990                 case STATUSTYPE_TABLE:
3991                         DMEMIT("%s %lu",
3992                                format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
3993                                (unsigned long) tc->dev_id);
3994                         if (tc->origin_dev)
3995                                 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
3996                         break;
3997                 }
3998         }
3999
4000         return;
4001
4002 err:
4003         DMEMIT("Error");
4004 }
4005
4006 static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
4007                       struct bio_vec *biovec, int max_size)
4008 {
4009         struct thin_c *tc = ti->private;
4010         struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
4011
4012         if (!q->merge_bvec_fn)
4013                 return max_size;
4014
4015         bvm->bi_bdev = tc->pool_dev->bdev;
4016         bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
4017
4018         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
4019 }
4020
4021 static int thin_iterate_devices(struct dm_target *ti,
4022                                 iterate_devices_callout_fn fn, void *data)
4023 {
4024         sector_t blocks;
4025         struct thin_c *tc = ti->private;
4026         struct pool *pool = tc->pool;
4027
4028         /*
4029          * We can't call dm_pool_get_data_dev_size() since that blocks.  So
4030          * we follow a more convoluted path through to the pool's target.
4031          */
4032         if (!pool->ti)
4033                 return 0;       /* nothing is bound */
4034
4035         blocks = pool->ti->len;
4036         (void) sector_div(blocks, pool->sectors_per_block);
4037         if (blocks)
4038                 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
4039
4040         return 0;
4041 }
4042
4043 static struct target_type thin_target = {
4044         .name = "thin",
4045         .version = {1, 14, 0},
4046         .module = THIS_MODULE,
4047         .ctr = thin_ctr,
4048         .dtr = thin_dtr,
4049         .map = thin_map,
4050         .end_io = thin_endio,
4051         .preresume = thin_preresume,
4052         .presuspend = thin_presuspend,
4053         .postsuspend = thin_postsuspend,
4054         .status = thin_status,
4055         .merge = thin_merge,
4056         .iterate_devices = thin_iterate_devices,
4057 };
4058
4059 /*----------------------------------------------------------------*/
4060
4061 static int __init dm_thin_init(void)
4062 {
4063         int r;
4064
4065         pool_table_init();
4066
4067         r = dm_register_target(&thin_target);
4068         if (r)
4069                 return r;
4070
4071         r = dm_register_target(&pool_target);
4072         if (r)
4073                 goto bad_pool_target;
4074
4075         r = -ENOMEM;
4076
4077         _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4078         if (!_new_mapping_cache)
4079                 goto bad_new_mapping_cache;
4080
4081         return 0;
4082
4083 bad_new_mapping_cache:
4084         dm_unregister_target(&pool_target);
4085 bad_pool_target:
4086         dm_unregister_target(&thin_target);
4087
4088         return r;
4089 }
4090
4091 static void dm_thin_exit(void)
4092 {
4093         dm_unregister_target(&thin_target);
4094         dm_unregister_target(&pool_target);
4095
4096         kmem_cache_destroy(_new_mapping_cache);
4097 }
4098
4099 module_init(dm_thin_init);
4100 module_exit(dm_thin_exit);
4101
4102 module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
4103 MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
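/*
 * Assuming the driver is built as the dm-thin-pool module, the timeout can
 * also be changed at runtime, e.g. (illustrative):
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */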
4104
4105 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
4106 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
4107 MODULE_LICENSE("GPL");