Add RT Linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / drivers / md / dm-snap.c
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))
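
/*
 * The hash above relies on DM_TRACKED_CHUNK_HASH_SIZE being a power of
 * two: ANDing with (size - 1) keeps the low bits of the chunk number,
 * e.g. chunk 18 (binary 10010) lands in bucket 2 of the 16-entry table.
 */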

struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        /* Protected by "lock" */
        sector_t exception_start_sequence;

        /* Protected by kcopyd single-threaded callback */
        sector_t exception_complete_sequence;

        /*
         * A list of pending exceptions that completed out of order.
         * Protected by kcopyd single-threaded callback.
         */
        struct list_head out_of_order_list;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *      => don't load the target; abort.
         * - Header does not have "valid" flag set
         *      => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *      => don't load the target; abort.
         *         (We can't use the intermediate origin state.)
         * - I/O error while merging
         *      => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}
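
/*
 * chunk_shift is log2 of the chunk size in sectors (the store requires
 * a power-of-two chunk size), so the shift is a multiply by chunk_size:
 * with 16-sector (8KiB) chunks, chunk 3 starts at sector 3 << 4 == 48.
 */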

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;

        /* Set if there was a copying error. */
        int copy_error;

        /* A sequence number, used for in-order completion. */
        sector_t exception_sequence;

        struct list_head out_of_order_entry;

        /*
         * For writing a complete chunk, bypassing the copy.
         */
        struct bio *full_bio;
        bio_end_io_t *full_bio_end_io;
        void *full_bio_private;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

        c->chunk = chunk;

        spin_lock_irq(&s->tracked_chunk_lock);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}
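
/*
 * Callers use this before inserting a completed exception (or before
 * merging a chunk back into the origin) so that any read still in
 * flight to the chunk's old location has drained before it is remapped.
 */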

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * This structure is allocated for each origin target
 */
struct dm_origin {
        struct dm_dev *dev;
        struct dm_target *ti;
        unsigned split_boundary;
        struct list_head hash_list;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct list_head *_dm_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory for _origins");
                return -ENOMEM;
        }
        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);

        _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                              GFP_KERNEL);
        if (!_dm_origins) {
                DMERR("unable to allocate memory for _dm_origins");
                kfree(_origins);
                return -ENOMEM;
        }
        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_dm_origins + i);

        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
        kfree(_dm_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct dm_origin *o;

        ol = &_dm_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->dev->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_dm_origin(struct dm_origin *o)
{
        struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
        list_add_tail(&o->hash_list, sl);
}

static void __remove_dm_origin(struct dm_origin *o)
{
        list_del(&o->hash_list);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest first, smallest last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}
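
/*
 * Because the low hash_shift bits are discarded, an aligned group of
 * 2^hash_shift consecutive chunks shares one bucket, keeping the
 * entries that dm_insert_exception() may coalesce on a single list.
 */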

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

static struct dm_exception *alloc_completed_exception(gfp_t gfp)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, gfp);
        if (!e && gfp == GFP_NOIO)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic();
        atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
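
/*
 * Example of the coalescing above: with an existing exception
 * (old_chunk 10, new_chunk 20, consecutive count 1) covering chunks
 * 10-11, inserting (old 12, new 22) just bumps the count to 2, so the
 * single entry then covers chunks 10-12 without a new allocation.
 */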

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception(GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        max_buckets = calc_max_buckets();

        hash_size = cow_dev_size >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}
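
/*
 * Example sizing, assuming a 64-bit build where a struct list_head is
 * 16 bytes: a 1GiB COW device with 4KiB chunks has 262144 chunks,
 * calc_max_buckets() caps the completed table at 131072 buckets, and
 * the pending table is then 131072 >> 3 == 16384 buckets.
 */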

static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_atomic();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
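
/*
 * The handshake with the merge path: snapshot_merge_next_chunks()
 * checks SHUTDOWN_MERGE before issuing the next copy and then calls
 * merge_shutdown(), which clears RUNNING_MERGE and wakes this waiter.
 */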

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_bios = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_bios = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate private snapshot structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        s->exception_start_sequence = 0;
        s->exception_complete_sequence = 0;
        INIT_LIST_HEAD(&s->out_of_order_list);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        ti->private = s;
        ti->num_flush_bios = num_flush_bios;
        ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                goto bad_read_metadata;
        }

        r = dm_set_target_max_io_len(ti, s->store->chunk_size);
        if (r)
                goto bad_read_metadata;

        return 0;

bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Retry a list of origin bios.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        struct bio *full_bio = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception(GFP_NOIO);
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        full_bio = pe->full_bio;
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
                atomic_inc(&full_bio->bi_remaining);
        }
        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error) {
                if (full_bio)
                        bio_io_error(full_bio);
                error_bios(snapshot_bios);
        } else {
                if (full_bio)
                        bio_endio(full_bio, 0);
                flush_bios(snapshot_bios);
        }

        retry_origin_bios(s, origin_bios);

        free_pending_exception(pe);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

static void complete_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        if (unlikely(pe->copy_error))
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        pe->copy_error = read_err || write_err;

        if (pe->exception_sequence == s->exception_complete_sequence) {
                s->exception_complete_sequence++;
                complete_exception(pe);

                while (!list_empty(&s->out_of_order_list)) {
                        pe = list_entry(s->out_of_order_list.next,
                                        struct dm_snap_pending_exception, out_of_order_entry);
                        if (pe->exception_sequence != s->exception_complete_sequence)
                                break;
                        s->exception_complete_sequence++;
                        list_del(&pe->out_of_order_entry);
                        complete_exception(pe);
                }
        } else {
                struct list_head *lh;
                struct dm_snap_pending_exception *pe2;

                list_for_each_prev(lh, &s->out_of_order_list) {
                        pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
                        if (pe2->exception_sequence < pe->exception_sequence)
                                break;
                }
                list_add(&pe->out_of_order_entry, lh);
        }
}
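
/*
 * Exceptions are thus completed strictly in sequence order: a copy that
 * finishes early is parked on out_of_order_list (kept sorted by
 * sequence number) and drained once the missing earlier completions
 * arrive.
 */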

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio, int error)
{
        void *callback_data = bio->bi_private;

        dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
                           struct bio *bio)
{
        struct dm_snapshot *s = pe->snap;
        void *callback_data;

        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
        pe->full_bio_private = bio->bi_private;

        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);

        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;

        generic_make_request(bio);
}
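
/*
 * Whole-chunk writes skip the kcopyd copy entirely: the bio itself is
 * written straight to the COW device, and full_bio_end_io() feeds the
 * result back into the normal copy_callback() path via
 * dm_kcopyd_do_callback().
 */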

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;
        pe->full_bio = NULL;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        pe->exception_sequence = s->exception_start_sequence++;

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_iter.bi_sector =
                chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
                                (chunk - e->old_chunk)) +
                (bio->bi_iter.bi_sector & s->store->chunk_mask);
}
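
/*
 * Example: with 16-sector chunks, an exception mapping old chunk 100 to
 * new chunk 200 redirects a write to sector 1605 (chunk 100, offset 5)
 * to sector 200 * 16 + 5 == 3205 on the COW device.
 */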
1668
1669 static int snapshot_map(struct dm_target *ti, struct bio *bio)
1670 {
1671         struct dm_exception *e;
1672         struct dm_snapshot *s = ti->private;
1673         int r = DM_MAPIO_REMAPPED;
1674         chunk_t chunk;
1675         struct dm_snap_pending_exception *pe = NULL;
1676
1677         init_tracked_chunk(bio);
1678
1679         if (bio->bi_rw & REQ_FLUSH) {
1680                 bio->bi_bdev = s->cow->bdev;
1681                 return DM_MAPIO_REMAPPED;
1682         }
1683
1684         chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1685
1686         /* Full snapshots are not usable */
1687         /* To get here the table must be live so s->active is always set. */
1688         if (!s->valid)
1689                 return -EIO;
1690
1691         /* FIXME: should only take write lock if we need
1692          * to copy an exception */
1693         down_write(&s->lock);
1694
1695         if (!s->valid) {
1696                 r = -EIO;
1697                 goto out_unlock;
1698         }
1699
1700         /* If the block is already remapped - use that, else remap it */
1701         e = dm_lookup_exception(&s->complete, chunk);
1702         if (e) {
1703                 remap_exception(s, e, bio, chunk);
1704                 goto out_unlock;
1705         }
1706
1707         /*
1708          * Write to snapshot - higher level takes care of RW/RO
1709          * flags so we should only get this if we are
1710          * writeable.
1711          */
1712         if (bio_rw(bio) == WRITE) {
1713                 pe = __lookup_pending_exception(s, chunk);
1714                 if (!pe) {
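			/*
			 * Allocating a pending exception may sleep, so the
			 * lock is dropped first; everything is rechecked
			 * (s->valid and the complete table) once the lock
			 * is retaken below.
			 */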
1715                         up_write(&s->lock);
1716                         pe = alloc_pending_exception(s);
1717                         down_write(&s->lock);
1718
1719                         if (!s->valid) {
1720                                 free_pending_exception(pe);
1721                                 r = -EIO;
1722                                 goto out_unlock;
1723                         }
1724
1725                         e = dm_lookup_exception(&s->complete, chunk);
1726                         if (e) {
1727                                 free_pending_exception(pe);
1728                                 remap_exception(s, e, bio, chunk);
1729                                 goto out_unlock;
1730                         }
1731
1732                         pe = __find_pending_exception(s, pe, chunk);
1733                         if (!pe) {
1734                                 __invalidate_snapshot(s, -ENOMEM);
1735                                 r = -EIO;
1736                                 goto out_unlock;
1737                         }
1738                 }
1739
1740                 remap_exception(s, &pe->e, bio, chunk);
1741
1742                 r = DM_MAPIO_SUBMITTED;
1743
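		/*
		 * A write that covers an entire chunk carries all of the
		 * chunk's new data itself, so it can be submitted straight
		 * to the COW device via start_full_bio() instead of going
		 * through a kcopyd origin-to-cow copy.
		 */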
1744                 if (!pe->started &&
1745                     bio->bi_iter.bi_size ==
1746                     (s->store->chunk_size << SECTOR_SHIFT)) {
1747                         pe->started = 1;
1748                         up_write(&s->lock);
1749                         start_full_bio(pe, bio);
1750                         goto out;
1751                 }
1752
1753                 bio_list_add(&pe->snapshot_bios, bio);
1754
1755                 if (!pe->started) {
1756                         /* this is protected by snap->lock */
1757                         pe->started = 1;
1758                         up_write(&s->lock);
1759                         start_copy(pe);
1760                         goto out;
1761                 }
1762         } else {
1763                 bio->bi_bdev = s->origin->bdev;
1764                 track_chunk(s, bio, chunk);
1765         }
1766
1767 out_unlock:
1768         up_write(&s->lock);
1769 out:
1770         return r;
1771 }
1772
1773 /*
1774  * A snapshot-merge target behaves like a combination of a snapshot
1775  * target and a snapshot-origin target.  It only generates new
1776  * exceptions in other snapshots and not in the one that is being
1777  * merged.
1778  *
1779  * For each chunk, if there is an existing exception, it is used to
1780  * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1781  * which in turn might generate exceptions in other snapshots.
1782  * If merging is currently taking place on the chunk in question, the
1783  * I/O is deferred by adding it to s->bios_queued_during_merge.
1784  */
1785 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
1786 {
1787         struct dm_exception *e;
1788         struct dm_snapshot *s = ti->private;
1789         int r = DM_MAPIO_REMAPPED;
1790         chunk_t chunk;
1791
1792         init_tracked_chunk(bio);
1793
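	/*
	 * The merge target submits two flush bios per flush (set up in
	 * snapshot_ctr()): bio number 0 goes to the origin, the other to
	 * the COW device.
	 */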
1794         if (bio->bi_rw & REQ_FLUSH) {
1795                 if (!dm_bio_get_target_bio_nr(bio))
1796                         bio->bi_bdev = s->origin->bdev;
1797                 else
1798                         bio->bi_bdev = s->cow->bdev;
1799                 return DM_MAPIO_REMAPPED;
1800         }
1801
1802         chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1803
1804         down_write(&s->lock);
1805
1806         /* Full merging snapshots are redirected to the origin */
1807         if (!s->valid)
1808                 goto redirect_to_origin;
1809
1810         /* If the block is already remapped - use that */
1811         e = dm_lookup_exception(&s->complete, chunk);
1812         if (e) {
1813                 /* Queue writes overlapping with chunks being merged */
1814                 if (bio_rw(bio) == WRITE &&
1815                     chunk >= s->first_merging_chunk &&
1816                     chunk < (s->first_merging_chunk +
1817                              s->num_merging_chunks)) {
1818                         bio->bi_bdev = s->origin->bdev;
1819                         bio_list_add(&s->bios_queued_during_merge, bio);
1820                         r = DM_MAPIO_SUBMITTED;
1821                         goto out_unlock;
1822                 }
1823
1824                 remap_exception(s, e, bio, chunk);
1825
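		/*
		 * Writes to remapped chunks are tracked so that the merge
		 * worker can wait for in-flight I/O before merging a chunk
		 * back into the origin.
		 */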
1826                 if (bio_rw(bio) == WRITE)
1827                         track_chunk(s, bio, chunk);
1828                 goto out_unlock;
1829         }
1830
1831 redirect_to_origin:
1832         bio->bi_bdev = s->origin->bdev;
1833
1834         if (bio_rw(bio) == WRITE) {
1835                 up_write(&s->lock);
1836                 return do_origin(s->origin, bio);
1837         }
1838
1839 out_unlock:
1840         up_write(&s->lock);
1841
1842         return r;
1843 }
1844
1845 static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
1846 {
1847         struct dm_snapshot *s = ti->private;
1848
1849         if (is_bio_tracked(bio))
1850                 stop_tracking_chunk(s, bio);
1851
1852         return 0;
1853 }
1854
1855 static void snapshot_merge_presuspend(struct dm_target *ti)
1856 {
1857         struct dm_snapshot *s = ti->private;
1858
1859         stop_merge(s);
1860 }
1861
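/*
 * A table reload creates a new snapshot target sharing the COW device
 * with the old one, and the exception tables must be handed over from
 * old to new.  Refuse to resume the handover source itself, and refuse
 * to resume the destination until the source has been suspended.
 */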
1862 static int snapshot_preresume(struct dm_target *ti)
1863 {
1864         int r = 0;
1865         struct dm_snapshot *s = ti->private;
1866         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
1867
1868         down_read(&_origins_lock);
1869         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1870         if (snap_src && snap_dest) {
1871                 down_read(&snap_src->lock);
1872                 if (s == snap_src) {
1873                         DMERR("Unable to resume snapshot source until "
1874                               "handover completes.");
1875                         r = -EINVAL;
1876                 } else if (!dm_suspended(snap_src->ti)) {
1877                         DMERR("Unable to perform snapshot handover until "
1878                               "source is suspended.");
1879                         r = -EINVAL;
1880                 }
1881                 up_read(&snap_src->lock);
1882         }
1883         up_read(&_origins_lock);
1884
1885         return r;
1886 }
1887
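/*
 * Completes the exception handover checked for in snapshot_preresume():
 * the origin (or merging snapshot) is internally suspended so that no
 * new exceptions can be triggered, any running merge is stopped, the
 * exception tables are moved across under both snapshots' locks, then
 * merging and the origin are resumed and the snapshot is marked active.
 */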
1888 static void snapshot_resume(struct dm_target *ti)
1889 {
1890         struct dm_snapshot *s = ti->private;
1891         struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL;
1892         struct dm_origin *o;
1893         struct mapped_device *origin_md = NULL;
1894         bool must_restart_merging = false;
1895
1896         down_read(&_origins_lock);
1897
1898         o = __lookup_dm_origin(s->origin->bdev);
1899         if (o)
1900                 origin_md = dm_table_get_md(o->ti->table);
1901         if (!origin_md) {
1902                 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging);
1903                 if (snap_merging)
1904                         origin_md = dm_table_get_md(snap_merging->ti->table);
1905         }
1906         if (origin_md == dm_table_get_md(ti->table))
1907                 origin_md = NULL;
1908         if (origin_md) {
1909                 if (dm_hold(origin_md))
1910                         origin_md = NULL;
1911         }
1912
1913         up_read(&_origins_lock);
1914
1915         if (origin_md) {
1916                 dm_internal_suspend_fast(origin_md);
1917                 if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) {
1918                         must_restart_merging = true;
1919                         stop_merge(snap_merging);
1920                 }
1921         }
1922
1923         down_read(&_origins_lock);
1924
1925         (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
1926         if (snap_src && snap_dest) {
1927                 down_write(&snap_src->lock);
1928                 down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
1929                 __handover_exceptions(snap_src, snap_dest);
1930                 up_write(&snap_dest->lock);
1931                 up_write(&snap_src->lock);
1932         }
1933
1934         up_read(&_origins_lock);
1935
1936         if (origin_md) {
1937                 if (must_restart_merging)
1938                         start_merge(snap_merging);
1939                 dm_internal_resume_fast(origin_md);
1940                 dm_put(origin_md);
1941         }
1942
1943         /* Now we have correct chunk size, reregister */
1944         reregister_snapshot(s);
1945
1946         down_write(&s->lock);
1947         s->active = 1;
1948         up_write(&s->lock);
1949 }
1950
1951 static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
1952 {
1953         uint32_t min_chunksize;
1954
1955         down_read(&_origins_lock);
1956         min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
1957         up_read(&_origins_lock);
1958
1959         return min_chunksize;
1960 }
1961
1962 static void snapshot_merge_resume(struct dm_target *ti)
1963 {
1964         struct dm_snapshot *s = ti->private;
1965
1966         /*
1967          * Handover exceptions from existing snapshot.
1968          */
1969         snapshot_resume(ti);
1970
1971         /*
1972          * snapshot-merge acts as an origin, so set ti->max_io_len
1973          */
1974         ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
1975
1976         start_merge(s);
1977 }
1978
1979 static void snapshot_status(struct dm_target *ti, status_type_t type,
1980                             unsigned status_flags, char *result, unsigned maxlen)
1981 {
1982         unsigned sz = 0;
1983         struct dm_snapshot *snap = ti->private;
1984
1985         switch (type) {
1986         case STATUSTYPE_INFO:
1987
1988                 down_write(&snap->lock);
1989
1990                 if (!snap->valid)
1991                         DMEMIT("Invalid");
1992                 else if (snap->merge_failed)
1993                         DMEMIT("Merge failed");
1994                 else {
1995                         if (snap->store->type->usage) {
1996                                 sector_t total_sectors, sectors_allocated,
1997                                          metadata_sectors;
1998                                 snap->store->type->usage(snap->store,
1999                                                          &total_sectors,
2000                                                          &sectors_allocated,
2001                                                          &metadata_sectors);
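				/*
				 * Reported as <sectors allocated>/<total
				 * sectors> <metadata sectors>.
				 */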
2002                                 DMEMIT("%llu/%llu %llu",
2003                                        (unsigned long long)sectors_allocated,
2004                                        (unsigned long long)total_sectors,
2005                                        (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
2009                 }
2010
2011                 up_write(&snap->lock);
2012
2013                 break;
2014
2015         case STATUSTYPE_TABLE:
2016                 /*
2017                  * kdevname returns a static pointer so we need
2018                  * to make private copies if the output is to
2019                  * make sense.
2020                  */
2021                 DMEMIT("%s %s", snap->origin->name, snap->cow->name);
2022                 snap->store->type->status(snap->store, type, result + sz,
2023                                           maxlen - sz);
2024                 break;
2025         }
2026 }
2027
2028 static int snapshot_iterate_devices(struct dm_target *ti,
2029                                     iterate_devices_callout_fn fn, void *data)
2030 {
2031         struct dm_snapshot *snap = ti->private;
2032         int r;
2033
2034         r = fn(ti, snap->origin, 0, ti->len, data);
2035
2036         if (!r)
2037                 r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
2038
2039         return r;
2040 }
2041
2043 /*-----------------------------------------------------------------
2044  * Origin methods
2045  *---------------------------------------------------------------*/
2046
2047 /*
2048  * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio is ignored.  The caller may submit it immediately.
2050  * (No remapping actually occurs as the origin is always a direct linear
2051  * map.)
2052  *
2053  * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
2054  * and any supplied bio is added to a list to be submitted once all
2055  * the necessary exceptions exist.
2056  */
2057 static int __origin_write(struct list_head *snapshots, sector_t sector,
2058                           struct bio *bio)
2059 {
2060         int r = DM_MAPIO_REMAPPED;
2061         struct dm_snapshot *snap;
2062         struct dm_exception *e;
2063         struct dm_snap_pending_exception *pe;
2064         struct dm_snap_pending_exception *pe_to_start_now = NULL;
2065         struct dm_snap_pending_exception *pe_to_start_last = NULL;
2066         chunk_t chunk;
2067
2068         /* Do all the snapshots on this origin */
2069         list_for_each_entry (snap, snapshots, list) {
2070                 /*
2071                  * Don't make new exceptions in a merging snapshot
2072                  * because it has effectively been deleted
2073                  */
2074                 if (dm_target_is_snapshot_merge(snap->ti))
2075                         continue;
2076
2077                 down_write(&snap->lock);
2078
2079                 /* Only deal with valid and active snapshots */
2080                 if (!snap->valid || !snap->active)
2081                         goto next_snapshot;
2082
2083                 /* Nothing to do if writing beyond end of snapshot */
2084                 if (sector >= dm_table_get_size(snap->ti->table))
2085                         goto next_snapshot;
2086
2087                 /*
2088                  * Remember, different snapshots can have
2089                  * different chunk sizes.
2090                  */
2091                 chunk = sector_to_chunk(snap->store, sector);
2092
2093                 /*
2094                  * Check exception table to see if block
2095                  * is already remapped in this snapshot
2096                  * and trigger an exception if not.
2097                  */
2098                 e = dm_lookup_exception(&snap->complete, chunk);
2099                 if (e)
2100                         goto next_snapshot;
2101
2102                 pe = __lookup_pending_exception(snap, chunk);
2103                 if (!pe) {
2104                         up_write(&snap->lock);
2105                         pe = alloc_pending_exception(snap);
2106                         down_write(&snap->lock);
2107
2108                         if (!snap->valid) {
2109                                 free_pending_exception(pe);
2110                                 goto next_snapshot;
2111                         }
2112
2113                         e = dm_lookup_exception(&snap->complete, chunk);
2114                         if (e) {
2115                                 free_pending_exception(pe);
2116                                 goto next_snapshot;
2117                         }
2118
2119                         pe = __find_pending_exception(snap, pe, chunk);
2120                         if (!pe) {
2121                                 __invalidate_snapshot(snap, -ENOMEM);
2122                                 goto next_snapshot;
2123                         }
2124                 }
2125
2126                 r = DM_MAPIO_SUBMITTED;
2127
2128                 /*
2129                  * If an origin bio was supplied, queue it to wait for the
2130                  * completion of this exception, and start this one last,
2131                  * at the end of the function.
2132                  */
2133                 if (bio) {
2134                         bio_list_add(&pe->origin_bios, bio);
2135                         bio = NULL;
2136
2137                         if (!pe->started) {
2138                                 pe->started = 1;
2139                                 pe_to_start_last = pe;
2140                         }
2141                 }
2142
2143                 if (!pe->started) {
2144                         pe->started = 1;
2145                         pe_to_start_now = pe;
2146                 }
2147
2148 next_snapshot:
2149                 up_write(&snap->lock);
2150
2151                 if (pe_to_start_now) {
2152                         start_copy(pe_to_start_now);
2153                         pe_to_start_now = NULL;
2154                 }
2155         }
2156
2157         /*
2158          * Submit the exception against which the bio is queued last,
2159          * to give the other exceptions a head start.
2160          */
2161         if (pe_to_start_last)
2162                 start_copy(pe_to_start_last);
2163
2164         return r;
2165 }
2166
2167 /*
2168  * Called on a write from the origin driver.
2169  */
2170 static int do_origin(struct dm_dev *origin, struct bio *bio)
2171 {
2172         struct origin *o;
2173         int r = DM_MAPIO_REMAPPED;
2174
2175         down_read(&_origins_lock);
2176         o = __lookup_origin(origin->bdev);
2177         if (o)
2178                 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
2179         up_read(&_origins_lock);
2180
2181         return r;
2182 }
2183
2184 /*
2185  * Trigger exceptions in all non-merging snapshots.
2186  *
2187  * The chunk size of the merging snapshot may be larger than the chunk
2188  * size of some other snapshot so we may need to reallocate multiple
2189  * chunks in other snapshots.
2190  *
2191  * We scan all the overlapping exceptions in the other snapshots.
2192  * Returns 1 if anything was reallocated and must be waited for,
2193  * otherwise returns 0.
2194  *
2195  * size must be a multiple of merging_snap's chunk_size.
2196  */
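/*
 * Example (illustrative): if the merging snapshot uses 64-sector chunks
 * and another snapshot of the same origin uses 16-sector chunks,
 * ti->max_io_len is 16, so one 64-sector extent results in four
 * __origin_write() calls and at most four new exceptions in the other
 * snapshot.
 */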
2197 static int origin_write_extent(struct dm_snapshot *merging_snap,
2198                                sector_t sector, unsigned size)
2199 {
2200         int must_wait = 0;
2201         sector_t n;
2202         struct origin *o;
2203
2204         /*
2205          * The origin's __minimum_chunk_size() got stored in max_io_len
2206          * by snapshot_merge_resume().
2207          */
2208         down_read(&_origins_lock);
2209         o = __lookup_origin(merging_snap->origin->bdev);
2210         for (n = 0; n < size; n += merging_snap->ti->max_io_len)
2211                 if (__origin_write(&o->snapshots, sector + n, NULL) ==
2212                     DM_MAPIO_SUBMITTED)
2213                         must_wait = 1;
2214         up_read(&_origins_lock);
2215
2216         return must_wait;
2217 }
2218
2219 /*
2220  * Origin: maps a linear range of a device, with hooks for snapshotting.
2221  */
2222
2223 /*
2224  * Construct an origin mapping: <dev_path>
2225  * The context for an origin is merely a 'struct dm_dev *'
2226  * pointing to the real device.
2227  */
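/*
 * Illustrative usage (device names assumed):
 *
 *   dmsetup create base-origin \
 *     --table "0 <origin sectors> snapshot-origin /dev/vg/base"
 */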
2228 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2229 {
2230         int r;
2231         struct dm_origin *o;
2232
2233         if (argc != 1) {
2234                 ti->error = "origin: incorrect number of arguments";
2235                 return -EINVAL;
2236         }
2237
2238         o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL);
2239         if (!o) {
2240                 ti->error = "Cannot allocate private origin structure";
2241                 r = -ENOMEM;
2242                 goto bad_alloc;
2243         }
2244
2245         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev);
2246         if (r) {
2247                 ti->error = "Cannot get target device";
2248                 goto bad_open;
2249         }
2250
2251         o->ti = ti;
2252         ti->private = o;
2253         ti->num_flush_bios = 1;
2254
2255         return 0;
2256
2257 bad_open:
2258         kfree(o);
2259 bad_alloc:
2260         return r;
2261 }
2262
2263 static void origin_dtr(struct dm_target *ti)
2264 {
2265         struct dm_origin *o = ti->private;
2266
2267         dm_put_device(ti, o->dev);
2268         kfree(o);
2269 }
2270
2271 static int origin_map(struct dm_target *ti, struct bio *bio)
2272 {
2273         struct dm_origin *o = ti->private;
2274         unsigned available_sectors;
2275
2276         bio->bi_bdev = o->dev->bdev;
2277
2278         if (unlikely(bio->bi_rw & REQ_FLUSH))
2279                 return DM_MAPIO_REMAPPED;
2280
2281         if (bio_rw(bio) != WRITE)
2282                 return DM_MAPIO_REMAPPED;
2283
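	/*
	 * A write must not cross a chunk boundary of the snapshot with the
	 * smallest chunk size (o->split_boundary, set in origin_resume()):
	 * trim the bio at the boundary and let the device-mapper core
	 * resubmit the remainder via dm_accept_partial_bio().
	 */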
2284         available_sectors = o->split_boundary -
2285                 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
2286
2287         if (bio_sectors(bio) > available_sectors)
2288                 dm_accept_partial_bio(bio, available_sectors);
2289
2290         /* Only tell snapshots if this is a write */
2291         return do_origin(o->dev, bio);
2292 }
2293
/*
 * Set the origin's write-splitting boundary to the minimum of all the
 * snapshots' chunk sizes.
 */
2298 static void origin_resume(struct dm_target *ti)
2299 {
2300         struct dm_origin *o = ti->private;
2301
2302         o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
2303
2304         down_write(&_origins_lock);
2305         __insert_dm_origin(o);
2306         up_write(&_origins_lock);
2307 }
2308
2309 static void origin_postsuspend(struct dm_target *ti)
2310 {
2311         struct dm_origin *o = ti->private;
2312
2313         down_write(&_origins_lock);
2314         __remove_dm_origin(o);
2315         up_write(&_origins_lock);
2316 }
2317
2318 static void origin_status(struct dm_target *ti, status_type_t type,
2319                           unsigned status_flags, char *result, unsigned maxlen)
2320 {
2321         struct dm_origin *o = ti->private;
2322
2323         switch (type) {
2324         case STATUSTYPE_INFO:
2325                 result[0] = '\0';
2326                 break;
2327
2328         case STATUSTYPE_TABLE:
2329                 snprintf(result, maxlen, "%s", o->dev->name);
2330                 break;
2331         }
2332 }
2333
2334 static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2335                         struct bio_vec *biovec, int max_size)
2336 {
2337         struct dm_origin *o = ti->private;
2338         struct request_queue *q = bdev_get_queue(o->dev->bdev);
2339
2340         if (!q->merge_bvec_fn)
2341                 return max_size;
2342
2343         bvm->bi_bdev = o->dev->bdev;
2344
2345         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2346 }
2347
2348 static int origin_iterate_devices(struct dm_target *ti,
2349                                   iterate_devices_callout_fn fn, void *data)
2350 {
2351         struct dm_origin *o = ti->private;
2352
2353         return fn(ti, o->dev, 0, ti->len, data);
2354 }
2355
2356 static struct target_type origin_target = {
2357         .name    = "snapshot-origin",
2358         .version = {1, 9, 0},
2359         .module  = THIS_MODULE,
2360         .ctr     = origin_ctr,
2361         .dtr     = origin_dtr,
2362         .map     = origin_map,
2363         .resume  = origin_resume,
2364         .postsuspend = origin_postsuspend,
2365         .status  = origin_status,
2366         .merge   = origin_merge,
2367         .iterate_devices = origin_iterate_devices,
2368 };
2369
2370 static struct target_type snapshot_target = {
2371         .name    = "snapshot",
2372         .version = {1, 13, 0},
2373         .module  = THIS_MODULE,
2374         .ctr     = snapshot_ctr,
2375         .dtr     = snapshot_dtr,
2376         .map     = snapshot_map,
2377         .end_io  = snapshot_end_io,
2378         .preresume  = snapshot_preresume,
2379         .resume  = snapshot_resume,
2380         .status  = snapshot_status,
2381         .iterate_devices = snapshot_iterate_devices,
2382 };
2383
2384 static struct target_type merge_target = {
2385         .name    = dm_snapshot_merge_target_name,
2386         .version = {1, 3, 0},
2387         .module  = THIS_MODULE,
2388         .ctr     = snapshot_ctr,
2389         .dtr     = snapshot_dtr,
2390         .map     = snapshot_merge_map,
2391         .end_io  = snapshot_end_io,
2392         .presuspend = snapshot_merge_presuspend,
2393         .preresume  = snapshot_preresume,
2394         .resume  = snapshot_merge_resume,
2395         .status  = snapshot_status,
2396         .iterate_devices = snapshot_iterate_devices,
2397 };
2398
2399 static int __init dm_snapshot_init(void)
2400 {
2401         int r;
2402
2403         r = dm_exception_store_init();
2404         if (r) {
2405                 DMERR("Failed to initialize exception stores");
2406                 return r;
2407         }
2408
2409         r = dm_register_target(&snapshot_target);
2410         if (r < 0) {
		DMERR("Snapshot target register failed %d", r);
2412                 goto bad_register_snapshot_target;
2413         }
2414
2415         r = dm_register_target(&origin_target);
2416         if (r < 0) {
2417                 DMERR("Origin target register failed %d", r);
2418                 goto bad_register_origin_target;
2419         }
2420
2421         r = dm_register_target(&merge_target);
2422         if (r < 0) {
2423                 DMERR("Merge target register failed %d", r);
2424                 goto bad_register_merge_target;
2425         }
2426
2427         r = init_origin_hash();
2428         if (r) {
2429                 DMERR("init_origin_hash failed.");
2430                 goto bad_origin_hash;
2431         }
2432
2433         exception_cache = KMEM_CACHE(dm_exception, 0);
2434         if (!exception_cache) {
2435                 DMERR("Couldn't create exception cache.");
2436                 r = -ENOMEM;
2437                 goto bad_exception_cache;
2438         }
2439
2440         pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
2441         if (!pending_cache) {
2442                 DMERR("Couldn't create pending cache.");
2443                 r = -ENOMEM;
2444                 goto bad_pending_cache;
2445         }
2446
2447         return 0;
2448
2449 bad_pending_cache:
2450         kmem_cache_destroy(exception_cache);
2451 bad_exception_cache:
2452         exit_origin_hash();
2453 bad_origin_hash:
2454         dm_unregister_target(&merge_target);
2455 bad_register_merge_target:
2456         dm_unregister_target(&origin_target);
2457 bad_register_origin_target:
2458         dm_unregister_target(&snapshot_target);
2459 bad_register_snapshot_target:
2460         dm_exception_store_exit();
2461
2462         return r;
2463 }
2464
2465 static void __exit dm_snapshot_exit(void)
2466 {
2467         dm_unregister_target(&snapshot_target);
2468         dm_unregister_target(&origin_target);
2469         dm_unregister_target(&merge_target);
2470
2471         exit_origin_hash();
2472         kmem_cache_destroy(pending_cache);
2473         kmem_cache_destroy(exception_cache);
2474
2475         dm_exception_store_exit();
2476 }
2477
2478 /* Module hooks */
2479 module_init(dm_snapshot_init);
2480 module_exit(dm_snapshot_exit);
2481
2482 MODULE_DESCRIPTION(DM_NAME " snapshot target");
2483 MODULE_AUTHOR("Joe Thornber");
2484 MODULE_LICENSE("GPL");
2485 MODULE_ALIAS("dm-snapshot-origin");
2486 MODULE_ALIAS("dm-snapshot-merge");