These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/drivers/md/dm-table.c
index 16ba55a..061152a 100644
@@ -440,14 +440,6 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);
 
-       /*
-        * Check if merge fn is supported.
-        * If not we'll force DM to use PAGE_SIZE or
-        * smaller I/O, just to be safe.
-        */
-       if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
-               blk_limits_max_hw_sectors(limits,
-                                         (unsigned int) (PAGE_SIZE >> 9));
        return 0;
 }
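This hunk drops the old safety net: when the underlying queue required biovec merging (dm_queue_merge_is_compulsory()) but the target had no ->merge method, DM capped max_hw_sectors to a single page, forcing "PAGE_SIZE or smaller I/O, just to be safe" as the removed comment put it. A minimal sketch of the unit conversion behind that cap, assuming the SECTOR_SHIFT (9) definition already used elsewhere in dm-table.c; the helper name is illustrative, not from the patch:

/*
 * Hypothetical helper: express a byte count in 512-byte sectors, the unit
 * the max_hw_sectors limit is measured in.  With 4 KiB pages,
 * PAGE_SIZE >> SECTOR_SHIFT == 8, i.e. the old fallback limited hardware
 * I/O to eight sectors per request.
 */
static inline unsigned int bytes_to_sectors(unsigned long bytes)
{
        return (unsigned int)(bytes >> SECTOR_SHIFT);
}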
 
@@ -1022,15 +1014,16 @@ static int dm_table_build_index(struct dm_table *t)
        return r;
 }
 
+static bool integrity_profile_exists(struct gendisk *disk)
+{
+       return !!blk_get_integrity(disk);
+}
+
 /*
  * Get a disk whose integrity profile reflects the table's profile.
- * If %match_all is true, all devices' profiles must match.
- * If %match_all is false, all devices must at least have an
- * allocated integrity profile; but uninitialized is ok.
  * Returns NULL if integrity support was inconsistent or unavailable.
  */
-static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
-                                                   bool match_all)
+static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
 {
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
@@ -1038,10 +1031,8 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
 
        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev->bdev->bd_disk;
-               if (!blk_get_integrity(template_disk))
+               if (!integrity_profile_exists(template_disk))
                        goto no_integrity;
-               if (!match_all && !blk_integrity_is_initialized(template_disk))
-                       continue; /* skip uninitialized profiles */
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
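After this hunk the loop insists that every underlying device expose an integrity profile and that all of those profiles agree; blk_integrity_compare() supplies the compatibility test. A minimal sketch of how its return value is read here (the wrapper is hypothetical, for illustration only):

/*
 * Hypothetical wrapper making the convention explicit: a negative return
 * from blk_integrity_compare() means the two disks' integrity profiles
 * cannot be stacked, so ">= 0" is treated as "compatible" throughout
 * this file.
 */
static bool example_profiles_compatible(struct gendisk *a, struct gendisk *b)
{
        return blk_integrity_compare(a, b) >= 0;
}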
@@ -1060,34 +1051,40 @@ no_integrity:
 }
 
 /*
- * Register the mapped device for blk_integrity support if
- * the underlying devices have an integrity profile.  But all devices
- * may not have matching profiles (checking all devices isn't reliable
+ * Register the mapped device for blk_integrity support if the
+ * underlying devices have an integrity profile.  But all devices may
+ * not have matching profiles (checking all devices isn't reliable
  * during table load because this table may use other DM device(s) which
- * must be resumed before they will have an initialized integity profile).
- * Stacked DM devices force a 2 stage integrity profile validation:
- * 1 - during load, validate all initialized integrity profiles match
- * 2 - during resume, validate all integrity profiles match
+ * must be resumed before they will have an initialized integrity
+ * profile).  Consequently, stacked DM devices force a 2 stage integrity
+ * profile validation: First pass during table load, final pass during
+ * resume.
  */
-static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+static int dm_table_register_integrity(struct dm_table *t)
 {
+       struct mapped_device *md = t->md;
        struct gendisk *template_disk = NULL;
 
-       template_disk = dm_table_get_integrity_disk(t, false);
+       template_disk = dm_table_get_integrity_disk(t);
        if (!template_disk)
                return 0;
 
-       if (!blk_integrity_is_initialized(dm_disk(md))) {
+       if (!integrity_profile_exists(dm_disk(md))) {
                t->integrity_supported = 1;
-               return blk_integrity_register(dm_disk(md), NULL);
+               /*
+                * Register integrity profile during table load; we can do
+                * this because the final profile must match during resume.
+                */
+               blk_integrity_register(dm_disk(md),
+                                      blk_get_integrity(template_disk));
+               return 0;
        }
 
        /*
-        * If DM device already has an initalized integrity
+        * If DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
-       if (blk_integrity_is_initialized(template_disk) &&
-           blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+       if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
@@ -1095,7 +1092,7 @@ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device
                return 1;
        }
 
-       /* Preserve existing initialized integrity profile */
+       /* Preserve existing integrity profile */
        t->integrity_supported = 1;
        return 0;
 }
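Taken together, this is the first pass of the two-stage validation described in the comment above: at table load the DM disk either adopts the template profile or must already carry a compatible one. A condensed sketch of that decision, assuming a valid md and template_disk; the helper itself is not part of the patch:

static int example_load_time_integrity(struct mapped_device *md,
                                       struct gendisk *template_disk)
{
        if (!integrity_profile_exists(dm_disk(md))) {
                /* No profile yet: copy the template onto the DM disk. */
                blk_integrity_register(dm_disk(md),
                                       blk_get_integrity(template_disk));
                return 0;
        }
        /* Existing profile: reject the table if it would conflict. */
        return blk_integrity_compare(dm_disk(md), template_disk) < 0 ? 1 : 0;
}

dm_table_complete() (next hunk) treats any nonzero return here as a failed table load.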
@@ -1120,7 +1117,7 @@ int dm_table_complete(struct dm_table *t)
                return r;
        }
 
-       r = dm_table_prealloc_integrity(t, t->md);
+       r = dm_table_register_integrity(t);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
@@ -1286,29 +1283,30 @@ combine_limits:
 }
 
 /*
- * Set the integrity profile for this device if all devices used have
- * matching profiles.  We're quite deep in the resume path but still
- * don't know if all devices (particularly DM devices this device
- * may be stacked on) have matching profiles.  Even if the profiles
- * don't match we have no way to fail (to resume) at this point.
+ * Verify that all devices have an integrity profile that matches the
+ * DM device's registered integrity profile.  If the profiles don't
+ * match then unregister the DM device's integrity profile.
  */
-static void dm_table_set_integrity(struct dm_table *t)
+static void dm_table_verify_integrity(struct dm_table *t)
 {
        struct gendisk *template_disk = NULL;
 
-       if (!blk_get_integrity(dm_disk(t->md)))
-               return;
+       if (t->integrity_supported) {
+               /*
+                * Verify that the original integrity profile
+                * matches all the devices in this table.
+                */
+               template_disk = dm_table_get_integrity_disk(t);
+               if (template_disk &&
+                   blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
+                       return;
+       }
 
-       template_disk = dm_table_get_integrity_disk(t, true);
-       if (template_disk)
-               blk_integrity_register(dm_disk(t->md),
-                                      blk_get_integrity(template_disk));
-       else if (blk_integrity_is_initialized(dm_disk(t->md)))
-               DMWARN("%s: device no longer has a valid integrity profile",
-                      dm_device_name(t->md));
-       else
+       if (integrity_profile_exists(dm_disk(t->md))) {
                DMWARN("%s: unable to establish an integrity profile",
                       dm_device_name(t->md));
+               blk_integrity_unregister(dm_disk(t->md));
+       }
 }
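This is the second pass, run at resume: if the profile registered at load time no longer matches every underlying device, it is torn down rather than left advertising integrity the stack cannot honour. A condensed sketch of the new behaviour, omitting the integrity_supported gate and the warning for brevity; it is not a verbatim copy of the function:

static void example_resume_time_verify(struct dm_table *t)
{
        struct gendisk *template_disk = dm_table_get_integrity_disk(t);

        /* Still consistent with every underlying device: keep the profile. */
        if (template_disk &&
            blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
                return;

        /* Otherwise stop advertising integrity support on the DM disk. */
        if (integrity_profile_exists(dm_disk(t->md)))
                blk_integrity_unregister(dm_disk(t->md));
}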
 
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1388,14 +1386,6 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
        return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 }
 
-static int queue_supports_sg_gaps(struct dm_target *ti, struct dm_dev *dev,
-                                 sector_t start, sector_t len, void *data)
-{
-       struct request_queue *q = bdev_get_queue(dev->bdev);
-
-       return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags);
-}
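The QUEUE_FLAG_SG_GAPS plumbing is removed here and again in dm_table_set_restrictions() below. The surviving checks all follow the same shape: a per-device callout probes the underlying request queue, and dm_table_all_devices_attribute() only applies the attribute when every device reports it. A sketch of that pattern with a hypothetical property; the callout below is illustrative, not part of the patch:

static int example_device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
                                    sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        /* Nonzero means this one device has the property (non-rotational). */
        return q && blk_queue_nonrot(q);
}

A caller would then test dm_table_all_devices_attribute(t, example_device_is_nonrot) before setting the corresponding flag on the DM device's queue.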
-
 static bool dm_table_all_devices_attribute(struct dm_table *t,
                                           iterate_devices_callout_fn func)
 {
@@ -1516,12 +1506,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        else
                queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
-       if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps))
-               queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q);
-       else
-               queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
-
-       dm_table_set_integrity(t);
+       dm_table_verify_integrity(t);
 
        /*
         * Determine whether or not this queue's I/O timings contribute