Remove osd_pool_default_min_size to allow the Ceph cluster to do the right thing by default
authorKeith Schincke <keith.schincke@gmail.com>
Fri, 12 May 2017 12:12:56 +0000 (08:12 -0400)
committerKeith Schincke <keith.schincke@gmail.com>
Mon, 22 May 2017 18:38:52 +0000 (14:38 -0400)
The default value is 0, which causes the minimum number to be calculated based on the replica count
from osd_pool_default_size. The default replica count is 3 and the calculated min_size is 2.
If the replica count is 1 then the min_size is 1, i.e. min_size = replica - (replica/2), using integer division.
Add a CephPoolDefaultSize parameter to ceph-mon.yaml. This parameter defaults to 3 but can
be overridden. See puppet-ceph-devel.yaml for an example.
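For illustration, an operator can override the default in any environment file passed to the deployment; a minimal sketch (mirroring the CI scenario files changed below) would be:

    parameter_defaults:
      CephPoolDefaultSize: 1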

Change-Id: Ie9bdd9b16bcb9f11107ece614b010e87d3ae98a9

ci/environments/scenario001-multinode.yaml
ci/environments/scenario004-multinode.yaml
environments/puppet-ceph-devel.yaml
puppet/services/ceph-base.yaml
puppet/services/ceph-mon.yaml
releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml [new file with mode: 0644]

index eee6f1c..473beb0 100644 (file)
@@ -102,6 +102,7 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  CephPoolDefaultSize: 1
   NovaEnableRbdBackend: true
   CinderEnableRbdBackend: true
   CinderBackupBackend: ceph
index 24fb2bf..14f181c 100644 (file)
@@ -86,6 +86,7 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  CephPoolDefaultSize: 1
   SwiftCeilometerPipelineEnabled: false
   NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
   BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
index 9c8abbb..8fc4bf2 100644 (file)
@@ -20,3 +20,5 @@ parameter_defaults:
   GlanceBackend: rbd
   GnocchiBackend: rbd
   CinderEnableIscsiBackend: false
+  CephPoolDefaultSize: 1
+
index 033d3f7..1eea3dc 100644 (file)
@@ -91,7 +91,6 @@ outputs:
       service_name: ceph_base
       config_settings:
         tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage}
-        ceph::profile::params::osd_pool_default_min_size: 1
         ceph::profile::params::osds: {/srv/data: {}}
         ceph::profile::params::manage_repo: false
         ceph::profile::params::authentication_type: cephx
index d589ef8..0f72eb7 100644 (file)
@@ -70,6 +70,10 @@ parameters:
   MonitoringSubscriptionCephMon:
     default: 'overcloud-ceph-mon'
     type: string
+  CephPoolDefaultSize:
+    description: Default replication count (osd_pool_default_size) for Ceph pools
+    type: number
+    default: 3
 
 resources:
   CephBase:
@@ -92,7 +96,7 @@ outputs:
             ceph::profile::params::mon_key: {get_param: CephMonKey}
             ceph::profile::params::osd_pool_default_pg_num: 32
             ceph::profile::params::osd_pool_default_pgp_num: 32
-            ceph::profile::params::osd_pool_default_size: 3
+            ceph::profile::params::osd_pool_default_size: {get_param: CephPoolDefaultSize}
             # repeat returns items in a list, so we need to map_merge twice
             tripleo::profile::base::ceph::mon::ceph_pools:
               map_merge:
diff --git a/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml b/releasenotes/notes/unset-ceph-default-min-size-0297620ed99dab5b.yaml
new file mode 100644 (file)
index 0000000..fc2cb48
--- /dev/null
@@ -0,0 +1,12 @@
+---
+fixes:
+  - |
+    Removed the hard coding of osd_pool_default_min_size. Setting this value
+    to 1 can result in data loss in production deployments. Leaving this value
+    unset (or setting it to 0) allows Ceph to calculate the value based on the
+    current setting of osd_pool_default_size. If the replication count is 3,
+    then the calculated min_size is 2. If the replication count is 1, then
+    the calculated min_size is 1. For a POC deployment using a single OSD,
+    set osd_pool_default_size = 1. See the description at
+    http://docs.ceph.com/docs/master/rados/configuration/pool-pg-config-ref/
+    Added the CephPoolDefaultSize parameter to set the default replication size; it defaults to 3.