From: Dan Radez
Date: Mon, 14 Nov 2016 17:36:27 +0000 (-0500)
Subject: Allow passing a device name to ceph
X-Git-Tag: colorado.3.0~9
X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=commitdiff_plain;h=011c7a4b750b11c138728e5537a6e8f65a5d43fa;p=apex.git

Allow passing a device name to ceph

JIRA: APEX-347

Change-Id: Ibc6d141e20faf613e0f6314286b55aff01ce862e
Signed-off-by: Dan Radez
(cherry picked from commit e36f790d036c0bfb5d7ed81d656f9bb1f5200a1a)
---
diff --git a/config/deploy/deploy_settings.yaml b/config/deploy/deploy_settings.yaml
index e7821f18..ee1dc146 100644
--- a/config/deploy/deploy_settings.yaml
+++ b/config/deploy/deploy_settings.yaml
@@ -48,6 +48,13 @@ deploy_options:
   # Whether to run vsperf after the install has completed
   #vsperf: false
 
+  # Specify a device for ceph to use for the OSDs. By default a virtual disk
+  # is created for the OSDs; this setting allows a different target device
+  # to be used instead. The controllers and the compute nodes all have OSDs
+  # set up on them, so the device name given here must be valid on every
+  # overcloud node.
+  #ceph_device: /dev/sdb
+
   # Set performance options on specific roles. The valid roles are 'Compute', 'Controller'
   # and 'Storage', and the valid sections are 'kernel' and 'nova'
   #performance:
diff --git a/docs/installationprocedure/architecture.rst b/docs/installationprocedure/architecture.rst
index c2b38d00..33536788 100644
--- a/docs/installationprocedure/architecture.rst
+++ b/docs/installationprocedure/architecture.rst
@@ -44,6 +44,7 @@ will run the following services:
 - OpenDaylight
 - HA Proxy
 - Pacemaker & VIPs
+- Ceph Monitors and OSDs
 
 Stateless OpenStack services
   All running stateless OpenStack services are load balanced by HA Proxy.
@@ -77,6 +78,12 @@ Pacemaker & VIPs
   start up order and Virtual IPs associated with specific services are running
   on the proper host.
 
+Ceph Monitors & OSDs
+  The Ceph monitors run on each of the control nodes. Each control node also
+  has a Ceph OSD running on it. By default the OSDs use an autogenerated
+  virtual disk as their target device. A non-autogenerated device can be
+  specified in the deploy file.
+
 VM Migration is configured and VMs can be evacuated as needed or as invoked
 by tools such as heat as part of a monitored stack deployment in the overcloud.
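For reference, below is a minimal sketch (not Apex code) of how a deploy settings file carrying the new ceph_device option could be read. The settings path, the use of PyYAML, and the fallback value are assumptions made for illustration; the actual fallback in Apex is the autogenerated virtual disk described above.

    # Illustrative sketch only -- not part of the Apex code base.
    import yaml  # PyYAML

    def get_ceph_osd_target(settings_path="deploy_settings.yaml"):
        """Return the configured ceph_device, or None for the default."""
        with open(settings_path) as f:
            settings = yaml.safe_load(f) or {}
        deploy_options = settings.get('deploy_options') or {}
        device = deploy_options.get('ceph_device')
        if device:
            # The same device path must exist on every overcloud node
            # (controllers and computes), since all of them run an OSD.
            return device
        # Unset: Apex falls back to creating a virtual disk for the OSDs.
        return None

    if __name__ == '__main__':
        print(get_ceph_osd_target() or "default: autogenerated virtual disk")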
diff --git a/lib/python/apex/deploy_env.py b/lib/python/apex/deploy_env.py
index 10b7831f..0d48bd88 100644
--- a/lib/python/apex/deploy_env.py
+++ b/lib/python/apex/deploy_env.py
@@ -21,7 +21,7 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
                        'vpn',
                        'vpp']
 
-OPT_DEPLOY_SETTINGS = ['performance', 'vsperf']
+OPT_DEPLOY_SETTINGS = ['performance', 'vsperf', 'ceph_device']
 
 VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
 VALID_PERF_OPTS = ['kernel', 'nova', 'vpp']
diff --git a/lib/undercloud-functions.sh b/lib/undercloud-functions.sh
index 95122980..d0347428 100755
--- a/lib/undercloud-functions.sh
+++ b/lib/undercloud-functions.sh
@@ -217,6 +217,10 @@ if [[ "$net_isolation_enabled" == "TRUE" ]]; then
 fi
 
+if [[ -n "${deploy_options_array['ceph_device']}" ]]; then
+  sed -i '/ExtraConfig/a\\ ceph::profile::params::osds: {\\x27${deploy_options_array['ceph_device']}\\x27: {}}' opnfv-environment.yaml
+fi
+
 sudo sed -i '/CephClusterFSID:/c\\ CephClusterFSID: \\x27$(cat /proc/sys/kernel/random/uuid)\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
 sudo sed -i '/CephMonKey:/c\\ CephMonKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
 sudo sed -i '/CephAdminKey:/c\\ CephAdminKey: \\x27'"\$(ceph-authtool --gen-print-key)"'\\x27' /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
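For reference, here is a rough Python equivalent of the sed one-liner added above, showing the Hiera line it injects after the ExtraConfig key in opnfv-environment.yaml. The function name, file handling, and indentation are assumptions for illustration; the actual deploy performs this edit with sed on the undercloud.

    # Illustrative sketch only -- the real change is the sed command in
    # lib/undercloud-functions.sh shown in the diff above.

    def add_ceph_osd_device(env_path, ceph_device):
        """Insert a ceph OSD device mapping after the ExtraConfig line."""
        with open(env_path) as f:
            lines = f.readlines()
        out = []
        for line in lines:
            out.append(line)
            if 'ExtraConfig' in line:
                # Hiera key consumed by puppet-ceph: maps the device to an
                # empty options hash, e.g. {'/dev/sdb': {}}.
                out.append(
                    "    ceph::profile::params::osds: {'%s': {}}\n" % ceph_device)
        with open(env_path, 'w') as f:
            f.writelines(out)

    # Example usage (hypothetical path and device):
    # add_ceph_osd_device('opnfv-environment.yaml', '/dev/sdb')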