Merge "Keystone domain for Heat"
authorJenkins <jenkins@review.openstack.org>
Mon, 14 Mar 2016 12:30:37 +0000 (12:30 +0000)
committerGerrit Code Review <review@openstack.org>
Mon, 14 Mar 2016 12:30:38 +0000 (12:30 +0000)
26 files changed:
docker/compute-post.yaml
docker/firstboot/start_docker_agents.sh
environments/docker-network-isolation.yaml
environments/docker.yaml
environments/ips-from-pool-all.yaml [new file with mode: 0644]
environments/major-upgrade-pacemaker-init.yaml [moved from environments/major-upgrade-script-delivery.yaml with 94% similarity]
environments/network-isolation-v6.yaml
extraconfig/tasks/major_upgrade_block_storage.sh [new file with mode: 0644]
extraconfig/tasks/major_upgrade_ceph_storage.sh [new file with mode: 0644]
extraconfig/tasks/major_upgrade_object_storage.sh
extraconfig/tasks/major_upgrade_pacemaker.yaml
extraconfig/tasks/major_upgrade_pacemaker_init.yaml [new file with mode: 0644]
extraconfig/tasks/major_upgrade_script_delivery.yaml [deleted file]
overcloud.yaml
puppet/ceph-cluster-config.yaml
puppet/ceph-storage.yaml
puppet/cinder-storage.yaml
puppet/compute.yaml
puppet/controller.yaml
puppet/extraconfig/ceph/ceph-external-config.yaml
puppet/hieradata/compute.yaml
puppet/manifests/overcloud_cephstorage.pp
puppet/manifests/overcloud_compute.pp
puppet/manifests/overcloud_controller.pp
puppet/manifests/overcloud_controller_pacemaker.pp
puppet/swift-storage.yaml

index 8f9e962..4532549 100644 (file)
@@ -17,8 +17,6 @@ parameters:
     type: string
   DockerLibvirtImage:
     type: string
-  DockerNeutronAgentImage:
-    type: string
   DockerOpenvswitchImage:
     type: string
   DockerOvsVswitchdImage:
@@ -32,18 +30,13 @@ parameters:
     type: string
     default: "/etc/nova/nova.conf"
   NeutronOpenvswitchAgentConfig:
-    type: string
-    default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/ml2/ml2_conf.ini"
-  NeutronAgentConfig:
     type: string
     default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
-  NeutronAgentPluginVolume:
+  NeutronOpenvswitchAgentPluginVolume:
     type: string
-    description: The neutron agent plugin to mount into the neutron-agents container
     default: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro"
-  NeutronAgentOvsVolume:
+  NeutronOpenvswitchAgentOvsVolume:
     type: string
-    description: The neutron agent ovs agents to mount into the neutron-agents container
     default: " "
 
 resources:
@@ -99,7 +92,6 @@ resources:
       - name: libvirt_config
       - name: nova_config
       - name: neutron_openvswitch_agent_config
-      - name: neutron_agent_config
       config: |
         #!/bin/python
         import json
@@ -112,13 +104,11 @@ resources:
         libvirt_config = os.getenv('libvirt_config').split(',')
         nova_config = os.getenv('nova_config').split(',')
         neutron_openvswitch_agent_config = os.getenv('neutron_openvswitch_agent_config').split(',')
-        neutron_agent_config = os.getenv('neutron_agent_config').split(',')
 
         # Command, Config_files, Owner, Perms
         services = {'nova-libvirt': ['/usr/sbin/libvirtd', libvirt_config, 'root', libvirt_perms],
                     'nova-compute': ['/usr/bin/nova-compute', nova_config, 'nova', file_perms],
                     'neutron-openvswitch-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_openvswitch_agent_config, 'neutron', file_perms],
-                    'neutron-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_agent_config, 'neutron', file_perms],
                     'ovs-vswitchd': ['/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/openvswitch/ovs-vswitchd.log'],
                     'ovsdb-server': ['/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --log-file=/var/log/openvswitch/ovsdb-server.log']
                    }
@@ -171,7 +161,6 @@ resources:
         libvirt_config: {get_param: LibvirtConfig}
         nova_config: {get_param: NovaConfig}
         neutron_openvswitch_agent_config: {get_param: NeutronOpenvswitchAgentConfig}
-        neutron_agent_config: {get_param: NeutronAgentConfig}
 
   NovaComputeContainersDeploymentOVS:
     type: OS::Heat::StructuredDeployments
@@ -291,27 +280,7 @@ resources:
     properties:
       group: docker-compose
       config:
-        openvswitch:
-          image:
-            list_join:
-            - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
-          net: host
-          privileged: true
-          restart: always
-          volumes:
-           - /run:/run
-           - /lib/modules:/lib/modules:ro
-           - /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json
-           - /var/lib/etc-data/neutron/neutron.conf:/etc/kolla/neutron-openvswitch-agent/:ro
-           - /var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro
-           - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
-          environment:
-           - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-          volumes_from:
-           - computedata
-
-        neutronagent:
+        neutronovsagent:
           image:
             list_join:
             - '/'
@@ -326,10 +295,11 @@ resources:
               - list_join:
                  - ","
                  - [ "/run:/run", "/lib/modules:/lib/modules:ro",
-                     "/var/lib/etc-data/json-config/neutron-agent.json:/var/lib/kolla/config_files/config.json",
+                     "/var/lib/etc-data/json-config/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json",
                      "/var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro",
-                     {get_param: NeutronAgentPluginVolume},
-                     {get_param: NeutronAgentOvsVolume} ]
+                     "/var/lib/etc-data/neutron/plugins/ml2/ml2_conf.ini:/var/lib/kolla/config_files/ml2_conf.ini:ro",
+                     {get_param: NeutronOpenvswitchAgentPluginVolume},
+                     {get_param: NeutronOpenvswitchAgentOvsVolume} ]
           environment:
            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           volumes_from:
index 963c7ee..bb458a6 100644 (file)
@@ -73,3 +73,22 @@ chmod 0640 /etc/systemd/system/heat-docker-agents.service
 # Disable NetworkManager and let the ifup/down scripts work properly.
 /usr/bin/systemctl disable NetworkManager
 /usr/bin/systemctl stop NetworkManager
+
+# Atomic's root partition & logical volume defaults to 3G.  In order to launch
+# larger VMs, we need to enlarge the root logical volume and scale down the
+# docker_pool logical volume. We are allocating 80% of the disk space for
+# vm data and the remaining 20% for docker images.
+ATOMIC_ROOT='/dev/mapper/atomicos-root'
+ROOT_DEVICE=`pvs -o vg_name,pv_name --no-headings | grep atomicos | awk '{ print $2}'`
+
+growpart $( echo "${ROOT_DEVICE}" | sed -r 's/([^0-9]*)([0-9]+)/\1 \2/' )
+pvresize "${ROOT_DEVICE}"
+lvresize -l +80%FREE "${ATOMIC_ROOT}"
+xfs_growfs "${ATOMIC_ROOT}"
+
+cat <<EOF > /etc/sysconfig/docker-storage-setup
+GROWPART=true
+AUTO_EXTEND_POOL=yes
+POOL_AUTOEXTEND_PERCENT=30
+POOL_AUTOEXTEND_THRESHOLD=70
+EOF
index 257d03d..87c81d0 100644 (file)
@@ -1,4 +1,4 @@
 parameter_defaults:
-  NeutronAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
-  NeutronAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
-  NeutronAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
+  NeutronOpenvswitchAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
+  NeutronOpenvswitchAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
+  NeutronOpenvswitchAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
index 7c6dc40..be21d84 100644 (file)
@@ -14,9 +14,9 @@ parameter_defaults:
   DockerNamespaceIsRegistry: false
   # Compute Node Images
   DockerComputeImage: centos-binary-nova-compute:latest
+  DockerAgentImage: heat-docker-agents:latest
   DockerComputeDataImage: centos-binary-data:latest
   DockerLibvirtImage: centos-binary-nova-libvirt:latest
-  DockerNeutronAgentImage: centos-binary-neutron-agents:latest
   DockerOpenvswitchImage: centos-binary-neutron-openvswitch-agent:latest
   DockerOvsVswitchdImage: centos-binary-openvswitch-vswitchd:latest
   DockerOpenvswitchDBImage: centos-binary-openvswitch-db-server:latest
diff --git a/environments/ips-from-pool-all.yaml b/environments/ips-from-pool-all.yaml
new file mode 100644 (file)
index 0000000..f660d50
--- /dev/null
@@ -0,0 +1,75 @@
+# Environment file demonstrating how to pre-assign IPs to all node types
+resource_registry:
+  OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
+  OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+  OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+  OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+  OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+  OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
+  OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+  OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+  OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
+  OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+  OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+  OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
+  OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+  OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+  OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+  OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+  OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+  OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+  OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+  OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+  OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+  OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+  OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+  OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+  OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
+
+parameter_defaults:
+  ControllerIPs:
+    # Each controller will get an IP from the lists below, first controller, first IP
+    external:
+    - 10.0.0.251
+    internal_api:
+    - 172.16.2.251
+    storage:
+    - 172.16.1.251
+    storage_mgmt:
+    - 172.16.3.251
+    tenant:
+    - 172.16.0.251
+  NovaComputeIPs:
+    # Each compute will get an IP from the lists below, first compute, first IP
+    internal_api:
+    - 172.16.2.252
+    storage:
+    - 172.16.1.252
+    tenant:
+    - 172.16.0.252
+  CephStorageIPs:
+    # Each ceph node will get an IP from the lists below, first node, first IP
+    storage:
+    - 172.16.1.253
+    storage_mgmt:
+    - 172.16.3.253
+  SwiftStorageIPs:
+    # Each swift node will get an IP from the lists below, first node, first IP
+    internal_api:
+    - 172.16.2.254
+    storage:
+    - 172.16.1.254
+    storage_mgmt:
+    - 172.16.3.254
+  BlockStorageIPs:
+    # Each cinder node will get an IP from the lists below, first node, first IP
+    internal_api:
+    - 172.16.2.250
+    storage:
+    - 172.16.1.250
+    storage_mgmt:
+    - 172.16.3.250
@@ -2,7 +2,7 @@ parameter_defaults:
   UpgradeLevelNovaCompute: liberty
 
 resource_registry:
-  OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_script_delivery.yaml
+  OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
   OS::TripleO::Tasks::PackageUpdate:  ../extraconfig/tasks/yum_update_noop.yaml
   OS::TripleO::ControllerPostDeployment: OS::Heat::None
   OS::TripleO::ComputePostDeployment: OS::Heat::None
index 4c07174..599a08b 100644 (file)
@@ -43,6 +43,8 @@ resource_registry:
   OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_v6.yaml
 
 parameter_defaults:
+  # Enable IPv6 for Ceph.
+  CephIPv6: True
   # Enable IPv6 for Corosync. This is required when Corosync is using an IPv6 IP in the cluster.
   CorosyncIPv6: True
   # Enable IPv6 for MongoDB. This is required when MongoDB is using an IPv6 IP.
diff --git a/extraconfig/tasks/major_upgrade_block_storage.sh b/extraconfig/tasks/major_upgrade_block_storage.sh
new file mode 100644 (file)
index 0000000..0766624
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+# This runs an upgrade of Cinder Block Storage nodes.
+#
+set -eu
+
+yum -y install python-zaqarclient  # needed for os-collect-config
+yum -y -q update
diff --git a/extraconfig/tasks/major_upgrade_ceph_storage.sh b/extraconfig/tasks/major_upgrade_ceph_storage.sh
new file mode 100644 (file)
index 0000000..de42b16
--- /dev/null
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# This delivers the ceph-storage upgrade script to be invoked as part of the tripleo
+# major upgrade workflow.
+#
+set -eu
+
+UPGRADE_SCRIPT=/root/tripleo_upgrade_node.sh
+
+cat > $UPGRADE_SCRIPT << ENDOFCAT
+### DO NOT MODIFY THIS FILE
+### This file is automatically delivered to the ceph-storage nodes as part of the
+### tripleo upgrades workflow
+
+
+function systemctl_ceph {
+    action=\$1
+    systemctl \$action ceph
+}
+
+# "so that mirrors aren't rebalanced as if the OSD died" - gfidente
+ceph osd set noout
+
+systemctl_ceph stop
+yum -y install python-zaqarclient  # needed for os-collect-config
+yum -y update
+systemctl_ceph start
+
+ceph osd unset noout
+
+ENDOFCAT
+
+# ensure the permissions are OK
+chmod 0755 $UPGRADE_SCRIPT
+
index 0f6d091..931f4f4 100644 (file)
@@ -14,17 +14,18 @@ cat > $UPGRADE_SCRIPT << ENDOFCAT
 
 
 function systemctl_swift {
-    action=$1
+    action=\$1
     for S in openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
              openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
-             openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy; do
-                systemctl $action $S
+             openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object; do
+                systemctl \$action \$S
     done
 }
 
 
 systemctl_swift stop
 
+yum -y install python-zaqarclient  # needed for os-collect-config
 yum -y update
 
 systemctl_swift start
index b867d10..4af3186 100644 (file)
@@ -50,6 +50,20 @@ resources:
       config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
       input_values: {get_param: input_values}
 
+  BlockStorageUpgradeConfig:
+    type: OS::Heat::SoftwareConfig
+    depends_on: ControllerPacemakerUpgradeDeployment_Step1
+    properties:
+      group: script
+      config: {get_file: major_upgrade_block_storage.sh}
+
+  BlockStorageUpgradeDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: blockstorage_servers}
+      config: {get_resource: BlockStorageUpgradeConfig}
+      input_values: {get_param: input_values}
+
   ControllerPacemakerUpgradeConfig_Step2:
     type: OS::Heat::SoftwareConfig
     properties:
@@ -63,7 +77,7 @@ resources:
 
   ControllerPacemakerUpgradeDeployment_Step2:
     type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: ControllerPacemakerUpgradeDeployment_Step1
+    depends_on: BlockStorageUpgradeDeployment
     properties:
       servers:  {get_param: controller_servers}
       config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_init.yaml b/extraconfig/tasks/major_upgrade_pacemaker_init.yaml
new file mode 100644 (file)
index 0000000..f662bc3
--- /dev/null
@@ -0,0 +1,128 @@
+heat_template_version: 2014-10-16
+description: 'Upgrade for Pacemaker deployments'
+
+parameters:
+
+  controller_servers:
+    type: json
+  compute_servers:
+    type: json
+  blockstorage_servers:
+    type: json
+  objectstorage_servers:
+    type: json
+  cephstorage_servers:
+    type: json
+  input_values:
+    type: json
+    description: input values for the software deployments
+
+  UpgradeInitCommand:
+    type: string
+    description: |
+      Command or script snippet to run on all overcloud nodes to
+      initialize the upgrade process. E.g. a repository switch.
+    default: ''
+  UpgradeLevelNovaCompute:
+    type: string
+    description: Nova Compute upgrade level
+    default: ''
+
+resources:
+
+  UpgradeInitConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config:
+        list_join:
+        - ''
+        - - "#!/bin/bash\n\n"
+          - get_param: UpgradeInitCommand
+
+  UpgradeInitControllerDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: controller_servers}
+      config: {get_resource: UpgradeInitConfig}
+      input_values: {get_param: input_values}
+
+  UpgradeInitComputeDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: compute_servers}
+      config: {get_resource: UpgradeInitConfig}
+      input_values: {get_param: input_values}
+
+  UpgradeInitBlockStorageDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: blockstorage_servers}
+      config: {get_resource: UpgradeInitConfig}
+      input_values: {get_param: input_values}
+
+  UpgradeInitObjectStorageDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: objectstorage_servers}
+      config: {get_resource: UpgradeInitConfig}
+      input_values: {get_param: input_values}
+
+  UpgradeInitCephStorageDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: cephstorage_servers}
+      config: {get_resource: UpgradeInitConfig}
+      input_values: {get_param: input_values}
+
+  # TODO(jistr): for Mitaka->Newton upgrades and further we can use
+  # map_merge with input_values instead of feeding params into scripts
+  # via str_replace on bash snippets
+
+  ComputeDeliverUpgradeScriptConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config:
+        list_join:
+        - ''
+        - - str_replace:
+              template: |
+                #!/bin/bash
+                upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
+              params:
+                UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
+          - get_file: major_upgrade_compute.sh
+
+  ComputeDeliverUpgradeScriptDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: compute_servers}
+      config: {get_resource: ComputeDeliverUpgradeScriptConfig}
+      input_values: {get_param: input_values}
+
+  ObjectStorageDeliverUpgradeScriptConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: {get_file: major_upgrade_object_storage.sh}
+
+  ObjectStorageDeliverUpgradeScriptDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: objectstorage_servers}
+      config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig}
+      input_values: {get_param: input_values}
+
+  CephStorageDeliverUpgradeScriptConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: {get_file: major_upgrade_ceph_storage.sh}
+
+  CephStorageDeliverUpgradeScriptDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: cephstorage_servers}
+      config: {get_resource: CephStorageDeliverUpgradeScriptConfig}
+      input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/major_upgrade_script_delivery.yaml b/extraconfig/tasks/major_upgrade_script_delivery.yaml
deleted file mode 100644 (file)
index f7faa7f..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-heat_template_version: 2014-10-16
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
-
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
-    type: json
-  input_values:
-    type: json
-    description: input values for the software deployments
-
-  UpgradeLevelNovaCompute:
-    type: string
-    description: Nova Compute upgrade level
-    default: ''
-
-resources:
-  # TODO(jistr): for Mitaka->Newton upgrades and further we can use
-  # map_merge with input_values instead of feeding params into scripts
-  # via str_replace on bash snippets
-
-  ComputeDeliverUpgradeScriptConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        list_join:
-        - ''
-        - - str_replace:
-              template: |
-                #!/bin/bash
-                upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
-              params:
-                UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
-          - get_file: major_upgrade_compute.sh
-
-  ComputeDeliverUpgradeScriptDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: compute_servers}
-      config: {get_resource: ComputeDeliverUpgradeScriptConfig}
-      input_values: {get_param: input_values}
-
-
-  ObjectStoreDeliverUpgradeScriptConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: major_upgrade_object_storage.sh}
-
-  ObjectStoreDeliverUpgradeScriptDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: objectstorage_servers}
-      config: {get_resource: ObjectStoreDeliverUpgradeScriptConfig}
-      input_values: {get_param: input_values}
-
index a17d36e..0feeca9 100644 (file)
@@ -1083,6 +1083,7 @@ resources:
           NovaComputeLibvirtType: {get_param: NovaComputeLibvirtType}
           NovaComputeLibvirtVifDriver: {get_param: NovaComputeLibvirtVifDriver}
           NovaEnableRbdBackend: {get_param: NovaEnableRbdBackend}
+          NovaIPv6: {get_param: NovaIPv6}
           NovaPublicIP: {get_attr: [VipMap, net_ip_map, external]}
           NovaPassword: {get_param: NovaPassword}
           NovaOVSBridge: {get_param: NovaOVSBridge}
@@ -1107,6 +1108,7 @@ resources:
           CloudDomain: {get_param: CloudDomain}
           ServerMetadata: {get_param: ServerMetadata}
           SchedulerHints: {get_param: NovaComputeSchedulerHints}
+          NodeIndex: '%index%'
 
   BlockStorage:
     type: OS::Heat::ResourceGroup
@@ -1148,6 +1150,7 @@ resources:
           CloudDomain: {get_param: CloudDomain}
           ServerMetadata: {get_param: ServerMetadata}
           SchedulerHints: {get_param: BlockStorageSchedulerHints}
+          NodeIndex: '%index%'
 
   ObjectStorage:
     type: OS::Heat::ResourceGroup
@@ -1180,6 +1183,7 @@ resources:
           CloudDomain: {get_param: CloudDomain}
           ServerMetadata: {get_param: ServerMetadata}
           SchedulerHints: {get_param: ObjectStorageSchedulerHints}
+          NodeIndex: '%index%'
 
   CephStorage:
     type: OS::Heat::ResourceGroup
@@ -1207,6 +1211,7 @@ resources:
           CloudDomain: {get_param: CloudDomain}
           ServerMetadata: {get_param: ServerMetadata}
           SchedulerHints: {get_param: CephStorageSchedulerHints}
+          NodeIndex: '%index%'
 
   ControllerIpListMap:
     type: OS::TripleO::Network::Ports::NetIpListMap
index 96198c3..dc2f98e 100644 (file)
@@ -39,6 +39,9 @@ parameters:
   CephClientUserName:
     default: openstack
     type: string
+  CephIPv6:
+    default: False
+    type: boolean
 
 resources:
   CephClusterConfigImpl:
@@ -50,15 +53,25 @@ resources:
           datafiles:
             ceph_cluster:
               mapped_data:
+                ceph_ipv6: {get_param: CephIPv6}
                 ceph_storage_count: {get_param: ceph_storage_count}
                 ceph_mon_initial_members:
                   list_join:
                   - ','
                   - {get_param: ceph_mon_names}
-                ceph::profile::params::mon_host:
+                ceph_mon_host:
                   list_join:
                   - ','
                   - {get_param: ceph_mon_ips}
+                ceph_mon_host_v6:
+                  str_replace:
+                    template: "'[IPS_LIST]'"
+                    params:
+                      IPS_LIST:
+                        list_join:
+                        - '],['
+                        - {get_param: ceph_mon_ips}
+                ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
                 ceph::profile::params::fsid: {get_param: ceph_fsid}
                 ceph::profile::params::mon_key: {get_param: ceph_mon_key}
                 # We should use a separated key for the non-admin clients
index 88120b9..d298892 100644 (file)
@@ -62,6 +62,9 @@ parameters:
     description: |
       Role specific additional hiera configuration to inject into the cluster.
     type: json
+  CephStorageIPs:
+    default: {}
+    type: json
   NetworkDeploymentActions:
     type: comma_delimited_list
     description: >
@@ -90,6 +93,9 @@ parameters:
     type: json
     description: Optional scheduler hints to pass to nova
     default: {}
+  NodeIndex:
+    type: number
+    default: 0
 
 resources:
   CephStorage:
@@ -135,31 +141,43 @@ resources:
     type: OS::TripleO::CephStorage::Ports::ExternalPort
     properties:
       ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: CephStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   InternalApiPort:
     type: OS::TripleO::CephStorage::Ports::InternalApiPort
     properties:
       ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: CephStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   StoragePort:
     type: OS::TripleO::CephStorage::Ports::StoragePort
     properties:
       ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: CephStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   StorageMgmtPort:
     type: OS::TripleO::CephStorage::Ports::StorageMgmtPort
     properties:
       ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: CephStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   TenantPort:
     type: OS::TripleO::CephStorage::Ports::TenantPort
     properties:
       ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: CephStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   ManagementPort:
     type: OS::TripleO::CephStorage::Ports::ManagementPort
     properties:
       ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: CephStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   NetworkConfig:
     type: OS::TripleO::CephStorage::Net::SoftwareConfig
index 0c22b57..888f3cf 100644 (file)
@@ -38,6 +38,9 @@ parameters:
     description: |
       Role specific additional hiera configuration to inject into the cluster.
     type: json
+  BlockStorageIPs:
+    default: {}
+    type: json
   Flavor:
     description: Flavor for block storage nodes to request when deploying.
     type: string
@@ -141,6 +144,9 @@ parameters:
     type: json
     description: Optional scheduler hints to pass to nova
     default: {}
+  NodeIndex:
+    type: number
+    default: 0
 
 
 resources:
@@ -187,31 +193,43 @@ resources:
     type: OS::TripleO::BlockStorage::Ports::ExternalPort
     properties:
       ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: BlockStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   InternalApiPort:
     type: OS::TripleO::BlockStorage::Ports::InternalApiPort
     properties:
       ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: BlockStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   StoragePort:
     type: OS::TripleO::BlockStorage::Ports::StoragePort
     properties:
       ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: BlockStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   StorageMgmtPort:
     type: OS::TripleO::BlockStorage::Ports::StorageMgmtPort
     properties:
       ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: BlockStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   TenantPort:
     type: OS::TripleO::BlockStorage::Ports::TenantPort
     properties:
       ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: BlockStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   ManagementPort:
     type: OS::TripleO::BlockStorage::Ports::ManagementPort
     properties:
       ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: BlockStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   NetworkConfig:
     type: OS::TripleO::BlockStorage::Net::SoftwareConfig
index 58ca71e..ee5bced 100644 (file)
@@ -195,6 +195,9 @@ parameters:
     default: 'dvr_snat'
     description: Agent mode for the neutron-l3-agent on the controller hosts
     type: string
+  NodeIndex:
+    type: number
+    default: 0
   NovaApiHost:
     type: string
     default: ''  # Has to be here because of the ignored empty value bug
@@ -207,6 +210,9 @@ parameters:
       NovaCompute specific configuration to inject into the cluster. Same
       structure as ExtraConfig.
     type: json
+  NovaComputeIPs:
+    default: {}
+    type: json
   NovaComputeLibvirtType:
     type: string
     default: kvm
@@ -218,6 +224,10 @@ parameters:
     default: false
     description: Whether to enable or not the Rbd backend for Nova
     type: boolean
+  NovaIPv6:
+    default: false
+    description: Enable IPv6 features in Nova
+    type: boolean
   NovaPassword:
     description: The password for the nova service account, used by nova-api.
     type: string
@@ -378,31 +388,43 @@ resources:
     type: OS::TripleO::Compute::Ports::ExternalPort
     properties:
       ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      IPPool: {get_param: NovaComputeIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   InternalApiPort:
     type: OS::TripleO::Compute::Ports::InternalApiPort
     properties:
       ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      IPPool: {get_param: NovaComputeIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   StoragePort:
     type: OS::TripleO::Compute::Ports::StoragePort
     properties:
       ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      IPPool: {get_param: NovaComputeIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   StorageMgmtPort:
     type: OS::TripleO::Compute::Ports::StorageMgmtPort
     properties:
       ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      IPPool: {get_param: NovaComputeIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   TenantPort:
     type: OS::TripleO::Compute::Ports::TenantPort
     properties:
       ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      IPPool: {get_param: NovaComputeIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   ManagementPort:
     type: OS::TripleO::Compute::Ports::ManagementPort
     properties:
       ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      IPPool: {get_param: NovaComputeIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   NetIpMap:
     type: OS::TripleO::Network::Ports::NetIpMap
@@ -478,6 +500,7 @@ resources:
               raw_data: {get_file: hieradata/compute.yaml}
               mapped_data:
                 cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend}
+                nova::use_ipv6: {get_input: nova_ipv6}
                 nova::debug: {get_input: debug}
                 nova::rabbit_userid: {get_input: rabbit_username}
                 nova::rabbit_password: {get_input: rabbit_password}
@@ -567,10 +590,18 @@ resources:
         nova_api_host: {get_param: NovaApiHost}
         nova_password: {get_param: NovaPassword}
         nova_enable_rbd_backend: {get_param: NovaEnableRbdBackend}
+        nova_ipv6: {get_param: NovaIPv6}
         cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
         nova_vnc_proxyclient_address: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NovaVncProxyNetwork]}]}
         nova_vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
-        nova_vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
+        # Strip the square brackets that enclose the host when it is an IPv6
+        # address. For DNS names and IPv4, this simply yields the
+        # NovaVNCProxyPublic host value unchanged.
+        nova_vncproxy_host:
+          str_replace:
+            template: {get_param: [EndpointMap, NovaVNCProxyPublic, host]}
+            params:
+              '[': ''
+              ']': ''
         nova_vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
         nova_ovs_bridge: {get_param: NovaOVSBridge}
         nova_security_group_api: {get_param: NovaSecurityGroupAPI}
index cf7b403..9e9a764 100644 (file)
@@ -1295,7 +1295,7 @@ resources:
               mapped_data:
                 ceph::profile::params::cluster_network: {get_input: ceph_cluster_network}
                 ceph::profile::params::public_network: {get_input: ceph_public_network}
-                ceph::mon::public_addr: {get_input: ceph_public_ip}
+                ceph::profile::params::public_addr: {get_input: ceph_public_ip}
             database:
               raw_data: {get_file: hieradata/database.yaml}
             object:
index ebd6c25..312d49a 100644 (file)
@@ -41,6 +41,9 @@ parameters:
   CephClientUserName:
     default: openstack
     type: string
+  CephIPv6:
+    default: False
+    type: boolean
 
 resources:
   CephClusterConfigImpl:
@@ -54,7 +57,9 @@ resources:
               mapped_data:
                 ceph_storage_count: {get_param: ceph_storage_count}
                 enable_external_ceph: true
-                ceph::profile::params::mon_host: {get_param: ceph_external_mon_ips}
+                ceph_ipv6: {get_param: CephIPv6}
+                ceph_mon_host: {get_param: ceph_external_mon_ips}
+                ceph_mon_host_v6: {get_param: ceph_external_mon_ips}
                 ceph::profile::params::fsid: {get_param: ceph_fsid}
                 ceph::profile::params::client_keys:
                   str_replace:
@@ -72,6 +77,7 @@ resources:
                       NOVA_POOL: {get_param: NovaRbdPoolName}
                       CINDER_POOL: {get_param: CinderRbdPoolName}
                       GLANCE_POOL: {get_param: GlanceRbdPoolName}
+                ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
                 nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
                 cinder_rbd_pool_name: {get_param: CinderRbdPoolName}
                 glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
index 572eef9..865210c 100644 (file)
@@ -7,7 +7,6 @@ nova::compute::instance_usage_audit: true
 nova::compute::instance_usage_audit_period: 'hour'
 nova::compute::vnc_enabled: true
 
-nova::compute::libvirt::vncserver_listen: '0.0.0.0'
 nova::compute::libvirt::migration_support: true
 
 nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
index 0db5b45..fd7faff 100644 (file)
@@ -40,6 +40,14 @@ if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
   } -> Class['ceph::profile::osd']
 }
 
+if str2bool(hiera('ceph_ipv6', false)) {
+  $mon_host = hiera('ceph_mon_host_v6')
+} else {
+  $mon_host = hiera('ceph_mon_host')
+}
+class { '::ceph::profile::params':
+  mon_host            => $mon_host,
+}
 include ::ceph::conf
 include ::ceph::profile::client
 include ::ceph::profile::osd
index 99220ff..7c8cda7 100644 (file)
@@ -62,6 +62,14 @@ nova_config {
 $rbd_ephemeral_storage = hiera('nova::compute::rbd::ephemeral_storage', false)
 $rbd_persistent_storage = hiera('rbd_persistent_storage', false)
 if $rbd_ephemeral_storage or $rbd_persistent_storage {
+  if str2bool(hiera('ceph_ipv6', false)) {
+    $mon_host = hiera('ceph_mon_host_v6')
+  } else {
+    $mon_host = hiera('ceph_mon_host')
+  }
+  class { '::ceph::profile::params':
+    mon_host            => $mon_host,
+  }
   include ::ceph::conf
   include ::ceph::profile::client
 
@@ -83,7 +91,14 @@ if hiera('cinder_enable_nfs_backend', false) {
   package {'nfs-utils': } -> Service['nova-compute']
 }
 
-include ::nova::compute::libvirt
+if str2bool(hiera('nova::use_ipv6', false)) {
+  $vncserver_listen = '::0'
+} else {
+  $vncserver_listen = '0.0.0.0'
+}
+class { '::nova::compute::libvirt' :
+  vncserver_listen => $vncserver_listen,
+}
 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
   file {'/etc/libvirt/qemu.conf':
     ensure  => present,
index 87c339a..9e5c556 100644 (file)
@@ -48,14 +48,24 @@ if hiera('step') >= 2 {
     include ::mongodb::globals
 
     include ::mongodb::server
-    $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+    # NOTE(gfidente): We need to pass the list of IPv6 addresses *with* the
+    # port and without the brackets as the 'members' argument of the
+    # 'mongodb_replset' resource.
+    if str2bool(hiera('mongodb::server::ipv6', false)) {
+      $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+      $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+      $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+    } else {
+      $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+      $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+    }
     $mongo_node_string = join($mongo_node_ips_with_port, ',')
 
     $mongodb_replset = hiera('mongodb::server::replset')
     $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
     if downcase(hiera('bootstrap_nodeid')) == $::hostname {
       mongodb_replset { $mongodb_replset :
-        members => $mongo_node_ips_with_port,
+        members => $mongo_node_ips_with_port_nobr,
       }
     }
   }
@@ -151,8 +161,15 @@ if hiera('step') >= 2 {
   $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
 
   if $enable_ceph {
+    $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+    if str2bool(hiera('ceph_ipv6', false)) {
+      $mon_host = hiera('ceph_mon_host_v6')
+    } else {
+      $mon_host = hiera('ceph_mon_host')
+    }
     class { '::ceph::profile::params':
-      mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+      mon_initial_members => $mon_initial_members,
+      mon_host            => $mon_host,
     }
     include ::ceph::conf
     include ::ceph::profile::mon
@@ -178,6 +195,14 @@ if hiera('step') >= 2 {
   }
 
   if str2bool(hiera('enable_external_ceph', false)) {
+    if str2bool(hiera('ceph_ipv6', false)) {
+      $mon_host = hiera('ceph_mon_host_v6')
+    } else {
+      $mon_host = hiera('ceph_mon_host')
+    }
+    class { '::ceph::profile::params':
+      mon_host            => $mon_host,
+    }
     include ::ceph::conf
     include ::ceph::profile::client
   }
index bbba99e..402a3bc 100644 (file)
@@ -201,8 +201,19 @@ if hiera('step') >= 1 {
 if hiera('step') >= 2 {
 
   # NOTE(gfidente): the following vars are needed on all nodes so they
-  # need to stay out of pacemaker_master conditional
-  $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+  # need to stay out of pacemaker_master conditional.
+  # The address mangling will hopefully go away once we are able to configure
+  # the connection string via hostnames; until then, we need to pass the list
+  # of IPv6 addresses *with* the port and without the brackets as the
+  # 'members' argument of the 'mongodb_replset' resource.
+  if str2bool(hiera('mongodb::server::ipv6', false)) {
+    $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
+    $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
+    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+  } else {
+    $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
+    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
+  }
   $mongodb_replset = hiera('mongodb::server::replset')
 
   if $pacemaker_master {
@@ -431,7 +442,7 @@ if hiera('step') >= 2 {
         before  => Mongodb_replset[$mongodb_replset],
       }
       mongodb_replset { $mongodb_replset :
-        members => $mongo_node_ips_with_port,
+        members => $mongo_node_ips_with_port_nobr,
       }
     }
 
@@ -526,8 +537,15 @@ MYSQL_HOST=localhost\n",
   $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
 
   if $enable_ceph {
+    $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
+    if str2bool(hiera('ceph_ipv6', false)) {
+      $mon_host = hiera('ceph_mon_host_v6')
+    } else {
+      $mon_host = hiera('ceph_mon_host')
+    }
     class { '::ceph::profile::params':
-      mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
+      mon_initial_members => $mon_initial_members,
+      mon_host            => $mon_host,
     }
     include ::ceph::conf
     include ::ceph::profile::mon
@@ -553,6 +571,14 @@ MYSQL_HOST=localhost\n",
   }
 
   if str2bool(hiera('enable_external_ceph', false)) {
+    if str2bool(hiera('ceph_ipv6', false)) {
+      $mon_host = hiera('ceph_mon_host_v6')
+    } else {
+      $mon_host = hiera('ceph_mon_host')
+    }
+    class { '::ceph::profile::params':
+      mon_host            => $mon_host,
+    }
     include ::ceph::conf
     include ::ceph::profile::client
   }
index 3b04be8..c26aca7 100644 (file)
@@ -83,6 +83,9 @@ parameters:
     description: |
       Role specific additional hiera configuration to inject into the cluster.
     type: json
+  SwiftStorageIPs:
+    default: {}
+    type: json
   NetworkDeploymentActions:
     type: comma_delimited_list
     description: >
@@ -111,6 +114,9 @@ parameters:
     type: json
     description: Optional scheduler hints to pass to nova
     default: {}
+  NodeIndex:
+    type: number
+    default: 0
 
 resources:
 
@@ -156,31 +162,43 @@ resources:
     type: OS::TripleO::SwiftStorage::Ports::ExternalPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   InternalApiPort:
     type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   StoragePort:
     type: OS::TripleO::SwiftStorage::Ports::StoragePort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   StorageMgmtPort:
     type: OS::TripleO::SwiftStorage::Ports::StorageMgmtPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   TenantPort:
     type: OS::TripleO::SwiftStorage::Ports::TenantPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   ManagementPort:
     type: OS::TripleO::SwiftStorage::Ports::ManagementPort
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      IPPool: {get_param: SwiftStorageIPs}
+      NodeIndex: {get_param: NodeIndex}
 
   NetworkConfig:
     type: OS::TripleO::ObjectStorage::Net::SoftwareConfig