Merge "Add update yaml backward compatibe with PublicVirtualIP on ctlplane"
authorJenkins <jenkins@review.openstack.org>
Thu, 21 Jan 2016 19:41:04 +0000 (19:41 +0000)
committerGerrit Code Review <review@openstack.org>
Thu, 21 Jan 2016 19:41:04 +0000 (19:41 +0000)
108 files changed:
capabilities_map.yaml [new file with mode: 0644]
docker/README-containers.md
docker/compute-post.yaml
docker/firstboot/install_docker_agents.yaml
docker/generate_json_config.sh [deleted file]
environments/cinder-dellsc-config.yaml [new file with mode: 0644]
environments/cinder-eqlx-config.yaml [new file with mode: 0644]
environments/docker-network-isolation.yaml [new file with mode: 0644]
environments/docker-network.yaml [new file with mode: 0644]
environments/docker.yaml [moved from environments/docker-rdo.yaml with 93% similarity]
environments/enable-tls.yaml
environments/external-loadbalancer-vip.yaml
environments/ips-from-pool.yaml [new file with mode: 0644]
environments/network-isolation.yaml
environments/network-management.yaml [new file with mode: 0644]
environments/neutron-midonet.yaml [new file with mode: 0644]
environments/puppet-pacemaker.yaml
environments/storage-environment.yaml
extraconfig/all_nodes/mac_hostname.yaml
extraconfig/all_nodes/random_string.yaml
extraconfig/post_deploy/example.yaml
extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
extraconfig/tasks/noop.yaml [new file with mode: 0644]
extraconfig/tasks/pacemaker_resource_restart.sh [new file with mode: 0755]
extraconfig/tasks/post_puppet_pacemaker.yaml [new file with mode: 0644]
extraconfig/tasks/pre_puppet_pacemaker.yaml [new file with mode: 0644]
extraconfig/tasks/yum_update.sh
net-config-bond.yaml
net-config-bridge.yaml
net-config-linux-bridge.yaml
net-config-noop.yaml
network/config/bond-with-vlans/README.md
network/config/bond-with-vlans/ceph-storage.yaml
network/config/bond-with-vlans/cinder-storage.yaml
network/config/bond-with-vlans/compute.yaml
network/config/bond-with-vlans/controller-no-external.yaml
network/config/bond-with-vlans/controller.yaml
network/config/bond-with-vlans/swift-storage.yaml
network/config/multiple-nics/README.md
network/config/multiple-nics/ceph-storage.yaml
network/config/multiple-nics/cinder-storage.yaml
network/config/multiple-nics/compute.yaml
network/config/multiple-nics/controller.yaml
network/config/multiple-nics/swift-storage.yaml
network/config/single-nic-vlans/README.md
network/config/single-nic-vlans/ceph-storage.yaml
network/config/single-nic-vlans/cinder-storage.yaml
network/config/single-nic-vlans/compute.yaml
network/config/single-nic-vlans/controller-no-external.yaml
network/config/single-nic-vlans/controller.yaml
network/config/single-nic-vlans/swift-storage.yaml
network/endpoints/endpoint_map.yaml
network/management.yaml [new file with mode: 0644]
network/networks.yaml
network/ports/ctlplane_vip.yaml
network/ports/external.yaml
network/ports/external_from_pool.yaml [new file with mode: 0644]
network/ports/from_service.yaml [new file with mode: 0644]
network/ports/internal_api.yaml
network/ports/internal_api_from_pool.yaml [new file with mode: 0644]
network/ports/management.yaml [new file with mode: 0644]
network/ports/net_ip_list_map.yaml
network/ports/net_ip_map.yaml
network/ports/net_ip_subnet_map.yaml
network/ports/net_vip_map_external.yaml
network/ports/noop.yaml
network/ports/storage.yaml
network/ports/storage_from_pool.yaml [new file with mode: 0644]
network/ports/storage_mgmt.yaml
network/ports/storage_mgmt_from_pool.yaml [new file with mode: 0644]
network/ports/tenant.yaml
network/ports/tenant_from_pool.yaml [new file with mode: 0644]
network/ports/vip.yaml
overcloud-resource-registry-puppet.yaml
overcloud.yaml
puppet/all-nodes-config.yaml
puppet/ceph-storage-post.yaml
puppet/ceph-storage.yaml
puppet/cinder-storage-post.yaml
puppet/cinder-storage.yaml
puppet/compute-post.yaml
puppet/compute.yaml
puppet/controller-post.yaml
puppet/controller.yaml
puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml [new file with mode: 0644]
puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml [new file with mode: 0644]
puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml [new file with mode: 0644]
puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
puppet/extraconfig/pre_deploy/controller/neutron-nuage.yaml
puppet/extraconfig/pre_deploy/per_node.yaml
puppet/extraconfig/tls/ca-inject.yaml
puppet/extraconfig/tls/tls-cert-inject.yaml
puppet/hieradata/controller.yaml
puppet/hieradata/database.yaml
puppet/manifests/overcloud_cephstorage.pp
puppet/manifests/overcloud_compute.pp
puppet/manifests/overcloud_controller.pp
puppet/manifests/overcloud_controller_pacemaker.pp
puppet/manifests/overcloud_object.pp
puppet/manifests/overcloud_volume.pp
puppet/swift-storage-post.yaml
puppet/swift-storage.yaml
puppet/vip-config.yaml
tox.ini

diff --git a/capabilities_map.yaml b/capabilities_map.yaml
new file mode 100644 (file)
index 0000000..30ee211
--- /dev/null
@@ -0,0 +1,226 @@
+# This file holds metadata about the capabilities of the tripleo-heat-templates
+# repository for deployment using puppet. It groups configuration by topic,
+# describes possible combinations of environments and resource capabilities.
+
+# root_template: identifies repository's root template
+# root_environment: identifies root_environment, this one is special in terms of
+#   order in which the environments are merged before deploying. This one serves as
+#   a base and its parameters/resource_registry get overridden by other environments
+#   if used.
+
+# topics:
+# High Level grouping by purpose of environments
+# Attributes:
+#  title: (required)
+#  description: (optional)
+#  environment_groups: (required)
+
+# environment_groups:
+# Identifies an environment choice. If group includes multiple environments it
+# indicates that environments in group are mutually exclusive.
+# Attributes:
+#  title: (optional)
+#  description: (optional)
+#  tags: a list of tags to provide additional information for e.g. filtering (optional)
+#  environments: (required)
+
+# environments:
+# List of environments in environment group
+# Attributes:
+#  file: a file name including path within repository (required)
+#  title: (required)
+#  description: (optional)
+#  requires: an array of environments which are required by this environment (optional)
+#  resource_registry: [tbd] (optional)
+
+# resource_registry:
+# [tbd] Each environment can provide options on resource_registry level applicable
+# only when that given environment is used. (resource_type of that environment can
+# be implemented using multiple templates).
+
+root_template: overcloud.yaml
+root_environment: overcloud-resource-registry-puppet.yaml
+topics:
+  - title: Basic Configuration
+    description:
+    environment_groups:
+      - title:
+        description: Enable basic configuration required for OpenStack Deployment
+        environments:
+          - file: overcloud-resource-registry-puppet.yaml
+            title: Default Configuration
+            description:
+
+  - title: Deployment options
+    description:
+    environment_groups:
+      - title: High Availability
+        description: Enables configuration of an Overcloud controller with Pacemaker
+        environments:
+          - file: environments/puppet-pacemaker.yaml
+            title: Pacemaker
+            description: Enable configuration of an Overcloud controller with Pacemaker
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Docker RDO
+        description: >
+          Docker container with heat agents for containerized compute node
+        environments:
+          - file: environments/docker-rdo.yaml
+            title: Docker RDO
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+
+  # - title: Network Interface Configuration
+  #   description:
+  #   environment_groups:
+
+  - title: Overlay network Configuration
+    description:
+    environment_groups:
+      - title: Network Isolation
+        description: >
+          Enable the creation of Neutron networks for
+          isolated Overcloud traffic and configure each role to assign ports
+          (related to that role) on these networks.
+        environments:
+          - file: environments/network-isolation.yaml
+            title: Network Isolation
+            description: Enable Network Isolation
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Single nic or Bonding
+        description: >
+          Configure roles to use pair of bonded nics or to use Vlans on a
+          single nic. This option assumes use of Network Isolation.
+        environments:
+          - file: environments/net-bond-with-vlans.yaml
+            title: Bond with Vlans
+            description: >
+              Configure each role to use a pair of bonded nics (nic2 and
+              nic3) and configures an IP address on each relevant isolated network
+              for each role. This option assumes use of Network Isolation.
+            requires:
+              - environments/network-isolation.yaml
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/net-single-nic-with-vlans.yaml
+            title: Single nic with Vlans
+            description: >
+              Configure each role to use Vlans on a single nic for
+              each isolated network. This option assumes use of Network Isolation.
+            requires:
+              - environments/network-isolation.yaml
+              - overcloud-resource-registry-puppet.yaml
+
+  - title: Neutron Plugin Configuration
+    description:
+    environment_groups:
+      - title: BigSwitch extensions or Cisco N1KV backend
+        description:
+        environments:
+          - file: environments/neutron-ml2-bigswitch.yaml
+            title: BigSwitch extensions
+            description: >
+              Enable Big Switch extensions, configured via puppet
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-ml2-cisco-n1kv.yaml
+            title: Cisco N1KV backend
+            description: >
+              Enable a Cisco N1KV backend, configured via puppet
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Cisco Neutron plugin
+        description: >
+          Enable a Cisco Neutron plugin
+        environments:
+          - file: environments/neutron-ml2-cisco-nexus-ucsm.yaml
+            title: Cisco Neutron plugin
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+
+  - title: Storage
+    description:
+    environment_groups:
+      - title: Cinder NetApp backend
+        description: >
+          Enable a Cinder NetApp backend, configured via puppet
+        environments:
+          - file: environments/cinder-netapp-config.yaml
+            title: Cinder NetApp backend
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Externally managed Ceph
+        description: >
+          Enable the use of an externally managed Ceph cluster
+        environments:
+          - file: environments/puppet-ceph-external.yaml
+            title: Externally managed Ceph
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Ceph Devel
+        description: >
+          Enable a Ceph storage cluster using the controller and 2 ceph nodes.
+          Rbd backends are enabled for Cinder, Glance, and Nova.
+        environments:
+          - file: environments/puppet-ceph-devel.yaml
+            title: Ceph Devel
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Storage Environment
+        description: >
+          Can be used to set up storage backends. Defaults to Ceph used as a
+          backend for Cinder, Glance and Nova ephemeral storage. It configures
+          for example which services will use Ceph, or if any of the services
+          will use NFS. And more. Usually requires to be edited by user first.
+        tags:
+          - no-gui
+        environments:
+          - file: environments/storage-environment.yaml
+            title: Storage Environment
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+
+  - title: Utilities
+    description:
+    environment_groups:
+      - title: Config Debug
+        description: Enable config management (e.g. Puppet) debugging
+        environments:
+          - file: environments/config-debug.yaml
+            title: Config Debug
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Disable journal in MongoDb
+        description: >
+          When journaling is enabled, MongoDB creates a large journal
+          file, which can take time. In a CI environment, for example,
+          journaling is not necessary.
+        environments:
+          - file: environments/mongodb-nojournal.yaml
+            title: Disable journal in MongoDb
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Overcloud Steps
+        description: >
+          Specifies hooks/breakpoints where overcloud deployment should stop
+          Allows operator validation between steps, and/or more granular control.
+          Note: the wildcards relate to naming convention for some resource suffixes,
+          e.g see puppet/*-post.yaml, enabling this will mean we wait for
+          a user signal on every *Deployment_StepN resource defined in those files.
+        tags:
+          - no-gui
+        environments:
+          - file: environments/overcloud-steps.yaml
+            title: Overcloud Steps
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
index 17990b5..ff062a9 100644 (file)
@@ -22,7 +22,12 @@ https://github.com/openstack/tripleo-common/blob/master/scripts/tripleo.sh
 
 Create the Overcloud:
 ```
-$ openstack overcloud deploy --templates=tripleo-heat-templates -e tripleo-heat-templates/environments/docker-rdo.yaml --libvirt-type=qemu
+$ openstack overcloud deploy --templates=tripleo-heat-templates -e tripleo-heat-templates/environments/docker.yaml -e tripleo-heat-templates/environments/docker-network.yaml --libvirt-type=qemu
+```
+
+Using Network Isolation in the Overcloud:
+```
+$ openstack overcloud deploy --templates=tripleo-heat-templates -e tripleo-heat-templates/environments/docker.yaml -e tripleo-heat-templates/environments/docker-network-isolation.yaml --libvirt-type=qemu
 ```
 
 Source the overcloudrc and then you can use the overcloud.
index a6607fd..82572e7 100644 (file)
@@ -1,5 +1,4 @@
-heat_template_version: 2015-04-30
-
+heat_template_version: 2015-10-15
 description: >
   OpenStack compute node post deployment for Docker.
 
@@ -26,6 +25,26 @@ parameters:
     type: string
   DockerOpenvswitchDBImage:
     type: string
+  LibvirtConfig:
+    type: string
+    default: "/etc/libvirt/libvirtd.conf"
+  NovaConfig:
+    type: string
+    default: "/etc/nova/nova.conf"
+  NeutronOpenvswitchAgentConfig:
+    type: string
+    default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/ml2/ml2_conf.ini"
+  NeutronAgentConfig:
+    type: string
+    default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
+  NeutronAgentPluginVolume:
+    type: string
+    description: The neutron agent plugin to mount into the neutron-agents container
+    default: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro"
+  NeutronAgentOvsVolume:
+    type: string
+    description: The neutron agent ovs agents to mount into the neutron-agents container
+    default: " "
 
 resources:
 
@@ -49,6 +68,7 @@ resources:
   ComputePuppetDeployment:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: ComputePuppetDeployment
       servers:  {get_param: servers}
       config: {get_resource: ComputePuppetConfig}
       input_values:
@@ -67,6 +87,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     depends_on: ComputePuppetDeployment
     properties:
+      name: CopyEtcDeployment
       config: {get_resource: CopyEtcConfig}
       servers:  {get_param: servers}
 
@@ -74,21 +95,89 @@ resources:
     type: OS::Heat::SoftwareConfig
     properties:
       group: script
-      outputs:
-      - name: result
-      config: {get_file: ./generate_json_config.sh}
+      inputs:
+      - name: libvirt_config
+      - name: nova_config
+      - name: neutron_openvswitch_agent_config
+      - name: neutron_agent_config
+      config: |
+        #!/bin/python
+        import json
+        import os
+
+        data = {}
+        file_perms = '600'
+        libvirt_perms = '644'
+
+        libvirt_config = os.getenv('libvirt_config').split(',')
+        nova_config = os.getenv('nova_config').split(',')
+        neutron_openvswitch_agent_config = os.getenv('neutron_openvswitch_agent_config').split(',')
+        neutron_agent_config = os.getenv('neutron_agent_config').split(',')
+
+        # Command, Config_files, Owner, Perms
+        services = {'nova-libvirt': ['/usr/sbin/libvirtd', libvirt_config, 'root', libvirt_perms],
+                    'nova-compute': ['/usr/bin/nova-compute', nova_config, 'nova', file_perms],
+                    'neutron-openvswitch-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_openvswitch_agent_config, 'neutron', file_perms],
+                    'neutron-agent': ['/usr/bin/neutron-openvswitch-agent', neutron_agent_config, 'neutron', file_perms],
+                    'ovs-vswitchd': ['/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/openvswitch/ovs-vswitchd.log'],
+                    'ovsdb-server': ['/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --log-file=/var/log/openvswitch/ovsdb-server.log']
+                   }
+
+
+        def build_config_files(config, owner, perms):
+            config_source = '/var/lib/kolla/config_files/'
+            config_files_dict = {}
+            source = os.path.basename(config)
+            dest = config
+            config_files_dict.update({'source': config_source + source,
+                                      'dest': dest,
+                                      'owner': owner,
+                                      'perm': perms})
+            return config_files_dict
+
+
+        for service in services:
+            if service != 'ovs-vswitchd' and service != 'ovsdb-server':
+                command = services.get(service)[0]
+                config_files = services.get(service)[1]
+                owner = services.get(service)[2]
+                perms = services.get(service)[3]
+                config_files_list = []
+                for config_file in config_files:
+                    if service == 'nova-libvirt':
+                        command = command + ' --config ' + config_file
+                    else:
+                        command = command + ' --config-file ' + config_file
+                    data['command'] = command
+                    config_files_dict = build_config_files(config_file, owner, perms)
+                    config_files_list.append(config_files_dict)
+                data['config_files'] = config_files_list
+            else:
+                data['command'] = services.get(service)[0]
+                data['config_files'] = []
+
+            json_config_dir = '/var/lib/etc-data/json-config/'
+            with open(json_config_dir + service + '.json', 'w') as json_file:
+                json.dump(data, json_file, sort_keys=True, indent=4, separators=(',', ': '))
 
   CopyJsonDeployment:
     type: OS::Heat::SoftwareDeployments
     depends_on: CopyEtcDeployment
     properties:
+      name: CopyJsonDeployment
       config: {get_resource: CopyJsonConfig}
       servers:  {get_param: servers}
+      input_values:
+        libvirt_config: {get_param: LibvirtConfig}
+        nova_config: {get_param: NovaConfig}
+        neutron_openvswitch_agent_config: {get_param: NeutronOpenvswitchAgentConfig}
+        neutron_agent_config: {get_param: NeutronAgentConfig}
 
   NovaComputeContainersDeploymentOVS:
     type: OS::Heat::StructuredDeployments
     depends_on: CopyJsonDeployment
     properties:
+      name: NovaComputeContainersDeploymentOVS
       config: {get_resource: NovaComputeContainersConfigOVS}
       servers: {get_param: servers}
 
@@ -118,12 +207,12 @@ resources:
             list_join:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchDBImage} ]
-          container_name: ovs-db-server
+          container_name: ovsdb-server
           net: host
           restart: always
           volumes:
            - /run:/run
-           - /var/lib/etc-data/json-config/ovs-dbserver.json:/var/lib/kolla/config_files/config.json
+           - /var/lib/etc-data/json-config/ovsdb-server.json:/var/lib/kolla/config_files/config.json
           environment:
            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
 
@@ -131,6 +220,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     depends_on: NovaComputeContainersDeploymentOVS
     properties:
+      name: NovaComputeContainersDeploymentNetconfig
       config: {get_resource: NovaComputeContainersConfigNetconfig}
       servers: {get_param: servers}
 
@@ -151,6 +241,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: [CopyJsonDeployment, CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig]
     properties:
+      name: LibvirtContainersDeployment
       config: {get_resource: LibvirtContainersConfig}
       servers: {get_param: servers}
 
@@ -194,6 +285,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: [CopyJsonDeployment, CopyEtcDeployment, ComputePuppetDeployment, NovaComputeContainersDeploymentNetconfig, LibvirtContainersDeployment]
     properties:
+      name: NovaComputeContainersDeployment
       config: {get_resource: NovaComputeContainersConfig}
       servers: {get_param: servers}
 
@@ -234,11 +326,15 @@ resources:
           privileged: true
           restart: always
           volumes:
-           - /run:/run
-           - /lib/modules:/lib/modules:ro
-           - /var/lib/etc-data/json-config/neutron-agent.json:/var/lib/kolla/config_files/config.json
-           - /var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/ovs_neutron_plugin.ini:ro
-           - /var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro
+            str_split:
+              - ","
+              - list_join:
+                 - ","
+                 - [ "/run:/run", "/lib/modules:/lib/modules:ro",
+                     "/var/lib/etc-data/json-config/neutron-agent.json:/var/lib/kolla/config_files/config.json",
+                     "/var/lib/etc-data/neutron/neutron.conf:/var/lib/kolla/config_files/neutron.conf:ro",
+                     {get_param: NeutronAgentPluginVolume},
+                     {get_param: NeutronAgentOvsVolume} ]
           environment:
            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           volumes_from:
index 22a8ff9..348c175 100644 (file)
@@ -3,7 +3,7 @@ heat_template_version: 2014-10-16
 parameters:
   DockerAgentImage:
     type: string
-    default: dprince/heat-docker-agents-centos
+    default: tripleoupstream/heat-docker-agents
   DockerNamespace:
     type: string
     default: kollaglue
diff --git a/docker/generate_json_config.sh b/docker/generate_json_config.sh
deleted file mode 100644 (file)
index 5cf4922..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-
-KOLLA_DEST=/var/lib/kolla/config_files
-JSON_DEST=/var/lib/etc-data/json-config
-
-# For more config file generation, simply define a new SERVICE_DATA_
-# prefixed variable. The command string is quoted to include config-file
-# arguments. Note that the variable name following SERVICE_DATA_ will be
-# the filename the JSON config is written to.
-
-# [EXAMPLE]: SERVICE_DATA_<SERVICE_NAME>=(<command> <source> <dest> <owner> <perms>)
-
-SERVICE_DATA_NOVA_LIBVIRT=("/usr/sbin/libvirtd" libvirtd.conf /etc/libvirt/libvirtd.conf root 0644)
-SERVICE_DATA_NOVA_COMPUTE=("/usr/bin/nova-compute" nova.conf /etc/nova/nova.conf nova 0600)
-SERVICE_DATA_NEUTRON_OPENVSWITCH_AGENT=("/usr/bin/neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini" neutron.conf /etc/neutron/neutron.conf neutron 0600 ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini neutron 0600)
-SERVICE_DATA_NEUTRON_AGENT=("/usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini" neutron.conf /etc/neutron/neutron.conf neutron 0600 ovs_neutron_plugin.ini /etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini neutron 0600)
-SERVICE_DATA_OVS_VSWITCHD=("/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/openvswitch/ovs-vswitchd.log")
-SERVICE_DATA_OVS_DBSERVER=("/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --log-file=/var/log/openvswitch/ovsdb-server.log")
-
-function create_json_header() {
-    local command=$1
-
-    echo "\
-{
-    \"command\": \"${command[@]}\","
-
-}
-
-function create_config_file_header() {
-    echo "    \"config_files\": ["
-}
-
-function create_config_file_block() {
-    local source=$KOLLA_DEST/$1
-    local dest=$2
-    local owner=$3
-    local perm=$4
-
-    printf "\
-\t{
-\t    \"source\": \"$source\",
-\t    \"dest\": \"$dest\",
-\t    \"owner\": \"$owner\",
-\t    \"perm\": \"$perm\"
-\t}"
-}
-
-function add_trailing_comma() {
-    printf ", \n"
-}
-
-function create_config_file_trailer() {
-    echo -e "\n    ]"
-}
-
-function create_json_trailer() {
-    echo "}"
-}
-
-function create_json_data() {
-    local config_data=$1
-    shift
-
-    create_json_header "$config_data"
-    create_config_file_header
-    while [ "$1" ]; do
-        create_config_file_block "$@"
-        shift 4
-        if [ "$1" ]; then
-            add_trailing_comma
-        fi
-    done
-    create_config_file_trailer
-    create_json_trailer
-}
-
-function write_json_data() {
-
-    local name=$1[@]
-    local service_data=("${!name}")
-
-    local service_name=${1#SERVICE_DATA_} # chop SERVICE_DATA_ prefix
-    service_name=${service_name//_/-}     # switch underscore to dash
-    service_name=${service_name,,}        # change to lowercase
-
-    echo "Creating JSON file ${service_name}"
-    create_json_data "${service_data[@]}" > "$JSON_DEST/$service_name.json"
-}
-
-function process_configs() {
-    for service in ${!SERVICE_DATA_*}; do
-        write_json_data "${service}"
-    done
-}
-
-process_configs
diff --git a/environments/cinder-dellsc-config.yaml b/environments/cinder-dellsc-config.yaml
new file mode 100644 (file)
index 0000000..92e257d
--- /dev/null
@@ -0,0 +1,17 @@
+# A Heat environment file which can be used to enable
+# a Cinder Dell Storage Center ISCSI backend, configured via puppet
+resource_registry:
+  OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
+
+parameter_defaults:
+  CinderEnableDellScBackend: true
+  CinderDellScBackendName: 'tripleo_dellsc'
+  CinderDellScSanIp: ''
+  CinderDellScSanLogin: 'Admin'
+  CinderDellScSanPassword: ''
+  CinderDellScSsn: '64702'
+  CinderDellScIscsiIpAddress: ''
+  CinderDellScIscsiPort: '3260'
+  CinderDellScApiPort: '3033'
+  CinderDellScServerFolder: 'dellsc_server'
+  CinderDellScVolumeFolder: 'dellsc_volume'
diff --git a/environments/cinder-eqlx-config.yaml b/environments/cinder-eqlx-config.yaml
new file mode 100644 (file)
index 0000000..ca2c5e5
--- /dev/null
@@ -0,0 +1,17 @@
+# A Heat environment file which can be used to enable
+# a Cinder eqlx backend, configured via puppet
+resource_registry:
+  OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
+
+parameter_defaults:
+  CinderEnableEqlxBackend: true
+  CinderEqlxBackendName: 'tripleo_eqlx'
+  CinderEqlxSanIp: ''
+  CinderEqlxSanLogin: ''
+  CinderEqlxSanPassword: ''
+  CinderEqlxSanThinProvision: true
+  CinderEqlxGroupname: 'group-0'
+  CinderEqlxPool: 'default'
+  CinderEqlxChapLogin: ''
+  CinderEqlxChapPassword: ''
+  CinderEqlxUseChap: false
diff --git a/environments/docker-network-isolation.yaml b/environments/docker-network-isolation.yaml
new file mode 100644 (file)
index 0000000..257d03d
--- /dev/null
@@ -0,0 +1,4 @@
+parameter_defaults:
+  NeutronAgentConfig: "/etc/neutron/neutron.conf,/etc/neutron/plugins/openvswitch/openvswitch_agent.ini"
+  NeutronAgentPluginVolume: "/var/lib/etc-data/neutron/plugins/ml2/openvswitch_agent.ini:/var/lib/kolla/config_files/openvswitch_agent.ini:ro"
+  NeutronAgentOvsVolume: "/var/lib/etc-data/neutron/conf.d/neutron-openvswitch-agent:/etc/neutron/conf.d/neutron-openvswitch-agent:ro"
diff --git a/environments/docker-network.yaml b/environments/docker-network.yaml
new file mode 100644 (file)
index 0000000..f10ec38
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Compute::Net::SoftwareConfig: ../net-config-bridge.yaml
similarity index 93%
rename from environments/docker-rdo.yaml
rename to environments/docker.yaml
index 66824fe..6376b74 100644 (file)
@@ -2,7 +2,6 @@ resource_registry:
   # Docker container with heat agents for containerized compute node.
   OS::TripleO::ComputePostDeployment: ../docker/compute-post.yaml
   OS::TripleO::NodeUserData: ../docker/firstboot/install_docker_agents.yaml
-  OS::TripleO::Compute::Net::SoftwareConfig: ../net-config-bridge.yaml
 
 parameters:
   NovaImage: atomic-image
index 5c2506e..bc4d1be 100644 (file)
@@ -4,6 +4,38 @@ parameter_defaults:
   SSLIntermediateCertificate: ''
   SSLKey: |
     The contents of the private key go here
+  EndpointMap:
+    CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+    CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+    CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+    CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+    CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+    CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+    GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+    GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+    GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+    GlanceRegistryAdmin: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
+    GlanceRegistryInternal: {protocol: 'http', port: '9191', host: 'IP_ADDRESS'}
+    GlanceRegistryPublic: {protocol: 'https', port: '9191', host: 'IP_ADDRESS'} # Not set on the loadbalancer yet.
+    HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+    HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+    HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+    HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+    KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+    KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+    KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+    NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+    NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+    NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+    NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+    NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+    NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+    NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
+    NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
+    NovaEC2Public: {protocol: 'https', port: '13773', host: 'CLOUDNAME'}
+    SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
 
 resource_registry:
   OS::TripleO::NodeTLSData: ../puppet/extraconfig/tls/tls-cert-inject.yaml
index 47d5bd9..1cf5982 100644 (file)
@@ -1,14 +1,37 @@
 resource_registry:
   OS::TripleO::Network::Ports::NetVipMap: ../network/ports/net_vip_map_external.yaml
+  OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/noop.yaml
+  OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/noop.yaml
+  OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/noop.yaml
+  OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/noop.yaml
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/from_service.yaml
+  OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
+  OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+  OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+  OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+  OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
 
 parameter_defaults:
   # When using an external loadbalancer set the following in parameter_defaults
   # to control your VIPs (currently one per network)
   # NOTE: we will eventually move to one VIP per service
   #
-  # ControlNetworkVip:
-  # ExternalNetworkVip:
-  # InternalApiNetworkVip:
-  # StorageNetworkVip:
-  # StorageMgmtNetworkVip:
-  EnableLoadBalancer: false
\ No newline at end of file
+  ControlPlaneIP: 192.0.2.251
+  ExternalNetworkVip: 10.0.0.251
+  InternalApiNetworkVip: 172.16.2.251
+  StorageNetworkVip: 172.16.1.251
+  StorageMgmtNetworkVip: 172.16.3.251
+  ServiceVips:
+    redis: 172.16.2.252
+  ControllerIPs:
+    external:
+    - 10.0.0.253
+    internal_api:
+    - 172.16.2.253
+    storage:
+    - 172.16.1.253
+    storage_mgmt:
+    - 172.16.3.253
+    tenant:
+    - 172.16.0.253
+  EnableLoadBalancer: false
diff --git a/environments/ips-from-pool.yaml b/environments/ips-from-pool.yaml
new file mode 100644 (file)
index 0000000..8c27fe4
--- /dev/null
@@ -0,0 +1,20 @@
+resource_registry:
+  OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
+  OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
+  OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
+  OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt_from_pool.yaml
+  OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant_from_pool.yaml
+
+parameter_defaults:
+  ControllerIPs:
+    # Each controller will get an IP from the lists below, first controller, first IP
+    external:
+    - 10.0.0.251
+    internal_api:
+    - 172.16.2.251
+    storage:
+    - 172.16.1.251
+    storage_mgmt:
+    - 172.16.3.251
+    tenant:
+    - 172.16.0.251
index 937931d..87fc22f 100644 (file)
@@ -1,12 +1,23 @@
 # Enable the creation of Neutron networks for isolated Overcloud
 # traffic and configure each role to assign ports (related
 # to that role) on these networks.
+# Many networks are disabled by default because they are not used
+# in a typical configuration. Override via parameter_defaults.
 resource_registry:
   OS::TripleO::Network::External: ../network/external.yaml
   OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
   OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
   OS::TripleO::Network::Storage: ../network/storage.yaml
   OS::TripleO::Network::Tenant: ../network/tenant.yaml
+  # Management network is optional and disabled by default
+  OS::TripleO::Network::Management: ../network/noop.yaml
+
+  # Port assignments for the VIPs
+  OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+  OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+  OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+  OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
 
   # Port assignments for the controller role
   OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
@@ -14,25 +25,39 @@ resource_registry:
   OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
   OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
   OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
+  OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/noop.yaml
 
   # Port assignments for the compute role
+  OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
   OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
   OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
+  OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
   OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml
+  OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/noop.yaml
 
   # Port assignments for the ceph storage role
+  OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
+  OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
   OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
   OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+  OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
+  OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/noop.yaml
 
   # Port assignments for the swift storage role
+  OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
   OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
   OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
   OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+  OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
+  OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/noop.yaml
 
   # Port assignments for the block storage role
+  OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
   OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
   OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
   OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+  OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
+  OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/noop.yaml
 
   # Port assignments for service virtual IPs for the controller role
   OS::TripleO::Controller::Ports::RedisVipPort: ../network/ports/vip.yaml
diff --git a/environments/network-management.yaml b/environments/network-management.yaml
new file mode 100644 (file)
index 0000000..2f0cff8
--- /dev/null
@@ -0,0 +1,24 @@
+# Enable the creation of a system management network. This
+# creates a Neutron network for isolated Overcloud
+# system management traffic and configures each role to
+# assign a port (related to that role) on that network.
+# Note that the basic sample NIC configuration templates
+# do not include the management network, see the
+# single-nic-vlans-mgmt templates for an example.
+resource_registry:
+  OS::TripleO::Network::Management: ../network/management.yaml
+
+  # Port assignments for the controller role
+  OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
+
+  # Port assignments for the compute role
+  OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
+
+  # Port assignments for the ceph storage role
+  OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+  # Port assignments for the swift storage role
+  OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+  # Port assignments for the block storage role
+  OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
diff --git a/environments/neutron-midonet.yaml b/environments/neutron-midonet.yaml
new file mode 100644 (file)
index 0000000..726852a
--- /dev/null
@@ -0,0 +1,20 @@
+# A Heat environment that can be used to deploy MidoNet Services
+resource_registry:
+  OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+  OS::TripleO::Controller::Net::SoftwareConfig: ../net-config-linux-bridge.yaml # We must avoid any OVS bridge; MidoNet is incompatible with the OVS datapath
+
+parameter_defaults:
+  EnableZookeeperOnController: true
+  EnableCassandraOnController: true
+  NeutronCorePlugin: 'midonet.neutron.plugin_v1.MidonetPluginV2' # Overriding default core_plugin in Neutron. Don't touch it
+  NeutronEnableIsolatedMetadata: true  # MidoNet 1.9 needs this one to work. Don't change it
+  NeutronEnableL3Agent: false
+  NeutronEnableOVSAgent: false
+
+  # Other available options for MidoNet Services
+  # TunnelZoneName: 'tunnelname'
+  # TunnelZoneType: 'gre'
+  # CassandraStoragePort: 7000
+  # CassandraSslStoragePort: 7009
+  # CassandraClientPort: 9042
+  # CassandraClientPortThrift: 9160
index f235cf8..8986e35 100644 (file)
@@ -2,3 +2,5 @@
 # Overcloud controller with Pacemaker.
 resource_registry:
   OS::TripleO::ControllerConfig: ../puppet/controller-config-pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
index 5ccfa58..bd320bd 100644 (file)
@@ -43,9 +43,10 @@ parameter_defaults:
   ## OSDs are deployed on dedicated ceph-storage nodes only.
   # ControllerEnableCephStorage: false
 
-  ## When deploying Ceph through the oscplugin CLI, the following
+  ## When deploying Ceph Nodes through the oscplugin CLI, the following
   ## parameters are set automatically by the CLI. When deploying via
-  ## heat stack-create, they need to be provided manually.
+  ## heat stack-create, or when deploying Ceph on the controller nodes only,
+  ## they need to be provided manually.
 
   ## Number of Ceph storage nodes to deploy
   # CephStorageCount: 0
index 739cbf0..5883e06 100644 (file)
@@ -40,6 +40,7 @@ resources:
   CollectMacDeploymentsController:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsController
       servers:  {get_param: controller_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -47,6 +48,7 @@ resources:
   CollectMacDeploymentsCompute:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsCompute
       servers:  {get_param: compute_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -54,6 +56,7 @@ resources:
   CollectMacDeploymentsBlockStorage:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsBlockStorage
       servers:  {get_param: blockstorage_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -61,6 +64,7 @@ resources:
   CollectMacDeploymentsObjectStorage:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsObjectStorage
       servers:  {get_param: objectstorage_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -68,6 +72,7 @@ resources:
   CollectMacDeploymentsCephStorage:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsCephStorage
       servers:  {get_param: cephstorage_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -95,6 +100,7 @@ resources:
   DistributeMacDeploymentsController:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: DistributeMacDeploymentsController
       servers:  {get_param: controller_servers}
       config: {get_resource: DistributeMacConfig}
       input_values:
index b4b3027..49d2d8b 100644 (file)
@@ -41,6 +41,7 @@ resources:
   RandomDeploymentsController:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: RandomDeploymentsController
       servers:  {get_param: controller_servers}
       config: {get_resource: RandomConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -50,6 +51,7 @@ resources:
   RandomDeploymentsCompute:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: RandomDeploymentsCompute
       servers:  {get_param: compute_servers}
       config: {get_resource: RandomConfig}
       actions: ['CREATE'] # Only do this on CREATE
index 1d3dca2..f83dff7 100644 (file)
@@ -22,6 +22,7 @@ resources:
   ExtraDeployments:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: ExtraDeployments
       servers:  {get_param: servers}
       config: {get_resource: ExtraConfig}
       actions: ['CREATE'] # Only do this on CREATE
index d516091..a884bda 100644 (file)
@@ -73,6 +73,7 @@ resources:
   RHELRegistrationDeployment:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: RHELRegistrationDeployment
       server:  {get_param: server}
       config: {get_resource: RHELRegistration}
       actions: ['CREATE'] # Only do this on CREATE
@@ -106,6 +107,7 @@ resources:
   RHELUnregistrationDeployment:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: RHELUnregistrationDeployment
       server:  {get_param: server}
       config: {get_resource: RHELUnregistration}
       actions: ['DELETE'] # Only do this on DELETE
diff --git a/extraconfig/tasks/noop.yaml b/extraconfig/tasks/noop.yaml
new file mode 100644 (file)
index 0000000..0cff746
--- /dev/null
@@ -0,0 +1,10 @@
+heat_template_version: 2014-10-16
+description: 'No-op task'
+
+parameters:
+  servers:
+    type: json
+  input_values:
+    type: json
+    default: {}
+    description: input values for the software deployments
diff --git a/extraconfig/tasks/pacemaker_resource_restart.sh b/extraconfig/tasks/pacemaker_resource_restart.sh
new file mode 100755 (executable)
index 0000000..1220109
--- /dev/null
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+set -eux
+
+pacemaker_status=$(systemctl is-active pacemaker)
+check_interval=3
+
+function check_resource {
+
+  service=$1
+  state=$2
+  timeout=$3
+  tstart=$(date +%s)
+  tend=$(( $tstart + $timeout ))
+
+  if [ "$state" = "stopped" ]; then
+      match_for_incomplete='Started'
+  else # started
+      match_for_incomplete='Stopped'
+  fi
+
+  while (( $(date +%s) < $tend )); do
+      node_states=$(pcs status --full | grep "$service" | grep -v Clone)
+      if echo "$node_states" | grep -q "$match_for_incomplete"; then
+          echo "$service not yet $state, sleeping $check_interval seconds."
+          sleep $check_interval
+      else
+        echo "$service has $state"
+        timeout -k 10 $timeout crm_resource --wait
+        return
+      fi
+  done
+
+  echo "$service never $state after $timeout seconds" | tee /dev/fd/2
+  exit 1
+
+}
+
+# Run if pacemaker is running, we're the bootstrap node,
+# and we're updating the deployment (not creating).
+if [ "$pacemaker_status" = "active" -a \
+     "$(hiera bootstrap_nodeid)" = "$(facter hostname)" -a \
+     "$(hiera update_identifier)" != "nil" ]; then
+
+    #ensure neutron constraints like
+    #https://review.openstack.org/#/c/245093/
+    if pcs constraint order show | grep "start neutron-server-clone then start neutron-ovs-cleanup-clone"; then
+        pcs constraint remove order-neutron-server-clone-neutron-ovs-cleanup-clone-mandatory
+    fi
+
+    pcs resource disable httpd
+    check_resource httpd stopped 300
+    pcs resource disable openstack-keystone
+    check_resource openstack-keystone stopped 1800
+
+    if pcs status | grep haproxy-clone; then
+        pcs resource restart haproxy-clone
+    fi
+    pcs resource restart redis-master
+    pcs resource restart mongod-clone
+    pcs resource restart rabbitmq-clone
+    pcs resource restart memcached-clone
+    pcs resource restart galera-master
+
+    pcs resource enable openstack-keystone
+    check_resource openstack-keystone started 1800
+    pcs resource enable httpd
+    check_resource httpd started 800
+
+fi
diff --git a/extraconfig/tasks/post_puppet_pacemaker.yaml b/extraconfig/tasks/post_puppet_pacemaker.yaml
new file mode 100644 (file)
index 0000000..7de41d9
--- /dev/null
@@ -0,0 +1,44 @@
+heat_template_version: 2014-10-16
+description: 'Post-Puppet Config for Pacemaker deployments'
+
+parameters:
+  servers:
+    type: json
+  input_values:
+    type: json
+    description: input values for the software deployments
+
+resources:
+
+  ControllerPostPuppetMaintenanceModeConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: |
+        #!/bin/bash
+        pacemaker_status=$(systemctl is-active pacemaker)
+
+        if [ "$pacemaker_status" = "active" ]; then
+            pcs property set maintenance-mode=false
+        fi
+
+  ControllerPostPuppetMaintenanceModeDeployment:
+    type: OS::Heat::SoftwareDeployments
+    properties:
+      servers:  {get_param: servers}
+      config: {get_resource: ControllerPostPuppetMaintenanceModeConfig}
+      input_values: {get_param: input_values}
+
+  ControllerPostPuppetRestartConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: {get_file: pacemaker_resource_restart.sh}
+
+  ControllerPostPuppetRestartDeployment:
+    type: OS::Heat::SoftwareDeployments
+    depends_on: ControllerPostPuppetMaintenanceModeDeployment
+    properties:
+      servers:  {get_param: servers}
+      config: {get_resource: ControllerPostPuppetRestartConfig}
+      input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/pre_puppet_pacemaker.yaml b/extraconfig/tasks/pre_puppet_pacemaker.yaml
new file mode 100644 (file)
index 0000000..2cfe92a
--- /dev/null
@@ -0,0 +1,30 @@
+heat_template_version: 2014-10-16
+description: 'Pre-Puppet Config for Pacemaker deployments'
+
+parameters:
+  servers:
+    type: json
+  input_values:
+    type: json
+    description: input values for the software deployments
+
+resources:
+
+  ControllerPrePuppetMaintenanceModeConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: |
+        #!/bin/bash
+        pacemaker_status=$(systemctl is-active pacemaker)
+
+        if [ "$pacemaker_status" = "active" ]; then
+            pcs property set maintenance-mode=true
+        fi
+
+  ControllerPrePuppetMaintenanceModeDeployment:
+    type: OS::Heat::SoftwareDeployments
+    properties:
+      servers:  {get_param: servers}
+      config: {get_resource: ControllerPrePuppetMaintenanceModeConfig}
+      input_values: {get_param: input_values}
index e32369e..3917902 100755 (executable)
@@ -24,6 +24,7 @@ update_identifier=${update_identifier//[^a-zA-Z0-9-_]/}
 # seconds to wait for this node to rejoin the cluster after update
 cluster_start_timeout=600
 galera_sync_timeout=360
+cluster_settle_timeout=1800
 
 timestamp_file="$timestamp_dir/$update_identifier"
 if [[ -a "$timestamp_file" ]]; then
@@ -122,13 +123,16 @@ openstack-nova-scheduler"
 
     echo "Setting resource start/stop timeouts"
     for service in $SERVICES; do
-        pcs -f $pacemaker_dumpfile resource update $service op start timeout=100s op stop timeout=100s
+        pcs -f $pacemaker_dumpfile resource update $service op start timeout=200s op stop timeout=200s
     done
     # mongod start timeout is higher, setting only stop timeout
-    pcs -f $pacemaker_dumpfile resource update mongod op stop timeout=100s
+    pcs -f $pacemaker_dumpfile resource update mongod op start timeout=370s op stop timeout=200s
 
     echo "Applying new Pacemaker config"
-    pcs cluster cib-push $pacemaker_dumpfile
+    if ! pcs cluster cib-push $pacemaker_dumpfile; then
+        echo "ERROR failed to apply new pacemaker config"
+        exit 1
+    fi
 
     echo "Pacemaker running, stopping cluster node and doing full package update"
     node_count=$(pcs status xml | grep -o "<nodes_configured.*/>" | grep -o 'number="[0-9]*"' | grep -o "[0-9]*")
@@ -146,11 +150,11 @@ openstack-nova-scheduler"
     kill $(ps ax | grep -e "keepalived.*\.pid-vrrp" | awk '{print $1}') 2>/dev/null || :
     kill $(ps ax | grep -e "radvd.*\.pid\.radvd" | awk '{print $1}') 2>/dev/null || :
 else
-    echo "Excluding upgrading packages that are handled by config management tooling"
-    command_arguments="$command_arguments --skip-broken"
-    for exclude in $(cat /var/lib/tripleo/installed-packages/* | sort -u); do
-        command_arguments="$command_arguments --exclude $exclude"
-    done
+    echo "Upgrading openstack-puppet-modules"
+    yum -y update openstack-puppet-modules
+    echo "Upgrading other packages is handled by config management tooling"
+    echo -n "true" > $heat_outputs_path.update_managed_packages
+    exit 0
 fi
 
 command=${command:-update}
@@ -188,10 +192,13 @@ if [[ "$pacemaker_status" == "active" ]] ; then
         fi
     done
 
-    pcs status
+    echo "Waiting for pacemaker cluster to settle"
+    if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
+        echo "ERROR timed out while waiting for the cluster to settle"
+        exit 1
+    fi
 
-else
-    echo -n "true" > $heat_outputs_path.update_managed_packages
+    pcs status
 fi
 
 echo "Finished yum_update.sh on server $deploy_server_id at `date`"
index 797df4b..b624563 100644 (file)
@@ -28,6 +28,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet:
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
 
 resources:
   OsNetConfigImpl:
index ad16ef0..4f7a19d 100644 (file)
@@ -28,6 +28,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet:
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
 
 resources:
   OsNetConfigImpl:
index 0646ffa..0980803 100644 (file)
@@ -28,6 +28,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet:
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   ControlPlaneDefaultRoute: # Override this via parameter_defaults
     description: The default route of the control plane network.
     type: string
index 30de584..94c492c 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet:
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
 
 resources:
   OsNetConfigImpl:
index 98879b4..afe7177 100644 (file)
@@ -3,10 +3,9 @@ Vlans on a bonded pair of NICs for each Overcloud role.
 
 There are two versions of the controller role template, one with
 an external network interface, and another without. If the
-external network interface is not configured the ctlplane address
+external network interface is not configured, the ctlplane address
 ranges will be used for external (public) network traffic.
 
-
 Configuration
 -------------
 
@@ -20,8 +19,31 @@ something like this:
     OS::TripleO::ObjectStorage::Net::SoftwareConfig: network/config/bond-with-vlans/swift-storage.yaml
     OS::TripleO::CephStorage::Net::SoftwareConfig: network/config/bond-with-vlans/ceph-storage.yaml
 
+Or use this Heat environment file:
+
+  environments/net-bond-with-vlans.yaml
+
 Configuration with no External Network
 --------------------------------------
+
 Same as above except set the following value for the controller role:
 
     OS::TripleO::Controller::Net::SoftwareConfig: network/config/bond-with-vlans/controller-no-external.yaml
+
+Configuration with System Management Network
+--------------------------------------------
+
+To enable the optional System Management network, create a Heat environment
+that looks something like this:
+
+  resource\_registry:
+    OS::TripleO::Network::Management: ../network/management.yaml
+    OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+Or use this Heat environment file:
+
+  environments/network-management.yaml
index 620d1f7..93db866 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   BondInterfaceOvsOptions:
     default: ''
     description: The ovs_options string for the bond interface. Set things like
@@ -42,6 +46,10 @@ parameters:
     default: 40
     description: Vlan ID for the storage mgmt network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
@@ -114,6 +122,14 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: StorageMgmtIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  device: bond1
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index f4c6de8..bea98c1 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   BondInterfaceOvsOptions:
     default: ''
     description: The ovs_options string for the bond interface. Set things like
@@ -46,6 +50,10 @@ parameters:
     default: 40
     description: Vlan ID for the storage mgmt network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
@@ -125,6 +133,14 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: StorageMgmtIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  device: bond1
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 8cb3705..774bf02 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   BondInterfaceOvsOptions:
     default: ''
     description: The ovs_options string for the bond interface. Set things like
@@ -46,6 +50,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
@@ -125,6 +133,14 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: TenantIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  device: bond1
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 22579e8..375d40b 100644 (file)
@@ -25,6 +25,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   BondInterfaceOvsOptions:
     default: ''
     description: The ovs_options string for the bond interface. Set things like
@@ -50,6 +54,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ExternalInterfaceDefaultRoute:
     default: '10.0.0.1'
     description: default route for the external network
@@ -66,6 +74,7 @@ resources:
             -
               type: ovs_bridge
               name: {get_input: bridge_name}
+              use_dhcp: true
               members:
                 -
                   type: ovs_bond
@@ -107,6 +116,14 @@ resources:
                   addresses:
                   -
                     ip_netmask: {get_param: TenantIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  device: bond1
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index eb4399e..d3627ea 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   BondInterfaceOvsOptions:
     default: 'bond_mode=active-backup'
     description: The ovs_options string for the bond interface. Set things like
@@ -54,6 +58,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ExternalInterfaceDefaultRoute:
     default: '10.0.0.1'
     description: default route for the external network
@@ -119,7 +127,7 @@ resources:
                       ip_netmask: {get_param: ExternalIpSubnet}
                   routes:
                     -
-                      ip_netmask: 0.0.0.0/0
+                      default: true
                       next_hop: {get_param: ExternalInterfaceDefaultRoute}
                 -
                   type: vlan
@@ -149,6 +157,14 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: TenantIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  device: bond1
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index f6b2a69..de9121e 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   BondInterfaceOvsOptions:
     default: ''
     description: The ovs_options string for the bond interface. Set things like
@@ -46,6 +50,10 @@ parameters:
     default: 40
     description: Vlan ID for the storage mgmt network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
@@ -125,6 +133,14 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: StorageMgmtIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  device: bond1
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 3d81f0b..0d8a0f0 100644 (file)
@@ -19,3 +19,21 @@ something like this:
 Or use this Heat environment file:
 
   environments/net-multiple-nics.yaml
+
+Configuration with System Management Network
+--------------------------------------------
+
+To enable the optional System Management network, create a Heat environment
+that looks something like this:
+
+  resource\_registry:
+    OS::TripleO::Network::Management: ../network/management.yaml
+    OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+Or use this Heat environment file:
+
+  environments/network-management.yaml
index 7d650f4..84cb0f1 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ExternalInterfaceDefaultRoute:
     default: '10.0.0.1'
     description: default route for the external network
@@ -58,12 +66,12 @@ parameters:
     description: The subnet CIDR of the control plane network.
     type: string
   ControlPlaneDefaultRoute: # Override this via parameter_defaults
-    description: The subnet CIDR of the control plane network.
+    description: The default route of the control plane network.
     type: string
   DnsServers: # Override this via parameter_defaults
     default: []
     description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
-    type: json
+    type: comma_delimited_list
   EC2MetadataIp: # Override this via parameter_defaults
     description: The IP address of the EC2 metadata server.
     type: string
@@ -76,7 +84,7 @@ resources:
       config:
         os_net_config:
           network_config:
-          -
+            -
               type: interface
               name: nic1
               use_dhcp: false
@@ -109,6 +117,14 @@ resources:
               addresses:
                 -
                   ip_netmask: {get_param: StorageMgmtIpSubnet}
+            # Uncomment when including environments/network-management.yaml
+            #-
+            #  type: interface
+            #  name: nic7
+            #  use_dhcp: false
+            #  addresses:
+            #    -
+            #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index fdb6c9d..0b0218c 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ExternalInterfaceDefaultRoute:
     default: '10.0.0.1'
     description: default route for the external network
@@ -58,12 +66,12 @@ parameters:
     description: The subnet CIDR of the control plane network.
     type: string
   ControlPlaneDefaultRoute: # Override this via parameter_defaults
-    description: The subnet CIDR of the control plane network.
+    description: The default route of the control plane network.
     type: string
   DnsServers: # Override this via parameter_defaults
     default: []
     description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
-    type: json
+    type: comma_delimited_list
   EC2MetadataIp: # Override this via parameter_defaults
     description: The IP address of the EC2 metadata server.
     type: string
@@ -76,7 +84,7 @@ resources:
       config:
         os_net_config:
           network_config:
-          -
+            -
               type: interface
               name: nic1
               use_dhcp: false
@@ -116,6 +124,14 @@ resources:
               addresses:
                 -
                   ip_netmask: {get_param: InternalApiIpSubnet}
+            # Uncomment when including environments/network-management.yaml
+            #-
+            #  type: interface
+            #  name: nic7
+            #  use_dhcp: false
+            #  addresses:
+            #    -
+            #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 0032a28..97eef52 100644 (file)
@@ -29,6 +29,14 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
+  ExternalNetworkVlanID:
+    default: 10
+    description: Vlan ID for the external network traffic.
+    type: number
   InternalApiNetworkVlanID:
     default: 20
     description: Vlan ID for the internal_api network traffic.
@@ -37,21 +45,33 @@ parameters:
     default: 30
     description: Vlan ID for the storage network traffic.
     type: number
+  StorageMgmtNetworkVlanID:
+    default: 40
+    description: Vlan ID for the storage mgmt network traffic.
+    type: number
   TenantNetworkVlanID:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
+  ExternalInterfaceDefaultRoute:
+    default: '10.0.0.1'
+    description: default route for the external network
+    type: string
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
     type: string
   ControlPlaneDefaultRoute: # Override this via parameter_defaults
-    description: The subnet CIDR of the control plane network.
+    description: The default route of the control plane network.
     type: string
   DnsServers: # Override this via parameter_defaults
     default: []
     description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
-    type: json
+    type: comma_delimited_list
   EC2MetadataIp: # Override this via parameter_defaults
     description: The IP address of the EC2 metadata server.
     type: string
@@ -112,6 +132,14 @@ resources:
                   use_dhcp: false
                   # force the MAC address of the bridge to this interface
                   primary: true
+            # Uncomment when including environments/network-management.yaml
+            #-
+            #  type: interface
+            #  name: nic7
+            #  use_dhcp: false
+            #  addresses:
+            #    -
+            #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 63f53a1..377fd11 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ExternalInterfaceDefaultRoute:
     default: '10.0.0.1'
     description: default route for the external network
@@ -58,12 +66,12 @@ parameters:
     description: The subnet CIDR of the control plane network.
     type: string
   ControlPlaneDefaultRoute: # Override this via parameter_defaults
-    description: The subnet CIDR of the control plane network.
+    description: The default route of the control plane network.
     type: string
   DnsServers: # Override this via parameter_defaults
     default: []
     description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
-    type: json
+    type: comma_delimited_list
   EC2MetadataIp: # Override this via parameter_defaults
     description: The IP address of the EC2 metadata server.
     type: string
@@ -76,7 +84,7 @@ resources:
       config:
         os_net_config:
           network_config:
-          -
+            -
               type: interface
               name: nic1
               use_dhcp: false
@@ -131,13 +139,14 @@ resources:
             -
               type: ovs_bridge
               name: {get_input: bridge_name}
+              dns_servers: {get_param: DnsServers}
               use_dhcp: false
               addresses:
                 -
                   ip_netmask: {get_param: ExternalIpSubnet}
               routes:
                 -
-                  ip_netmask: 0.0.0.0/0
+                  default: true
                   next_hop: {get_param: ExternalInterfaceDefaultRoute}
               members:
                 -
@@ -145,6 +154,14 @@ resources:
                   name: nic6
                   # force the MAC address of the bridge to this interface
                   primary: true
+            # Uncomment when including environments/network-management.yaml
+            #-
+            #  type: interface
+            #  name: nic7
+            #  use_dhcp: false
+            #  addresses:
+            #    -
+            #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 00e4f35..b75bbd6 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ExternalInterfaceDefaultRoute:
     default: '10.0.0.1'
     description: default route for the external network
@@ -58,12 +66,12 @@ parameters:
     description: The subnet CIDR of the control plane network.
     type: string
   ControlPlaneDefaultRoute: # Override this via parameter_defaults
-    description: The subnet CIDR of the control plane network.
+    description: The default route of the control plane network.
     type: string
   DnsServers: # Override this via parameter_defaults
     default: []
     description: A list of DNS servers (2 max for some implementations) that will be added to resolv.conf.
-    type: json
+    type: comma_delimited_list
   EC2MetadataIp: # Override this via parameter_defaults
     description: The IP address of the EC2 metadata server.
     type: string
@@ -76,7 +84,7 @@ resources:
       config:
         os_net_config:
           network_config:
-          -
+            -
               type: interface
               name: nic1
               use_dhcp: false
@@ -116,6 +124,14 @@ resources:
               addresses:
                 -
                   ip_netmask: {get_param: InternalApiIpSubnet}
+            # Uncomment when including environments/network-management.yaml
+            #-
+            #  type: interface
+            #  name: nic7
+            #  use_dhcp: false
+            #  addresses:
+            #    -
+            #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 6f12865..f9c2e51 100644 (file)
@@ -1,9 +1,9 @@
 This directory contains Heat templates to help configure
-Vlans on a single NICs for each Overcloud role.
+Vlans on a single NIC for each Overcloud role.
 
 There are two versions of the controller role template, one with
 an external network interface, and another without. If the
-external network interface is not configured the ctlplane address
+external network interface is not configured, the ctlplane address
 ranges will be used for external (public) network traffic.
 
 Configuration
@@ -23,9 +23,27 @@ Or use this Heat environment file:
 
   environments/net-single-nic-with-vlans.yaml
 
-
 Configuration with no External Network
 --------------------------------------
+
 Same as above except set the following value for the controller role:
 
     OS::TripleO::Controller::Net::SoftwareConfig: network/config/single-nic-vlans/controller-no-external.yaml
+
+Configuration with System Management Network
+--------------------------------------------
+
+To enable the optional System Management network, create a Heat environment
+that looks something like this:
+
+  resource\_registry:
+    OS::TripleO::Network::Management: ../network/management.yaml
+    OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
+    OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
+
+Or use this Heat environment file:
+
+  environments/network-management.yaml
index 5148c52..80bc32d 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   StorageNetworkVlanID:
     default: 30
     description: Vlan ID for the storage network traffic.
@@ -37,6 +41,10 @@ parameters:
     default: 40
     description: Vlan ID for the storage mgmt network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
@@ -97,6 +105,13 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: StorageMgmtIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index e79a9f4..e509443 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   InternalApiNetworkVlanID:
     default: 20
     description: Vlan ID for the internal_api network traffic.
@@ -41,6 +45,10 @@ parameters:
     default: 40
     description: Vlan ID for the storage mgmt network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
@@ -107,6 +115,13 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: StorageMgmtIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 4e93b31..8cf6825 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   InternalApiNetworkVlanID:
     default: 20
     description: Vlan ID for the internal_api network traffic.
@@ -41,6 +45,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
@@ -107,6 +115,13 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: TenantIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index faf9e9c..eb5e1e5 100644 (file)
@@ -25,6 +25,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
@@ -45,6 +49,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ExternalInterfaceDefaultRoute:
     default: '10.0.0.1'
     description: default route for the external network
@@ -92,6 +100,13 @@ resources:
                   addresses:
                   -
                     ip_netmask: {get_param: TenantIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 3c536d6..3b22b36 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
@@ -49,6 +53,10 @@ parameters:
     default: 50
     description: Vlan ID for the tenant network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ExternalInterfaceDefaultRoute:
     default: '10.0.0.1'
     description: default route for the external network
@@ -129,6 +137,12 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: TenantIpSubnet}
+                #-  # Uncomment when including environments/network-management.yaml
+                #  type: vlan
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 83b3304..efc0339 100644 (file)
@@ -29,6 +29,10 @@ parameters:
     default: ''
     description: IP address/subnet on the tenant network
     type: string
+  ManagementIpSubnet: # Only populated when including environments/network-management.yaml
+    default: ''
+    description: IP address/subnet on the management network
+    type: string
   InternalApiNetworkVlanID:
     default: 20
     description: Vlan ID for the internal_api network traffic.
@@ -41,6 +45,10 @@ parameters:
     default: 40
     description: Vlan ID for the storage mgmt network traffic.
     type: number
+  ManagementNetworkVlanID:
+    default: 60
+    description: Vlan ID for the management network traffic.
+    type: number
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
@@ -107,6 +115,13 @@ resources:
                   addresses:
                     -
                       ip_netmask: {get_param: StorageMgmtIpSubnet}
+                # Uncomment when including environments/network-management.yaml
+                #-
+                #  type: vlan
+                #  vlan_id: {get_param: ManagementNetworkVlanID}
+                #  addresses:
+                #    -
+                #      ip_netmask: {get_param: ManagementIpSubnet}
 
 outputs:
   OS::stack_id:
index 0521401..f6063c0 100644 (file)
@@ -40,6 +40,9 @@ parameters:
   SwiftProxyVirtualIP:
     type: string
     default: ''
+  SaharaApiVirtualIP:
+    type: string
+    default: ''
   EndpointMap:
     type: json
     default:
@@ -74,11 +77,14 @@ parameters:
       SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
       SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
       SwiftPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+      SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+      SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+      SaharaPublic: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
   CloudName:
     type: string
-    default: ''
+    default: overcloud
     description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
 
 resources:
@@ -404,6 +410,31 @@ resources:
       IP: {get_param: SwiftProxyVirtualIP}
       CloudName: {get_param: CloudName}
 
+  SaharaInternal:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: SaharaInternal
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: SaharaApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v1.1/%(tenant_id)s'
+  SaharaPublic:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: SaharaPublic
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: SaharaApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v1.1/%(tenant_id)s'
+  SaharaAdmin:
+    type: OS::TripleO::Endpoint
+    properties:
+      EndpointName: SaharaAdmin
+      EndpointMap: { get_param: EndpointMap }
+      IP: {get_param: SaharaApiVirtualIP}
+      CloudName: {get_param: CloudName}
+      UriSuffix: '/v1.1/%(tenant_id)s'
+
 outputs:
   endpoint_map:
     value:
@@ -448,3 +479,6 @@ outputs:
       SwiftS3Internal: {get_attr: [ SwiftS3Internal, endpoint] }
       SwiftS3Public: {get_attr: [ SwiftS3Public, endpoint] }
       SwiftS3Admin: {get_attr: [ SwiftS3Admin, endpoint] }
+      SaharaInternal: {get_attr: [ SaharaInternal, endpoint] }
+      SaharaPublic: {get_attr: [ SaharaPublic, endpoint] }
+      SaharaAdmin: {get_attr: [ SaharaAdmin, endpoint] }
\ No newline at end of file
diff --git a/network/management.yaml b/network/management.yaml
new file mode 100644 (file)
index 0000000..9bfaafa
--- /dev/null
@@ -0,0 +1,64 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Management network. System administration, SSH, DNS, NTP, etc. This network
+  would usually be the default gateway for the non-controller nodes.
+
+parameters:
+  # the defaults here work for static IP assignment (IPAM) only
+  ManagementNetCidr:
+    default: '10.0.1.0/24'
+    description: CIDR for the management network.
+    type: string
+  ManagementNetValueSpecs:
+    default: {'provider:physical_network': 'management', 'provider:network_type': 'flat'}
+    description: Value specs for the management network.
+    type: json
+  ManagementNetAdminStateUp:
+    default: false
+    description: The admin state of the network.
+    type: boolean
+  ManagementNetEnableDHCP:
+    default: false
+    description: Whether to enable DHCP on the associated subnet.
+    type: boolean
+  ManagementNetShared:
+    default: false
+    description: Whether this network is shared across all tenants.
+    type: boolean
+  ManagementNetName:
+    default: management
+    description: The name of the management network.
+    type: string
+  ManagementSubnetName:
+    default: management_subnet
+    description: The name of the management subnet in Neutron.
+    type: string
+  ManagementAllocationPools:
+    default: [{'start': '10.0.1.4', 'end': '10.0.1.250'}]
+    description: IP allocation pool range for the management network.
+    type: json
+
+resources:
+  ManagementNetwork:
+    type: OS::Neutron::Net
+    properties:
+      admin_state_up: {get_param: ManagementNetAdminStateUp}
+      name: {get_param: ManagementNetName}
+      shared: {get_param: ManagementNetShared}
+      value_specs: {get_param: ManagementNetValueSpecs}
+
+  ManagementSubnet:
+    type: OS::Neutron::Subnet
+    properties:
+      cidr: {get_param: ManagementNetCidr}
+      enable_dhcp: {get_param: ManagementNetEnableDHCP}
+      name: {get_param: ManagementSubnetName}
+      network: {get_resource: ManagementNetwork}
+      allocation_pools: {get_param: ManagementAllocationPools}
+
+outputs:
+  OS::stack_id:
+    description: Neutron management network
+    value: {get_resource: ManagementNetwork}
+
index 6618af3..ab50ae1 100644 (file)
@@ -18,3 +18,6 @@ resources:
 
   TenantNetwork:
     type: OS::TripleO::Network::Tenant
+
+  ManagementNetwork:
+    type: OS::TripleO::Network::Management
index ab6b18f..7a7043b 100644 (file)
@@ -5,6 +5,10 @@ description: >
   The IP address will be chosen automatically if FixedIPs is empty.
 
 parameters:
+  ServiceName: # Here for compatibility with from_service.yaml
+    description: Name of the service to lookup
+    default: ''
+    type: string
   NetworkName:
     description: # Here for compatibility with isolated networks
     default: ctlplane
index 4180a22..7624eb9 100644 (file)
@@ -27,6 +27,12 @@ parameters:
         [{'ip_address':'1.2.3.4'}]
     default: []
     type: json
+  IPPool: # Here for compatibility with from_pool.yaml
+    default: {}
+    type: json
+  NodeIndex: # Here for compatibility with from_pool.yaml
+    default: 0
+    type: number
 
 resources:
 
diff --git a/network/ports/external_from_pool.yaml b/network/ports/external_from_pool.yaml
new file mode 100644 (file)
index 0000000..8e9dc7c
--- /dev/null
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Returns an IP from a network mapped list of IPs
+
+parameters:
+  ExternalNetName:
+    description: Name of the external network
+    default: external
+    type: string
+  PortName:
+    description: Name of the port
+    default: ''
+    type: string
+  ControlPlaneIP: # Here for compatibility with noop.yaml
+    description: IP address on the control plane
+    default: ''
+    type: string
+  IPPool:
+    default: {}
+    description: A network mapped list of IPs
+    type: json
+  NodeIndex:
+    default: 0
+    description: Index of the IP to get from Pool
+    type: number
+  ExternalNetCidr:
+    default: '10.0.0.0/24'
+    description: CIDR for the external network.
+    type: string
+
+outputs:
+  ip_address:
+    description: external network IP
+    value: {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
+  ip_subnet:
+    # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+    description: IP/Subnet CIDR for the external network IP
+    value:
+      list_join:
+      - ''
+      - - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
+        - '/'
+        - {get_param: [ExternalNetCidr, -2]}
+        - {get_param: [ExternalNetCidr, -1]}
diff --git a/network/ports/from_service.yaml b/network/ports/from_service.yaml
new file mode 100644 (file)
index 0000000..6b669f4
--- /dev/null
@@ -0,0 +1,34 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Returns an IP from a service mapped list of IPs
+
+parameters:
+  ServiceName:
+    description: Name of the service to lookup
+    default: ''
+    type: string
+  NetworkName: # Here for compatibility with ctlplane_vip.yaml
+    description: Name of the network where the VIP will be created
+    default: ctlplane
+    type: string
+  PortName: # Here for compatibility with ctlplane_vip.yaml
+    description: Name of the port
+    default: ''
+    type: string
+  ControlPlaneIP: # Here for compatibility with ctlplane_vip.yaml
+    description: IP address on the control plane
+    default: ''
+    type: string
+  ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml
+    description: The name of the undercloud Neutron control plane
+    default: ctlplane
+    type: string
+  ServiceVips:
+    default: {}
+    type: json
+
+outputs:
+  ip_address:
+    description: network IP
+    value: {get_param: [ServiceVips, {get_param: ServiceName}]}
index 01cdfe9..f84e8f7 100644 (file)
@@ -22,6 +22,12 @@ parameters:
         [{'ip_address':'1.2.3.4'}]
     default: []
     type: json
+  IPPool: # Here for compatibility with from_pool.yaml
+    default: {}
+    type: json
+  NodeIndex: # Here for compatibility with from_pool.yaml
+    default: 0
+    type: number
 
 resources:
 
diff --git a/network/ports/internal_api_from_pool.yaml b/network/ports/internal_api_from_pool.yaml
new file mode 100644 (file)
index 0000000..b98e1fb
--- /dev/null
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Returns an IP from a network mapped list of IPs
+
+parameters:
+  InternalApiNetName:
+    description: Name of the internal API network
+    default: internal_api
+    type: string
+  PortName:
+    description: Name of the port
+    default: ''
+    type: string
+  ControlPlaneIP: # Here for compatibility with noop.yaml
+    description: IP address on the control plane
+    default: ''
+    type: string
+  IPPool:
+    default: {}
+    description: A network mapped list of IPs
+    type: json
+  NodeIndex:
+    default: 0
+    description: Index of the IP to get from Pool
+    type: number
+  InternalApiNetCidr:
+    default: '172.16.2.0/24'
+    description: Cidr for the internal API network.
+    type: string
+
+outputs:
+  ip_address:
+    description: internal API network IP
+    value: {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
+  ip_subnet:
+    # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+    description: IP/Subnet CIDR for the internal API network IP
+    value:
+      list_join:
+      - ''
+      - - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
+        - '/'
+        - {get_param: [InternalApiNetCidr, -2]}
+        - {get_param: [InternalApiNetCidr, -1]}
diff --git a/network/ports/management.yaml b/network/ports/management.yaml
new file mode 100644 (file)
index 0000000..1d15ca6
--- /dev/null
@@ -0,0 +1,42 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Creates a port on the management network. The IP address will be chosen
+  automatically if FixedIPs is empty.
+
+parameters:
+  ManagementNetName:
+    description: Name of the management neutron network
+    default: management
+    type: string
+  PortName:
+    description: Name of the port
+    default: ''
+    type: string
+  ControlPlaneIP: # Here for compatibility with noop.yaml
+    description: IP address on the control plane
+    type: string
+
+resources:
+
+  ManagementPort:
+    type: OS::Neutron::Port
+    properties:
+      network: {get_param: ManagementNetName}
+      name: {get_param: PortName}
+      replacement_policy: AUTO
+
+outputs:
+  ip_address:
+    description: management network IP
+    value: {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
+  ip_subnet:
+    # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+    description: IP/Subnet CIDR for the management network IP
+    value:
+          list_join:
+            - ''
+            - - {get_attr: [ManagementPort, fixed_ips, 0, ip_address]}
+              - '/'
+              - {get_attr: [ManagementPort, subnets, 0, cidr, -2]}
+              - {get_attr: [ManagementPort, subnets, 0, cidr, -1]}
index 257d3f9..32272bd 100644 (file)
@@ -19,6 +19,9 @@ parameters:
   TenantIpList:
     default: []
     type: comma_delimited_list
+  ManagementIpList:
+    default: []
+    type: comma_delimited_list
 
 outputs:
   net_ip_map:
@@ -32,3 +35,4 @@ outputs:
       storage: {get_param: StorageIpList}
       storage_mgmt: {get_param: StorageMgmtIpList}
       tenant: {get_param: TenantIpList}
+      management: {get_param: ManagementIpList}
index 7aaed16..c638602 100644 (file)
@@ -19,6 +19,9 @@ parameters:
   TenantIp:
     default: ''
     type: string
+  ManagementIp:
+    default: ''
+    type: string
 
 outputs:
   net_ip_map:
@@ -32,3 +35,4 @@ outputs:
       storage: {get_param: StorageIp}
       storage_mgmt: {get_param: StorageMgmtIp}
       tenant: {get_param: TenantIp}
+      management: {get_param: ManagementIp}
index cf59adb..2f933ea 100644 (file)
@@ -19,6 +19,9 @@ parameters:
   TenantIpSubnet:
     default: ''
     type: string
+  ManagementIpSubnet:
+    default: ''
+    type: string
   ControlPlaneSubnetCidr: # Override this via parameter_defaults
     default: '24'
     description: The subnet CIDR of the control plane network.
@@ -41,3 +44,4 @@ outputs:
       storage: {get_param: StorageIpSubnet}
       storage_mgmt: {get_param: StorageMgmtIpSubnet}
       tenant: {get_param: TenantIpSubnet}
+      management: {get_param: ManagementIpSubnet}
index 36426b3..23e1f99 100644 (file)
@@ -2,7 +2,7 @@ heat_template_version: 2015-04-30
 
 parameters:
   # Set these via parameter defaults to configure external VIPs
-  ControlNetworkVip:
+  ControlPlaneIP:
     default: ''
     type: string
   ExternalNetworkVip:
@@ -43,7 +43,7 @@ outputs:
       A Hash containing a mapping of network names to assigned IPs
       for a specific machine.
     value:
-      ctlplane: {get_param: ControlNetworkVip}
+      ctlplane: {get_param: ControlPlaneIP}
       external: {get_param: ExternalNetworkVip}
       internal_api: {get_param: InternalApiNetworkVip}
       storage: {get_param: StorageNetworkVip}
index 028624f..ac946cd 100644 (file)
@@ -4,6 +4,10 @@ description: >
   Returns the control plane port (provisioning network) as the ip_address.
 
 parameters:
+  ServiceName: # Here for compatibility with from_service.yaml
+    description: Name of the service to lookup
+    default: ''
+    type: string
   ControlPlaneIP:
     description: IP address on the control plane
     type: string
@@ -27,6 +31,14 @@ parameters:
     default: '24'
     description: The subnet CIDR of the control plane network.
     type: string
+  IPPool: # Here for compatibility with from_pool.yaml
+    default: {}
+    description: A network mapped list of IPs
+    type: json
+  NodeIndex: # Here for compatibility with from_pool.yaml
+    default: 0
+    description: Index of the IP to get from Pool
+    type: number
 
 outputs:
   ip_address:
index 1d2384c..a07e5a4 100644 (file)
@@ -22,6 +22,12 @@ parameters:
         [{'ip_address':'1.2.3.4'}]
     default: []
     type: json
+  IPPool: # Here for compatibility with from_pool.yaml
+    default: {}
+    type: json
+  NodeIndex: # Here for compatibility with from_pool.yaml
+    default: 0
+    type: number
 
 resources:
 
diff --git a/network/ports/storage_from_pool.yaml b/network/ports/storage_from_pool.yaml
new file mode 100644 (file)
index 0000000..668bc6f
--- /dev/null
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Returns an IP from a network mapped list of IPs
+
+parameters:
+  StorageNetName:
+    description: Name of the storage network
+    default: storage
+    type: string
+  PortName:
+    description: Name of the port
+    default: ''
+    type: string
+  ControlPlaneIP: # Here for compatibility with noop.yaml
+    description: IP address on the control plane
+    default: ''
+    type: string
+  IPPool:
+    default: {}
+    description: A network mapped list of IPs
+    type: json
+  NodeIndex:
+    default: 0
+    description: Index of the IP to get from Pool
+    type: number
+  StorageNetCidr:
+    default: '172.16.1.0/24'
+    description: Cidr for the storage network.
+    type: string
+
+outputs:
+  ip_address:
+    description: storage network IP
+    value: {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
+  ip_subnet:
+    # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+    description: IP/Subnet CIDR for the storage network IP
+    value:
+      list_join:
+      - ''
+      - - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
+        - '/'
+        - {get_param: [StorageNetCidr, -2]}
+        - {get_param: [StorageNetCidr, -1]}
index f10e358..4890bf5 100644 (file)
@@ -22,6 +22,12 @@ parameters:
         [{'ip_address':'1.2.3.4'}]
     default: []
     type: json
+  IPPool: # Here for compatibility with from_pool.yaml
+    default: {}
+    type: json
+  NodeIndex: # Here for compatibility with from_pool.yaml
+    default: 0
+    type: number
 
 resources:
 
diff --git a/network/ports/storage_mgmt_from_pool.yaml b/network/ports/storage_mgmt_from_pool.yaml
new file mode 100644 (file)
index 0000000..bea8710
--- /dev/null
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Returns an IP from a network mapped list of IPs
+
+parameters:
+  StorageMgmtNetName:
+    description: Name of the storage MGMT network
+    default: storage_mgmt
+    type: string
+  PortName:
+    description: Name of the port
+    default: ''
+    type: string
+  ControlPlaneIP: # Here for compatibility with noop.yaml
+    description: IP address on the control plane
+    default: ''
+    type: string
+  IPPool:
+    default: {}
+    description: A network mapped list of IPs
+    type: json
+  NodeIndex:
+    default: 0
+    description: Index of the IP to get from Pool
+    type: number
+  StorageMgmtNetCidr:
+    default: '172.16.3.0/24'
+    description: Cidr for the storage MGMT network.
+    type: string
+
+outputs:
+  ip_address:
+    description: storage MGMT network IP
+    value: {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
+  ip_subnet:
+    # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+    description: IP/Subnet CIDR for the storage MGMT network IP
+    value:
+      list_join:
+      - ''
+      - - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
+        - '/'
+        - {get_param: [StorageMgmtNetCidr, -2]}
+        - {get_param: [StorageMgmtNetCidr, -1]}
index ccdc57e..86c58f2 100644 (file)
@@ -22,6 +22,12 @@ parameters:
         [{'ip_address':'1.2.3.4'}]
     default: []
     type: json
+  IPPool: # Here for compatibility with from_pool.yaml
+    default: {}
+    type: json
+  NodeIndex: # Here for compatibility with from_pool.yaml
+    default: 0
+    type: number
 
 resources:
 
diff --git a/network/ports/tenant_from_pool.yaml b/network/ports/tenant_from_pool.yaml
new file mode 100644 (file)
index 0000000..29303bb
--- /dev/null
@@ -0,0 +1,45 @@
+heat_template_version: 2015-04-30
+
+description: >
+  Returns an IP from a network mapped list of IPs
+
+parameters:
+  TenantNetName:
+    description: Name of the tenant network
+    default: tenant
+    type: string
+  PortName:
+    description: Name of the port
+    default: ''
+    type: string
+  ControlPlaneIP: # Here for compatibility with noop.yaml
+    description: IP address on the control plane
+    default: ''
+    type: string
+  IPPool:
+    default: {}
+    description: A network mapped list of IPs
+    type: json
+  NodeIndex:
+    default: 0
+    description: Index of the IP to get from Pool
+    type: number
+  TenantNetCidr:
+    default: '172.16.0.0/24'
+    description: Cidr for the tenant network.
+    type: string
+
+outputs:
+  ip_address:
+    description: tenant network IP
+    value: {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
+  ip_subnet:
+    # FIXME: this assumes a 2 digit subnet CIDR (need more heat functions?)
+    description: IP/Subnet CIDR for the tenant network IP
+    value:
+      list_join:
+      - ''
+      - - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
+        - '/'
+        - {get_param: [TenantNetCidr, -2]}
+        - {get_param: [TenantNetCidr, -1]}
index ab6cd2c..9bb6cde 100644 (file)
@@ -5,6 +5,10 @@ description: >
   The IP address will be chosen automatically if FixedIPs is empty.
 
 parameters:
+  ServiceName: # Here for compatibility with from_service.yaml
+    description: Name of the service to lookup
+    default: ''
+    type: string
   NetworkName:
     description: Name of the network where the VIP will be created
     default: internal_api
index c072c29..888a3c8 100644 (file)
@@ -21,7 +21,11 @@ resource_registry:
   OS::TripleO::CephClusterConfig::SoftwareConfig: puppet/ceph-cluster-config.yaml
   OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
   OS::TripleO::BootstrapNode::SoftwareConfig: puppet/bootstrap-config.yaml
+
+  # Tasks (for internal TripleO usage)
   OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
+  OS::TripleO::Tasks::ControllerPrePuppet: extraconfig/tasks/noop.yaml
+  OS::TripleO::Tasks::ControllerPostPuppet: extraconfig/tasks/noop.yaml
 
   # This creates the "heat-admin" user for all OS images by default
   # To disable, replace with firstboot/userdata_default.yaml
@@ -57,40 +61,59 @@ resource_registry:
   OS::TripleO::Network::StorageMgmt: network/noop.yaml
   OS::TripleO::Network::Storage: network/noop.yaml
   OS::TripleO::Network::Tenant: network/noop.yaml
+  OS::TripleO::Network::Management: network/noop.yaml
 
   OS::TripleO::Network::Ports::NetVipMap: network/ports/net_ip_map.yaml
   OS::TripleO::Network::Ports::NetIpMap: network/ports/net_ip_map.yaml
   OS::TripleO::Network::Ports::NetIpSubnetMap: network/ports/net_ip_subnet_map.yaml
   OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
 
+  # Port assignments for the VIPs
+  OS::TripleO::Network::Ports::ExternalVipPort: network/ports/noop.yaml
+  OS::TripleO::Network::Ports::InternalApiVipPort: network/ports/noop.yaml
+  OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml
+  OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
+  OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
+
   # Port assignments for the controller role
   OS::TripleO::Controller::Ports::ExternalPort: network/ports/noop.yaml
   OS::TripleO::Controller::Ports::InternalApiPort: network/ports/noop.yaml
   OS::TripleO::Controller::Ports::StoragePort: network/ports/noop.yaml
   OS::TripleO::Controller::Ports::StorageMgmtPort: network/ports/noop.yaml
   OS::TripleO::Controller::Ports::TenantPort: network/ports/noop.yaml
+  OS::TripleO::Controller::Ports::ManagementPort: network/ports/noop.yaml
 
   # Port assignments for the compute role
+  OS::TripleO::Compute::Ports::ExternalPort: network/ports/noop.yaml
   OS::TripleO::Compute::Ports::InternalApiPort: network/ports/noop.yaml
   OS::TripleO::Compute::Ports::StoragePort: network/ports/noop.yaml
+  OS::TripleO::Compute::Ports::StorageMgmtPort: network/ports/noop.yaml
   OS::TripleO::Compute::Ports::TenantPort: network/ports/noop.yaml
+  OS::TripleO::Compute::Ports::ManagementPort: network/ports/noop.yaml
 
   # Port assignments for the ceph storage role
+  OS::TripleO::CephStorage::Ports::ExternalPort: network/ports/noop.yaml
+  OS::TripleO::CephStorage::Ports::InternalApiPort: network/ports/noop.yaml
   OS::TripleO::CephStorage::Ports::StoragePort: network/ports/noop.yaml
   OS::TripleO::CephStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
+  OS::TripleO::CephStorage::Ports::TenantPort: network/ports/noop.yaml
+  OS::TripleO::CephStorage::Ports::ManagementPort: network/ports/noop.yaml
 
   # Port assignments for the swift storage role
+  OS::TripleO::SwiftStorage::Ports::ExternalPort: network/ports/noop.yaml
   OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
   OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
   OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
+  OS::TripleO::SwiftStorage::Ports::TenantPort: network/ports/noop.yaml
+  OS::TripleO::SwiftStorage::Ports::ManagementPort: network/ports/noop.yaml
 
   # Port assignments for the block storage role
+  OS::TripleO::BlockStorage::Ports::ExternalPort: network/ports/noop.yaml
   OS::TripleO::BlockStorage::Ports::InternalApiPort: network/ports/noop.yaml
   OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml
   OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-
-  # Port assignments for service virtual IPs for the controller role
-  OS::TripleO::Controller::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
+  OS::TripleO::BlockStorage::Ports::TenantPort: network/ports/noop.yaml
+  OS::TripleO::BlockStorage::Ports::ManagementPort: network/ports/noop.yaml
 
   # Service Endpoint Mappings
   OS::TripleO::Endpoint: network/endpoints/endpoint.yaml
index b1eb62a..b397804 100644 (file)
@@ -13,7 +13,6 @@ parameters:
 
   # Common parameters (not specific to a role)
   AdminPassword:
-    default: unset
     description: The password for the keystone admin account, used for monitoring, querying neutron etc.
     type: string
     hidden: true
@@ -22,12 +21,10 @@ parameters:
     description: The ceilometer backend type.
     type: string
   CeilometerMeteringSecret:
-    default: unset
     description: Secret shared by the ceilometer services.
     type: string
     hidden: true
   CeilometerPassword:
-    default: unset
     description: The password for the ceilometer service account.
     type: string
     hidden: true
@@ -68,7 +65,7 @@ parameters:
     description: Whether to enable or not the Rbd backend for Cinder
     type: boolean
   CloudName:
-    default: ''
+    default: overcloud
     description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
     type: string
   ControlFixedIPs:
@@ -109,7 +106,7 @@ parameters:
       to create provider networks (and we use this for the default floating
       network) - if changing this either use different post-install network
       scripts or be sure to keep 'datacentre' as a mapping network name.
-    type: string
+    type: comma_delimited_list
     default: "datacentre:br-ex"
   NeutronControlPlaneID:
     default: 'ctlplane'
@@ -128,17 +125,16 @@ parameters:
         Enable/disable the L2 population feature in the Neutron agents.
     default: "False"
   NeutronFlatNetworks:
-    type: string
+    type: comma_delimited_list
     default: 'datacentre'
     description: >
       If set, flat networks to configure in neutron plugins. Defaults to
       'datacentre' to permit external network creation.
   NeutronNetworkType:
     default: 'vxlan'
-    description: The tenant network type for Neutron, either gre or vxlan.
-    type: string
+    description: The tenant network type for Neutron.
+    type: comma_delimited_list
   NeutronPassword:
-    default: unset
     description: The password for the neutron service account, used by neutron agents.
     type: string
     hidden: true
@@ -169,16 +165,14 @@ parameters:
     description: Whether to configure Neutron Distributed Virtual Routers
     type: string
   NeutronMetadataProxySharedSecret:
-    default: 'unset'
     description: Shared secret to prevent spoofing
     type: string
     hidden: true
   NeutronTunnelTypes:
     default: 'vxlan'
     description: |
-        The tunnel types for the Neutron tenant network. To specify multiple
-        values, use a comma separated string, like so: 'gre,vxlan'
-    type: string
+        The tunnel types for the Neutron tenant network.
+    type: comma_delimited_list
   NeutronTunnelIdRanges:
     description: |
         Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
@@ -198,7 +192,7 @@ parameters:
         from neutron.core_plugins namespace.
     type: string
   NeutronServicePlugins:
-    default: "router"
+    default: "router,qos"
     description: |
         Comma-separated list of service plugin entrypoints to be loaded from the
         neutron.service_plugins namespace.
@@ -211,9 +205,18 @@ parameters:
   NeutronMechanismDrivers:
     default: 'openvswitch'
     description: |
-        The mechanism drivers for the Neutron tenant network. To specify multiple
-        values, use a comma separated string, like so: 'openvswitch,l2_population'
-    type: string
+        The mechanism drivers for the Neutron tenant network.
+    type: comma_delimited_list
+  NeutronPluginExtensions:
+    default: "qos"
+    description: |
+        Comma-separated list of extensions enabled for the Neutron plugin.
+    type: comma_delimited_list
+  NeutronAgentExtensions:
+    default: "qos"
+    description: |
+        Comma-separated list of extensions enabled for the Neutron agents.
+    type: comma_delimited_list
   NeutronAllowL3AgentFailover:
     default: 'False'
     description: Allow automatic l3-agent failover
@@ -227,7 +230,6 @@ parameters:
     default: 1
     description: The number of neutron dhcp agents to schedule per network
   NovaPassword:
-    default: unset
     description: The password for the nova service account, used by nova-api.
     type: string
     hidden: true
@@ -279,10 +281,13 @@ parameters:
     description: The user name for SNMPd with readonly rights running on all Overcloud nodes
     type: string
   SnmpdReadonlyUserPassword:
-    default: unset
     description: The user password for SNMPd with readonly rights running on all Overcloud nodes
     type: string
     hidden: true
+  TimeZone:
+    default: 'UTC'
+    description: The timezone to be set on nodes.
+    type: string
   CloudDomain:
     default: 'localdomain'
     type: string
@@ -298,7 +303,6 @@ parameters:
 
   # Controller-specific params
   AdminToken:
-    default: unset
     description: The keystone auth secret.
     type: string
     hidden: true
@@ -319,7 +323,6 @@ parameters:
       CinderEnableNfsBackend is true.
     type: comma_delimited_list
   CinderPassword:
-    default: unset
     description: The password for the cinder service account, used by cinder-api.
     type: string
     hidden: true
@@ -340,11 +343,12 @@ parameters:
     type: json
   controllerImage:
     type: string
-    default: overcloud-control
+    default: overcloud-full
     constraints:
       - custom_constraint: glance.image
   OvercloudControlFlavor:
     description: Flavor for control nodes to request when deploying.
+    default: baremetal
     type: string
     constraints:
       - custom_constraint: nova.flavor
@@ -420,7 +424,6 @@ parameters:
     type: string
     default: noop
   GlancePassword:
-    default: unset
     description: The password for the glance service account, used by the glance services.
     type: string
     hidden: true
@@ -432,14 +435,12 @@ parameters:
     constraints:
     - allowed_values: ['swift', 'file', 'rbd']
   HeatPassword:
-    default: unset
     description: The password for the Heat service account, used by the Heat services.
     type: string
     hidden: true
   HeatStackDomainAdminPassword:
     description: Password for heat_domain_admin user.
     type: string
-    default: ''
     hidden: true
   InstanceNameTemplate:
     default: 'instance-%08x'
@@ -519,12 +520,10 @@ parameters:
         This should be int_public when a VLAN is being used.
     type: string
   SwiftHashSuffix:
-    default: unset
     description: A random string to be used as a salt when hashing to determine mappings in the ring.
     type: string
     hidden: true
   SwiftPassword:
-    default: unset
     description: The password for the swift service account, used by the swift proxy services.
     type: string
     hidden: true
@@ -544,6 +543,12 @@ parameters:
     type: number
     default: 3
     description: How many replicas to use in the swift rings.
+  SaharaPassword:
+    description: The password for the sahara service account.
+    # TODO(egafford): Remove default on merge of https://review.openstack.org/#/c/221418/ (added to avoid circular dep)
+    default: unset
+    type: string
+    hidden: true
 
 # Compute-specific params
   CeilometerComputeAgent:
@@ -567,7 +572,7 @@ parameters:
     description: What interface to add to the HypervisorNeutronPhysicalBridge.
     type: string
   NeutronNetworkVLANRanges:
-    default: 'datacentre'
+    default: 'datacentre:1:1000'
     description: >
       The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
       Neutron documentation for permitted values. Defaults to permitting any
@@ -583,7 +588,7 @@ parameters:
       structure as ExtraConfig.
     type: json
   NovaComputeLibvirtType:
-    default: ''
+    default: kvm
     type: string
   NovaComputeLibvirtVifDriver:
     default: ''
@@ -599,7 +604,7 @@ parameters:
     type: boolean
   NovaImage:
     type: string
-    default: overcloud-compute
+    default: overcloud-full
     constraints:
       - custom_constraint: glance.image
   NovaOVSBridge:
@@ -612,6 +617,7 @@ parameters:
     type: string
   OvercloudComputeFlavor:
     description: Use this flavor
+    default: baremetal
     type: string
     constraints:
       - custom_constraint: nova.flavor
@@ -633,6 +639,7 @@ parameters:
       NovaVncProxyNetwork: internal_api
       SwiftMgmtNetwork: storage_mgmt
       SwiftProxyNetwork: storage
+      SaharaApiNetwork: internal_api
       HorizonNetwork: internal_api
       MemcachedNetwork: internal_api
       RabbitMqNetwork: internal_api
@@ -654,10 +661,11 @@ parameters:
     type: number
     default: 0
   BlockStorageImage:
-    default: overcloud-cinder-volume
+    default: overcloud-full
     type: string
   OvercloudBlockStorageFlavor:
     description: Flavor for block storage nodes to request when deploying.
+    default: baremetal
     type: string
     constraints:
       - custom_constraint: nova.flavor
@@ -679,11 +687,12 @@ parameters:
     default: 0
   OvercloudSwiftStorageFlavor:
     description: Flavor for Swift storage nodes to request when deploying.
+    default: baremetal
     type: string
     constraints:
       - custom_constraint: nova.flavor
   SwiftStorageImage:
-    default: overcloud-swift-storage
+    default: overcloud-full
     type: string
   ObjectStorageExtraConfig:
     default: {}
@@ -701,7 +710,7 @@ parameters:
     type: number
     default: 0
   CephStorageImage:
-    default: overcloud-ceph-storage
+    default: overcloud-full
     type: string
   OvercloudCephStorageFlavor:
     default: baremetal
@@ -824,6 +833,7 @@ resources:
       MysqlVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
       NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
       NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+      SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
       SwiftProxyVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
       PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]}
 
@@ -849,7 +859,6 @@ resources:
           CinderEnableNfsBackend: {get_param: CinderEnableNfsBackend}
           CinderEnableIscsiBackend: {get_param: CinderEnableIscsiBackend}
           CinderEnableRbdBackend: {get_param: CinderEnableRbdBackend}
-          CloudName: {get_param: CloudName}
           CloudDomain: {get_param: CloudDomain}
           ControlVirtualInterface: {get_param: ControlVirtualInterface}
           ControllerExtraConfig: {get_param: controllerExtraConfig}
@@ -908,6 +917,8 @@ resources:
           NeutronServicePlugins: {get_param: NeutronServicePlugins}
           NeutronTypeDrivers: {get_param: NeutronTypeDrivers}
           NeutronMechanismDrivers: {get_param: NeutronMechanismDrivers}
+          NeutronPluginExtensions: {get_param: NeutronPluginExtensions}
+          NeutronAgentExtensions: {get_param: NeutronAgentExtensions}
           NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover}
           NeutronL3HA: {get_param: NeutronL3HA}
           NeutronDhcpAgentsPerNetwork: {get_param: NeutronDhcpAgentsPerNetwork}
@@ -924,6 +935,7 @@ resources:
           RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
           RabbitClientPort: {get_param: RabbitClientPort}
           RabbitFDLimit: {get_param: RabbitFDLimit}
+          SaharaPassword: {get_param: SaharaPassword}
           SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName}
           SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword}
           RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
@@ -933,6 +945,7 @@ resources:
           SwiftPartPower: {get_param: SwiftPartPower}
           SwiftPassword: {get_param: SwiftPassword}
           SwiftReplicas: { get_param: SwiftReplicas}
+          TimeZone: {get_param: TimeZone}
           VirtualIP: {get_attr: [VipMap, net_ip_map, ctlplane]} # deprecated. Use per service VIP settings instead now.
           PublicVirtualIP: {get_attr: [VipMap, net_ip_map, external]}
           ServiceNetMap: {get_param: ServiceNetMap}
@@ -949,6 +962,7 @@ resources:
           KeystonePublicApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
           NeutronApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
           NovaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+          SaharaApiVirtualIP: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
           UpdateIdentifier: {get_param: UpdateIdentifier}
           Hostname:
             str_replace:
@@ -1002,6 +1016,7 @@ resources:
           NeutronServicePlugins: {get_param: NeutronServicePlugins}
           NeutronTypeDrivers: {get_param: NeutronTypeDrivers}
           NeutronMechanismDrivers: {get_param: NeutronMechanismDrivers}
+          NeutronAgentExtensions: {get_param: NeutronAgentExtensions}
           # L3 HA and Failover is not relevant for Computes, should be removed
           NeutronAllowL3AgentFailover: {get_param: NeutronAllowL3AgentFailover}
           NeutronL3HA: {get_param: NeutronL3HA}
@@ -1024,6 +1039,7 @@ resources:
           SnmpdReadonlyUserName: {get_param: SnmpdReadonlyUserName}
           SnmpdReadonlyUserPassword: {get_param: SnmpdReadonlyUserPassword}
           ServiceNetMap: {get_param: ServiceNetMap}
+          TimeZone: {get_param: TimeZone}
           EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
           UpdateIdentifier: {get_param: UpdateIdentifier}
           Hostname:
@@ -1059,6 +1075,7 @@ resources:
           RabbitUserName: {get_param: RabbitUserName}
           RabbitClientUseSSL: {get_param: RabbitClientUseSSL}
           RabbitClientPort: {get_param: RabbitClientPort}
+          TimeZone: {get_param: TimeZone}
           NtpServer: {get_param: NtpServer}
           UpdateIdentifier: {get_param: UpdateIdentifier}
           Hostname:
@@ -1092,6 +1109,7 @@ resources:
           PartPower: {get_param: SwiftPartPower}
           Image: {get_param: SwiftStorageImage}
           Replicas: { get_param: SwiftReplicas}
+          TimeZone: {get_param: TimeZone}
           NtpServer: {get_param: NtpServer}
           UpdateIdentifier: {get_param: UpdateIdentifier}
           ServiceNetMap: {get_param: ServiceNetMap}
@@ -1120,6 +1138,7 @@ resources:
           Flavor: {get_param: OvercloudCephStorageFlavor}
           NtpServer: {get_param: NtpServer}
           ServiceNetMap: {get_param: ServiceNetMap}
+          TimeZone: {get_param: TimeZone}
           UpdateIdentifier: {get_param: UpdateIdentifier}
           Hostname:
             str_replace:
@@ -1141,6 +1160,7 @@ resources:
       StorageIpList: {get_attr: [Controller, storage_ip_address]}
       StorageMgmtIpList: {get_attr: [Controller, storage_mgmt_ip_address]}
       TenantIpList: {get_attr: [Controller, tenant_ip_address]}
+      ManagementIpList: {get_attr: [Controller, management_ip_address]}
 
   allNodesConfig:
     type: OS::TripleO::AllNodes::SoftwareConfig
@@ -1169,6 +1189,9 @@ resources:
       neutron_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, NeutronApiNetwork]}]}
       keystone_public_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}]}
       keystone_admin_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}]}
+      sahara_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
+      DeployIdentifier: {get_param: DeployIdentifier}
+      UpdateIdentifier: {get_param: UpdateIdentifier}
 
   MysqlRootPassword:
     type: OS::Heat::RandomString
@@ -1201,17 +1224,18 @@ resources:
 
   RedisVirtualIP:
     depends_on: Networks
-    type: OS::TripleO::Controller::Ports::RedisVipPort
+    type: OS::TripleO::Network::Ports::RedisVipPort
     properties:
       ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
       ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
       PortName: redis_virtual_ip
       NetworkName: {get_param: [ServiceNetMap, RedisNetwork]}
+      ServiceName: redis
 
   # The public VIP is on the External net, falls back to ctlplane
   PublicVirtualIP:
     depends_on: Networks
-    type: OS::TripleO::Controller::Ports::ExternalPort
+    type: OS::TripleO::Network::Ports::ExternalVipPort
     properties:
       ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
       ControlPlaneNetwork: {get_param: NeutronControlPlaneID}
@@ -1220,21 +1244,21 @@ resources:
 
   InternalApiVirtualIP:
     depends_on: Networks
-    type: OS::TripleO::Controller::Ports::InternalApiPort
+    type: OS::TripleO::Network::Ports::InternalApiVipPort
     properties:
       ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
       PortName: internal_api_virtual_ip
 
   StorageVirtualIP:
     depends_on: Networks
-    type: OS::TripleO::Controller::Ports::StoragePort
+    type: OS::TripleO::Network::Ports::StorageVipPort
     properties:
       ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
       PortName: storage_virtual_ip
 
   StorageMgmtVirtualIP:
     depends_on: Networks
-    type: OS::TripleO::Controller::Ports::StorageMgmtPort
+    type: OS::TripleO::Network::Ports::StorageMgmtVipPort
     properties:
       ControlPlaneIP: {get_attr: [ControlVirtualIP, fixed_ips, 0, ip_address]}
       PortName: storage_management_virtual_ip
@@ -1247,7 +1271,7 @@ resources:
       InternalApiIp: {get_attr: [InternalApiVirtualIP, ip_address]}
       StorageIp: {get_attr: [StorageVirtualIP, ip_address]}
       StorageMgmtIp: {get_attr: [StorageMgmtVirtualIP, ip_address]}
-      # No tenant VIP required
+      # No tenant or management VIP required
 
   VipConfig:
     type: OS::TripleO::VipConfig
@@ -1255,6 +1279,7 @@ resources:
   VipDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: VipDeployment
       config: {get_resource: VipConfig}
       servers: {get_attr: [Controller, attributes, nova_server_resource]}
       input_values:
@@ -1278,6 +1303,7 @@ resources:
         control_virtual_ip: {get_attr: [VipMap, net_ip_map, ctlplane]}
         public_virtual_ip: {get_attr: [VipMap, net_ip_map, external]}
         internal_api_virtual_ip: {get_attr: [VipMap, net_ip_map, internal_api]}
+        sahara_api_vip: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
         storage_virtual_ip: {get_attr: [VipMap, net_ip_map, storage]}
         storage_mgmt_virtual_ip: {get_attr: [VipMap, net_ip_map, storage_mgmt]}
 
@@ -1290,18 +1316,21 @@ resources:
   ControllerBootstrapNodeDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ControllerBootstrapNodeDeployment
       config: {get_attr: [ControllerBootstrapNodeConfig, config_id]}
       servers: {get_attr: [Controller, attributes, nova_server_resource]}
 
   ControllerSwiftDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ControllerSwiftDeployment
       config: {get_attr: [SwiftDevicesAndProxyConfig, config_id]}
       servers: {get_attr: [Controller, attributes, nova_server_resource]}
 
   ObjectStorageSwiftDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ObjectStorageSwiftDeployment
       config: {get_attr: [SwiftDevicesAndProxyConfig, config_id]}
       servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
 
@@ -1315,18 +1344,21 @@ resources:
   ComputeCephDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ComputeCephDeployment
       config: {get_attr: [CephClusterConfig, config_id]}
       servers: {get_attr: [Compute, attributes, nova_server_resource]}
 
   ControllerCephDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ControllerCephDeployment
       config: {get_attr: [CephClusterConfig, config_id]}
       servers: {get_attr: [Controller, attributes, nova_server_resource]}
 
   CephStorageCephDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: CephStorageCephDeployment
       config: {get_attr: [CephClusterConfig, config_id]}
       servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
 
@@ -1361,36 +1393,42 @@ resources:
   ControllerClusterDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ControllerClusterDeployment
       config: {get_resource: ControllerClusterConfig}
       servers: {get_attr: [Controller, attributes, nova_server_resource]}
 
   ControllerAllNodesDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ControllerAllNodesDeployment
       config: {get_attr: [allNodesConfig, config_id]}
       servers: {get_attr: [Controller, attributes, nova_server_resource]}
 
   ComputeAllNodesDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ComputeAllNodesDeployment
       config: {get_attr: [allNodesConfig, config_id]}
       servers: {get_attr: [Compute, attributes, nova_server_resource]}
 
   BlockStorageAllNodesDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: BlockStorageAllNodesDeployment
       config: {get_attr: [allNodesConfig, config_id]}
       servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
 
   ObjectStorageAllNodesDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ObjectStorageAllNodesDeployment
       config: {get_attr: [allNodesConfig, config_id]}
       servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
 
   CephStorageAllNodesDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: CephStorageAllNodesDeployment
       config: {get_attr: [allNodesConfig, config_id]}
       servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
 
@@ -1411,6 +1449,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: ControllerAllNodesDeployment
     properties:
+      name: ControllerAllNodesValidationDeployment
       config: {get_resource: AllNodesValidationConfig}
       servers: {get_attr: [Controller, attributes, nova_server_resource]}
 
@@ -1418,6 +1457,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: ComputeAllNodesDeployment
     properties:
+      name: ComputeAllNodesValidationDeployment
       config: {get_resource: AllNodesValidationConfig}
       servers: {get_attr: [Compute, attributes, nova_server_resource]}
 
@@ -1425,6 +1465,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: BlockStorageAllNodesDeployment
     properties:
+      name: BlockStorageAllNodesValidationDeployment
       config: {get_resource: AllNodesValidationConfig}
       servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
 
@@ -1432,6 +1473,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: ObjectStorageAllNodesDeployment
     properties:
+      name: ObjectStorageAllNodesValidationDeployment
       config: {get_resource: AllNodesValidationConfig}
       servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
 
@@ -1439,6 +1481,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: CephStorageAllNodesDeployment
     properties:
+      name: CephStorageAllNodesValidationDeployment
       config: {get_resource: AllNodesValidationConfig}
       servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
 
@@ -1535,12 +1578,15 @@ outputs:
   NovaInternalVip:
     description: VIP for Nova API internal endpoint
     value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, NovaApiNetwork]}]}
+  SaharaInternalVip:
+    description: VIP for Sahara API internal endpoint
+    value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
   SwiftInternalVip:
     description: VIP for Swift Proxy internal endpoint
     value: {get_attr: [VipMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
   HostsEntry:
     description: |
-      The content that should be appended to your /etc/hosts if you want do get
+      The content that should be appended to your /etc/hosts if you want to get
       hostname-based access to the deployed nodes (useful for testing without
       setting up a DNS).
     value: {get_attr: [allNodesConfig, hosts_entries]}
index 3908afe..3dd3d5c 100644 (file)
@@ -50,6 +50,19 @@ parameters:
     type: comma_delimited_list
   keystone_admin_api_node_ips:
     type: comma_delimited_list
+  sahara_api_node_ips:
+    type: comma_delimited_list
+
+  DeployIdentifier:
+    type: string
+    description: >
+      Setting this to a unique value will re-run any deployment tasks which
+      perform configuration on a Heat stack-update.
+  UpdateIdentifier:
+    type: string
+    description: >
+      Setting to a previously unused value during stack-update will trigger
+      package update on all nodes
 
 resources:
 
@@ -230,6 +243,14 @@ resources:
                         list_join:
                         - "','"
                         - {get_param: keystone_admin_api_node_ips}
+                sahara_api_node_ips:
+                  str_replace:
+                    template: "['SERVERS_LIST']"
+                    params:
+                      SERVERS_LIST:
+                        list_join:
+                        - "','"
+                        - {get_param: sahara_api_node_ips}
 
                 # NOTE(gfidente): interpolation with %{} in the
                 # hieradata file can't be used as it returns string
@@ -239,6 +260,10 @@ resources:
                 neutron::rabbit_hosts: *rabbit_nodes_array
                 nova::rabbit_hosts: *rabbit_nodes_array
                 keystone::rabbit_hosts: *rabbit_nodes_array
+                sahara::rabbit_hosts: *rabbit_nodes_array
+
+                deploy_identifier: {get_param: DeployIdentifier}
+                update_identifier: {get_param: UpdateIdentifier}
 
 outputs:
   config_id:
@@ -247,7 +272,7 @@ outputs:
       {get_resource: allNodesConfigImpl}
   hosts_entries:
     description: |
-      The content that should be appended to your /etc/hosts if you want do get
+      The content that should be appended to your /etc/hosts if you want to get
       hostname-based access to the deployed nodes (useful for testing without
       setting up a DNS).
     value: {get_attr: [allNodesConfigImpl, config, hosts]}
index 0f7dd36..f9c5346 100644 (file)
@@ -30,6 +30,7 @@ resources:
   CephStorageDeployment_Step1:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: CephStorageDeployment_Step1
       servers:  {get_param: servers}
       config: {get_resource: CephStoragePuppetConfig}
       input_values:
index b6a1007..e310e1f 100644 (file)
@@ -34,6 +34,10 @@ parameters:
     description: Mapping of service_name -> network name. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  TimeZone:
+    default: 'UTC'
+    description: The timezone to be set on Ceph nodes.
+    type: string
   UpdateIdentifier:
     default: ''
     type: string
@@ -59,6 +63,13 @@ parameters:
     description: >
       Heat action when to apply network configuration changes
     default: ['CREATE']
+  SoftwareConfigTransport:
+    default: POLL_SERVER_CFN
+    description: |
+      How the server should receive the metadata required for software configuration.
+    type: string
+    constraints:
+    - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
   CloudDomain:
     default: ''
     type: string
@@ -89,6 +100,7 @@ resources:
       user_data_format: SOFTWARE_CONFIG
       user_data: {get_resource: UserData}
       name: {get_param: Hostname}
+      software_config_transport: {get_param: SoftwareConfigTransport}
       metadata: {get_param: ServerMetadata}
       scheduler_hints: {get_param: SchedulerHints}
 
@@ -112,6 +124,16 @@ resources:
   NodeUserData:
     type: OS::TripleO::NodeUserData
 
+  ExternalPort:
+    type: OS::TripleO::CephStorage::Ports::ExternalPort
+    properties:
+      ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
+  InternalApiPort:
+    type: OS::TripleO::CephStorage::Ports::InternalApiPort
+    properties:
+      ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
   StoragePort:
     type: OS::TripleO::CephStorage::Ports::StoragePort
     properties:
@@ -122,30 +144,53 @@ resources:
     properties:
       ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
 
+  TenantPort:
+    type: OS::TripleO::CephStorage::Ports::TenantPort
+    properties:
+      ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
+  ManagementPort:
+    type: OS::TripleO::CephStorage::Ports::ManagementPort
+    properties:
+      ControlPlaneIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+
   NetworkConfig:
     type: OS::TripleO::CephStorage::Net::SoftwareConfig
     properties:
       ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
 
   NetIpMap:
     type: OS::TripleO::Network::Ports::NetIpMap
     properties:
       ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      ExternalIp: {get_attr: [ExternalPort, ip_address]}
+      InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
       StorageIp: {get_attr: [StoragePort, ip_address]}
       StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      TenantIp: {get_attr: [TenantPort, ip_address]}
+      ManagementIp: {get_attr: [ManagementPort, ip_address]}
 
   NetIpSubnetMap:
     type: OS::TripleO::Network::Ports::NetIpSubnetMap
     properties:
       ControlPlaneIp: {get_attr: [CephStorage, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
     properties:
+      name: NetworkDeployment
       config: {get_resource: NetworkConfig}
       server: {get_resource: CephStorage}
       actions: {get_param: NetworkDeploymentActions}
@@ -154,10 +199,12 @@ resources:
     type: OS::Heat::StructuredDeployment
     depends_on: NetworkDeployment
     properties:
+      name: CephStorageDeployment
       config: {get_resource: CephStorageConfig}
       server: {get_resource: CephStorage}
       input_values:
         ntp_servers: {get_param: NtpServer}
+        timezone: {get_param: TimeZone}
         enable_package_install: {get_param: EnablePackageInstall}
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
         ceph_cluster_network: {get_attr: [NetIpSubnetMap, net_ip_subnet_map, {get_param: [ServiceNetMap, CephClusterNetwork]}]}
@@ -189,6 +236,7 @@ resources:
               raw_data: {get_file: hieradata/ceph.yaml}
               mapped_data:
                 ntp::servers: {get_input: ntp_servers}
+                timezone::timezone: {get_input: timezone}
                 tripleo::packages::enable_install: {get_input: enable_package_install}
                 tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
                 ceph::profile::params::cluster_network: {get_input: ceph_cluster_network}
@@ -241,12 +289,24 @@ outputs:
     description: Heat resource handle for the ceph storage server
     value:
       {get_resource: CephStorage}
+  external_ip_address:
+    description: IP address of the server in the external network
+    value: {get_attr: [ExternalPort, ip_address]}
+  internal_api_ip_address:
+    description: IP address of the server in the internal_api network
+    value: {get_attr: [InternalApiPort, ip_address]}
   storage_ip_address:
     description: IP address of the server in the storage network
     value: {get_attr: [StoragePort, ip_address]}
   storage_mgmt_ip_address:
     description: IP address of the server in the storage_mgmt network
     value: {get_attr: [StorageMgmtPort, ip_address]}
+  tenant_ip_address:
+    description: IP address of the server in the tenant network
+    value: {get_attr: [TenantPort, ip_address]}
+  management_ip_address:
+    description: IP address of the server in the management network
+    value: {get_attr: [ManagementPort, ip_address]}
   config_identifier:
     description: identifier which changes if the node configuration may need re-applying
     value:
index c97cfcf..9b7c752 100644 (file)
@@ -28,5 +28,14 @@ resources:
   VolumeDeployment_Step1:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: VolumeDeployment_Step1
       servers:  {get_param: servers}
       config: {get_resource: VolumePuppetConfig}
+
+  # Note, this should come last, so use depends_on to ensure
+  # this is created after any other resources.
+  ExtraConfig:
+    depends_on: VolumeDeployment_Step1
+    type: OS::TripleO::NodeExtraConfigPost
+    properties:
+        servers: {get_param: servers}
index fc19705..f7e8f90 100644 (file)
@@ -17,7 +17,6 @@ parameters:
     description: The size of the loopback file used by the cinder LVM driver.
     type: number
   CinderPassword:
-    default: unset
     description: The password for the cinder service and db account, used by cinder-api.
     type: string
     hidden: true
@@ -70,7 +69,6 @@ parameters:
     description: The user name for SNMPd with readonly rights running on all Overcloud nodes
     type: string
   SnmpdReadonlyUserPassword:
-    default: unset
     description: The user password for SNMPd with readonly rights running on all Overcloud nodes
     type: string
     hidden: true
@@ -101,6 +99,10 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  TimeZone:
+    default: 'UTC'
+    description: The timezone to be set on Cinder nodes.
+    type: string
   GlanceApiVirtualIP:
     type: string
     default: ''
@@ -112,6 +114,13 @@ parameters:
     description: >
       Heat action when to apply network configuration changes
     default: ['CREATE']
+  SoftwareConfigTransport:
+    default: POLL_SERVER_CFN
+    description: |
+      How the server should receive the metadata required for software configuration.
+    type: string
+    constraints:
+    - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
   CloudDomain:
     default: ''
     type: string
@@ -143,6 +152,7 @@ resources:
       user_data_format: SOFTWARE_CONFIG
       user_data: {get_resource: UserData}
       name: {get_param: Hostname}
+      software_config_transport: {get_param: SoftwareConfigTransport}
       metadata: {get_param: ServerMetadata}
       scheduler_hints: {get_param: SchedulerHints}
 
@@ -166,6 +176,11 @@ resources:
   NodeUserData:
     type: OS::TripleO::NodeUserData
 
+  ExternalPort:
+    type: OS::TripleO::BlockStorage::Ports::ExternalPort
+    properties:
+      ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
   InternalApiPort:
     type: OS::TripleO::BlockStorage::Ports::InternalApiPort
     properties:
@@ -181,25 +196,42 @@ resources:
     properties:
       ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
 
+  TenantPort:
+    type: OS::TripleO::BlockStorage::Ports::TenantPort
+    properties:
+      ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
+  ManagementPort:
+    type: OS::TripleO::BlockStorage::Ports::ManagementPort
+    properties:
+      ControlPlaneIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+
   NetworkConfig:
     type: OS::TripleO::BlockStorage::Net::SoftwareConfig
     properties:
       ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
 
   NetIpMap:
     type: OS::TripleO::Network::Ports::NetIpMap
     properties:
       ControlPlaneIp: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+      ExternalIp: {get_attr: [ExternalPort, ip_address]}
       InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
       StorageIp: {get_attr: [StoragePort, ip_address]}
       StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      TenantIp: {get_attr: [TenantPort, ip_address]}
+      ManagementIp: {get_attr: [ManagementPort, ip_address]}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
     properties:
+      name: NetworkDeployment
       config: {get_resource: NetworkConfig}
       server: {get_resource: BlockStorage}
       actions: {get_param: NetworkDeploymentActions}
@@ -208,11 +240,12 @@ resources:
     type: OS::Heat::StructuredDeployment
     depends_on: NetworkDeployment
     properties:
+      name: BlockStorageDeployment
       server: {get_resource: BlockStorage}
       config: {get_resource: BlockStorageConfig}
       input_values:
         debug: {get_param: Debug}
-        cinder_dsn: {list_join: ['', ['mysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIP} , '/cinder']]}
+        cinder_dsn: {list_join: ['', ['mysql+pymysql://cinder:', {get_param: CinderPassword}, '@', {get_param: MysqlVirtualIP} , '/cinder']]}
         snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
         snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
         cinder_lvm_loop_device_size:
@@ -229,6 +262,7 @@ resources:
         rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
         rabbit_client_port: {get_param: RabbitClientPort}
         ntp_servers: {get_param: NtpServer}
+        timezone: {get_param: TimeZone}
         enable_package_install: {get_param: EnablePackageInstall}
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
 
@@ -271,6 +305,7 @@ resources:
                 cinder_iscsi_ip_address: {get_input: cinder_iscsi_ip_address}
                 cinder::glance::glance_api_servers: {get_input: glance_api_servers}
                 ntp::servers: {get_input: ntp_servers}
+                timezone::timezone: {get_input: timezone}
                 tripleo::packages::enable_install: {get_input: enable_package_install}
                 tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
                 snmpd_readonly_user_name: {get_input: snmpd_readonly_user_name}
@@ -297,6 +332,7 @@ resources:
   UpdateDeployment:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: UpdateDeployment
       config: {get_resource: UpdateConfig}
       server: {get_resource: BlockStorage}
       input_values:
@@ -316,6 +352,9 @@ outputs:
     description: Heat resource handle for the block storage server
     value:
       {get_resource: BlockStorage}
+  external_ip_address:
+    description: IP address of the server in the external network
+    value: {get_attr: [ExternalPort, ip_address]}
   internal_api_ip_address:
     description: IP address of the server in the internal_api network
     value: {get_attr: [InternalApiPort, ip_address]}
@@ -325,6 +364,12 @@ outputs:
   storage_mgmt_ip_address:
     description: IP address of the server in the storage_mgmt network
     value: {get_attr: [StorageMgmtPort, ip_address]}
+  tenant_ip_address:
+    description: IP address of the server in the tenant network
+    value: {get_attr: [TenantPort, ip_address]}
+  management_ip_address:
+    description: IP address of the server in the management network
+    value: {get_attr: [ManagementPort, ip_address]}
   config_identifier:
     description: identifier which changes if the node configuration may need re-applying
     value:
index b63b06b..3861e50 100644 (file)
@@ -31,6 +31,7 @@ resources:
   ComputePuppetDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: ComputePuppetDeployment
       servers:  {get_param: servers}
       config: {get_resource: ComputePuppetConfig}
       input_values:
index 247c032..a0af231 100644 (file)
@@ -1,11 +1,10 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
 
 description: >
   OpenStack hypervisor node configured via Puppet.
 
 parameters:
   AdminPassword:
-    default: unset
     description: The password for the keystone admin account, used for monitoring, querying neutron etc.
     type: string
     hidden: true
@@ -16,12 +15,10 @@ parameters:
     constraints:
     - allowed_values: ['', Present]
   CeilometerMeteringSecret:
-    default: unset
     description: Secret shared by the ceilometer services.
     type: string
     hidden: true
   CeilometerPassword:
-    default: unset
     description: The password for the ceilometer service account.
     type: string
     hidden: true
@@ -80,7 +77,7 @@ parameters:
       to create provider networks (and we use this for the default floating
       network) - if changing this either use different post-install network
       scripts or be sure to keep 'datacentre' as a mapping network name.
-    type: string
+    type: comma_delimited_list
     default: "datacentre:br-ex"
   NeutronEnableTunnelling:
     type: string
@@ -91,7 +88,7 @@ parameters:
         Enable/disable the L2 population feature in the Neutron agents.
     default: "False"
   NeutronFlatNetworks:
-    type: string
+    type: comma_delimited_list
     default: 'datacentre'
     description: >
       If set, flat networks to configure in neutron plugins.
@@ -99,18 +96,17 @@ parameters:
     type: string
     default: ''  # Has to be here because of the ignored empty value bug
   NeutronNetworkType:
-    type: string
-    description: The tenant network type for Neutron, either gre or vxlan.
+    type: comma_delimited_list
+    description: The tenant network type for Neutron.
     default: 'vxlan'
   NeutronNetworkVLANRanges:
-    default: 'datacentre'
+    default: 'datacentre:1:1000'
     description: >
       The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
       Neutron documentation for permitted values. Defaults to permitting any
       VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
     type: comma_delimited_list
   NeutronPassword:
-    default: unset
     description: The password for the neutron service account, used by neutron agents.
     type: string
     hidden: true
@@ -123,10 +119,9 @@ parameters:
     description: A port to add to the NeutronPhysicalBridge.
     type: string
   NeutronTunnelTypes:
-    type: string
+    type: comma_delimited_list
     description: |
-        The tunnel types for the Neutron tenant network. To specify multiple
-        values, use a comma separated string, like so: 'gre,vxlan'
+        The tunnel types for the Neutron tenant network.
     default: 'vxlan'
   NeutronTunnelIdRanges:
     description: |
@@ -147,7 +142,6 @@ parameters:
     default: 'False'
     type: string
   NeutronMetadataProxySharedSecret:
-    default: 'unset'
     description: Shared secret to prevent spoofing
     type: string
     hidden: true
@@ -158,7 +152,7 @@ parameters:
         from neutron.core_plugins namespace.
     type: string
   NeutronServicePlugins:
-    default: "router"
+    default: "router,qos"
     description: |
         Comma-separated list of service plugin entrypoints to be loaded from the
         neutron.service_plugins namespace.
@@ -171,9 +165,13 @@ parameters:
   NeutronMechanismDrivers:
     default: 'openvswitch'
     description: |
-        The mechanism drivers for the Neutron tenant network. To specify multiple
-        values, use a comma separated string, like so: 'openvswitch,l2_population'
-    type: string
+        The mechanism drivers for the Neutron tenant network.
+    type: comma_delimited_list
+  NeutronAgentExtensions:
+    default: "qos"
+    description: |
+        Comma-separated list of extensions enabled for the Neutron agents.
+    type: comma_delimited_list
   # Not relevant for Computes, should be removed
   NeutronAllowL3AgentFailover:
     default: 'True'
@@ -202,7 +200,7 @@ parameters:
     type: json
   NovaComputeLibvirtType:
     type: string
-    default: ''
+    default: kvm
   NovaComputeLibvirtVifDriver:
     default: ''
     description: Libvirt VIF driver configuration for the network
@@ -212,7 +210,6 @@ parameters:
     description: Whether to enable or not the Rbd backend for Nova
     type: boolean
   NovaPassword:
-    default: unset
     description: The password for the nova service account, used by nova-api.
     type: string
     hidden: true
@@ -258,7 +255,6 @@ parameters:
     description: The user name for SNMPd with readonly rights running on all Overcloud nodes
     type: string
   SnmpdReadonlyUserPassword:
-    default: unset
     description: The user password for SNMPd with readonly rights running on all Overcloud nodes
     type: string
     hidden: true
@@ -276,6 +272,10 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  TimeZone:
+    default: 'UTC'
+    description: The timezone to be set on compute nodes.
+    type: string
   UpdateIdentifier:
     default: ''
     type: string
@@ -290,6 +290,13 @@ parameters:
     description: >
       Heat action when to apply network configuration changes
     default: ['CREATE']
+  SoftwareConfigTransport:
+    default: POLL_SERVER_CFN
+    description: |
+      How the server should receive the metadata required for software configuration.
+    type: string
+    constraints:
+    - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
   CloudDomain:
     default: ''
     type: string
@@ -323,6 +330,7 @@ resources:
       user_data_format: SOFTWARE_CONFIG
       user_data: {get_resource: UserData}
       name: {get_param: Hostname}
+      software_config_transport: {get_param: SoftwareConfigTransport}
       metadata: {get_param: ServerMetadata}
       scheduler_hints: {get_param: SchedulerHints}
 
@@ -346,6 +354,11 @@ resources:
   NodeUserData:
     type: OS::TripleO::NodeUserData
 
+  ExternalPort:
+    type: OS::TripleO::Compute::Ports::ExternalPort
+    properties:
+      ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
   InternalApiPort:
     type: OS::TripleO::Compute::Ports::InternalApiPort
     properties:
@@ -356,30 +369,47 @@ resources:
     properties:
       ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
 
+  StorageMgmtPort:
+    type: OS::TripleO::Compute::Ports::StorageMgmtPort
+    properties:
+      ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
   TenantPort:
     type: OS::TripleO::Compute::Ports::TenantPort
     properties:
       ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
 
+  ManagementPort:
+    type: OS::TripleO::Compute::Ports::ManagementPort
+    properties:
+      ControlPlaneIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+
   NetIpMap:
     type: OS::TripleO::Network::Ports::NetIpMap
     properties:
       ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      ExternalIp: {get_attr: [ExternalPort, ip_address]}
       InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
       StorageIp: {get_attr: [StoragePort, ip_address]}
+      StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
       TenantIp: {get_attr: [TenantPort, ip_address]}
+      ManagementIp: {get_attr: [ManagementPort, ip_address]}
 
   NetworkConfig:
     type: OS::TripleO::Compute::Net::SoftwareConfig
     properties:
       ControlPlaneIp: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
+      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
       TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
     properties:
+      name: NetworkDeployment
       config: {get_resource: NetworkConfig}
       server: {get_resource: NovaCompute}
       actions: {get_param: NetworkDeploymentActions}
@@ -406,6 +436,7 @@ resources:
             - common
             - cisco_n1kv_data  # Optionally provided by ComputeExtraConfigPre
             - nova_nuage_data  # Optionally provided by ComputeExtraConfigPre
+            - midonet_data # Optionally provided by AllNodesExtraConfig
           datafiles:
             compute_extraconfig:
               mapped_data: {get_param: NovaComputeExtraConfig}
@@ -452,16 +483,17 @@ resources:
                 neutron::rabbit_user: {get_input: rabbit_username}
                 neutron::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
                 neutron::rabbit_port: {get_input: rabbit_client_port}
-                neutron_flat_networks: {get_input: neutron_flat_networks}
+                neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
                 neutron_host: {get_input: neutron_host}
                 neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
 
-                neutron_tenant_network_type: {get_input: neutron_tenant_network_type}
-                neutron_tunnel_types: {get_input: neutron_tunnel_types}
+                neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
+                neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
+                neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions}
                 neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
                 neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
                 neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
-                neutron_bridge_mappings: {get_input: neutron_bridge_mappings}
+                neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
                 neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
                 neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
                 neutron_physical_bridge: {get_input: neutron_physical_bridge}
@@ -475,11 +507,12 @@ resources:
                 neutron::core_plugin: {get_input: neutron_core_plugin}
                 neutron::service_plugins: {get_input: neutron_service_plugins}
                 neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
-                neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers}
+                neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
                 neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
                 keystone_public_api_virtual_ip: {get_input: keystone_vip}
                 admin_password: {get_input: admin_password}
                 ntp::servers: {get_input: ntp_servers}
+                timezone::timezone: {get_input: timezone}
                 tripleo::packages::enable_install: {get_input: enable_package_install}
                 tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
 
@@ -487,6 +520,7 @@ resources:
     type: OS::TripleO::SoftwareDeployment
     depends_on: NetworkDeployment
     properties:
+      name: NovaComputeDeployment
       config: {get_resource: NovaComputeConfig}
       server: {get_resource: NovaCompute}
       input_values:
@@ -510,36 +544,43 @@ resources:
         snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
         snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
         glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
-        neutron_flat_networks: {get_param: NeutronFlatNetworks}
+        neutron_flat_networks:
+          str_replace:
+            template: NETWORKS
+            params:
+              NETWORKS: {get_param: NeutronFlatNetworks}
         neutron_host: {get_param: NeutronHost}
         neutron_local_ip: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, NeutronTenantNetwork]}]}
-        neutron_tenant_network_type: {get_param: NeutronNetworkType}
-        neutron_tunnel_types: {get_param: NeutronTunnelTypes}
         neutron_tunnel_id_ranges:
           str_replace:
-            template: "['RANGES']"
+            template: RANGES
             params:
-              RANGES:
-                list_join:
-                - "','"
-                - {get_param: NeutronTunnelIdRanges}
+              RANGES: {get_param: NeutronTunnelIdRanges}
         neutron_vni_ranges:
           str_replace:
-            template: "['RANGES']"
+            template: RANGES
             params:
-              RANGES:
-                list_join:
-                - "','"
-                - {get_param: NeutronVniRanges}
+              RANGES: {get_param: NeutronVniRanges}
+        neutron_tenant_network_types:
+          str_replace:
+            template: TYPES
+            params:
+              TYPES: {get_param: NeutronNetworkType}
+        neutron_tunnel_types:
+          str_replace:
+            template: TYPES
+            params:
+              TYPES: {get_param: NeutronTunnelTypes}
         neutron_network_vlan_ranges:
           str_replace:
-            template: "['RANGES']"
+            template: RANGES
+            params:
+              RANGES: {get_param: NeutronNetworkVLANRanges}
+        neutron_bridge_mappings:
+          str_replace:
+            template: MAPPINGS
             params:
-              RANGES:
-                list_join:
-                - "','"
-                - {get_param: NeutronNetworkVLANRanges}
-        neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
+              MAPPINGS: {get_param: NeutronBridgeMappings}
         neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
         neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
         neutron_physical_bridge: {get_param: NeutronPhysicalBridge}
@@ -551,21 +592,24 @@ resources:
         neutron_core_plugin: {get_param: NeutronCorePlugin}
         neutron_service_plugins:
           str_replace:
-            template: "['PLUGINS']"
+            template: PLUGINS
             params:
-              PLUGINS:
-                list_join:
-                - "','"
-                - {get_param: NeutronServicePlugins}
+              PLUGINS: {get_param: NeutronServicePlugins}
         neutron_type_drivers:
           str_replace:
-            template: "['DRIVERS']"
+            template: DRIVERS
+            params:
+              DRIVERS: {get_param: NeutronTypeDrivers}
+        neutron_mechanism_drivers:
+          str_replace:
+            template: MECHANISMS
+            params:
+              MECHANISMS: {get_param: NeutronMechanismDrivers}
+        neutron_agent_extensions:
+          str_replace:
+            template: AGENT_EXTENSIONS
             params:
-              DRIVERS:
-                list_join:
-                - "','"
-                - {get_param: NeutronTypeDrivers}
-        neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
+              AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
         neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
         neutron_internal_url: {get_param: [EndpointMap, NeutronInternal, uri]}
         neutron_admin_auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri]}
@@ -576,6 +620,7 @@ resources:
         rabbit_client_use_ssl: {get_param: RabbitClientUseSSL}
         rabbit_client_port: {get_param: RabbitClientPort}
         ntp_servers: {get_param: NtpServer}
+        timezone: {get_param: TimeZone}
         enable_package_install: {get_param: EnablePackageInstall}
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
 
@@ -607,6 +652,7 @@ resources:
   UpdateDeployment:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: UpdateDeployment
       config: {get_resource: UpdateConfig}
       server: {get_resource: NovaCompute}
       input_values:
@@ -617,15 +663,24 @@ outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
     value: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+  external_ip_address:
+    description: IP address of the server in the external network
+    value: {get_attr: [ExternalPort, ip_address]}
   internal_api_ip_address:
     description: IP address of the server in the internal_api network
     value: {get_attr: [InternalApiPort, ip_address]}
   storage_ip_address:
     description: IP address of the server in the storage network
     value: {get_attr: [StoragePort, ip_address]}
+  storage_mgmt_ip_address:
+    description: IP address of the server in the storage_mgmt network
+    value: {get_attr: [StorageMgmtPort, ip_address]}
   tenant_ip_address:
     description: IP address of the server in the tenant network
     value: {get_attr: [TenantPort, ip_address]}
+  management_ip_address:
+    description: IP address of the server in the management network
+    value: {get_attr: [ManagementPort, ip_address]}
   hostname:
     description: Hostname of the server
     value: {get_attr: [NovaCompute, name]}
index 941e1ac..d250dd7 100644 (file)
@@ -17,6 +17,13 @@ parameters:
 
 resources:
 
+  ControllerPrePuppet:
+    type: OS::TripleO::Tasks::ControllerPrePuppet
+    properties:
+      servers:  {get_param: servers}
+      input_values:
+        update_identifier: {get_param: NodeConfigIdentifiers}
+
   ControllerPuppetConfig:
     type: OS::TripleO::ControllerConfig
 
@@ -26,7 +33,9 @@ resources:
   # e.g all Deployment resources should have a *Deployment_StepN suffix
   ControllerLoadBalancerDeployment_Step1:
     type: OS::Heat::StructuredDeployments
+    depends_on: ControllerPrePuppet
     properties:
+      name: ControllerLoadBalancerDeployment_Step1
       servers:  {get_param: servers}
       config: {get_resource: ControllerPuppetConfig}
       input_values:
@@ -38,6 +47,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: ControllerLoadBalancerDeployment_Step1
     properties:
+      name: ControllerServicesBaseDeployment_Step2
       servers:  {get_param: servers}
       config: {get_resource: ControllerPuppetConfig}
       input_values:
@@ -63,6 +73,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: ControllerServicesBaseDeployment_Step2
     properties:
+      name: ControllerRingbuilderDeployment_Step3
       servers:  {get_param: servers}
       config: {get_resource: ControllerRingbuilderPuppetConfig}
       input_values:
@@ -72,6 +83,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: ControllerRingbuilderDeployment_Step3
     properties:
+      name: ControllerOvercloudServicesDeployment_Step4
       servers:  {get_param: servers}
       config: {get_resource: ControllerPuppetConfig}
       input_values:
@@ -82,6 +94,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: ControllerOvercloudServicesDeployment_Step4
     properties:
+      name: ControllerOvercloudServicesDeployment_Step5
       servers:  {get_param: servers}
       config: {get_resource: ControllerPuppetConfig}
       input_values:
@@ -92,16 +105,25 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: ControllerOvercloudServicesDeployment_Step5
     properties:
+      name: ControllerOvercloudServicesDeployment_Step6
       servers:  {get_param: servers}
       config: {get_resource: ControllerPuppetConfig}
       input_values:
         step: 5
         update_identifier: {get_param: NodeConfigIdentifiers}
 
+  ControllerPostPuppet:
+    type: OS::TripleO::Tasks::ControllerPostPuppet
+    depends_on: ControllerOvercloudServicesDeployment_Step6
+    properties:
+      servers:  {get_param: servers}
+      input_values:
+        update_identifier: {get_param: NodeConfigIdentifiers}
+
   # Note, this should come last, so use depends_on to ensure
   # this is created after any other resources.
   ExtraConfig:
-    depends_on: ControllerOvercloudServicesDeployment_Step5
+    depends_on: ControllerPostPuppet
     type: OS::TripleO::NodeExtraConfigPost
     properties:
         servers: {get_param: servers}
index 5d39462..d039475 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2015-10-15
 
 description: >
   OpenStack controller node configured by Puppet.
@@ -10,12 +10,10 @@ parameters:
     type: string
     hidden: true
   AdminPassword:
-    default: unset
     description: The password for the keystone admin account, used for monitoring, querying neutron etc.
     type: string
     hidden: true
   AdminToken:
-    default: unset
     description: The keystone auth secret and db password.
     type: string
     hidden: true
@@ -27,18 +25,25 @@ parameters:
     description: The ceilometer backend type.
     type: string
   CeilometerMeteringSecret:
-    default: unset
     description: Secret shared by the ceilometer services.
     type: string
     hidden: true
   CeilometerPassword:
-    default: unset
     description: The password for the ceilometer service and db account.
     type: string
     hidden: true
   CinderApiVirtualIP:
     type: string
     default: ''
+  CeilometerWorkers:
+    default: 0
+    description: Number of workers for Ceilometer service.
+    type: number
+  CinderEnableDBPurge:
+    default: true
+    description: |
+      Whether to create a cron job for purging soft deleted rows in the Cinder database.
+    type: boolean
   CinderEnableNfsBackend:
     default: false
     description: Whether to enable or not the NFS backend for Cinder
@@ -72,7 +77,6 @@ parameters:
       CinderEnableNfsBackend is true.
     type: comma_delimited_list
   CinderPassword:
-    default: unset
     description: The password for the cinder service and db account, used by cinder-api.
     type: string
     hidden: true
@@ -81,15 +85,24 @@ parameters:
     description: Contains parameters to configure Cinder backends. Typically
                  set via parameter_defaults in the resource registry.
     type: json
-  CloudName:
-    default: ''
-    description: The DNS name of this cloud. E.g. ci-overcloud.tripleo.org
-    type: string
+  CinderWorkers:
+    default: 0
+    description: Number of workers for Cinder service.
+    type: number
   ControllerExtraConfig:
     default: {}
     description: |
       Controller specific hiera configuration data to inject into the cluster.
     type: json
+  ControllerIPs:
+    default: {}
+    description: >
+      A network mapped list of IPs to assign to Controllers in the following form:
+      {
+        "internal_api": ["a.b.c.d", "e.f.g.h"],
+        ...
+      }
+    type: json
   ControlVirtualInterface:
     default: 'br-ex'
     description: Interface where virtual ip will be assigned.
@@ -170,7 +183,6 @@ parameters:
     type: string
     default: ''
   GlancePassword:
-    default: unset
     description: The password for the glance service and db account, used by the glance services.
     type: string
     hidden: true
@@ -209,15 +221,17 @@ parameters:
     default: /dev/log
     description: Syslog address where HAproxy will send its log
     type: string
+  GlanceWorkers:
+    default: 0
+    description: Number of workers for Glance service.
+    type: number
   HeatPassword:
-    default: unset
     description: The password for the Heat service and db account, used by the Heat services.
     type: string
     hidden: true
   HeatStackDomainAdminPassword:
     description: Password for heat_domain_admin user.
     type: string
-    default: ''
     hidden: true
   HeatAuthEncryptionKey:
     description: Auth encryption key for heat-engine
@@ -227,6 +241,10 @@ parameters:
     default: '*'
     description: A list of IP/Hostname allowed to connect to horizon
     type: comma_delimited_list
+  HeatWorkers:
+    default: 0
+    description: Number of workers for Heat service.
+    type: number
   HorizonSecret:
     description: Secret key for Django
     type: string
@@ -254,6 +272,11 @@ parameters:
     default: ''
     description: Keystone self-signed certificate authority certificate.
     type: string
+  KeystoneEnableDBPurge:
+    default: true
+    description: |
+        Whether to create a cron job for purging soft deleted rows in the Keystone database.
+    type: boolean
   KeystoneSigningCertificate:
     default: ''
     description: Keystone certificate for verifying token validity.
@@ -294,6 +317,18 @@ parameters:
     default: false
     description: Whether IPtables rules should be purged before setting up the new ones.
     type: boolean
+  KeystoneWorkers:
+    default: 0
+    description: Number of workers for Keystone service.
+    type: number
+  SaharaApiVirtualIP:
+    type: string
+    default: ''
+  SaharaPassword:
+    default: unset
+    description: The password for the sahara service account, used by sahara-api.
+    type: string
+    hidden: true
   MysqlClusterUniquePart:
     description: A unique identifier of the MySQL cluster the controller is in.
     type: string
@@ -328,7 +363,7 @@ parameters:
       to create provider networks (and we use this for the default floating
       network) - if changing this either use different post-install network
       scripts or be sure to keep 'datacentre' as a mapping network name.
-    type: string
+    type: comma_delimited_list
     default: "datacentre:br-ex"
   NeutronDnsmasqOptions:
     default: 'dhcp-option-force=26,1400'
@@ -367,7 +402,6 @@ parameters:
     description: Whether to configure Neutron Distributed Virtual Routers
     type: string
   NeutronMetadataProxySharedSecret:
-    default: 'unset'
     description: Shared secret to prevent spoofing
     type: string
     hidden: true
@@ -378,7 +412,7 @@ parameters:
         from neutron.core_plugins namespace.
     type: string
   NeutronServicePlugins:
-    default: "router"
+    default: "router,qos"
     description: |
         Comma-separated list of service plugin entrypoints to be loaded from the
         neutron.service_plugins namespace.
@@ -391,9 +425,8 @@ parameters:
   NeutronMechanismDrivers:
     default: 'openvswitch'
     description: |
-        The mechanism drivers for the Neutron tenant network. To specify multiple
-        values, use a comma separated string, like so: 'openvswitch,l2_population'
-    type: string
+        The mechanism drivers for the Neutron tenant network.
+    type: comma_delimited_list
   NeutronAllowL3AgentFailover:
     default: 'True'
     description: Allow automatic l3-agent failover
@@ -411,7 +444,7 @@ parameters:
         Enable/disable the L2 population feature in the Neutron agents.
     default: "False"
   NeutronFlatNetworks:
-    type: string
+    type: comma_delimited_list
     default: 'datacentre'
     description: If set, flat networks to configure in neutron plugins.
   NeutronL3HA:
@@ -420,17 +453,16 @@ parameters:
     type: string
   NeutronNetworkType:
     default: 'vxlan'
-    description: The tenant network type for Neutron, either gre or vxlan.
-    type: string
+    description: The tenant network type for Neutron.
+    type: comma_delimited_list
   NeutronNetworkVLANRanges:
-    default: 'datacentre'
+    default: 'datacentre:1:1000'
     description: >
       The Neutron ML2 and OpenVSwitch vlan mapping range to support. See the
       Neutron documentation for permitted values. Defaults to permitting any
       VLAN on the 'datacentre' physical network (See NeutronBridgeMappings).
     type: comma_delimited_list
   NeutronPassword:
-    default: unset
     description: The password for the neutron service and db account, used by neutron agents.
     type: string
     hidden: true
@@ -463,9 +495,8 @@ parameters:
   NeutronTunnelTypes:
     default: 'vxlan'
     description: |
-        The tunnel types for the Neutron tenant network. To specify multiple
-        values, use a comma separated string, like so: 'gre,vxlan'
-    type: string
+        The tunnel types for the Neutron tenant network.
+    type: comma_delimited_list
   NeutronTunnelIdRanges:
     description: |
         Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
@@ -478,14 +509,36 @@ parameters:
         of VXLAN VNI IDs that are available for tenant network allocation
     default: ["1:1000", ]
     type: comma_delimited_list
+  NeutronPluginExtensions:
+    default: "qos"
+    description: |
+        Comma-separated list of extensions enabled for the Neutron plugin.
+    type: comma_delimited_list
+  NeutronAgentExtensions:
+    default: "qos"
+    description: |
+        Comma-separated list of extensions enabled for the Neutron agents.
+    type: comma_delimited_list
   NovaApiVirtualIP:
     type: string
     default: ''
+  NeutronWorkers:
+    default: 0
+    description: Number of workers for Neutron service.
+    type: number
+  NovaEnableDBPurge:
+    default: true
+    description: |
+        Whether to create a cron job for purging soft deleted rows in the Nova database.
+    type: boolean
   NovaPassword:
-    default: unset
     description: The password for the nova service and db account, used by nova-api.
     type: string
     hidden: true
+  NovaWorkers:
+    default: 0
+    description: Number of workers for Nova service.
+    type: number
   MongoDbNoJournal:
     default: false
     description: Should MongoDb journaling be disabled
@@ -542,12 +595,10 @@ parameters:
     description: The user name for SNMPd with readonly rights running on all Overcloud nodes
     type: string
   SnmpdReadonlyUserPassword:
-    default: unset
     description: The user password for SNMPd with readonly rights running on all Overcloud nodes
     type: string
     hidden: true
   SwiftHashSuffix:
-    default: unset
     description: A random string to be used as a salt when hashing to determine mappings
       in the ring.
     hidden: true
@@ -565,7 +616,6 @@ parameters:
     description: Partition Power to use when building Swift rings
     type: number
   SwiftPassword:
-    default: unset
     description: The password for the swift service account, used by the swift proxy
       services.
     hidden: true
@@ -577,6 +627,14 @@ parameters:
     type: number
     default: 3
     description: How many replicas to use in the swift rings.
+  SwiftWorkers:
+    default: 0
+    description: Number of workers for Swift service.
+    type: number
+  TimeZone:
+    default: 'UTC'
+    description: The timezone to be set on controller nodes.
+    type: string
   VirtualIP: # DEPRECATED: use per service settings instead
     type: string
     default: ''  # Has to be here because of the ignored empty value bug
@@ -632,6 +690,13 @@ parameters:
   NodeIndex:
     type: number
     default: 0
+  SoftwareConfigTransport:
+    default: POLL_SERVER_CFN
+    description: |
+      How the server should receive the metadata required for software configuration.
+    type: string
+    constraints:
+    - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
   CloudDomain:
     default: ''
     type: string
@@ -663,6 +728,7 @@ resources:
       user_data_format: SOFTWARE_CONFIG
       user_data: {get_resource: UserData}
       name: {get_param: Hostname}
+      software_config_transport: {get_param: SoftwareConfigTransport}
       metadata: {get_param: ServerMetadata}
       scheduler_hints: {get_param: SchedulerHints}
 
@@ -689,25 +755,40 @@ resources:
   ExternalPort:
     type: OS::TripleO::Controller::Ports::ExternalPort
     properties:
+      IPPool: {get_param: ControllerIPs}
+      NodeIndex: {get_param: NodeIndex}
       ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
 
   InternalApiPort:
     type: OS::TripleO::Controller::Ports::InternalApiPort
     properties:
+      IPPool: {get_param: ControllerIPs}
+      NodeIndex: {get_param: NodeIndex}
       ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
 
   StoragePort:
     type: OS::TripleO::Controller::Ports::StoragePort
     properties:
+      IPPool: {get_param: ControllerIPs}
+      NodeIndex: {get_param: NodeIndex}
       ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
 
   StorageMgmtPort:
     type: OS::TripleO::Controller::Ports::StorageMgmtPort
     properties:
+      IPPool: {get_param: ControllerIPs}
+      NodeIndex: {get_param: NodeIndex}
       ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
 
   TenantPort:
     type: OS::TripleO::Controller::Ports::TenantPort
+    properties:
+      IPPool: {get_param: ControllerIPs}
+      NodeIndex: {get_param: NodeIndex}
+      ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
+
+  ManagementPort:
+    type: OS::TripleO::Controller::Ports::ManagementPort
     properties:
       ControlPlaneIP: {get_attr: [Controller, networks, ctlplane, 0]}
 
@@ -720,6 +801,7 @@ resources:
       StorageIp: {get_attr: [StoragePort, ip_address]}
       StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
       TenantIp: {get_attr: [TenantPort, ip_address]}
+      ManagementIp: {get_attr: [ManagementPort, ip_address]}
 
   NetIpSubnetMap:
     type: OS::TripleO::Network::Ports::NetIpSubnetMap
@@ -730,6 +812,7 @@ resources:
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
       TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
 
   NetworkConfig:
     type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -740,10 +823,12 @@ resources:
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
       TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
     properties:
+      name: NetworkDeployment
       config: {get_resource: NetworkConfig}
       server: {get_resource: Controller}
       actions: {get_param: NetworkDeploymentActions}
@@ -766,14 +851,24 @@ resources:
       server: {get_resource: Controller}
       NodeIndex: {get_param: NodeIndex}
 
+
   ControllerDeployment:
     type: OS::TripleO::SoftwareDeployment
     depends_on: NetworkDeployment
     properties:
+      name: ControllerDeployment
       config: {get_resource: ControllerConfig}
       server: {get_resource: Controller}
       input_values:
         bootstack_nodeid: {get_attr: [Controller, name]}
+        ceilometer_workers: {get_param: CeilometerWorkers}
+        cinder_workers: {get_param: CinderWorkers}
+        glance_workers: {get_param: GlanceWorkers}
+        heat_workers: {get_param: HeatWorkers}
+        keystone_workers: {get_param: KeystoneWorkers}
+        nova_workers: {get_param: NovaWorkers}
+        neutron_workers: {get_param: NeutronWorkers}
+        swift_workers: {get_param: SwiftWorkers}
         neutron_enable_tunneling: {get_param: NeutronEnableTunnelling}
         neutron_enable_l2pop: {get_param: NeutronEnableL2Pop}
         neutron_enable_isolated_metadata: {get_param: NeutronEnableIsolatedMetadata}
@@ -804,17 +899,15 @@ resources:
         admin_token: {get_param: AdminToken}
         neutron_public_interface_ip: {get_param: NeutronPublicInterfaceIP}
         debug: {get_param: Debug}
+        cinder_enable_db_purge: {get_param: CinderEnableDBPurge}
         cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend}
         cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
         cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
         cinder_nfs_servers:
           str_replace:
-            template: "['SERVERS']"
+            template: SERVERS
             params:
-              SERVERS:
-                list_join:
-                - "','"
-                - {get_param: CinderNfsServers}
+              SERVERS: {get_param: CinderNfsServers}
         cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
         cinder_password: {get_param: CinderPassword}
         cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
@@ -823,7 +916,7 @@ resources:
         cinder_dsn:
           list_join:
             - ''
-            - - 'mysql://cinder:'
+            - - 'mysql+pymysql://cinder:'
               - {get_param: CinderPassword}
               - '@'
               - {get_param: MysqlVirtualIP}
@@ -840,7 +933,7 @@ resources:
         glance_dsn:
           list_join:
             - ''
-            - - 'mysql://glance:'
+            - - 'mysql+pymysql://glance:'
               - {get_param: GlancePassword}
               - '@'
               - {get_param: MysqlVirtualIP}
@@ -850,7 +943,7 @@ resources:
         heat_dsn:
           list_join:
             - ''
-            - - 'mysql://heat:'
+            - - 'mysql+pymysql://heat:'
               - {get_param: HeatPassword}
               - '@'
               - {get_param: MysqlVirtualIP}
@@ -862,10 +955,11 @@ resources:
         keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
         keystone_notification_driver: {get_param: KeystoneNotificationDriver}
         keystone_notification_format: {get_param: KeystoneNotificationFormat}
+        keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
         keystone_dsn:
           list_join:
             - ''
-            - - 'mysql://keystone:'
+            - - 'mysql+pymysql://keystone:'
               - {get_param: AdminToken}
               - '@'
               - {get_param: MysqlVirtualIP}
@@ -890,73 +984,88 @@ resources:
             template: tripleo-CLUSTER
             params:
               CLUSTER: {get_param: MysqlClusterUniquePart}
-        neutron_flat_networks: {get_param: NeutronFlatNetworks}
+        neutron_flat_networks:
+          str_replace:
+            template: NETWORKS
+            params:
+              NETWORKS: {get_param: NeutronFlatNetworks}
         neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
         neutron_agent_mode: {get_param: NeutronAgentMode}
         neutron_router_distributed: {get_param: NeutronDVR}
         neutron_core_plugin: {get_param: NeutronCorePlugin}
         neutron_service_plugins:
           str_replace:
-            template: "['PLUGINS']"
+            template: PLUGINS
             params:
-              PLUGINS:
-                list_join:
-                - "','"
-                - {get_param: NeutronServicePlugins}
+              PLUGINS: {get_param: NeutronServicePlugins}
         neutron_type_drivers:
           str_replace:
-            template: "['DRIVERS']"
+            template: DRIVERS
             params:
-              DRIVERS:
-                list_join:
-                - "','"
-                - {get_param: NeutronTypeDrivers}
+              DRIVERS: {get_param: NeutronTypeDrivers}
         neutron_enable_dhcp_agent: {get_param: NeutronEnableDHCPAgent}
         neutron_enable_l3_agent: {get_param: NeutronEnableL3Agent}
         neutron_enable_metadata_agent: {get_param: NeutronEnableMetadataAgent}
         neutron_enable_ovs_agent: {get_param: NeutronEnableOVSAgent}
-        neutron_mechanism_drivers: {get_param: NeutronMechanismDrivers}
+        neutron_mechanism_drivers:
+          str_replace:
+            template: MECHANISMS
+            params:
+              MECHANISMS: {get_param: NeutronMechanismDrivers}
         neutron_allow_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
         neutron_l3_ha: {get_param: NeutronL3HA}
         neutron_dhcp_agents_per_network: {get_param: NeutronDhcpAgentsPerNetwork}
         neutron_network_vlan_ranges:
           str_replace:
-            template: "['RANGES']"
+            template: RANGES
+            params:
+              RANGES: {get_param: NeutronNetworkVLANRanges}
+        neutron_bridge_mappings:
+          str_replace:
+            template: MAPPINGS
             params:
-              RANGES:
-                list_join:
-                - "','"
-                - {get_param: NeutronNetworkVLANRanges}
-        neutron_bridge_mappings: {get_param: NeutronBridgeMappings}
+              MAPPINGS: {get_param: NeutronBridgeMappings}
         neutron_external_network_bridge: {get_param: NeutronExternalNetworkBridge}
         neutron_public_interface: {get_param: NeutronPublicInterface}
         neutron_public_interface_raw_device: {get_param: NeutronPublicInterfaceRawDevice}
         neutron_public_interface_default_route: {get_param: NeutronPublicInterfaceDefaultRoute}
         neutron_public_interface_tag: {get_param: NeutronPublicInterfaceTag}
-        neutron_tenant_network_type: {get_param: NeutronNetworkType}
-        neutron_tunnel_types: {get_param: NeutronTunnelTypes}
         neutron_tunnel_id_ranges:
           str_replace:
-            template: "['RANGES']"
+            template: RANGES
             params:
-              RANGES:
-                list_join:
-                - "','"
-                - {get_param: NeutronTunnelIdRanges}
+              RANGES: {get_param: NeutronTunnelIdRanges}
         neutron_vni_ranges:
           str_replace:
-            template: "['RANGES']"
+            template: RANGES
+            params:
+              RANGES: {get_param: NeutronVniRanges}
+        neutron_tenant_network_types:
+          str_replace:
+            template: TYPES
             params:
-              RANGES:
-                list_join:
-                - "','"
-                - {get_param: NeutronVniRanges}
+              TYPES: {get_param: NeutronNetworkType}
+        neutron_tunnel_types:
+          str_replace:
+            template: TYPES
+            params:
+              TYPES: {get_param: NeutronTunnelTypes}
+        neutron_plugin_extensions:
+          str_replace:
+            template: PLUGIN_EXTENSIONS
+            params:
+              PLUGIN_EXTENSIONS: {get_param: NeutronPluginExtensions}
+        neutron_agent_extensions:
+          str_replace:
+            template: AGENT_EXTENSIONS
+            params:
+              AGENT_EXTENSIONS: {get_param: NeutronAgentExtensions}
         neutron_password: {get_param: NeutronPassword}
         neutron_dnsmasq_options: {get_param: NeutronDnsmasqOptions}
         neutron_dsn:
           list_join:
             - ''
-            - - 'mysql://neutron:'
+            - - 'mysql+pymysql://neutron:'
               - {get_param: NeutronPassword}
               - '@'
               - {get_param: MysqlVirtualIP}
@@ -964,7 +1073,7 @@ resources:
         neutron_internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] }
         neutron_public_url: { get_param: [ EndpointMap, NeutronPublic, uri ] }
         neutron_admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] }
-        neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri ] }
+        neutron_admin_auth_url: { get_param: [ EndpointMap, KeystoneAdmin, uri_no_suffix ] }
         nova_internal_url: { get_param: [ EndpointMap, NovaInternal, uri ] }
         ceilometer_backend: {get_param: CeilometerBackend}
         ceilometer_metering_secret: {get_param: CeilometerMeteringSecret}
@@ -978,18 +1087,19 @@ resources:
         ceilometer_dsn:
           list_join:
             - ''
-            - - 'mysql://ceilometer:'
+            - - 'mysql+pymysql://ceilometer:'
               - {get_param: CeilometerPassword}
               - '@'
               - {get_param: MysqlVirtualIP}
               - '/ceilometer'
         snmpd_readonly_user_name: {get_param: SnmpdReadonlyUserName}
         snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
+        nova_enable_db_purge: {get_param: NovaEnableDBPurge}
         nova_password: {get_param: NovaPassword}
         nova_dsn:
           list_join:
             - ''
-            - - 'mysql://nova:'
+            - - 'mysql+pymysql://nova:'
               - {get_param: NovaPassword}
               - '@'
               - {get_param: MysqlVirtualIP}
@@ -1012,6 +1122,7 @@ resources:
             params:
               LIMIT: {get_param: RabbitFDLimit}
         ntp_servers: {get_param: NtpServer}
+        timezone: {get_param: TimeZone}
         control_virtual_interface: {get_param: ControlVirtualInterface}
         public_virtual_interface: {get_param: PublicVirtualInterface}
         swift_hash_suffix: {get_param: SwiftHashSuffix}
@@ -1022,6 +1133,15 @@ resources:
         swift_mount_check: {get_param: SwiftMountCheck}
         enable_package_install: {get_param: EnablePackageInstall}
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+        sahara_password: {get_param: SaharaPassword}
+        sahara_dsn:
+          list_join:
+            - ''
+            - - 'mysql+pymysql://sahara:'
+              - {get_param: SaharaPassword}
+              - '@'
+              - {get_param: MysqlVirtualIP}
+              - '/sahara'
         swift_proxy_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftProxyNetwork]}]}
         swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
         cinder_iscsi_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CinderIscsiNetwork]}]}
@@ -1044,6 +1164,7 @@ resources:
         rabbitmq_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RabbitMqNetwork]}]}
         redis_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, RedisNetwork]}]}
         redis_vip: {get_param: RedisVirtualIP}
+        sahara_api_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SaharaApiNetwork]}]}
         memcached_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MemcachedNetwork]}]}
         mysql_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, MysqlNetwork]}]}
         mysql_virtual_ip: {get_param: MysqlVirtualIP}
@@ -1074,11 +1195,14 @@ resources:
             - vip_data # provided by vip-config
             - '"%{::osfamily}"'
             - common
+            - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre
             - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
+            - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre
             - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
             - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
             - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
             - neutron_nuage_data # Optionally provided by ControllerExtraConfigPre
+            - midonet_data #Optionally provided by AllNodesExtraConfig
           datafiles:
             controller_extraconfig:
               mapped_data: {get_param: ControllerExtraConfig}
@@ -1114,6 +1238,7 @@ resources:
                 swift::storage::all::storage_local_net_ip: {get_input: swift_management_network}
                 swift::swift_hash_suffix: {get_input: swift_hash_suffix}
                 swift::proxy::authtoken::admin_password: {get_input: swift_password}
+                swift::proxy::workers: {get_input: swift_workers}
                 tripleo::ringbuilder::part_power: {get_input: swift_part_power}
                 tripleo::ringbuilder::replicas: {get_input: swift_replicas}
                 tripleo::ringbuilder::min_part_hours: {get_input: swift_min_part_hours}
@@ -1124,6 +1249,7 @@ resources:
                 tripleo::ringbuilder::build_ring: True
 
                 # Cinder
+                cinder_enable_db_purge: {get_input: cinder_enable_db_purge}
                 cinder_enable_nfs_backend: {get_input: cinder_enable_nfs_backend}
                 cinder_enable_rbd_backend: {get_input: cinder_enable_rbd_backend}
                 cinder_nfs_mount_options: {get_input: cinder_nfs_mount_options}
@@ -1154,6 +1280,7 @@ resources:
                 glance::api::registry_host: {get_input: glance_registry_host}
                 glance::api::keystone_password: {get_input: glance_password}
                 glance::api::debug: {get_input: debug}
+                glance::api::workers: {get_input: glance_workers}
                 glance_notifier_strategy: {get_input: glance_notifier_strategy}
                 glance_log_file: {get_input: glance_log_file}
                 glance_log_file: {get_input: glance_log_file}
@@ -1165,6 +1292,7 @@ resources:
                 glance::registry::identity_uri: {get_input: keystone_identity_uri}
                 glance::registry::debug: {get_input: debug}
                 glance::backend::swift::swift_store_auth_address: {get_input: keystone_auth_uri}
+                glance::registry::workers: {get_input: glance_workers}
                 glance::backend::swift::swift_store_user: service:glance
                 glance::backend::swift::swift_store_key: {get_input: glance_password}
                 glance_backend: {get_input: glance_backend}
@@ -1189,8 +1317,11 @@ resources:
                 heat::identity_uri: {get_input: keystone_identity_uri}
                 heat::keystone_password: {get_input: heat_password}
                 heat::api::bind_host: {get_input: heat_api_network}
+                heat::api::workers: {get_input: heat_workers}
                 heat::api_cloudwatch::bind_host: {get_input: heat_api_network}
+                heat::api_cloudwatch::workers: {get_input: heat_workers}
                 heat::api_cfn::bind_host: {get_input: heat_api_network}
+                heat::api_cfn::workers: {get_input: heat_workers}
                 heat::database_connection: {get_input: heat_dsn}
                 heat::debug: {get_input: debug}
                 heat::db::mysql::password: {get_input: heat_password}
@@ -1219,6 +1350,10 @@ resources:
                 keystone::endpoint::internal_url: {get_input: keystone_internal_url}
                 keystone::endpoint::admin_url: {get_input: keystone_identity_uri}
                 keystone::endpoint::region: {get_input: keystone_region}
+                keystone::admin_workers: {get_input: keystone_workers}
+                keystone::public_workers: {get_input: keystone_workers}
+                keystone_enable_db_purge: {get_input: keystone_enable_db_purge}
+
                 # MongoDB
                 mongodb::server::bind_ip: {get_input: mongo_db_network}
                 mongodb::server::nojournal: {get_input: mongodb_no_journal}
@@ -1244,14 +1379,16 @@ resources:
                 neutron::server::auth_uri: {get_input: keystone_auth_uri}
                 neutron::server::identity_uri: {get_input: keystone_identity_uri}
                 neutron::server::database_connection: {get_input: neutron_dsn}
+                neutron::server::api_workers: {get_input: neutron_workers}
                 neutron::agents::l3::external_network_bridge: {get_input: neutron_external_network_bridge}
                 neutron::agents::ml2::ovs::enable_tunneling: {get_input: neutron_enable_tunneling}
                 neutron::agents::ml2::ovs::l2_population: {get_input: neutron_enable_l2pop}
                 neutron::agents::dhcp::enable_isolated_metadata: {get_input: neutron_enable_isolated_metadata}
                 neutron::agents::ml2::ovs::local_ip: {get_input: neutron_local_ip}
-                neutron_flat_networks: {get_input: neutron_flat_networks}
+                neutron::plugins::ml2::flat_networks: {get_input: neutron_flat_networks}
                 neutron::agents::metadata::shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
                 neutron::agents::metadata::metadata_ip: {get_input: neutron_api_network}
+                neutron::agents::metadata::metadata_workers: {get_input: neutron_workers}
                 neutron_agent_mode: {get_input: neutron_agent_mode}
                 neutron_router_distributed: {get_input: neutron_router_distributed}
                 neutron::core_plugin: {get_input: neutron_core_plugin}
@@ -1261,20 +1398,22 @@ resources:
                 neutron::enable_metadata_agent: {get_input: neutron_enable_metadata_agent}
                 neutron::enable_ovs_agent: {get_input: neutron_enable_ovs_agent}
                 neutron::plugins::ml2::type_drivers: {get_input: neutron_type_drivers}
-                neutron_mechanism_drivers: {get_input: neutron_mechanism_drivers}
+                neutron::plugins::ml2::mechanism_drivers: {get_input: neutron_mechanism_drivers}
+                neutron::plugins::ml2::extension_drivers: {get_input: neutron_plugin_extensions}
                 neutron::server::allow_automatic_l3agent_failover: {get_input: neutron_allow_l3agent_failover}
                 neutron::server::l3_ha: {get_input: neutron_l3_ha}
                 neutron::dhcp_agents_per_network: {get_input: neutron_dhcp_agents_per_network}
                 neutron::plugins::ml2::network_vlan_ranges: {get_input: neutron_network_vlan_ranges}
                 neutron::plugins::ml2::tunnel_id_ranges: {get_input: neutron_tunnel_id_ranges}
                 neutron::plugins::ml2::vni_ranges: {get_input: neutron_vni_ranges}
-                neutron_bridge_mappings: {get_input: neutron_bridge_mappings}
+                neutron::agents::ml2::ovs::bridge_mappings: {get_input: neutron_bridge_mappings}
                 neutron_public_interface: {get_input: neutron_public_interface}
                 neutron_public_interface_raw_device: {get_input: neutron_public_interface_raw_device}
                 neutron_public_interface_default_route: {get_input: neutron_public_interface_default_route}
                 neutron_public_interface_tag: {get_input: neutron_public_interface_tag}
-                neutron_tenant_network_type: {get_input: neutron_tenant_network_type}
-                neutron_tunnel_types: {get_input: neutron_tunnel_types}
+                neutron::plugins::ml2::tenant_network_types: {get_input: neutron_tenant_network_types}
+                neutron::agents::ml2::ovs::tunnel_types: {get_input: neutron_tunnel_types}
+                neutron::agents::ml2::ovs::extensions: {get_input: neutron_agent_extensions}
                 neutron::server::auth_password: {get_input: neutron_password}
                 neutron::agents::metadata::auth_password: {get_input: neutron_password}
                 neutron_dnsmasq_options: {get_input: neutron_dnsmasq_options}
@@ -1289,6 +1428,7 @@ resources:
                 neutron::server::notifications::nova_url: {get_input: nova_internal_url}
                 neutron::server::notifications::auth_url: {get_input: neutron_admin_auth_url}
                 neutron::server::notifications::tenant_name: 'service'
+                neutron::server::notifications::project_name: 'service'
                 neutron::server::notifications::password: {get_input: nova_password}
 
                 # Ceilometer
@@ -1322,6 +1462,9 @@ resources:
                 nova::api::api_bind_address: {get_input: nova_api_network}
                 nova::api::metadata_listen: {get_input: nova_metadata_network}
                 nova::api::admin_password: {get_input: nova_password}
+                nova::api::osapi_compute_workers: {get_input: nova_workers}
+                nova::api::ec2_workers: {get_input: nova_workers}
+                nova::api::metadata_workers: {get_input: nova_workers}
                 nova::database_connection: {get_input: nova_dsn}
                 nova::glance_api_servers: {get_input: glance_api_servers}
                 nova::api::neutron_metadata_proxy_shared_secret: {get_input: neutron_metadata_proxy_shared_secret}
@@ -1331,6 +1474,7 @@ resources:
                 nova::network::neutron::neutron_admin_auth_url: {get_input: neutron_admin_auth_url}
                 nova::vncproxy::host: {get_input: nova_api_network}
                 nova::db::mysql::password: {get_input: nova_password}
+                nova_enable_db_purge: {get_input: nova_enable_db_purge}
 
                 # Horizon
                 apache::ip: {get_input: horizon_network}
@@ -1340,6 +1484,29 @@ resources:
                 horizon::bind_address: {get_input: horizon_network}
                 horizon::keystone_url: {get_input: keystone_auth_uri}
 
+                # Sahara
+                sahara::host: {get_input: sahara_api_network}
+                sahara::plugins:
+                  - cdh
+                  - hdp
+                  - mapr
+                  - vanilla
+                  - spark
+                  - storm
+                sahara::admin_password: {get_input: sahara_password}
+                sahara::auth_uri: {get_input: keystone_auth_uri}
+                sahara::admin_user: sahara
+                sahara::identity_uri: {get_input: keystone_identity_uri}
+                sahara::use_neutron: true
+                sahara::database_connection: {get_input: sahara_dsn}
+                sahara::debug: {get_input: debug}
+                sahara::rpc_backend: rabbit
+                sahara::rabbit_userid: {get_input: rabbit_username}
+                sahara::rabbit_password: {get_input: rabbit_password}
+                sahara::rabbit_use_ssl: {get_input: rabbit_client_use_ssl}
+                sahara::rabbit_port: {get_input: rabbit_client_port}
+                sahara::db::mysql::password: {get_input: sahara_password}
+
                 # Rabbit
                 rabbitmq::node_ip_address: {get_input: rabbitmq_network}
                 rabbitmq::erlang_cookie: {get_input: rabbit_cookie}
@@ -1356,17 +1523,13 @@ resources:
                 memcached::listen_ip: {get_input: memcached_network}
                 neutron_public_interface_ip: {get_input: neutron_public_interface_ip}
                 ntp::servers: {get_input: ntp_servers}
+                timezone::timezone: {get_input: timezone}
                 control_virtual_interface: {get_input: control_virtual_interface}
                 public_virtual_interface: {get_input: public_virtual_interface}
                 tripleo::loadbalancer::control_virtual_interface: {get_input: control_virtual_interface}
                 tripleo::loadbalancer::public_virtual_interface: {get_input: public_virtual_interface}
                 tripleo::loadbalancer::haproxy_log_address: {get_input: haproxy_log_address}
-                # NOTE(jaosorior): The service certificate configuration for
-                # HAProxy was left commented because to properly use this, we
-                # need to be able to set up the keystone endpoints. And
-                # currently that is not possible, but is being addressed by
-                # other commits.  A subsequent commit will uncomment this.
-                #tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
+                tripleo::loadbalancer::service_certificate: {get_attr: [NodeTLSData, deployed_ssl_certificate_path]}
                 tripleo::packages::enable_install: {get_input: enable_package_install}
                 tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
 
@@ -1391,6 +1554,7 @@ resources:
   UpdateDeployment:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: UpdateDeployment
       config: {get_resource: UpdateConfig}
       server: {get_resource: Controller}
       input_values:
@@ -1416,6 +1580,9 @@ outputs:
   tenant_ip_address:
     description: IP address of the server in the tenant network
     value: {get_attr: [TenantPort, ip_address]}
+  management_ip_address:
+    description: IP address of the server in the management network
+    value: {get_attr: [ManagementPort, ip_address]}
   hostname:
     description: Hostname of the server
     value: {get_attr: [Controller, name]}
@@ -1431,12 +1598,11 @@ outputs:
       Server's IP address and hostname in the /etc/hosts format
     value:
       str_replace:
-        template: IP HOST.DOMAIN HOST CLOUDNAME
+        template: IP HOST.DOMAIN HOST
         params:
           IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
           DOMAIN: {get_param: CloudDomain}
           HOST: {get_attr: [Controller, name]}
-          CLOUDNAME: {get_param: CloudName}
   nova_server_resource:
     description: Heat resource handle for the Nova compute server
     value:
diff --git a/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml b/puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
new file mode 100644 (file)
index 0000000..26ce713
--- /dev/null
@@ -0,0 +1,119 @@
+heat_template_version: 2015-10-15
+
+description: Configure hieradata for all MidoNet nodes
+
+parameters:
+  # Parameters passed from the parent template
+  controller_servers:
+    type: json
+  compute_servers:
+    type: json
+  blockstorage_servers:
+    type: json
+  objectstorage_servers:
+    type: json
+  cephstorage_servers:
+    type: json
+
+  EnableZookeeperOnController:
+    label: Enable Zookeeper On Controller
+    description: 'Whether enable Zookeeper cluster on Controller'
+    type: boolean
+    default: false
+  EnableCassandraOnController:
+    label: Enable Cassandra On Controller
+    description: 'Whether enable Cassandra cluster on Controller'
+    type: boolean
+    default: false
+  CassandraStoragePort:
+    label: Cassandra Storage Port
+    description: 'The Cassandra port for inter-node communication'
+    type: string
+    default: '7000'
+  CassandraSslStoragePort:
+    label: Cassandra SSL Storage Port
+    description: 'The SSL port for encrypted communication. Unused unless enabled in encryption_options'
+    type: string
+    default: '7001'
+  CassandraClientPort:
+    label: Cassandra Client Port
+    description: 'Native Transport Port'
+    type: string
+    default: '9042'
+  CassandraClientPortThrift:
+    label: Cassandra Client Thrift Port
+    description: 'The port for the Thrift RPC service, which is used for client connections'
+    type: string
+    default: '9160'
+  TunnelZoneName:
+    label: Name of the Tunnelzone
+    description: 'Name of the tunnel zone used to tunnel packages'
+    type: string
+    default: 'tunnelzone_tripleo'
+  TunnelZoneType:
+    label: Type of the Tunnel
+    description: 'Type of the tunnels on the overlay. Choose between `gre` and `vxlan`'
+    type: string
+    default: 'vxlan'
+
+resources:
+
+  NetworkMidoNetConfig:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        hiera:
+          datafiles:
+            midonet_data:
+              mapped_data:
+                enable_zookeeper_on_controller: {get_param: EnableZookeeperOnController}
+                enable_cassandra_on_controller: {get_param: EnableCassandraOnController}
+                midonet_tunnelzone_name: {get_param: TunnelZoneName}
+                midonet_tunnelzone_type: {get_param: TunnelZoneType}
+                midonet_libvirt_qemu_data: |
+                    user = "root"
+                    group = "root"
+                    cgroup_device_acl = [
+                        "/dev/null", "/dev/full", "/dev/zero",
+                        "/dev/random", "/dev/urandom",
+                        "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+                        "/dev/rtc","/dev/hpet", "/dev/vfio/vfio",
+                        "/dev/net/tun"
+                    ]
+                tripleo::cluster::cassandra::storage_port: {get_param: CassandraStoragePort}
+                tripleo::cluster::cassandra::ssl_storage_port: {get_param: CassandraSslStoragePort}
+                tripleo::cluster::cassandra::client_port: {get_param: CassandraClientPort}
+                tripleo::cluster::cassandra::client_port_thrift: {get_param: CassandraClientPortThrift}
+                tripleo::loadbalancer::midonet_api: true
+                # Missed Neutron Puppet data
+                neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.MidonetInterfaceDriver'
+                neutron::agents::dhcp::dhcp_driver: 'midonet.neutron.agent.midonet_driver.DhcpNoOpDriver'
+                neutron::plugins::midonet::midonet_api_port: 8081
+                neutron::params::midonet_server_package: 'python-networking-midonet'
+
+                # Make sure the l3 agent does not run
+                l3_agent_service: false
+                neutron::agents::l3::manage_service: false
+                neutron::agents::l3::enabled: false
+
+
+  NetworkMidonetDeploymentControllers:
+    type: OS::Heat::StructuredDeploymentGroup
+    properties:
+      config: {get_resource: NetworkMidoNetConfig}
+      servers: {get_param: controller_servers}
+
+  NetworkMidonetDeploymentComputes:
+    type: OS::Heat::StructuredDeploymentGroup
+    properties:
+      config: {get_resource: NetworkMidoNetConfig}
+      servers: {get_param: compute_servers}
+
+outputs:
+  config_identifier:
+    value:
+      list_join:
+        - ' '
+        - - {get_attr: [NetworkMidonetDeploymentControllers, deploy_stdouts]}
+          - {get_attr: [NetworkMidonetDeploymentComputes, deploy_stdouts]}
index 2413f5a..655fd0f 100644 (file)
@@ -131,6 +131,7 @@ resources:
   NetworkCiscoDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: NetworkCiscoDeployment
       config: {get_resource: NetworkCiscoConfig}
       servers:  {get_param: controller_servers}
       input_values:
@@ -178,6 +179,7 @@ resources:
   CollectMacDeploymentsController:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsController
       servers:  {get_param: controller_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -185,6 +187,7 @@ resources:
   CollectMacDeploymentsCompute:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsCompute
       servers:  {get_param: compute_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -192,6 +195,7 @@ resources:
   CollectMacDeploymentsBlockStorage:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsBlockStorage
       servers:  {get_param: blockstorage_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -199,6 +203,7 @@ resources:
   CollectMacDeploymentsObjectStorage:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsObjectStorage
       servers:  {get_param: objectstorage_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -206,6 +211,7 @@ resources:
   CollectMacDeploymentsCephStorage:
     type: OS::Heat::SoftwareDeployments
     properties:
+      name: CollectMacDeploymentsCephStorage
       servers:  {get_param: cephstorage_servers}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
@@ -280,6 +286,7 @@ resources:
   MappingToNexusDeploymentsController:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: MappingToNexusDeploymentsController
       server:  {get_param: [controller_servers, '0']}
       config: {get_resource: MappingToNexusConfig}
       input_values:
@@ -323,6 +330,7 @@ resources:
     type: OS::Heat::SoftwareDeployment
     depends_on: MappingToNexusDeploymentsController
     properties:
+      name: MappingToUCSMDeploymentsController
       server:  {get_param: [controller_servers, '0']}
       config: {get_resource: MappingToUCSMConfig}
       input_values:
index 96368e3..5561c74 100644 (file)
@@ -70,6 +70,7 @@ resources:
   NovaNuageDeployment:
     type: OS::Heat::StructuredDeployment
     properties:
+      name: NovaNuageDeployment
       config: {get_resource: NovaNuageConfig}
       server: {get_param: server}
       input_values:
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
new file mode 100644 (file)
index 0000000..905f196
--- /dev/null
@@ -0,0 +1,88 @@
+heat_template_version: 2015-04-30
+
+description: Configure hieradata for Cinder Dell Storage Center configuration
+
+parameters:
+  server:
+    description: ID of the controller node to apply this config to
+    type: string
+
+  # Config specific parameters, to be provided via parameter_defaults
+  CinderEnableDellScBackend:
+    type: boolean
+    default: true
+  CinderDellScBackendName:
+    type: string
+    default: 'tripleo_dellsc'
+  CinderDellScSanIp:
+    type: string
+  CinderDellScSanLogin:
+    type: string
+    default: 'Admin'
+  CinderDellScSanPassword:
+    type: string
+    hidden: true
+  CinderDellScSsn:
+    type: string
+    default: '64702'
+  CinderDellScIscsiIpAddress:
+    type: string
+    default: ''
+  CinderDellScIscsiPort:
+    type: string
+    default: '3260'
+  CinderDellScApiPort:
+    type: string
+    default: '3033'
+  CinderDellScServerFolder:
+    type: string
+    default: 'dellsc_server'
+  CinderDellScVolumeFolder:
+    type: string
+    default: 'dellsc_volume'
+
+resources:
+  CinderDellScConfig:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        hiera:
+          datafiles:
+            cinder_dellsc_data:
+              mapped_data:
+                cinder_enable_dellsc_backend: {get_input: EnableDellScBackend}
+                cinder::backend::dellsc_iscsi::volume_backend_name: {get_input: DellScBackendName}
+                cinder::backend::dellsc_iscsi::san_ip: {get_input: DellScSanIp}
+                cinder::backend::dellsc_iscsi::san_login: {get_input: DellScSanLogin}
+                cinder::backend::dellsc_iscsi::san_password: {get_input: DellScSanPassword}
+                cinder::backend::dellsc_iscsi::dell_sc_ssn: {get_input: DellScSsn}
+                cinder::backend::dellsc_iscsi::iscsi_ip_address: {get_input: DellScIscsiIpAddress}
+                cinder::backend::dellsc_iscsi::iscsi_port: {get_input: DellScIscsiPort}
+                cinder::backend::dellsc_iscsi::dell_sc_api_port: {get_input: DellScApiPort}
+                cinder::backend::dellsc_iscsi::dell_sc_server_folder: {get_input: DellScServerFolder}
+                cinder::backend::dellsc_iscsi::dell_sc_volume_folder: {get_input: DellScVolumeFolder}
+
+  CinderDellScDeployment:
+    type: OS::Heat::StructuredDeployment
+    properties:
+      name: CinderDellScDeployment
+      config: {get_resource: CinderDellScConfig}
+      server: {get_param: server}
+      input_values:
+        EnableDellScBackend: {get_param: CinderEnableDellScBackend}
+        DellScBackendName: {get_param: CinderDellScBackendName}
+        DellScSanIp: {get_param: CinderDellScSanIp}
+        DellScSanLogin: {get_param: CinderDellScSanLogin}
+        DellScSanPassword: {get_param: CinderDellScSanPassword}
+        DellScSsn: {get_param: CinderDellScSsn}
+        DellScIscsiIpAddress: {get_param: CinderDellScIscsiIpAddress}
+        DellScIscsiPort: {get_param: CinderDellScIscsiPort}
+        DellScApiPort: {get_param: CinderDellScApiPort}
+        DellScServerFolder: {get_param: CinderDellScServerFolder}
+        DellScVolumeFolder: {get_param: CinderDellScVolumeFolder}
+
+outputs:
+  deploy_stdout:
+    description: Deployment reference, used to trigger puppet apply on changes
+    value: {get_attr: [CinderDellScDeployment, deploy_stdout]}
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
new file mode 100644 (file)
index 0000000..c73608f
--- /dev/null
@@ -0,0 +1,87 @@
+heat_template_version: 2015-04-30
+
+description: Configure hieradata for Cinder Eqlx configuration
+
+parameters:
+  server:
+    description: ID of the controller node to apply this config to
+    type: string
+
+  # Config specific parameters, to be provided via parameter_defaults
+  CinderEnableEqlxBackend:
+    type: boolean
+    default: true
+  CinderEqlxBackendName:
+    type: string
+    default: 'tripleo_eqlx'
+  CinderEqlxSanIp:
+    type: string
+  CinderEqlxSanLogin:
+    type: string
+  CinderEqlxSanPassword:
+    type: string
+    hidden: true
+  CinderEqlxSanThinProvision:
+    type: boolean
+    default: true
+  CinderEqlxGroupname:
+    type: string
+    default: 'group-0'
+  CinderEqlxPool:
+    type: string
+    default: 'default'
+  CinderEqlxChapLogin:
+    type: string
+    default: ''
+  CinderEqlxChapPassword:
+    type: string
+    default: ''
+  CinderEqlxUseChap:
+    type: boolean
+    default: false
+
+resources:
+  CinderEqlxConfig:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        hiera:
+          datafiles:
+            cinder_eqlx_data:
+              mapped_data:
+                cinder_enable_eqlx_backend: {get_input: EnableEqlxBackend}
+                cinder::backend::eqlx::volume_backend_name: {get_input: EqlxBackendName}
+                cinder::backend::eqlx::san_ip: {get_input: EqlxSanIp}
+                cinder::backend::eqlx::san_login: {get_input: EqlxSanLogin}
+                cinder::backend::eqlx::san_password: {get_input: EqlxSanPassword}
+                cinder::backend::eqlx::san_thin_provision: {get_input: EqlxSanThinProvision}
+                cinder::backend::eqlx::eqlx_group_name: {get_input: EqlxGroupname}
+                cinder::backend::eqlx::eqlx_pool: {get_input: EqlxPool}
+                cinder::backend::eqlx::eqlx_use_chap: {get_input: EqlxUseChap}
+                cinder::backend::eqlx::eqlx_chap_login: {get_input: EqlxChapLogin}
+                cinder::backend::eqlx::eqlx_chap_password: {get_input: EqlxChapPassword}
+
+  CinderEqlxDeployment:
+    type: OS::Heat::StructuredDeployment
+    properties:
+      name: CinderEqlxDeployment
+      config: {get_resource: CinderEqlxConfig}
+      server: {get_param: server}
+      input_values:
+        EnableEqlxBackend: {get_param: CinderEnableEqlxBackend}
+        EqlxBackendName: {get_param: CinderEqlxBackendName}
+        EqlxSanIp: {get_param: CinderEqlxSanIp}
+        EqlxSanLogin: {get_param: CinderEqlxSanLogin}
+        EqlxSanPassword: {get_param: CinderEqlxSanPassword}
+        EqlxSanThinProvision: {get_param: CinderEqlxSanThinProvision}
+        EqlxGroupname: {get_param: CinderEqlxGroupname}
+        EqlxPool: {get_param: CinderEqlxPool}
+        EqlxUseChap: {get_param: CinderEqlxUseChap}
+        EqlxChapLogin: {get_param: CinderEqlxChapLogin}
+        EqlxChapPassword: {get_param: CinderEqlxChapPassword}
+
+outputs:
+  deploy_stdout:
+    description: Deployment reference, used to trigger puppet apply on changes
+    value: {get_attr: [CinderEqlxDeployment, deploy_stdout]}
index 7ec2190..ab442f2 100644 (file)
@@ -114,6 +114,7 @@ resources:
   CinderNetappDeployment:
     type: OS::Heat::StructuredDeployment
     properties:
+      name: CinderNetappDeployment
       config: {get_resource: CinderNetappConfig}
       server: {get_param: server}
       input_values:
index bf06d25..1e65296 100644 (file)
@@ -56,6 +56,7 @@ resources:
   NeutronBigswitchDeployment:
     type: OS::Heat::StructuredDeployment
     properties:
+      name: NeutronBigswitchDeployment
       config: {get_resource: NeutronBigswitchConfig}
       server: {get_param: server}
       input_values:
index 6730ddf..cec885c 100644 (file)
@@ -142,6 +142,7 @@ resources:
   CiscoN1kvDeployment:
     type: OS::Heat::StructuredDeployment
     properties:
+      name: CiscoN1kvDeployment
       config: {get_resource: CiscoN1kvConfig}
       server: {get_param: server}
       input_values:
index 8378d2f..a4cfea0 100644 (file)
@@ -71,6 +71,7 @@ resources:
   NeutronNuageDeployment:
     type: OS::Heat::StructuredDeployment
     properties:
+      name: NeutronNuageDeployment
       config: {get_resource: NeutronNuageConfig}
       server: {get_param: server}
       input_values:
index 80c8ad6..e236e33 100644 (file)
@@ -45,6 +45,7 @@ resources:
   NodeSpecificDeployment:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: NodeSpecificDeployment
       config: {get_resource: NodeSpecificConfig}
       server: {get_param: server}
       input_values:
index 7e34f07..5a36e95 100644 (file)
@@ -53,6 +53,7 @@ resources:
   CADeployment:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: CADeployment
       config: {get_resource: CAConfig}
       server: {get_param: server}
       input_values:
index ce524ba..20bb373 100644 (file)
@@ -67,6 +67,7 @@ resources:
   ControllerTLSDeployment:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: ControllerTLSDeployment
       config: {get_resource: ControllerTLSConfig}
       server: {get_param: server}
       input_values:
index b0e6ae9..f8ef640 100644 (file)
@@ -38,11 +38,18 @@ cinder::api::keystone_tenant: 'service'
 swift::proxy::authtoken::admin_tenant_name: 'service'
 ceilometer::api::keystone_tenant: 'service'
 heat::keystone_tenant: 'service'
+sahara::admin_tenant_name: 'service'
 
 # keystone
 keystone::cron::token_flush::maxdelay: 3600
 keystone::roles::admin::service_tenant: 'service'
 keystone::roles::admin::admin_tenant: 'admin'
+keystone::cron::token_flush::destination: '/dev/null'
+keystone::config::keystone_config:
+  DEFAULT/secure_proxy_ssl_header:
+    value: 'HTTP_X_FORWARDED_PROTO'
+  ec2/driver:
+    value: 'keystone.contrib.ec2.backends.sql.Ec2'
 
 #swift
 swift::proxy::pipeline:
@@ -77,12 +84,15 @@ nova::notify_on_state_change: 'vm_and_task_state'
 nova::api::default_floating_pool: 'public'
 nova::api::osapi_v3: true
 nova::scheduler::filter::ram_allocation_ratio: '1.0'
+nova::cron::archive_deleted_rows::hour: '*/12'
+nova::cron::archive_deleted_rows::destination: '/dev/null'
 
 # ceilometer
 ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
 
 # cinder
 cinder::scheduler::scheduler_driver: cinder.scheduler.filter_scheduler.FilterScheduler
+cinder::cron::db_purge::destination: '/dev/null'
 
 # heat
 heat::engine::configure_delegated_roles: false
@@ -118,6 +128,7 @@ tripleo::loadbalancer::nova_metadata: true
 tripleo::loadbalancer::nova_novncproxy: true
 tripleo::loadbalancer::mysql: true
 tripleo::loadbalancer::redis: true
+tripleo::loadbalancer::sahara: true
 tripleo::loadbalancer::swift_proxy_server: true
 tripleo::loadbalancer::ceilometer: true
 tripleo::loadbalancer::heat_api: true
index 7e925d9..8957750 100644 (file)
@@ -53,3 +53,10 @@ ceilometer::db::mysql::dbname: ceilometer
 ceilometer::db::mysql::allowed_hosts:
   - '%'
   - "%{hiera('mysql_bind_host')}"
+
+sahara::db::mysql::user: sahara
+sahara::db::mysql::host: "%{hiera('mysql_virtual_ip')}"
+sahara::db::mysql::dbname: sahara
+sahara::db::mysql::allowed_hosts:
+  - '%'
+  - "%{hiera('mysql_bind_host')}"
index 7f8970c..7444155 100644 (file)
@@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
   include ::ntp
 }
 
+include ::timezone
+
 if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
   exec { 'set selinux to permissive on boot':
     command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
index f3a02eb..bb3575c 100644 (file)
@@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
   include ::ntp
 }
 
+include ::timezone
+
 file { ['/etc/libvirt/qemu/networks/autostart/default.xml',
         '/etc/libvirt/qemu/networks/default.xml']:
   ensure => absent,
@@ -68,11 +70,19 @@ if hiera('cinder_enable_nfs_backend', false) {
 }
 
 include ::nova::compute::libvirt
+if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+  file {'/etc/libvirt/qemu.conf':
+    ensure  => present,
+    content => hiera('midonet_libvirt_qemu_data')
+  }
+}
 include ::nova::network::neutron
 include ::neutron
 
 # If the value of core plugin is set to 'nuage',
 # include nuage agent,
+# If the value of core plugin is set to 'midonet',
+# include midonet agent,
 # else use the default value of 'ml2'
 if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
   include ::nuage::vrs
@@ -84,18 +94,24 @@ if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
     nova_metadata_ip    => hiera('nova_metadata_node_ips'),
     nova_auth_ip        => hiera('keystone_public_api_virtual_ip'),
   }
-} else {
-  class { '::neutron::plugins::ml2':
-    flat_networks        => split(hiera('neutron_flat_networks'), ','),
-    tenant_network_types => [hiera('neutron_tenant_network_type')],
-  }
+}
+elsif hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+  # TODO(devvesa) provide non-controller ips for these services
+  $zookeeper_node_ips = hiera('neutron_api_node_ips')
+  $cassandra_node_ips = hiera('neutron_api_node_ips')
 
-  class { '::neutron::agents::ml2::ovs':
-    bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
-    tunnel_types    => split(hiera('neutron_tunnel_types'), ','),
+  class {'::tripleo::network::midonet::agent':
+    zookeeper_servers => $zookeeper_node_ips,
+    cassandra_seeds   => $cassandra_node_ips
   }
+}
+else {
+
+  include ::neutron::plugins::ml2
+  include ::neutron::agents::ml2::ovs
 
-  if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
     class { '::neutron::agents::n1kv_vem':
       n1kv_source  => hiera('n1kv_vem_source', undef),
       n1kv_version => hiera('n1kv_vem_version', undef),
index 683c121..ea63b1a 100644 (file)
@@ -39,6 +39,8 @@ if hiera('step') >= 2 {
     include ::ntp
   }
 
+  include ::timezone
+
   # MongoDB
   if downcase(hiera('ceilometer_backend')) == 'mongodb' {
     include ::mongodb::globals
@@ -101,6 +103,7 @@ if hiera('step') >= 2 {
   include ::neutron::db::mysql
   include ::cinder::db::mysql
   include ::heat::db::mysql
+  include ::sahara::db::mysql
   if downcase(hiera('ceilometer_backend')) == 'mysql' {
     include ::ceilometer::db::mysql
   }
@@ -128,7 +131,7 @@ if hiera('step') >= 2 {
   # pre-install swift here so we can build rings
   include ::swift
 
-  $enable_ceph = hiera('ceph_storage_count', 0) > 0
+  $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
 
   if $enable_ceph {
     class { '::ceph::profile::params':
@@ -164,13 +167,12 @@ if hiera('step') >= 2 {
 if hiera('step') >= 3 {
 
   include ::keystone
+  include ::keystone::config
   include ::keystone::roles::admin
   include ::keystone::endpoint
 
   #TODO: need a cleanup-keystone-tokens.sh solution here
-  keystone_config {
-    'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
-  }
+
   file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
     ensure  => 'directory',
     owner   => 'keystone',
@@ -230,13 +232,61 @@ if hiera('step') >= 3 {
   include ::nova::scheduler
   include ::nova::scheduler::filter
 
-  include ::neutron
+  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+    # TODO(devvesa) provide non-controller ips for these services
+    $zookeeper_node_ips = hiera('neutron_api_node_ips')
+    $cassandra_node_ips = hiera('neutron_api_node_ips')
+
+    # Run zookeeper in the controller if configured
+    if hiera('enable_zookeeper_on_controller') {
+      class {'::tripleo::cluster::zookeeper':
+        zookeeper_server_ips => $zookeeper_node_ips,
+        zookeeper_client_ip  => $ipaddress,
+        zookeeper_hostnames  => hiera('controller_node_names')
+      }
+    }
+
+    # Run cassandra in the controller if configured
+    if hiera('enable_cassandra_on_controller') {
+      class {'::tripleo::cluster::cassandra':
+        cassandra_servers => $cassandra_node_ips,
+        cassandra_ip      => $ipaddress
+      }
+    }
+
+    class {'::tripleo::network::midonet::agent':
+      zookeeper_servers => $zookeeper_node_ips,
+      cassandra_seeds   => $cassandra_node_ips
+    }
+
+    class {'::tripleo::network::midonet::api':
+      zookeeper_servers    => $zookeeper_node_ips,
+      vip                  => $ipaddress,
+      keystone_ip          => $ipaddress,
+      keystone_admin_token => hiera('keystone::admin_token'),
+      bind_address         => $ipaddress,
+      admin_password       => hiera('admin_password')
+    }
+
+    # TODO: find a way to get an empty list from hiera
+    class {'::neutron':
+      service_plugins => []
+    }
+
+  }
+  else {
+
+    # ML2 plugin
+    include ::neutron
+  }
+
   include ::neutron::server
   include ::neutron::server::notifications
 
   # If the value of core plugin is set to 'nuage',
-  # include nuage core plugin,
-  # else use the default value of 'ml2'
+  # include nuage core plugin, and it does not
+  # need the l3, dhcp and metadata agents
   if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
     include ::neutron::plugins::nuage
   } else {
@@ -252,51 +302,57 @@ if hiera('step') >= 3 {
       require => Package['neutron'],
     }
 
-    class { '::neutron::plugins::ml2':
-      flat_networks        => split(hiera('neutron_flat_networks'), ','),
-      tenant_network_types => [hiera('neutron_tenant_network_type')],
-      mechanism_drivers    => [hiera('neutron_mechanism_drivers')],
-    }
-    class { '::neutron::agents::ml2::ovs':
-      bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
-      tunnel_types    => split(hiera('neutron_tunnel_types'), ','),
-    }
-    if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
-      include ::neutron::plugins::ml2::cisco::nexus1000v
+    # If the value of core plugin is set to 'midonet',
+    # skip all the ML2 configuration
+    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
 
-      class { '::neutron::agents::n1kv_vem':
-        n1kv_source  => hiera('n1kv_vem_source', undef),
-        n1kv_version => hiera('n1kv_vem_version', undef),
+      class {'::neutron::plugins::midonet':
+        midonet_api_ip    => $ipaddress,
+        keystone_tenant   => hiera('neutron::server::auth_tenant'),
+        keystone_password => hiera('neutron::server::auth_password')
       }
+    } else {
+
+      include ::neutron::plugins::ml2
+      include ::neutron::agents::ml2::ovs
+
+      if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+        include ::neutron::plugins::ml2::cisco::nexus1000v
 
-      class { '::n1k_vsm':
-        n1kv_source       => hiera('n1kv_vsm_source', undef),
-        n1kv_version      => hiera('n1kv_vsm_version', undef),
-        pacemaker_control => false,
+        class { '::neutron::agents::n1kv_vem':
+          n1kv_source  => hiera('n1kv_vem_source', undef),
+          n1kv_version => hiera('n1kv_vem_version', undef),
+        }
+
+        class { '::n1k_vsm':
+          n1kv_source       => hiera('n1kv_vsm_source', undef),
+          n1kv_version      => hiera('n1kv_vsm_version', undef),
+          pacemaker_control => false,
+        }
       }
-    }
 
-    if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
-      include ::neutron::plugins::ml2::cisco::ucsm
-    }
-    if 'cisco_nexus' in hiera('neutron_mechanism_drivers') {
-      include ::neutron::plugins::ml2::cisco::nexus
-      include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
-    }
+      if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+        include ::neutron::plugins::ml2::cisco::ucsm
+      }
+      if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
+        include ::neutron::plugins::ml2::cisco::nexus
+        include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
+      }
 
-    if hiera('neutron_enable_bigswitch_ml2', false) {
-      include ::neutron::plugins::ml2::bigswitch::restproxy
-    }
-    neutron_l3_agent_config {
-      'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
-    }
-    neutron_dhcp_agent_config {
-      'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
+      if hiera('neutron_enable_bigswitch_ml2', false) {
+        include ::neutron::plugins::ml2::bigswitch::restproxy
+      }
+      neutron_l3_agent_config {
+        'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
+      }
+      neutron_dhcp_agent_config {
+        'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
+      }
+      Service['neutron-server'] -> Service['neutron-ovs-agent-service']
     }
 
     Service['neutron-server'] -> Service['neutron-dhcp-service']
     Service['neutron-server'] -> Service['neutron-l3']
-    Service['neutron-server'] -> Service['neutron-ovs-agent-service']
     Service['neutron-server'] -> Service['neutron-metadata']
   }
 
@@ -345,6 +401,48 @@ if hiera('step') >= 3 {
     }
   }
 
+  if hiera('cinder_enable_eqlx_backend', false) {
+    $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
+
+    cinder_config {
+      "${cinder_eqlx_backend}/host": value => 'hostgroup';
+    }
+
+    # Keys must match those written by puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
+    cinder::backend::eqlx { $cinder_eqlx_backend :
+      volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
+      san_ip              => hiera('cinder::backend::eqlx::san_ip', undef),
+      san_login           => hiera('cinder::backend::eqlx::san_login', undef),
+      san_password        => hiera('cinder::backend::eqlx::san_password', undef),
+      san_thin_provision  => hiera('cinder::backend::eqlx::san_thin_provision', undef),
+      eqlx_group_name     => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
+      eqlx_pool           => hiera('cinder::backend::eqlx::eqlx_pool', undef),
+      eqlx_use_chap       => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
+      eqlx_chap_login     => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
+      eqlx_chap_password  => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
+    }
+  }
+
+  if hiera('cinder_enable_dellsc_backend', false) {
+    $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
+
+    cinder_config {
+      "${cinder_dellsc_backend}/host": value => 'hostgroup';
+    }
+
+    # Keys must match those written by puppet/extraconfig/pre_deploy/controller/cinder-dellsc.yaml
+    cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
+      volume_backend_name   => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
+      san_ip                => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
+      san_login             => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
+      san_password          => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
+      dell_sc_ssn           => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
+      iscsi_ip_address      => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
+      iscsi_port            => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
+      dell_sc_api_port      => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
+      dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
+      dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
+    }
+  }
+
   if hiera('cinder_enable_netapp_backend', false) {
     $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
 
@@ -398,7 +496,7 @@ if hiera('step') >= 3 {
     }
   }
 
-  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend])
+  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
   class { '::cinder::backends' :
     enabled_backends => $cinder_enabled_backends,
   }
@@ -466,8 +564,13 @@ if hiera('step') >= 3 {
   include ::heat::api_cloudwatch
   include ::heat::engine
 
+  # Sahara
+  include ::sahara
+  include ::sahara::service::api
+  include ::sahara::service::engine
+
   # Horizon
-  if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
     $_profile_support = 'cisco'
   } else {
     $_profile_support = 'None'
@@ -494,7 +597,19 @@ if hiera('step') >= 3 {
 } #END STEP 3
 
 if hiera('step') >= 4 {
-  include ::keystone::cron::token_flush
+  $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
+  $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
+  $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
+
+  if $keystone_enable_db_purge {
+    include ::keystone::cron::token_flush
+  }
+  if $nova_enable_db_purge {
+    include ::nova::cron::archive_deleted_rows
+  }
+  if $cinder_enable_db_purge {
+    include ::cinder::cron::db_purge
+  }
 } #END STEP 4
 
 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
index 6c8530f..f8d3fd7 100644 (file)
@@ -41,6 +41,8 @@ if hiera('step') >= 1 {
 
   create_resources(sysctl::value, hiera('sysctl_settings'), {})
 
+  include ::timezone
+
   if count(hiera('ntp::servers')) > 0 {
     include ::ntp
   }
@@ -78,11 +80,11 @@ if hiera('step') >= 1 {
     Class['tripleo::fencing'] -> Class['pacemaker::stonith']
   }
 
-  # FIXME(gfidente): sets 100secs as default start timeout op
+  # FIXME(gfidente): sets 200secs as default start timeout op
   # param; until we can use pcmk global defaults we'll still
   # need to add it to every resource which redefines op params
   Pacemaker::Resource::Service {
-    op_params => 'start timeout=100s stop timeout=100s',
+    op_params => 'start timeout=200s stop timeout=200s',
   }
 
   # Only configure RabbitMQ in this step, don't start it yet to
@@ -352,7 +354,7 @@ if hiera('step') >= 2 {
 
     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
       pacemaker::resource::service { $::mongodb::params::service_name :
-        op_params    => 'start timeout=120s stop timeout=100s',
+        op_params    => 'start timeout=370s stop timeout=200s',
         clone_params => true,
         require      => Class['::mongodb::server'],
       }
@@ -443,13 +445,17 @@ MYSQL_HOST=localhost\n",
         require => Exec['galera-ready'],
       }
     }
+
+    class { '::sahara::db::mysql':
+      require       => Exec['galera-ready'],
+    }
   }
 
   # pre-install swift here so we can build rings
   include ::swift
 
   # Ceph
-  $enable_ceph = hiera('ceph_storage_count', 0) > 0
+  $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
 
   if $enable_ceph {
     class { '::ceph::profile::params':
@@ -490,11 +496,10 @@ if hiera('step') >= 3 {
     manage_service => false,
     enabled        => false,
   }
+  include ::keystone::config
 
   #TODO: need a cleanup-keystone-tokens.sh solution here
-  keystone_config {
-    'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
-  }
+
   file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
     ensure  => 'directory',
     owner   => 'keystone',
@@ -592,8 +597,54 @@ if hiera('step') >= 3 {
   }
   include ::nova::network::neutron
 
-  # Neutron class definitions
-  include ::neutron
+  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+
+    # TODO(devvesa) provide non-controller ips for these services
+    $zookeeper_node_ips = hiera('neutron_api_node_ips')
+    $cassandra_node_ips = hiera('neutron_api_node_ips')
+
+    # Run zookeeper in the controller if configured
+    if hiera('enable_zookeeper_on_controller') {
+      class {'::tripleo::cluster::zookeeper':
+        zookeeper_server_ips => $zookeeper_node_ips,
+        zookeeper_client_ip  => $ipaddress,
+        zookeeper_hostnames  => hiera('controller_node_names')
+      }
+    }
+
+    # Run cassandra in the controller if configured
+    if hiera('enable_cassandra_on_controller') {
+      class {'::tripleo::cluster::cassandra':
+        cassandra_servers => $cassandra_node_ips,
+        cassandra_ip      => $ipaddress
+      }
+    }
+
+    class {'::tripleo::network::midonet::agent':
+      zookeeper_servers => $zookeeper_node_ips,
+      cassandra_seeds   => $cassandra_node_ips
+    }
+
+    class {'::tripleo::network::midonet::api':
+      zookeeper_servers    => hiera('neutron_api_node_ips'),
+      vip                  => $public_vip,
+      keystone_ip          => $public_vip,
+      keystone_admin_token => hiera('keystone::admin_token'),
+      bind_address         => $ipaddress,
+      admin_password       => hiera('admin_password')
+    }
+
+    # Configure Neutron
+    class {'::neutron':
+      service_plugins => []
+    }
+
+  }
+  else {
+    # Neutron class definitions
+    include ::neutron
+  }
+
   class { '::neutron::server' :
     sync_db        => $sync_db,
     manage_service => false,
@@ -603,6 +654,13 @@ if hiera('step') >= 3 {
   if  hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
     include ::neutron::plugins::nuage
   }
+  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+    class {'::neutron::plugins::midonet':
+      midonet_api_ip    => $public_vip,
+      keystone_tenant   => hiera('neutron::server::auth_tenant'),
+      keystone_password => hiera('neutron::server::auth_password')
+    }
+  }
   if hiera('neutron::enable_dhcp_agent',true) {
     class { '::neutron::agents::dhcp' :
       manage_service => false,
@@ -628,27 +686,20 @@ if hiera('step') >= 3 {
       enabled        => false,
     }
   }
-  if hiera('neutron::core_plugin') == 'ml2' {
-    class { '::neutron::plugins::ml2':
-      flat_networks        => split(hiera('neutron_flat_networks'), ','),
-      tenant_network_types => [hiera('neutron_tenant_network_type')],
-      mechanism_drivers    => [hiera('neutron_mechanism_drivers')],
-    }
-    class { '::neutron::agents::ml2::ovs':
-      manage_service  => false,
-      enabled         => false,
-      bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
-      tunnel_types    => split(hiera('neutron_tunnel_types'), ','),
-    }
+  include ::neutron::plugins::ml2
+  class { '::neutron::agents::ml2::ovs':
+    manage_service => false,
+    enabled        => false,
   }
-  if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
+
+  if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
     include ::neutron::plugins::ml2::cisco::ucsm
   }
-  if 'cisco_nexus' in hiera('neutron_mechanism_drivers') {
+  if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
     include ::neutron::plugins::ml2::cisco::nexus
     include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
   }
-  if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
     include ::neutron::plugins::ml2::cisco::nexus1000v
 
     class { '::neutron::agents::n1kv_vem':
@@ -727,6 +778,48 @@ if hiera('step') >= 3 {
     }
   }
 
+  if hiera('cinder_enable_eqlx_backend', false) {
+    $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
+
+    cinder_config {
+      "${cinder_eqlx_backend}/host": value => 'hostgroup';
+    }
+
+    cinder::backend::eqlx { $cinder_eqlx_backend :
+      volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
+      san_ip              => hiera('cinder::backend::eqlx::san_ip', undef),
+      san_login           => hiera('cinder::backend::eqlx::san_login', undef),
+      san_password        => hiera('cinder::backend::eqlx::san_password', undef),
+      san_thin_provision  => hiera('cinder::backend::eqlx::san_thin_provision', undef),
+      eqlx_group_name     => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
+      eqlx_pool           => hiera('cinder::backend::eqlx::eqlx_pool', undef),
+      eqlx_use_chap       => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
+      eqlx_chap_login     => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
+      eqlx_chap_password  => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
+    }
+  }
+
+  if hiera('cinder_enable_dellsc_backend', false) {
+    $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
+
+    cinder_config {
+      "${cinder_dellsc_backend}/host": value => 'hostgroup';
+    }
+
+    cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
+      volume_backend_name   => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
+      san_ip                => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
+      san_login             => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
+      san_password          => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
+      dell_sc_ssn           => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
+      iscsi_ip_address      => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
+      iscsi_port            => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
+      dell_sc_port          => hiera('cinder::backend::dellsc_iscsi::dell_sc_port', undef),
+      dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
+      dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
+    }
+  }
+
   if hiera('cinder_enable_netapp_backend', false) {
     $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
 
@@ -780,11 +873,23 @@ if hiera('step') >= 3 {
     }
   }
 
-  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend])
+  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
   class { '::cinder::backends' :
     enabled_backends => $cinder_enabled_backends,
   }
 
+  class { '::sahara':
+    sync_db => $sync_db,
+  }
+  class { '::sahara::service::api':
+    manage_service => false,
+    enabled        => false,
+  }
+  class { '::sahara::service::engine':
+    manage_service => false,
+    enabled        => false,
+  }
+
   # swift proxy
   class { '::swift::proxy' :
     manage_service => $non_pcmk_start,
@@ -896,7 +1001,7 @@ if hiera('step') >= 3 {
     # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
   }
   include ::apache::mod::status
-  if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
     $_profile_support = 'cisco'
   } else {
     $_profile_support = 'None'
@@ -922,7 +1027,19 @@ if hiera('step') >= 3 {
 } #END STEP 3
 
 if hiera('step') >= 4 {
-  include ::keystone::cron::token_flush
+  $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
+  $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
+  $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
+
+  if $keystone_enable_db_purge {
+    include ::keystone::cron::token_flush
+  }
+  if $nova_enable_db_purge {
+    include ::nova::cron::archive_deleted_rows
+  }
+  if $cinder_enable_db_purge {
+    include ::cinder::cron::db_purge
+  }
 
   if $pacemaker_master {
 
@@ -1025,6 +1142,24 @@ if hiera('step') >= 4 {
                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
     }
 
+    # Sahara
+    pacemaker::resource::service { $::sahara::params::api_service_name :
+      clone_params => 'interleave=true',
+      require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
+    }
+    pacemaker::resource::service { $::sahara::params::engine_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::keystone::params::service_name}-clone",
+      second_resource => "${::sahara::params::api_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
+                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
+    }
+
     # Glance
     pacemaker::resource::service { $::glance::params::registry_service_name :
       clone_params => 'interleave=true',
@@ -1060,15 +1195,32 @@ if hiera('step') >= 4 {
                   Pacemaker::Resource::Service[$::glance::params::api_service_name]],
     }
 
-    # Neutron
-    # NOTE(gfidente): Neutron will try to populate the database with some data
-    # as soon as neutron-server is started; to avoid races we want to make this
-    # happen only on one node, before normal Pacemaker initialization
-    # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
-    exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
-    pacemaker::resource::service { $::neutron::params::server_service:
-      clone_params => 'interleave=true',
-      require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
+    if hiera('step') == 4 {
+      # Neutron
+      # NOTE(gfidente): Neutron will try to populate the database with some data
+      # as soon as neutron-server is started; to avoid races we want to make this
+      # happen only on one node, before normal Pacemaker initialization
+      # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
+      # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
+      # will try to start the service while it's already started by Pacemaker
+      # It would result to a deployment failure since systemd would return 1 to Puppet
+      # and the overcloud would fail to deploy (6 would be returned).
+      # This conditional prevents from a race condition during the deployment.
+      # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
+      exec { 'neutron-server-systemd-start-sleep' :
+        command => 'systemctl start neutron-server && /usr/bin/sleep 5',
+        path    => '/usr/bin',
+        unless  => '/sbin/pcs resource show neutron-server',
+      } ->
+      pacemaker::resource::service { $::neutron::params::server_service:
+        clone_params => 'interleave=true',
+        require      => Pacemaker::Resource::Service[$::keystone::params::service_name]
+      }
+    } else {
+      pacemaker::resource::service { $::neutron::params::server_service:
+        clone_params => 'interleave=true',
+        require      => Pacemaker::Resource::Service[$::keystone::params::service_name]
+      }
     }
     if hiera('neutron::enable_l3_agent', true) {
       pacemaker::resource::service { $::neutron::params::l3_agent_service:
@@ -1085,6 +1237,11 @@ if hiera('step') >= 4 {
         clone_params => 'interleave=true',
       }
     }
+    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+      pacemaker::resource::service {'tomcat':
+        clone_params => 'interleave=true',
+      }
+    }
     if hiera('neutron::enable_metadata_agent', true) {
       pacemaker::resource::service { $::neutron::params::metadata_agent_service:
         clone_params => 'interleave=true',
@@ -1135,7 +1292,6 @@ if hiera('step') >= 4 {
       }
     }
 
-    #another chain keystone-->neutron-server-->ovs-agent-->dhcp-->l3
     pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
       constraint_type => 'order',
       first_resource  => "${::keystone::params::service_name}-clone",
@@ -1211,28 +1367,65 @@ if hiera('step') >= 4 {
                     Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
       }
     }
+    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
+      #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
+      pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
+        constraint_type => 'order',
+        first_resource  => "${::neutron::params::server_service}-clone",
+        second_resource => "${::neutron::params::dhcp_agent_service}-clone",
+        first_action    => 'start',
+        second_action   => 'start',
+        require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
+                            Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
+      }
+      pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
+        constraint_type => 'order',
+        first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
+        second_resource => "${::neutron::params::metadata_agent_service}-clone",
+        first_action    => 'start',
+        second_action   => 'start',
+        require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+                            Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
+      }
+      pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
+        constraint_type => 'order',
+        first_resource  => "${::neutron::params::metadata_agent_service}-clone",
+        second_resource => 'tomcat-clone',
+        first_action    => 'start',
+        second_action   => 'start',
+        require         => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
+                            Pacemaker::Resource::Service['tomcat']],
+      }
+      pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
+        source  => "${::neutron::params::metadata_agent_service}-clone",
+        target  => "${::neutron::params::dhcp_agent_service}-clone",
+        score   => 'INFINITY',
+        require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
+                    Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
+      }
+    }
 
     # Nova
     pacemaker::resource::service { $::nova::params::api_service_name :
       clone_params => 'interleave=true',
-      op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+      op_params    => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
     }
     pacemaker::resource::service { $::nova::params::conductor_service_name :
       clone_params => 'interleave=true',
-      op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+      op_params    => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
     }
     pacemaker::resource::service { $::nova::params::consoleauth_service_name :
       clone_params => 'interleave=true',
-      op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+      op_params    => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
       require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
     }
     pacemaker::resource::service { $::nova::params::vncproxy_service_name :
       clone_params => 'interleave=true',
-      op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+      op_params    => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
     }
     pacemaker::resource::service { $::nova::params::scheduler_service_name :
       clone_params => 'interleave=true',
-      op_params    => 'start timeout=100s stop timeout=100s monitor start-delay=10s',
+      op_params    => 'start timeout=200s stop timeout=200s monitor start-delay=10s',
     }
 
     pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
@@ -1503,7 +1696,7 @@ if hiera('step') >= 4 {
     }
 
     #VSM
-    if 'cisco_n1kv' in hiera('neutron_mechanism_drivers') {
+    if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
       pacemaker::resource::ocf { 'vsm-p' :
         ocf_agent_name  => 'heartbeat:VirtualDomain',
         resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
index 1eabddf..63ac396 100644 (file)
@@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
   include ::ntp
 }
 
+include ::timezone
+
 include ::swift
 class { '::swift::storage::all':
   mount_check => str2bool(hiera('swift_mount_check')),
index 2bdd8a9..5a69725 100644 (file)
@@ -22,6 +22,8 @@ if count(hiera('ntp::servers')) > 0 {
   include ::ntp
 }
 
+include ::timezone
+
 include ::cinder
 include ::cinder::config
 include ::cinder::glance
index d22f538..a55b395 100644 (file)
@@ -29,6 +29,7 @@ resources:
   StorageDeployment_Step1:
     type: OS::Heat::StructuredDeployments
     properties:
+      name: StorageDeployment_Step1
       servers:  {get_param: servers}
       config: {get_resource: StoragePuppetConfig}
       input_values:
@@ -49,6 +50,7 @@ resources:
     type: OS::Heat::StructuredDeployments
     depends_on: StorageDeployment_Step1
     properties:
+      name: StorageRingbuilderDeployment_Step2
       servers:  {get_param: servers}
       config: {get_resource: StorageRingbuilderPuppetConfig}
       input_values:
index 721dcba..142e47c 100644 (file)
@@ -7,7 +7,6 @@ parameters:
     constraints:
       - custom_constraint: nova.flavor
   HashSuffix:
-    default: unset
     description: A random string to be used as a salt when hashing to determine mappings
       in the ring.
     hidden: true
@@ -40,7 +39,6 @@ parameters:
     description: The user name for SNMPd with readonly rights running on all Overcloud nodes
     type: string
   SnmpdReadonlyUserPassword:
-    default: unset
     description: The user password for SNMPd with readonly rights running on all Overcloud nodes
     type: string
     hidden: true
@@ -63,6 +61,10 @@ parameters:
     description: Mapping of service_name -> network name. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  TimeZone:
+    default: 'UTC'
+    description: The timezone to be set on Ceph nodes.
+    type: string
   Hostname:
     type: string
     default: '' # Defaults to Heat created hostname
@@ -82,6 +84,13 @@ parameters:
     description: >
       Heat action when to apply network configuration changes
     default: ['CREATE']
+  SoftwareConfigTransport:
+    default: POLL_SERVER_CFN
+    description: |
+      How the server should receive the metadata required for software configuration.
+    type: string
+    constraints:
+    - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
   CloudDomain:
     default: ''
     type: string
@@ -112,6 +121,7 @@ resources:
       user_data_format: SOFTWARE_CONFIG
       user_data: {get_resource: UserData}
       name: {get_param: Hostname}
+      software_config_transport: {get_param: SoftwareConfigTransport}
       metadata: {get_param: ServerMetadata}
       scheduler_hints: {get_param: SchedulerHints}
 
@@ -135,6 +145,11 @@ resources:
   NodeUserData:
     type: OS::TripleO::NodeUserData
 
+  ExternalPort:
+    type: OS::TripleO::SwiftStorage::Ports::ExternalPort
+    properties:
+      ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+
   InternalApiPort:
     type: OS::TripleO::SwiftStorage::Ports::InternalApiPort
     properties:
@@ -150,25 +165,42 @@ resources:
     properties:
       ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
 
+  TenantPort:
+    type: OS::TripleO::SwiftStorage::Ports::TenantPort
+    properties:
+      ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+
+  ManagementPort:
+    type: OS::TripleO::SwiftStorage::Ports::ManagementPort
+    properties:
+      ControlPlaneIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+
   NetworkConfig:
     type: OS::TripleO::ObjectStorage::Net::SoftwareConfig
     properties:
       ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
       StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
 
   NetIpMap:
     type: OS::TripleO::Network::Ports::NetIpMap
     properties:
       ControlPlaneIp: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+      ExternalIp: {get_attr: [ExternalPort, ip_address]}
       InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
       StorageIp: {get_attr: [StoragePort, ip_address]}
       StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      TenantIp: {get_attr: [TenantPort, ip_address]}
+      ManagementIp: {get_attr: [ManagementPort, ip_address]}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
     properties:
+      name: NetworkDeployment
       config: {get_resource: NetworkConfig}
       server: {get_resource: SwiftStorage}
       actions: {get_param: NetworkDeploymentActions}
@@ -207,6 +239,7 @@ resources:
                 swift_mount_check: {get_input: swift_mount_check }
                 tripleo::ringbuilder::min_part_hours: { get_input: swift_min_part_hours }
                 ntp::servers: {get_input: ntp_servers}
+                timezone::timezone: {get_input: timezone}
                 # NOTE(dprince): build_ring support is currently not wired in.
                 # See: https://review.openstack.org/#/c/109225/
                 tripleo::ringbuilder::build_ring: True
@@ -220,6 +253,7 @@ resources:
     type: OS::Heat::StructuredDeployment
     depends_on: NetworkDeployment
     properties:
+      name: SwiftStorageHieraDeploy
       server: {get_resource: SwiftStorage}
       config: {get_resource: SwiftStorageHieraConfig}
       input_values:
@@ -232,6 +266,7 @@ resources:
         swift_part_power: {get_param: PartPower}
         swift_replicas: { get_param: Replicas}
         ntp_servers: {get_param: NtpServer}
+        timezone: {get_param: TimeZone}
         enable_package_install: {get_param: EnablePackageInstall}
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
         swift_management_network: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
@@ -283,6 +318,9 @@ outputs:
         template: 'r1z1-IP:%PORT%/d1'
         params:
           IP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, SwiftMgmtNetwork]}]}
+  external_ip_address:
+    description: IP address of the server in the external network
+    value: {get_attr: [ExternalPort, ip_address]}
   internal_api_ip_address:
     description: IP address of the server in the internal_api network
     value: {get_attr: [InternalApiPort, ip_address]}
@@ -292,6 +330,12 @@ outputs:
   storage_mgmt_ip_address:
     description: IP address of the server in the storage_mgmt network
     value: {get_attr: [StorageMgmtPort, ip_address]}
+  tenant_ip_address:
+    description: IP address of the server in the tenant network
+    value: {get_attr: [TenantPort, ip_address]}
+  management_ip_address:
+    description: IP address of the server in the management network
+    value: {get_attr: [ManagementPort, ip_address]}
   config_identifier:
     description: identifier which changes if the node configuration may need re-applying
     value:
index 1dec489..c49a104 100644 (file)
@@ -19,6 +19,7 @@ resources:
                 cinder_api_vip: {get_input: cinder_api_vip}
                 glance_api_vip: {get_input: glance_api_vip}
                 glance_registry_vip: {get_input: glance_registry_vip}
+                sahara_api_vip: {get_input: sahara_api_vip}
                 swift_proxy_vip: {get_input: swift_proxy_vip}
                 nova_api_vip: {get_input: nova_api_vip}
                 nova_metadata_vip: {get_input: nova_metadata_vip}
diff --git a/tox.ini b/tox.ini
index bc14bda..974971f 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -10,5 +10,5 @@ deps = -r{toxinidir}/requirements.txt
 [testenv:venv]
 commands = {posargs}
 
-[testenv:validate]
-commands = python ./tools/yaml-validate.py .
\ No newline at end of file
+[testenv:linters]
+commands = python ./tools/yaml-validate.py .