Merge "Add sample usage of StorageMgmt network for compute nodes"
author Jenkins <jenkins@review.openstack.org>
Fri, 7 Jul 2017 18:22:23 +0000 (18:22 +0000)
committer Gerrit Code Review <review@openstack.org>
Fri, 7 Jul 2017 18:22:23 +0000 (18:22 +0000)
184 files changed:
capabilities-map.yaml
ci/environments/README.rst [new file with mode: 0644]
ci/environments/ceph-min-osds.yaml [new file with mode: 0644]
ci/environments/multinode.yaml
ci/environments/scenario001-multinode-containers.yaml
ci/environments/scenario002-multinode-containers.yaml
ci/environments/scenario003-multinode-containers.yaml
ci/environments/scenario004-multinode-containers.yaml
deployed-server/deployed-server.yaml
docker/deploy-steps-playbook.yaml
docker/docker-puppet.py
docker/docker-steps.j2
docker/docker-toool
docker/firstboot/setup_docker_host.yaml
docker/services/aodh-api.yaml
docker/services/aodh-evaluator.yaml
docker/services/aodh-listener.yaml
docker/services/aodh-notifier.yaml
docker/services/ceilometer-agent-central.yaml
docker/services/ceilometer-agent-compute.yaml
docker/services/ceilometer-agent-ipmi.yaml
docker/services/ceilometer-agent-notification.yaml
docker/services/cinder-api.yaml
docker/services/cinder-backup.yaml
docker/services/cinder-scheduler.yaml
docker/services/cinder-volume.yaml
docker/services/collectd.yaml
docker/services/congress-api.yaml
docker/services/database/mongodb.yaml
docker/services/database/mysql-client.yaml [new file with mode: 0644]
docker/services/database/mysql.yaml
docker/services/database/redis.yaml
docker/services/ec2-api.yaml
docker/services/etcd.yaml
docker/services/glance-api.yaml
docker/services/gnocchi-api.yaml
docker/services/gnocchi-metricd.yaml
docker/services/gnocchi-statsd.yaml
docker/services/haproxy.yaml
docker/services/heat-api-cfn.yaml
docker/services/heat-api.yaml
docker/services/heat-engine.yaml
docker/services/horizon.yaml
docker/services/ironic-api.yaml
docker/services/ironic-conductor.yaml
docker/services/ironic-pxe.yaml
docker/services/iscsid.yaml
docker/services/keystone.yaml
docker/services/manila-api.yaml
docker/services/manila-scheduler.yaml
docker/services/memcached.yaml
docker/services/mistral-api.yaml
docker/services/mistral-engine.yaml
docker/services/mistral-executor.yaml
docker/services/multipathd.yaml
docker/services/neutron-api.yaml
docker/services/neutron-dhcp.yaml
docker/services/neutron-l3.yaml
docker/services/neutron-metadata.yaml
docker/services/neutron-ovs-agent.yaml
docker/services/neutron-plugin-ml2.yaml
docker/services/nova-api.yaml
docker/services/nova-compute.yaml
docker/services/nova-conductor.yaml
docker/services/nova-consoleauth.yaml
docker/services/nova-ironic.yaml
docker/services/nova-libvirt.yaml
docker/services/nova-placement.yaml
docker/services/nova-scheduler.yaml
docker/services/nova-vnc-proxy.yaml
docker/services/octavia-api.yaml [new file with mode: 0644]
docker/services/octavia-health-manager.yaml [new file with mode: 0644]
docker/services/octavia-housekeeping.yaml [new file with mode: 0644]
docker/services/octavia-worker.yaml [new file with mode: 0644]
docker/services/opendaylight-api.yaml [new file with mode: 0644]
docker/services/pacemaker/cinder-backup.yaml [new file with mode: 0644]
docker/services/pacemaker/cinder-volume.yaml
docker/services/pacemaker/clustercheck.yaml
docker/services/pacemaker/database/mysql.yaml
docker/services/pacemaker/database/redis.yaml
docker/services/pacemaker/haproxy.yaml
docker/services/pacemaker/rabbitmq.yaml
docker/services/panko-api.yaml
docker/services/rabbitmq.yaml
docker/services/sahara-api.yaml
docker/services/sahara-engine.yaml
docker/services/sensu-client.yaml
docker/services/swift-proxy.yaml
docker/services/swift-ringbuilder.yaml
docker/services/swift-storage.yaml
docker/services/tacker.yaml
docker/services/zaqar.yaml
environments/docker-centos-tripleoupstream.yaml [new file with mode: 0644]
environments/docker-ha.yaml [new file with mode: 0644]
environments/docker.yaml
environments/major-upgrade-composable-steps-docker.yaml
environments/neutron-ml2-cisco-nexus-ucsm.yaml
environments/neutron-opendaylight-dpdk.yaml [new file with mode: 0644]
environments/neutron-ovs-dpdk.yaml
environments/nonha-arch.yaml [new file with mode: 0644]
environments/overcloud-baremetal.j2.yaml [new file with mode: 0644]
environments/overcloud-services.yaml [new file with mode: 0644]
environments/services-docker/neutron-opendaylight.yaml [new file with mode: 0644]
environments/services-docker/octavia.yaml [new file with mode: 0644]
environments/services/ironic.yaml
environments/services/neutron-lbaasv2.yaml [new file with mode: 0644]
environments/storage/cinder-netapp-config.yaml
extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
extraconfig/pre_network/ansible_host_config.yaml
extraconfig/pre_network/config_then_reboot.yaml
extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
extraconfig/pre_network/host_config_and_reboot.yaml
extraconfig/tasks/ssh/host_public_key.yaml
net-config-bond.yaml
network/config/bond-with-vlans/ceph-storage.yaml
network/config/bond-with-vlans/cinder-storage.yaml
network/config/bond-with-vlans/compute-dpdk.yaml
network/config/bond-with-vlans/compute.yaml
network/config/bond-with-vlans/controller-no-external.yaml
network/config/bond-with-vlans/controller-v6.yaml
network/config/bond-with-vlans/controller.yaml
network/config/bond-with-vlans/swift-storage.yaml
network/ports/ctlplane_vip.yaml
network/ports/net_ip_list_map.yaml
network/scripts/run-os-net-config.sh
network/service_net_map.j2.yaml
overcloud-resource-registry-puppet.j2.yaml
overcloud.j2.yaml
puppet/blockstorage-role.yaml
puppet/cephstorage-role.yaml
puppet/compute-role.yaml
puppet/controller-role.yaml
puppet/deploy-artifacts.sh
puppet/major_upgrade_steps.j2.yaml
puppet/objectstorage-role.yaml
puppet/post-upgrade.j2.yaml
puppet/post.j2.yaml
puppet/puppet-steps.j2
puppet/role.role.j2.yaml
puppet/services/README.rst
puppet/services/ceph-base.yaml
puppet/services/ceph-external.yaml
puppet/services/database/mysql.yaml
puppet/services/disabled/ceilometer-expirer-disabled.yaml
puppet/services/glance-api.yaml
puppet/services/haproxy.yaml
puppet/services/horizon.yaml
puppet/services/ironic-api.yaml
puppet/services/neutron-lbaas.yaml [new file with mode: 0644]
puppet/services/neutron-ovs-agent.yaml
puppet/services/neutron-ovs-dpdk-agent.yaml
puppet/services/nova-api.yaml
puppet/services/nova-compute.yaml
puppet/services/nova-conductor.yaml
puppet/services/nova-metadata.yaml
puppet/services/nova-placement.yaml
puppet/services/nova-scheduler.yaml
puppet/services/opendaylight-ovs.yaml
puppet/services/openvswitch-upgrade.yaml [deleted file]
puppet/services/openvswitch.yaml [new file with mode: 0644]
puppet/services/pacemaker/database/mysql.yaml
puppet/services/swift-proxy.yaml
releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml [new file with mode: 0644]
releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml [new file with mode: 0644]
releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml [new file with mode: 0644]
releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml [new file with mode: 0644]
releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml [new file with mode: 0644]
releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml [new file with mode: 0644]
releasenotes/notes/enable-neutron-lbaas-integration-b72126f2c7e71cee.yaml [new file with mode: 0644]
releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml [new file with mode: 0644]
releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml [new file with mode: 0644]
releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml [new file with mode: 0644]
releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml [new file with mode: 0644]
releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml [new file with mode: 0644]
releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml [new file with mode: 0644]
releasenotes/notes/vipmap-output-4a9ce99930960346.yaml [new file with mode: 0644]
roles/Controller.yaml
roles/ControllerOpenstack.yaml
roles/Networker.yaml
roles_data.yaml
services.yaml
test-requirements.txt
tools/process-templates.py
tools/yaml-validate.py

diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index 1fe7790..d0ec015 100644 (file)
@@ -305,6 +305,11 @@ topics:
             description: Enables Neutron BGPVPN Service Plugin
             requires:
               - overcloud-resource-registry-puppet.yaml
+          - file: environments/services/neutron-lbaasv2.yaml
+            title: Neutron LBaaSv2 Service Plugin
+            description: Enables Neutron LBaaSv2 Service Plugin and Agent
+            requires:
+              - overcloud-resource-registry-puppet.yaml
           - file: environments/neutron-ml2-bigswitch.yaml
             title: BigSwitch Extensions
             description: >
diff --git a/ci/environments/README.rst b/ci/environments/README.rst
new file mode 100644 (file)
index 0000000..4a3cb9d
--- /dev/null
@@ -0,0 +1,4 @@
+This directory contains environments that are used in tripleo-ci.  They may change from
+release to release or within a release, and should not be relied upon in a production
+environment.  The top-level ``environments`` directory in tripleo-heat-templates
+contains the production-ready environment files.
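For reference, the CI environments added here are consumed the same way as the production ones: layered onto a deployment with the client's -e option. A minimal sketch (illustrative only, not part of the merged change; the file choice is arbitrary):

    # stack CI environments on top of the default templates
    openstack overcloud deploy --templates \
      -e ci/environments/multinode.yaml \
      -e ci/environments/ceph-min-osds.yaml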
diff --git a/ci/environments/ceph-min-osds.yaml b/ci/environments/ceph-min-osds.yaml
new file mode 100644 (file)
index 0000000..4e72d31
--- /dev/null
@@ -0,0 +1,2 @@
+parameter_defaults:
+  CephPoolDefaultSize: 1
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index 20e37e3..102787a 100644 (file)
@@ -52,6 +52,8 @@ parameter_defaults:
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::Horizon
+    - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::Sshd
   ControllerExtraConfig:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario001-multinode-containers.yaml b/ci/environments/scenario001-multinode-containers.yaml
index c142922..7c32381 100644 (file)
@@ -6,15 +6,17 @@
 resource_registry:
   OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
   OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  # TODO deploy ceph with ceph-ansible: https://review.openstack.org/#/c/465066/
   OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
   OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
   OS::TripleO::Services::CephClient: ../../puppet/services/ceph-client.yaml
-  OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
-  OS::TripleO::Services::Collectd: ../../puppet/services/metrics/collectd.yaml
-  OS::TripleO::Services::Tacker: ../../puppet/services/tacker.yaml
-  OS::TripleO::Services::Congress: ../../puppet/services/congress.yaml
+  OS::TripleO::Services::PankoApi: ../../docker/services/panko-api.yaml
+  OS::TripleO::Services::Collectd: ../../docker/services/collectd.yaml
+  OS::TripleO::Services::Tacker: ../../docker/services/tacker.yaml
+  OS::TripleO::Services::Congress: ../../docker/services/congress-api.yaml
+  # TODO fluentd is being containerized: https://review.openstack.org/#/c/467072/
   OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
-  OS::TripleO::Services::SensuClient: ../../puppet/services/monitoring/sensu-client.yaml
+  OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
   # NOTE: This is needed because of upgrades from Ocata to Pike. We
   # deploy the initial environment with Ocata templates, and
   # overcloud-resource-registry.yaml there doesn't have this Docker
diff --git a/ci/environments/scenario002-multinode-containers.yaml b/ci/environments/scenario002-multinode-containers.yaml
index 7191dea..92c834b 100644 (file)
@@ -6,9 +6,10 @@
 resource_registry:
   OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
   OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
-  OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml
-  OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml
-  OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml
+  # TODO: Barbican is not yet containerized: https://review.openstack.org/#/c/474327
+  # OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
+  OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
+  OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
   # NOTE: This is needed because of upgrades from Ocata to Pike. We
   # deploy the initial environment with Ocata templates, and
   # overcloud-resource-registry.yaml there doesn't have this Docker
diff --git a/ci/environments/scenario003-multinode-containers.yaml b/ci/environments/scenario003-multinode-containers.yaml
index cfb0507..7b917ae 100644 (file)
@@ -6,11 +6,11 @@
 resource_registry:
   OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
   OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
-  OS::TripleO::Services::SaharaApi: ../../puppet/services/sahara-api.yaml
-  OS::TripleO::Services::SaharaEngine: ../../puppet/services/sahara-engine.yaml
-  OS::TripleO::Services::MistralApi: ../../puppet/services/mistral-api.yaml
-  OS::TripleO::Services::MistralEngine: ../../puppet/services/mistral-engine.yaml
-  OS::TripleO::Services::MistralExecutor: ../../puppet/services/mistral-executor.yaml
+  OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+  OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
+  OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+  OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+  OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
   # NOTE: This is needed because of upgrades from Ocata to Pike. We
   # deploy the initial environment with Ocata templates, and
   # overcloud-resource-registry.yaml there doesn't have this Docker
diff --git a/ci/environments/scenario004-multinode-containers.yaml b/ci/environments/scenario004-multinode-containers.yaml
index 7a6724d..1d6d591 100644 (file)
@@ -6,6 +6,7 @@
 resource_registry:
   OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
   OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  # TODO deploy ceph with ceph-ansible: https://review.openstack.org/#/c/465066/
   OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
   OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
   OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
@@ -13,10 +14,12 @@ resource_registry:
   OS::TripleO::Services::SwiftProxy: OS::Heat::None
   OS::TripleO::Services::SwiftStorage: OS::Heat::None
   OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
-  OS::TripleO::Services::ManilaApi: ../../puppet/services/manila-api.yaml
-  OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml
+  OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
+  OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
+  # NOTE: being containerized here: https://review.openstack.org/#/c/471527/
   OS::TripleO::Services::ManilaShare: ../../puppet/services/manila-share.yaml
   OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+  # TODO: containerize NeutronBgpVpnApi
   OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
   # NOTE: This is needed because of upgrades from Ocata to Pike. We
   # deploy the initial environment with Ocata templates, and
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
index 0847bfb..16deb7d 100644 (file)
@@ -44,6 +44,9 @@ parameters:
       Command or script snippet to run on all overcloud nodes to
       initialize the upgrade process. E.g. a repository switch.
     default: ''
+  deployment_swift_data:
+    type: json
+    default: {}
 
 resources:
   deployed-server:
@@ -51,6 +54,7 @@ resources:
     properties:
       name: {get_param: name}
       software_config_transport: {get_param: software_config_transport}
+      deployment_swift_data: {get_param: deployment_swift_data}
 
   UpgradeInitConfig:
     type: OS::Heat::SoftwareConfig
@@ -133,3 +137,5 @@ outputs:
         - {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
   name:
     value: {get_attr: [HostsEntryDeployment, hostname]}
+  os_collect_config:
+    value: {get_attr: [deployed-server, os_collect_config]}
diff --git a/docker/deploy-steps-playbook.yaml b/docker/deploy-steps-playbook.yaml
index b3cb500..87587a4 100644 (file)
@@ -17,6 +17,7 @@
       shell: python /var/lib/docker-puppet/docker-puppet.py
       environment:
         NET_HOST: 'true'
+        DEBUG: '{{docker_puppet_debug}}'
       when: step == "1"
       changed_when: false
       check_mode: no
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index 1321167..7ca6333 100755 (executable)
@@ -29,9 +29,13 @@ import tempfile
 import multiprocessing
 
 log = logging.getLogger()
-log.setLevel(logging.DEBUG)
 ch = logging.StreamHandler(sys.stdout)
-ch.setLevel(logging.DEBUG)
+if os.environ.get('DEBUG', False):
+    log.setLevel(logging.DEBUG)
+    ch.setLevel(logging.DEBUG)
+else:
+    log.setLevel(logging.INFO)
+    ch.setLevel(logging.INFO)
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 ch.setFormatter(formatter)
 log.addHandler(ch)
@@ -145,11 +149,11 @@ for service in (json_data or []):
     if not manifest or not config_image:
         continue
 
-    log.debug('config_volume %s' % config_volume)
-    log.debug('puppet_tags %s' % puppet_tags)
-    log.debug('manifest %s' % manifest)
-    log.debug('config_image %s' % config_image)
-    log.debug('volumes %s' % volumes)
+    log.info('config_volume %s' % config_volume)
+    log.info('puppet_tags %s' % puppet_tags)
+    log.info('manifest %s' % manifest)
+    log.info('config_image %s' % config_image)
+    log.info('volumes %s' % volumes)
     # We key off of config volume for all configs.
     if config_volume in configs:
         # Append puppet tags and manifest.
@@ -199,7 +203,7 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
 
         # Disables archiving
         if [ -z "$NO_ARCHIVE" ]; then
-            archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
+            archivedirs=("/etc" "/root" "/opt" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
             rsync_srcs=""
             for d in "${archivedirs[@]}"; do
                 if [ -d "$d" ]; then
@@ -217,7 +221,7 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
 
             # Write a checksum of the config-data dir, this is used as a
             # salt to trigger container restart when the config changes
-            tar cf - /var/lib/config-data/${NAME} | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
+            tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
         fi
         """)
 
@@ -272,13 +276,17 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, env=env)
         cmd_stdout, cmd_stderr = subproc.communicate()
-        if cmd_stdout:
-            log.debug(cmd_stdout)
-        if cmd_stderr:
-            log.debug(cmd_stderr)
         if subproc.returncode != 0:
             log.error('Failed running docker-puppet.py for %s' % config_volume)
+            if cmd_stdout:
+                log.error(cmd_stdout)
+            if cmd_stderr:
+                log.error(cmd_stderr)
         else:
+            if cmd_stdout:
+                log.debug(cmd_stdout)
+            if cmd_stderr:
+                log.debug(cmd_stderr)
             # only delete successful runs, for debugging
             rm_container('docker-puppet-%s' % config_volume)
         return subproc.returncode
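Taken together with the playbook change above, the new DEBUG handling means docker-puppet.py now logs at INFO unless the DockerPuppetDebug parameter (exported to the task environment as DEBUG) is set. A minimal sketch of a manual run on a node, assuming the config file is already in place (illustrative only):

    # any non-empty DEBUG value switches the script from INFO to DEBUG logging
    DEBUG=True python /var/lib/docker-puppet/docker-puppet.py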
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
index 3dd963b..73a3cb7 100644 (file)
@@ -21,6 +21,9 @@ parameters:
   servers:
     type: json
     description: Mapping of Role name e.g Controller to a list of servers
+  stack_name:
+    type: string
+    description: Name of the topmost stack
   role_data:
     type: json
     description: Mapping of Role name e.g Controller to the per-role data
@@ -35,6 +38,25 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  DockerPuppetDebug:
+    type: string
+    default: ''
+    description: Set to True to enable debug logging with docker-puppet.py
+  ctlplane_service_ips:
+    type: json
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+  WorkflowTasks_Step{{step}}_Enabled:
+    or:
+    {% for role in roles %}
+      - not:
+          equals:
+            - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+            - ''
+      - False
+    {% endfor %}
+{% endfor %}
 
 resources:
 
@@ -66,8 +88,56 @@ resources:
         - name: role_name
         - name: update_identifier
         - name: bootstrap_server_id
+        - name: docker_puppet_debug
       config: {get_file: deploy-steps-playbook.yaml}
 
+{%- for step in range(1, deploy_steps_max) %}
+# BEGIN service_workflow_tasks handling
+  WorkflowTasks_Step{{step}}:
+    type: OS::Mistral::Workflow
+    condition: WorkflowTasks_Step{{step}}_Enabled
+    depends_on:
+    {% if step == 1 %}
+    {% for dep in roles %}
+      - {{dep.name}}PreConfig
+      - {{dep.name}}ArtifactsDeploy
+    {% endfor %}
+    {% else %}
+    {% for dep in roles %}
+      - {{dep.name}}Deployment_Step{{step -1}}
+    {% endfor %}
+    {% endif %}
+    properties:
+      name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+      type: direct
+      tasks:
+        yaql:
+          expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+          data:
+          {% for role in roles %}
+            - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+          {% endfor %}
+
+  WorkflowTasks_Step{{step}}_Execution:
+    type: OS::Mistral::ExternalResource
+    condition: WorkflowTasks_Step{{step}}_Enabled
+    depends_on: WorkflowTasks_Step{{step}}
+    properties:
+      actions:
+        CREATE:
+          workflow: { get_resource: WorkflowTasks_Step{{step}} }
+          params:
+            env:
+              service_ips: { get_param: ctlplane_service_ips }
+        UPDATE:
+          workflow: { get_resource: WorkflowTasks_Step{{step}} }
+          params:
+            env:
+              service_ips: { get_param: ctlplane_service_ips }
+      always_update: true
+# END service_workflow_tasks handling
+{% endfor %}
+
 {% for role in roles %}
   # Post deployment steps for all roles
   # A single config is re-applied with an incrementing step number
@@ -195,14 +265,23 @@ resources:
 
   {{role.name}}Deployment_Step{{step}}:
     type: OS::Heat::StructuredDeploymentGroup
-  {% if step == 1 %}
-    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
-  {% else %}
     depends_on:
-      {% for dep in roles %}
+      - WorkflowTasks_Step{{step}}_Execution
+    # TODO(gfidente): the following if/else condition
+    # replicates what is already defined for the
+    # WorkflowTasks_StepX resource and can be removed
+    # if https://bugs.launchpad.net/heat/+bug/1700569
+    # is fixed.
+    {% if step == 1 %}
+    {% for dep in roles %}
+      - {{dep.name}}PreConfig
+      - {{dep.name}}ArtifactsDeploy
+    {% endfor %}
+    {% else %}
+    {% for dep in roles %}
       - {{dep.name}}Deployment_Step{{step -1}}
-      {% endfor %}
-  {% endif %}
+    {% endfor %}
+    {% endif %}
     properties:
       name: {{role.name}}Deployment_Step{{step}}
       servers: {get_param: [servers, {{role.name}}]}
@@ -212,6 +291,7 @@ resources:
         role_name: {{role.name}}
         update_identifier: {get_param: DeployIdentifier}
         bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
+        docker_puppet_debug: {get_param: DockerPuppetDebug}
 
   {% endfor %}
   # END CONFIG STEPS
diff --git a/docker/docker-toool b/docker/docker-toool
index 0b87ea9..a1ffe34 100755 (executable)
@@ -69,10 +69,15 @@ def parse_opts(argv):
                         action='store_true',
                         help="""Start docker container interactively (-ti).""",
                         default=False)
+    parser.add_argument('-d', '--detach',
+                        action='store_true',
+                        help="""Start container detached.""",
+                        default=False)
     opts = parser.parse_args(argv[1:])
 
     return opts
 
+
 def docker_arg_map(key, value):
     value = str(value).encode('ascii', 'ignore')
     if len(value) == 0:
@@ -84,12 +89,12 @@ def docker_arg_map(key, value):
         'net': "--net=%s" % value,
         'pid': "--pid=%s" % value,
         'privileged': "--privileged=%s" % value.lower(),
-        #'restart': "--restart=%s" % "false",
         'user': "--user=%s" % value,
         'volumes': "--volume=%s" % value,
         'volumes_from': "--volumes-from=%s" % value,
     }.get(key, None)
 
+
 def run_docker_container(opts, container_name):
     container_found = False
 
@@ -142,13 +147,15 @@ def run_docker_container(opts, container_name):
                             if opts.user:
                                 continue
                         arg = docker_arg_map(container_data,
-                                json_data[step][container][container_data])
+                                             json_data[step][container][container_data])
                         if arg:
                             cmd.append(arg)
 
                 if opts.user:
                     cmd.append('--user')
                     cmd.append(opts.user)
+                if opts.detach:
+                    cmd.append('--detach')
                 if opts.interactive:
                     cmd.append('-ti')
                     # May as well remove it when we're done too
@@ -167,19 +174,17 @@ def run_docker_container(opts, container_name):
     if not container_found:
         print("Container '%s' not found!" % container_name)
 
+
 def list_docker_containers(opts):
-    print opts
     with open(opts.config) as f:
         json_data = json.load(f)
 
     for step in (json_data or []):
         if step is None:
             continue
-        print step
         for container in (json_data[step] or []):
             print('\tcontainer: %s' % container)
             for container_data in (json_data[step][container] or []):
-                #print('\t\tcontainer_data: %s' % container_data)
                 if container_data == "start_order":
                     print('\t\tstart_order: %s' % json_data[step][container][container_data])
 
@@ -189,4 +194,3 @@ if opts.container:
     run_docker_container(opts, opts.container)
 else:
     list_docker_containers(opts)
-
diff --git a/docker/firstboot/setup_docker_host.yaml b/docker/firstboot/setup_docker_host.yaml
index 4b061e1..41b036d 100644 (file)
@@ -4,6 +4,7 @@ parameters:
   DockerNamespace:
     type: string
     default: tripleoupstream
+    description: namespace
   DockerNamespaceIsRegistry:
     type: boolean
     default: false
diff --git a/docker/services/aodh-api.yaml b/docker/services/aodh-api.yaml
index bda5469..3be0f18 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-aodh-api:latest'
     type: string
+  DockerAodhConfigImage:
+    description: The container image to use for the aodh config_volume
+    default: 'centos-binary-aodh-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -73,10 +77,10 @@ outputs:
         config_volume: aodh
         puppet_tags: aodh_api_paste_ini,aodh_config
         step_config: *step_config
-        config_image: &aodh_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/aodh_api.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -88,14 +92,17 @@ outputs:
         # db sync runs before permissions set by kolla_config
         step_2:
           aodh_init_log:
-            image: *aodh_image
+            image: &aodh_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
             user: root
             volumes:
               - /var/log/containers/aodh:/var/log/aodh
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
         step_3:
           aodh_db_sync:
-            image: *aodh_image
+            image: *aodh_api_image
             net: host
             privileged: false
             detach: false
@@ -109,7 +116,7 @@ outputs:
             command: "/usr/bin/bootstrap_host_exec aodh_api su aodh -s /bin/bash -c /usr/bin/aodh-dbsync"
         step_4:
           aodh_api:
-            image: *aodh_image
+            image: *aodh_api_image
             net: host
             privileged: false
             restart: always
diff --git a/docker/services/aodh-evaluator.yaml b/docker/services/aodh-evaluator.yaml
index 74ac635..108a552 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-aodh-evaluator:latest'
     type: string
+  DockerAodhConfigImage:
+    description: The container image to use for the aodh config_volume
+    default: 'centos-binary-aodh-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -65,10 +69,10 @@ outputs:
         config_volume: aodh
         puppet_tags: aodh_config
         step_config: *step_config
-        config_image: &aodh_evaluator_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/aodh_evaluator.json:
           command: /usr/bin/aodh-evaluator
@@ -79,7 +83,10 @@ outputs:
       docker_config:
         step_4:
           aodh_evaluator:
-            image: *aodh_evaluator_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
             net: host
             privileged: false
             restart: always
diff --git a/docker/services/aodh-listener.yaml b/docker/services/aodh-listener.yaml
index 0930f42..d78af5b 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-aodh-listener:latest'
     type: string
+  DockerAodhConfigImage:
+    description: The container image to use for the aodh config_volume
+    default: 'centos-binary-aodh-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -65,10 +69,10 @@ outputs:
         config_volume: aodh
         puppet_tags: aodh_config
         step_config: *step_config
-        config_image: &aodh_listener_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/aodh_listener.json:
           command: /usr/bin/aodh-listener
@@ -79,7 +83,10 @@ outputs:
       docker_config:
         step_4:
           aodh_listener:
-            image: *aodh_listener_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
             net: host
             privileged: false
             restart: always
diff --git a/docker/services/aodh-notifier.yaml b/docker/services/aodh-notifier.yaml
index 607d999..abfb374 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-aodh-notifier:latest'
     type: string
+  DockerAodhConfigImage:
+    description: The container image to use for the aodh config_volume
+    default: 'centos-binary-aodh-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -65,10 +69,10 @@ outputs:
         config_volume: aodh
         puppet_tags: aodh_config
         step_config: *step_config
-        config_image: &aodh_notifier_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/aodh_notifier.json:
           command: /usr/bin/aodh-notifier
@@ -79,7 +83,10 @@ outputs:
       docker_config:
         step_4:
           aodh_notifier:
-            image: *aodh_notifier_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
             net: host
             privileged: false
             restart: always
diff --git a/docker/services/ceilometer-agent-central.yaml b/docker/services/ceilometer-agent-central.yaml
index 9cec4a6..af1f47a 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-ceilometer-central:latest'
     type: string
+  DockerCeilometerConfigImage:
+    description: The container image to use for the ceilometer config_volume
+    default: 'centos-binary-ceilometer-central:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,10 +67,10 @@ outputs:
         config_volume: ceilometer
         puppet_tags: ceilometer_config
         step_config: *step_config
-        config_image: &ceilometer_agent_central_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerCentralImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/ceilometer_agent_central.json:
           command: /usr/bin/ceilometer-polling --polling-namespaces central
@@ -74,7 +78,10 @@ outputs:
         step_3:
           ceilometer_init_log:
             start_order: 0
-            image: *ceilometer_agent_central_image
+            image: &ceilometer_agent_central_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerCentralImage} ]
             user: root
             command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
             volumes:
diff --git a/docker/services/ceilometer-agent-compute.yaml b/docker/services/ceilometer-agent-compute.yaml
index 8d06d09..3cc440b 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-ceilometer-compute:latest'
     type: string
+  DockerCeilometerConfigImage:
+    description: The container image to use for the ceilometer config_volume
+    default: 'centos-binary-ceilometer-central:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,17 +67,20 @@ outputs:
         config_volume: ceilometer
         puppet_tags: ceilometer_config
         step_config: *step_config
-        config_image: &ceilometer_agent_compute_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerComputeImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/ceilometer_agent_compute.json:
           command: /usr/bin/ceilometer-polling --polling-namespaces compute
       docker_config:
         step_4:
           ceilometer_agent_compute:
-            image: *ceilometer_agent_compute_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerComputeImage} ]
             net: host
             privileged: false
             restart: always
diff --git a/docker/services/ceilometer-agent-ipmi.yaml b/docker/services/ceilometer-agent-ipmi.yaml
index 02793e4..7d02939 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-ceilometer-ipmi:latest'
     type: string
+  DockerCeilometerConfigImage:
+    description: The container image to use for the ceilometer config_volume
+    default: 'centos-binary-ceilometer-central:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,10 +67,10 @@ outputs:
         config_volume: ceilometer
         puppet_tags: ceilometer_config
         step_config: *step_config
-        config_image: &ceilometer_agent_ipmi_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerIpmiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/ceilometer-agent-ipmi.json:
           command: /usr/bin/ceilometer-polling --polling-namespaces ipmi
@@ -74,7 +78,10 @@ outputs:
         step_3:
           ceilometer_init_log:
             start_order: 0
-            image: *ceilometer_agent_ipmi_image
+            image: &ceilometer_agent_ipmi_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerIpmiImage} ]
             user: root
             command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
             volumes:
@@ -93,20 +100,6 @@ outputs:
                   - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-        step_5:
-          ceilometer_gnocchi_upgrade:
-            start_order: 1
-            image: *ceilometer_agent_ipmi_image
-            net: host
-            detach: false
-            privileged: false
-            volumes:
-              list_concat:
-                - {get_attr: [ContainersCommon, volumes]}
-                -
-                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
-                  - /var/log/containers/ceilometer:/var/log/ceilometer
-            command: "/usr/bin/bootstrap_host_exec ceilometer su ceilometer -s /bin/bash -c '/usr/bin/ceilometer-upgrade --skip-metering-database'"
       upgrade_tasks:
         - name: Stop and disable ceilometer agent ipmi service
           tags: step2
diff --git a/docker/services/ceilometer-agent-notification.yaml b/docker/services/ceilometer-agent-notification.yaml
index 36424e9..b2e85bb 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-ceilometer-notification:latest'
     type: string
+  DockerCeilometerConfigImage:
+    description: The container image to use for the ceilometer config_volume
+    default: 'centos-binary-ceilometer-central:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,10 +67,10 @@ outputs:
         config_volume: ceilometer
         puppet_tags: ceilometer_config
         step_config: *step_config
-        config_image: &ceilometer_agent_notification_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerNotificationImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/ceilometer_agent_notification.json:
           command: /usr/bin/ceilometer-agent-notification
@@ -74,7 +78,10 @@ outputs:
         step_3:
           ceilometer_init_log:
             start_order: 0
-            image: *ceilometer_agent_notification_image
+            image: &ceilometer_agent_notification_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerNotificationImage} ]
             user: root
             command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
             volumes:
@@ -93,20 +100,6 @@ outputs:
                   - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-        step_5:
-          ceilometer_gnocchi_upgrade:
-            start_order: 1
-            image: *ceilometer_agent_notification_image
-            net: host
-            detach: false
-            privileged: false
-            volumes:
-              list_concat:
-                - {get_attr: [ContainersCommon, volumes]}
-                -
-                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
-                  - /var/log/containers/ceilometer:/var/log/ceilometer
-            command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
       upgrade_tasks:
         - name: Stop and disable ceilometer agent notification service
           tags: step2
diff --git a/docker/services/cinder-api.yaml b/docker/services/cinder-api.yaml
index 94bd66d..6a5d74b 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-cinder-api:latest'
     type: string
-  # we configure all cinder services in the same cinder base container
   DockerCinderConfigImage:
-    description: image
+    description: The container image to use for the cinder config_volume
     default: 'centos-binary-cinder-api:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/cinder-backup.yaml b/docker/services/cinder-backup.yaml
index 0958a7e..2cde6f1 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-cinder-backup:latest'
     type: string
-  # we configure all cinder services in the same cinder base container
   DockerCinderConfigImage:
-    description: image
+    description: The container image to use for the cinder config_volume
     default: 'centos-binary-cinder-api:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/cinder-scheduler.yaml b/docker/services/cinder-scheduler.yaml
index 8199c34..bcf32b2 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-cinder-scheduler:latest'
     type: string
-  # we configure all cinder services in the same cinder base container
   DockerCinderConfigImage:
-    description: image
+    description: The container image to use for the cinder config_volume
     default: 'centos-binary-cinder-api:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/cinder-volume.yaml b/docker/services/cinder-volume.yaml
index 26eb10e..5517384 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-cinder-volume:latest'
     type: string
-  # we configure all cinder services in the same cinder base container
   DockerCinderConfigImage:
-    description: image
+    description: The container image to use for the cinder config_volume
     default: 'centos-binary-cinder-api:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/collectd.yaml b/docker/services/collectd.yaml
index 7354898..e674115 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-collectd:latest'
     type: string
+  DockerCollectdConfigImage:
+    description: The container image to use for the collectd config_volume
+    default: 'centos-binary-collectd:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -55,7 +59,11 @@ outputs:
     description: Role data for the collectd role.
     value:
       service_name: {get_attr: [CollectdBase, role_data, service_name]}
-      config_settings: {get_attr: [CollectdBase, role_data, config_settings]}
+      config_settings:
+        map_merge:
+          - get_attr: [CollectdBase, role_data, config_settings]
+          - tripleo::profile::base::metrics::collectd::enable_file_logging: true
+            collectd::plugin::logfile::log_file: /var/log/collectd/collectd.log
       step_config: &step_config
         get_attr: [CollectdBase, role_data, step_config]
       service_config_settings: {get_attr: [CollectdBase, role_data, service_config_settings]}
@@ -64,17 +72,24 @@ outputs:
         config_volume: collectd
         puppet_tags:  collectd_client_config
         step_config: *step_config
-        config_image: &collectd_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerCollectdImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerCollectdConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/collectd.json:
           command: /usr/sbin/collectd -f
+          permissions:
+            - path: /var/log/collectd
+              owner: collectd:collectd
+              recurse: true
       docker_config:
         step_3:
           collectd:
-            image: *collectd_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCollectdImage} ]
             net: host
             privileged: true
             restart: always
@@ -84,11 +99,17 @@ outputs:
                 -
                   - /var/run/docker.sock:/var/run/docker.sock:rw
                   - /var/lib/kolla/config_files/collectd.json:/var/lib/kolla/config_files/config.json:ro
-                  - /var/lib/config-data/collectd/etc/collectd/:/etc/collectd/:ro
+                  - /var/lib/config-data/collectd/etc/collectd.conf:/etc/collectd.conf:ro
+                  - /var/lib/config-data/collectd/etc/collectd.d:/etc/collectd.d:ro
+                  - /var/log/containers/collectd:/var/log/collectd:rw
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/collectd
+            state: directory
       upgrade_tasks:
         - name: Stop and disable collectd service
           tags: step2
           service: name=collectd.service state=stopped enabled=no
-
diff --git a/docker/services/congress-api.yaml b/docker/services/congress-api.yaml
index 92b0eeb..52395d5 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-congress-api:latest'
     type: string
   DockerCongressConfigImage:
-    description: image
+    description: The container image to use for the congress config_volume
     default: 'centos-binary-congress-api:latest'
     type: string
   EndpointMap:
@@ -84,7 +84,7 @@ outputs:
         # db sync runs before permissions set by kolla_config
         step_2:
           congress_init_logs:
-            image: &congress_image
+            image: &congress_api_image
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerCongressApiImage} ]
@@ -95,7 +95,7 @@ outputs:
             command: ['/bin/bash', '-c', 'chown -R congress:congress /var/log/congress']
         step_3:
           congress_db_sync:
-            image: *congress_image
+            image: *congress_api_image
             net: host
             privileged: false
             detach: false
@@ -110,7 +110,7 @@ outputs:
         step_4:
           congress_api:
             start_order: 15
-            image: *congress_image
+            image: *congress_api_image
             net: host
             privileged: false
             restart: always
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
index 5d0eb79..7b620c5 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-mongodb:latest'
     type: string
+  DockerMongodbConfigImage:
+    description: The container image to use for the mongodb config_volume
+    default: 'centos-binary-mongodb:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -65,10 +69,10 @@ outputs:
         config_volume: mongodb
         puppet_tags: file # set this even though file is the default
         step_config: *step_config
-        config_image: &mongodb_image
+        config_image: &mongodb_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerMongodbConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/mongodb.json:
           command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run
@@ -82,7 +86,10 @@ outputs:
       docker_config:
         step_2:
           mongodb:
-            image: *mongodb_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
             net: host
             privileged: false
             volumes: &mongodb_volumes
@@ -100,7 +107,7 @@ outputs:
           config_volume: 'mongodb_init_tasks'
           puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset'
           step_config: 'include ::tripleo::profile::base::database::mongodb'
-          config_image: *mongodb_image
+          config_image: *mongodb_config_image
           volumes:
             - /var/lib/mongodb:/var/lib/mongodb
             - /var/log/containers/mongodb:/var/log/mongodb
diff --git a/docker/services/database/mysql-client.yaml b/docker/services/database/mysql-client.yaml
new file mode 100644 (file)
index 0000000..38a31e2
--- /dev/null
@@ -0,0 +1,66 @@
+heat_template_version: pike
+
+description: >
+  Configuration for containerized MySQL clients
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerMysqlClientConfigImage:
+    description: The container image to use for the mysql_client config_volume
+    default: 'centos-binary-mariadb:latest'
+    type: string
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
+
+outputs:
+  role_data:
+    description: Role for setting mysql client parameters
+    value:
+      service_name: mysql_client
+      config_settings:
+        tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
+        tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS}
+        tripleo::profile::base::database::mysql::client::ssl_ca: {get_param: InternalTLSCAFile}
+      # BEGIN DOCKER SETTINGS #
+      step_config: ""
+      puppet_config:
+        config_volume: mysql_client
+        puppet_tags: file # set this even though file is the default
+        step_config: "include ::tripleo::profile::base::database::mysql::client"
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerMysqlClientConfigImage} ]
+      # no need for a docker config, this service only generates configuration files
+      docker_config: {}
diff --git a/docker/services/database/mysql.yaml b/docker/services/database/mysql.yaml
index 9eabb71..725b2b4 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-mariadb:latest'
     type: string
+  DockerMysqlConfigImage:
+    description: The container image to use for the mysql config_volume
+    default: 'centos-binary-mariadb:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -74,10 +78,10 @@ outputs:
         config_volume: mysql
         puppet_tags: file # set this even though file is the default
         step_config: *step_config
-        config_image: &mysql_image
+        config_image: &mysql_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerMysqlConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/mysql.json:
           command: /usr/bin/mysqld_safe
@@ -89,7 +93,10 @@ outputs:
         # Kolla_bootstrap runs before permissions set by kolla_config
         step_1:
           mysql_init_logs:
-            image: *mysql_image
+            image: &mysql_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
             privileged: false
             user: root
             volumes:
@@ -139,7 +146,7 @@ outputs:
           config_volume: 'mysql_init_tasks'
           puppet_tags: 'mysql_database,mysql_grant,mysql_user'
           step_config: 'include ::tripleo::profile::base::database::mysql'
-          config_image: *mysql_image
+          config_image: *mysql_config_image
           volumes:
             - /var/lib/mysql:/var/lib/mysql/:ro
             - /var/log/containers/mysql:/var/log/mariadb
diff --git a/docker/services/database/redis.yaml b/docker/services/database/redis.yaml
index 9d0d30c..0a490cd 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-redis:latest'
     type: string
+  DockerRedisConfigImage:
+    description: The container image to use for the redis config_volume
+    default: 'centos-binary-redis:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -64,10 +68,10 @@ outputs:
         # https://github.com/arioch/puppet-redis/commit/1c004143223e660cbd433422ff8194508aab9763
         puppet_tags: 'exec'
         step_config: *step_config
-        config_image: &redis_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerRedisImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerRedisConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/redis.json:
           command: /usr/bin/redis-server /etc/redis.conf
@@ -80,7 +84,10 @@ outputs:
           redis_init_logs:
             start_order: 0
             detach: false
-            image: *redis_image
+            image: &redis_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerRedisImage} ]
             privileged: false
             user: root
             volumes:
diff --git a/docker/services/ec2-api.yaml b/docker/services/ec2-api.yaml
index bc3654b..d4cfe49 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-ec2-api:latest'
     type: string
+  DockerEc2ApiConfigImage:
+    description: The container image to use for the ec2_api config_volume
+    default: 'centos-binary-ec2-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -60,13 +64,13 @@ outputs:
       service_config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
       puppet_config:
-        config_volume: ec2api
+        config_volume: ec2_api
         puppet_tags: ec2api_api_paste_ini,ec2api_config
         step_config: *step_config
-        config_image: &ec2_api_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/ec2_api.json:
           command: /usr/bin/ec2-api
@@ -84,7 +88,10 @@ outputs:
         # db sync runs before permissions set by kolla_config
         step_2:
           ec2_api_init_logs:
-            image: *ec2_api_image
+            image: &ec2_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiImage} ]
             privileged: false
             user: root
             volumes:
diff --git a/docker/services/etcd.yaml b/docker/services/etcd.yaml
index 818bddd..3c7c81b 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-etcd:latest'
     type: string
+  DockerEtcdConfigImage:
+    description: The container image to use for the etcd config_volume
+    default: 'centos-binary-etcd:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -67,10 +71,10 @@ outputs:
       puppet_config:
         config_volume: etcd
         step_config: *step_config
-        config_image: &etcd_image
+        config_image: &etcd_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerEtcdImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerEtcdConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/etcd.json:
           command: /usr/bin/etcd --config-file /etc/etcd/etcd.yml
@@ -81,7 +85,10 @@ outputs:
       docker_config:
         step_2:
           etcd:
-            image: *etcd_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerEtcdImage} ]
             net: host
             privileged: false
             restart: always
@@ -98,7 +105,7 @@ outputs:
           config_volume: 'etcd_init_tasks'
           puppet_tags: 'etcd_key'
           step_config: 'include ::tripleo::profile::base::etcd'
-          config_image: *etcd_image
+          config_image: *etcd_config_image
           volumes:
             - /var/lib/config-data/etcd/etc/etcd/:/etc/etcd:ro
             - /var/lib/etcd:/var/lib/etcd:ro
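Unlike most templates in this series, etcd keeps an anchor on config_image (renamed to &etcd_config_image) because docker_puppet_tasks reuses it: the extra puppet run operates on the config volume, so it should reference the ConfigImage parameter rather than the runtime image. A minimal sketch of that pattern with hypothetical "example" names:

    puppet_config:
      config_image: &example_config_image
        list_join:
          - '/'
          - [ {get_param: DockerNamespace}, {get_param: DockerExampleConfigImage} ]
    docker_puppet_tasks:
      step_2:
        config_volume: 'example_init_tasks'
        step_config: 'include ::tripleo::profile::base::example'
        config_image: *example_config_image   # extra puppet runs use the config image, not the runtime image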
index 5c24401..4fadef9 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-glance-api:latest'
     type: string
+  DockerGlanceApiConfigImage:
+    description: The container image to use for the glance_api config_volume
+    default: 'centos-binary-glance-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -74,10 +78,10 @@ outputs:
         config_volume: glance_api
         puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
         step_config: *step_config
-        config_image: &glance_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/glance_api.json:
           command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
@@ -87,7 +91,10 @@ outputs:
         # Kolla_bootstrap/db_sync runs before permissions set by kolla_config
         step_2:
           glance_init_logs:
-            image: *glance_image
+            image: &glance_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
             privileged: false
             user: root
             volumes:
@@ -95,7 +102,7 @@ outputs:
             command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
         step_3:
           glance_api_db_sync:
-            image: *glance_image
+            image: *glance_api_image
             net: host
             privileged: false
             detach: false
@@ -115,7 +122,7 @@ outputs:
           map_merge:
             - glance_api:
                 start_order: 2
-                image: *glance_image
+                image: *glance_api_image
                 net: host
                 privileged: false
                 restart: always
@@ -126,7 +133,7 @@ outputs:
                 - internal_tls_enabled
                 - glance_api_tls_proxy:
                     start_order: 2
-                    image: *glance_image
+                    image: *glance_api_image
                     net: host
                     user: root
                     restart: always
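Because the config image is now a separate parameter, deployments that pin images through parameter_defaults may need to set both values. A hypothetical environment-file snippet; the namespace and tag values are placeholders:

    parameter_defaults:
      DockerNamespace: 192.168.24.1:8787/tripleoupstream
      DockerGlanceApiImage: centos-binary-glance-api:latest
      DockerGlanceApiConfigImage: centos-binary-glance-api:latest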
index bd1c316..cf31d25 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-gnocchi-api:latest'
     type: string
+  DockerGnocchiConfigImage:
+    description: The container image to use for the gnocchi config_volume
+    default: 'centos-binary-gnocchi-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -73,10 +77,10 @@ outputs:
         config_volume: gnocchi
         puppet_tags: gnocchi_api_paste_ini,gnocchi_config
         step_config: *step_config
-        config_image: &gnocchi_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/gnocchi_api.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -88,14 +92,17 @@ outputs:
         # db sync runs before permissions set by kolla_config
         step_2:
           gnocchi_init_log:
-            image: *gnocchi_image
+            image: &gnocchi_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
             user: root
             volumes:
               - /var/log/containers/gnocchi:/var/log/gnocchi
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
         step_3:
           gnocchi_db_sync:
-            image: *gnocchi_image
+            image: *gnocchi_api_image
             net: host
             detach: false
             privileged: false
@@ -109,7 +116,7 @@ outputs:
             command: "/usr/bin/bootstrap_host_exec gnocchi_api su gnocchi -s /bin/bash -c '/usr/bin/gnocchi-upgrade --skip-storage'"
         step_4:
           gnocchi_api:
-            image: *gnocchi_image
+            image: *gnocchi_api_image
             net: host
             privileged: false
             restart: always
index ea26d83..3a05d57 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-gnocchi-metricd:latest'
     type: string
+  DockerGnocchiConfigImage:
+    description: The container image to use for the gnocchi config_volume
+    default: 'centos-binary-gnocchi-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,10 +67,10 @@ outputs:
         config_volume: gnocchi
         puppet_tags: gnocchi_config
         step_config: *step_config
-        config_image: &gnocchi_metricd_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/gnocchi_metricd.json:
           command: /usr/bin/gnocchi-metricd
@@ -77,7 +81,10 @@ outputs:
       docker_config:
         step_4:
           gnocchi_metricd:
-            image: *gnocchi_metricd_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
             net: host
             privileged: false
             restart: always
index a8ae857..c3523b5 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-gnocchi-statsd:latest'
     type: string
+  DockerGnocchiConfigImage:
+    description: The container image to use for the gnocchi config_volume
+    default: 'centos-binary-gnocchi-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,10 +67,10 @@ outputs:
         config_volume: gnocchi
         puppet_tags: gnocchi_config
         step_config: *step_config
-        config_image: &gnocchi_statsd_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/gnocchi_statsd.json:
           command: /usr/bin/gnocchi-statsd
@@ -77,7 +81,10 @@ outputs:
       docker_config:
         step_4:
           gnocchi_statsd:
-            image: *gnocchi_statsd_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
             net: host
             privileged: false
             restart: always
index 1f8bcfa..5831fe8 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-haproxy:latest'
     type: string
+  DockerHAProxyConfigImage:
+    description: The container image to use for the haproxy config_volume
+    default: 'centos-binary-haproxy:latest'
+    type: string
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -38,6 +42,11 @@ parameters:
     default: /dev/log
     description: Syslog address where HAproxy will send its log
     type: string
+  DeployedSSLCertificatePath:
+    default: '/etc/pki/tls/private/overcloud_endpoint.pem'
+    description: >
+        The filepath of the certificate as it will be stored on the controller.
+    type: string
   RedisPassword:
     description: The password for Redis
     type: string
@@ -85,23 +94,33 @@ outputs:
         config_volume: haproxy
         puppet_tags: haproxy_config
         step_config: *step_config
-        config_image: &haproxy_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyConfigImage} ]
+        volumes: &deployed_cert_mount
+          - list_join:
+            - ':'
+            - - {get_param: DeployedSSLCertificatePath}
+              - {get_param: DeployedSSLCertificatePath}
+              - 'ro'
       kolla_config:
         /var/lib/kolla/config_files/haproxy.json:
           command: haproxy -f /etc/haproxy/haproxy.cfg
       docker_config:
         step_1:
           haproxy:
-            image: *haproxy_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
             net: host
             privileged: false
             restart: always
             volumes:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
+                - *deployed_cert_mount
                 -
                   - /var/lib/kolla/config_files/haproxy.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/haproxy/etc/:/etc/:ro
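The &deployed_cert_mount anchor builds a single "src:dst:options" bind-mount string with list_join on ':' and is reused in the haproxy container's volumes via list_concat. With the default DeployedSSLCertificatePath this renders roughly as follows (a sketch of the evaluated output, not literal template text):

    volumes:
      - /etc/pki/tls/private/overcloud_endpoint.pem:/etc/pki/tls/private/overcloud_endpoint.pem:ro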
index 89ba8cb..1905281 100644 (file)
@@ -13,8 +13,8 @@ parameters:
     default: 'centos-binary-heat-api-cfn:latest'
     type: string
   # puppet needs the heat-wsgi-api-cfn binary from centos-binary-heat-api-cfn
-  DockerHeatConfigImage:
-    description: image
+  DockerHeatApiCfnConfigImage:
+    description: The container image to use for the heat_api_cfn config_volume
     default: 'centos-binary-heat-api-cfn:latest'
     type: string
   EndpointMap:
@@ -81,7 +81,7 @@ outputs:
         config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/heat_api_cfn.json:
           command: /usr/sbin/httpd -DFOREGROUND
index 834f2a0..c0cec93 100644 (file)
@@ -13,8 +13,8 @@ parameters:
     default: 'centos-binary-heat-api:latest'
     type: string
   # puppet needs the heat-wsgi-api binary from centos-binary-heat-api
-  DockerHeatConfigImage:
-    description: image
+  DockerHeatApiConfigImage:
+    description: The container image to use for the heat_api config_volume
     default: 'centos-binary-heat-api:latest'
     type: string
   EndpointMap:
@@ -81,7 +81,7 @@ outputs:
         config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/heat_api.json:
           command: /usr/sbin/httpd -DFOREGROUND
index 7a3312d..676dbb1 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-heat-engine:latest'
     type: string
+  DockerHeatConfigImage:
+    description: The container image to use for the heat config_volume
+    default: 'centos-binary-heat-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -67,10 +71,10 @@ outputs:
         config_volume: heat
         puppet_tags: heat_config,file,concat,file_line
         step_config: *step_config
-        config_image: &heat_engine_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/heat_engine.json:
           command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
@@ -82,7 +86,10 @@ outputs:
         # db sync runs before permissions set by kolla_config
         step_2:
           heat_init_log:
-            image: *heat_engine_image
+            image: &heat_engine_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
             user: root
             volumes:
               - /var/log/containers/heat:/var/log/heat
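Taken together, the heat hunks rename the shared DockerHeatConfigImage to per-service names in heat-api-cfn and heat-api, while heat-engine introduces DockerHeatConfigImage with a default pointing at the heat-api image. Expressed as a hypothetical parameter_defaults block, with the defaults copied from the hunks above:

    parameter_defaults:
      DockerHeatApiCfnConfigImage: centos-binary-heat-api-cfn:latest   # heat-api-cfn.yaml
      DockerHeatApiConfigImage: centos-binary-heat-api:latest          # heat-api.yaml
      DockerHeatConfigImage: centos-binary-heat-api:latest             # heat-engine.yaml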
index 13bd091..5797b20 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-horizon:latest'
     type: string
+  DockerHorizonConfigImage:
+    description: The container image to use for the horizon config_volume
+    default: 'centos-binary-horizon:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -70,10 +74,10 @@ outputs:
         config_volume: horizon
         puppet_tags: horizon_config
         step_config: {get_attr: [HorizonBase, role_data, step_config]}
-        config_image: &horizon_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHorizonImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHorizonConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/horizon.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -88,7 +92,10 @@ outputs:
       docker_config:
         step_2:
           horizon_fix_perms:
-            image: *horizon_image
+            image: &horizon_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerHorizonImage} ]
             user: root
             # NOTE Set ownership for /var/log/horizon/horizon.log file here,
             # otherwise it's created by root when generating django cache.
index a32176a..183ed5c 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-ironic-api:latest'
     type: string
   DockerIronicConfigImage:
-    description: image
+    description: The container image to use for the ironic config_volume
     default: 'centos-binary-ironic-pxe:latest'
     type: string
   EndpointMap:
@@ -61,6 +61,7 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [IronicApiBase, role_data, config_settings]
+          - apache::default_vhost: false
       step_config: &step_config
         get_attr: [IronicApiBase, role_data, step_config]
       service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
@@ -75,7 +76,7 @@ outputs:
             - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/ironic_api.json:
-          command: /usr/bin/ironic-api
+          command: /usr/sbin/httpd -DFOREGROUND
           permissions:
             - path: /var/log/ironic
               owner: ironic:ironic
@@ -84,7 +85,7 @@ outputs:
         # db sync runs before permissions set by kolla_config
         step_2:
           ironic_init_logs:
-            image: &ironic_image
+            image: &ironic_api_image
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
@@ -96,7 +97,7 @@ outputs:
         step_3:
           ironic_db_sync:
             start_order: 1
-            image: *ironic_image
+            image: *ironic_api_image
             net: host
             privileged: false
             detach: false
@@ -111,9 +112,9 @@ outputs:
         step_4:
           ironic_api:
             start_order: 10
-            image: *ironic_image
+            image: *ironic_api_image
             net: host
-            privileged: false
+            user: root
             restart: always
             volumes:
               list_concat:
@@ -121,6 +122,10 @@ outputs:
                 -
                   - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/ironic/etc/ironic:/etc/ironic:ro
+                  - /var/lib/config-data/ironic/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/ironic/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/ironic/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
+                  - /var/lib/config-data/ironic/var/www/:/var/www/:ro
                   - /var/log/containers/ironic:/var/log/ironic
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
index 360eb66..f47a3e4 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-ironic-conductor:latest'
     type: string
   DockerIronicConfigImage:
-    description: image
+    description: The container image to use for the ironic config_volume
     default: 'centos-binary-ironic-pxe:latest'
     type: string
   EndpointMap:
index 75c7082..f518b9d 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-ironic-pxe:latest'
     type: string
   DockerIronicConfigImage:
-    description: image
+    description: The container image to use for the ironic config_volume
     default: 'centos-binary-ironic-pxe:latest'
     type: string
   EndpointMap:
index 53f5aff..86f2d3b 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-iscsid:latest'
     type: string
+  DockerIscsidConfigImage:
+    description: The container image to use for the iscsid config_volume
+    default: 'centos-binary-iscsid:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -53,10 +57,10 @@ outputs:
         config_volume: iscsid
         #puppet_tags: file
         step_config: ''
-        config_image: &iscsid_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerIscsidImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerIscsidConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/iscsid.json:
           command: /usr/sbin/iscsid -f
@@ -64,7 +68,10 @@ outputs:
         step_3:
           iscsid:
             start_order: 2
-            image: *iscsid_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerIscsidImage} ]
             net: host
             privileged: true
             restart: always
index 4cd44f2..b6cfa21 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-keystone:latest'
     type: string
+  DockerKeystoneConfigImage:
+    description: The container image to use for the keystone config_volume
+    default: 'centos-binary-keystone:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -86,10 +90,10 @@ outputs:
         config_volume: keystone
         puppet_tags: keystone_config
         step_config: *step_config
-        config_image: &keystone_image
+        config_image: &keystone_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/keystone.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -97,7 +101,10 @@ outputs:
         # Kolla_bootstrap/db sync runs before permissions set by kolla_config
         step_2:
           keystone_init_log:
-            image: *keystone_image
+            image: &keystone_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
             user: root
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
             volumes:
@@ -106,6 +113,7 @@ outputs:
           keystone_db_sync:
             image: *keystone_image
             net: host
+            user: root
             privileged: false
             detach: false
             volumes: &keystone_volumes
@@ -145,6 +153,7 @@ outputs:
           keystone_bootstrap:
             start_order: 3
             action: exec
+            user: root
             command:
               [ 'keystone', '/usr/bin/bootstrap_host_exec', 'keystone' ,'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ]
       docker_puppet_tasks:
@@ -153,7 +162,7 @@ outputs:
           config_volume: 'keystone_init_tasks'
           puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
           step_config: 'include ::tripleo::profile::base::keystone'
-          config_image: *keystone_image
+          config_image: *keystone_config_image
       host_prep_tasks:
         - name: create persistent logs directory
           file:
index a203d43..66dc6c3 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-manila-api:latest'
     type: string
   DockerManilaConfigImage:
-    description: image
+    description: The container image to use for the manila config_volume
     default: 'centos-binary-manila-api:latest'
     type: string
   EndpointMap:
@@ -94,6 +94,7 @@ outputs:
             net: host
             detach: false
             volumes:
+              list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
                   - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
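The manila-api hunk fixes the volumes property: without list_concat, the common mounts from ContainersCommon and the service-specific mounts remain a list of nested lists instead of a single flat list of bind-mounts. A minimal sketch of list_concat with placeholder entries:

    volumes:
      list_concat:
        - - /first/host/path:/first/container/path:ro
        - - /second/host/path:/second/container/path:ro
    # renders as one flat list:
    #   - /first/host/path:/first/container/path:ro
    #   - /second/host/path:/second/container/path:ro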
index fbc80fc..d4170e4 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-manila-scheduler:latest'
     type: string
   DockerManilaConfigImage:
-    description: image
+    description: The container image to use for the manila config_volume
     default: 'centos-binary-manila-api:latest'
     type: string
   EndpointMap:
index d453964..3d41c17 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-memcached:latest'
     type: string
+  DockerMemcachedConfigImage:
+    description: The container image to use for the memcached config_volume
+    default: 'centos-binary-memcached:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,17 +67,20 @@ outputs:
         config_volume: 'memcached'
         puppet_tags: 'file'
         step_config: *step_config
-        config_image: &memcached_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedConfigImage} ]
       kolla_config: {}
       docker_config:
         step_1:
           memcached_init_logs:
             start_order: 0
             detach: false
-            image: *memcached_image
+            image: &memcached_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
             privileged: false
             user: root
             volumes:
index 30c3cde..f128428 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-mistral-api:latest'
     type: string
   DockerMistralConfigImage:
-    description: image
+    description: The container image to use for the mistral config_volume
     default: 'centos-binary-mistral-api:latest'
     type: string
   EndpointMap:
@@ -84,7 +84,7 @@ outputs:
         # db sync runs before permissions set by kolla_config
         step_2:
           mistral_init_logs:
-            image: &mistral_image
+            image: &mistral_api_image
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
@@ -96,7 +96,7 @@ outputs:
         step_3:
           mistral_db_sync:
             start_order: 0
-            image: *mistral_image
+            image: *mistral_api_image
             net: host
             privileged: false
             detach: false
@@ -110,7 +110,7 @@ outputs:
             command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head'"
           mistral_db_populate:
             start_order: 1
-            image: *mistral_image
+            image: *mistral_api_image
             net: host
             privileged: false
             detach: false
@@ -127,7 +127,7 @@ outputs:
         step_4:
           mistral_api:
             start_order: 15
-            image: *mistral_image
+            image: *mistral_api_image
             net: host
             privileged: false
             restart: always
index d60d847..712f4ba 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-mistral-engine:latest'
     type: string
   DockerMistralConfigImage:
-    description: image
+    description: The container image to use for the mistral config_volume
     default: 'centos-binary-mistral-api:latest'
     type: string
   EndpointMap:
index 76ae052..5a35ba9 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-mistral-executor:latest'
     type: string
   DockerMistralConfigImage:
-    description: image
+    description: The container image to use for the mistral config_volume
     default: 'centos-binary-mistral-api:latest'
     type: string
   EndpointMap:
index d8927d4..61b0557 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-multipathd:latest'
     type: string
+  DockerMultipathdConfigImage:
+    description: The container image to use for the multipathd config_volume
+    default: 'centos-binary-multipathd:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -53,10 +57,10 @@ outputs:
         config_volume: multipathd
         #puppet_tags: file
         step_config: ''
-        config_image: &multipathd_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/multipathd.json:
           command: /usr/sbin/multipathd -d
@@ -64,7 +68,10 @@ outputs:
         step_3:
           multipathd:
             start_order: 1
-            image: *multipathd_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdImage} ]
             net: host
             privileged: true
             restart: always
index 6c2d4ca..2890dec 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-neutron-server:latest'
     type: string
-  # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   EndpointMap:
@@ -83,7 +82,7 @@ outputs:
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron_api.json:
-          command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
+          command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-server
           permissions:
             - path: /var/log/neutron
               owner: neutron:neutron
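The neutron hunks append --config-dir arguments so oslo.config also reads drop-in files from /etc/neutron/conf.d/common and a per-service directory, letting settings be injected without editing neutron.conf itself. A hypothetical drop-in; the file name and option are chosen purely for illustration:

    # /etc/neutron/conf.d/neutron-server/10-overrides.conf
    [DEFAULT]
    debug = True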
index d14f525..460b2ee 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-neutron-dhcp-agent:latest'
     type: string
-  # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   EndpointMap:
@@ -76,7 +75,7 @@ outputs:
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron_dhcp.json:
-          command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
+          command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-dhcp-agent
           permissions:
             - path: /var/log/neutron
               owner: neutron:neutron
index f3a284f..b692f73 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-neutron-l3-agent:latest'
     type: string
-  # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   ServiceNetMap:
@@ -72,7 +71,7 @@ outputs:
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron_l3_agent.json:
-          command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
+          command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-l3-agent
           permissions:
             - path: /var/log/neutron
               owner: neutron:neutron
index 69bf0c4..493b97b 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-neutron-metadata-agent:latest'
     type: string
-  # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   ServiceNetMap:
index 65ad21e..27919a3 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-neutron-openvswitch-agent:latest'
     type: string
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   ServiceNetMap:
@@ -71,7 +71,7 @@ outputs:
           - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron_ovs_agent.json:
-          command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+          command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-dir /etc/neutron/conf.d/common
           permissions:
             - path: /var/log/neutron
               owner: neutron:neutron
@@ -79,7 +79,7 @@ outputs:
       docker_config:
         step_4:
           neutron_ovs_agent:
-            image: &neutron_ovs_agent_image
+            image:
               list_join:
               - '/'
               - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
index 1739a5b..aa62bde 100644 (file)
@@ -20,7 +20,7 @@ parameters:
     default: 'tripleoupstream'
     type: string
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   DefaultPasswords:
@@ -38,7 +38,7 @@ parameters:
 resources:
 
   NeutronBase:
-    type: ../../puppet/services/neutron-plugin-ml2.yaml
+    type: OS::TripleO::Docker::NeutronMl2PluginBase
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
@@ -60,7 +60,7 @@ outputs:
       # BEGIN DOCKER SETTINGS
       puppet_config:
         config_volume: 'neutron'
-        puppet_tags: ''
+        puppet_tags: neutron_plugin_ml2
         step_config: *step_config
         config_image:
           list_join:
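Switching NeutronBase from a hard-coded relative path to the OS::TripleO::Docker::NeutronMl2PluginBase alias means the ML2 base template can be swapped through the resource registry, for instance to containerize a vendor ML2 plugin. A hypothetical resource_registry override; the target path is a placeholder:

    resource_registry:
      OS::TripleO::Docker::NeutronMl2PluginBase: ../../puppet/services/neutron-plugin-ml2-vendor.yaml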
index c97f45d..5d410fb 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-nova-api:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   EndpointMap:
@@ -62,6 +62,9 @@ outputs:
         map_merge:
           - get_attr: [NovaApiBase, role_data, config_settings]
           - apache::default_vhost: false
+            nova_wsgi_enabled: false
+            nova::api::service_name: '%{::nova::params::api_service_name}'
+            nova::wsgi::apache_api::ssl: false
       step_config: &step_config
         list_join:
           - "\n"
index 9f647eb..1277a8f 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-nova-compute:latest'
     type: string
+  DockerNovaLibvirtConfigImage:
+    description: The container image to use for the nova_libvirt config_volume
+    default: 'centos-binary-nova-compute:latest'
+    type: string
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -69,10 +73,10 @@ outputs:
         config_volume: nova_libvirt
         puppet_tags: nova_config,nova_paste_api_ini
         step_config: *step_config
-        config_image: &nova_compute_image
+        config_image:
           list_join:
           - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaLibvirtConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/nova_compute.json:
           command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
@@ -87,7 +91,10 @@ outputs:
         # FIXME: run discover hosts here
         step_4:
           nova_compute:
-            image: *nova_compute_image
+            image: &nova_compute_image
+              list_join:
+              - '/'
+              - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
             net: host
             privileged: true
             user: nova
index 131355d..266180c 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-nova-conductor:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   EndpointMap:
@@ -82,7 +82,7 @@ outputs:
       docker_config:
         step_4:
           nova_conductor:
-            image: &nova_conductor_image
+            image:
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
index 19f25d8..d836797 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-nova-consoleauth:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   EndpointMap:
index 63780fe..8f98839 100644 (file)
@@ -8,12 +8,12 @@ parameters:
     description: namespace
     default: 'tripleoupstream'
     type: string
-  DockerNovaComputeImage:
+  DockerNovaComputeIronicImage:
     description: image
     default: 'centos-binary-nova-compute-ironic:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   ServiceNetMap:
@@ -85,7 +85,7 @@ outputs:
             image:
               list_join:
               - '/'
-              - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+              - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeIronicImage} ]
             net: host
             privileged: true
             user: root
index 6c871f1..f1a48cf 100644 (file)
@@ -8,14 +8,14 @@ parameters:
     description: namespace
     default: 'tripleoupstream'
     type: string
-  DockerLibvirtImage:
+  DockerNovaLibvirtImage:
     description: image
     default: 'centos-binary-nova-libvirt:latest'
     type: string
   # we configure libvirt via the nova-compute container due to coupling
   # in the puppet modules
-  DockerNovaConfigImage:
-    description: image
+  DockerNovaLibvirtConfigImage:
+    description: The container image to use for the nova_libvirt config_volume
     default: 'centos-binary-nova-compute:latest'
     type: string
   EnablePackageInstall:
@@ -101,8 +101,8 @@ outputs:
         step_config: *step_config
         config_image:
           list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerNovaLibvirtConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/nova_libvirt.json:
           command:
@@ -120,7 +120,7 @@ outputs:
             image:
               list_join:
               - '/'
-              - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
+              - [ {get_param: DockerNamespace}, {get_param: DockerNovaLibvirtImage} ]
             net: host
             pid: host
             privileged: true
index 8f06f73..251bbaa 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-nova-placement-api:latest'
     type: string
+  DockerNovaPlacementConfigImage:
+    description: The container image to use for the nova_placement config_volume
+    default: 'centos-binary-nova-placement-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -66,10 +70,10 @@ outputs:
         config_volume: nova_placement
         puppet_tags: nova_config
         step_config: *step_config
-        config_image: &nova_placement_image
+        config_image:
           list_join:
           - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/nova_placement.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -82,7 +86,10 @@ outputs:
         step_3:
           nova_placement:
             start_order: 1
-            image: *nova_placement_image
+            image:
+              list_join:
+              - '/'
+              - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
             net: host
             user: root
             restart: always
index 6285e98..fbb3abc 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-nova-scheduler:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   EndpointMap:
index 97d2d15..c2b9c3b 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-nova-novncproxy:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/octavia-api.yaml b/docker/services/octavia-api.yaml
new file mode 100644 (file)
index 0000000..728162f
--- /dev/null
@@ -0,0 +1,155 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Octavia API service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerOctaviaApiImage:
+    description: image
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  DockerOctaviaConfigImage:
+    description: The container image to use for the octavia config_volume
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  OctaviaApiPuppetBase:
+    type: ../../puppet/services/octavia-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Octavia API role.
+    value:
+      service_name: {get_attr: [OctaviaApiPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [OctaviaApiPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [OctaviaApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [OctaviaApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: octavia
+        puppet_tags: octavia_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/octavia_api.json:
+          command: /usr/bin/octavia-api --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/api.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-api
+        /var/lib/kolla/config_files/octavia_api_tls_proxy.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+      docker_config:
+        # Kolla_bootstrap/db_sync runs before permissions set by kolla_config
+        step_2:
+          octavia_api_init_dirs:
+            start_order: 0
+            image: &octavia_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaApiImage} ]
+            user: root
+            volumes:
+              - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+              - /var/log/containers/octavia:/var/log/octavia
+            command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-api; chown -R octavia:octavia /etc/octavia/conf.d/octavia-api; chown -R octavia:octavia /var/log/octavia']
+        step_3:
+          octavia_db_sync:
+            start_order: 0
+            image: *octavia_api_image
+            net: host
+            privileged: false
+            detach: false
+            user: root
+            volumes: &octavia_volumes
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/octavia_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+                  - /var/log/containers/octavia:/var/log/octavia
+            command: "/usr/bin/bootstrap_host_exec octavia_api su octavia -s /bin/bash -c '/usr/bin/octavia-db-manage upgrade head'"
+        step_4:
+          map_merge:
+            - octavia_api:
+                start_order: 2
+                image: *octavia_api_image
+                net: host
+                privileged: false
+                restart: always
+                volumes: *octavia_volumes
+                environment:
+                  - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+            - if:
+                - internal_tls_enabled
+                - octavia_api_tls_proxy:
+                    start_order: 2
+                    image: *octavia_api_image
+                    net: host
+                    user: root
+                    restart: always
+                    volumes:
+                      list_concat:
+                        - {get_attr: [ContainersCommon, volumes]}
+                        -
+                          - /var/lib/kolla/config_files/octavia_api_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+                          - /var/lib/config-data/octavia/etc/httpd/:/etc/httpd/:ro
+                          - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                          - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                    environment:
+                      - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+                - {}
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/octavia
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable octavia_api service
+          tags: step2
+          service: name=openstack-octavia-api state=stopped enabled=no
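The new Octavia templates follow the same docker-service layout as the rest of the tree; to use them, a deployment would point the Octavia service entries in the resource registry at these files. A hypothetical environment snippet, where the OS::TripleO::Services::* names are assumed to match the existing puppet service entries:

    resource_registry:
      OS::TripleO::Services::OctaviaApi: ../docker/services/octavia-api.yaml
      OS::TripleO::Services::OctaviaHealthManager: ../docker/services/octavia-health-manager.yaml
      OS::TripleO::Services::OctaviaHousekeeping: ../docker/services/octavia-housekeeping.yaml
      OS::TripleO::Services::OctaviaWorker: ../docker/services/octavia-worker.yaml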
diff --git a/docker/services/octavia-health-manager.yaml b/docker/services/octavia-health-manager.yaml
new file mode 100644 (file)
index 0000000..2228e36
--- /dev/null
@@ -0,0 +1,114 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Octavia health-manager service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerOctaviaHealthManagerImage:
+    description: image
+    default: 'centos-binary-octavia-health-manager:latest'
+    type: string
+  DockerOctaviaConfigImage:
+    description: The container image to use for the octavia config_volume
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  OctaviaHealthManagerPuppetBase:
+    type: ../../puppet/services/octavia-health-manager.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Octavia health-manager role.
+    value:
+      service_name: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [OctaviaHealthManagerPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: octavia
+        puppet_tags: octavia_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/octavia_health_manager.json:
+          command: /usr/bin/octavia-health-manager --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/health-manager.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-health-manager
+      docker_config:
+        step_2:
+          octavia_health_manager_init_dirs:
+            start_order: 0
+            image: &octavia_health_manager_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaHealthManagerImage} ]
+            user: root
+            volumes:
+              - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+            command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-health-manager; chown -R octavia:octavia /etc/octavia/conf.d/octavia-health-manager']
+        step_4:
+          octavia_health_manager:
+            start_order: 2
+            image: *octavia_health_manager_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/octavia_health_manager.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+                  - /var/log/containers/octavia:/var/log/octavia
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/octavia
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable octavia_health_manager service
+          tags: step2
+          service: name=openstack-octavia-health-manager state=stopped enabled=no
diff --git a/docker/services/octavia-housekeeping.yaml b/docker/services/octavia-housekeeping.yaml
new file mode 100644 (file)
index 0000000..c2986c6
--- /dev/null
@@ -0,0 +1,114 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Octavia housekeeping service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerOctaviaHousekeepingImage:
+    description: image
+    default: 'centos-binary-octavia-housekeeping:latest'
+    type: string
+  DockerOctaviaConfigImage:
+    description: The container image to use for the octavia config_volume
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  OctaviaHousekeepingPuppetBase:
+    type: ../../puppet/services/octavia-housekeeping.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Octavia housekeeping role.
+    value:
+      service_name: {get_attr: [OctaviaHousekeepingPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [OctaviaHousekeepingPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [OctaviaHousekeepingPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [OctaviaHousekeepingPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: octavia
+        puppet_tags: octavia_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/octavia_housekeeping.json:
+          command: /usr/bin/octavia-housekeeping --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/housekeeping.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-housekeeping
+      docker_config:
+        step_2:
+          octavia_housekeeping_init_dirs:
+            start_order: 0
+            image: &octavia_housekeeping_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaHousekeepingImage} ]
+            user: root
+            volumes:
+              - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+            command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-housekeeping; chown -R octavia:octavia /etc/octavia/conf.d/octavia-housekeeping']
+        step_4:
+          octavia_housekeeping:
+            start_order: 2
+            image: *octavia_housekeeping_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/octavia_housekeeping.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+                  - /var/log/containers/octavia:/var/log/octavia
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/octavia
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable octavia_housekeeping service
+          tags: step2
+          service: name=openstack-octavia-housekeeping state=stopped enabled=no
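The new Octavia image parameters introduced above (health-manager, housekeeping, worker) and the shared DockerOctaviaConfigImage are all resolved as DockerNamespace + '/' + image name, so they can be pinned together from an environment file. A minimal sketch, assuming a hypothetical local registry at 192.168.24.1:8787 (not part of this change):

    parameter_defaults:
      # hypothetical local registry; the default namespace is 'tripleoupstream'
      DockerNamespace: 192.168.24.1:8787/tripleoupstream
      DockerOctaviaConfigImage: centos-binary-octavia-api:latest
      DockerOctaviaHealthManagerImage: centos-binary-octavia-health-manager:latest
      DockerOctaviaHousekeepingImage: centos-binary-octavia-housekeeping:latest
      DockerOctaviaWorkerImage: centos-binary-octavia-worker:latest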
diff --git a/docker/services/octavia-worker.yaml b/docker/services/octavia-worker.yaml
new file mode 100644 (file)
index 0000000..4129512
--- /dev/null
@@ -0,0 +1,114 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Octavia worker service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerOctaviaWorkerImage:
+    description: image
+    default: 'centos-binary-octavia-worker:latest'
+    type: string
+  DockerOctaviaConfigImage:
+    description: The container image to use for the octavia config_volume
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  OctaviaWorkerPuppetBase:
+    type: ../../puppet/services/octavia-worker.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Octavia worker role.
+    value:
+      service_name: {get_attr: [OctaviaWorkerPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [OctaviaWorkerPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [OctaviaWorkerPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [OctaviaWorkerPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: octavia
+        puppet_tags: octavia_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/octavia_worker.json:
+          command: /usr/bin/octavia-worker --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/worker.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-worker
+      docker_config:
+        step_2:
+          octavia_worker_init_dirs:
+            start_order: 0
+            image: &octavia_worker_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaWorkerImage} ]
+            user: root
+            volumes:
+              - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+            command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-worker; chown -R octavia:octavia /etc/octavia/conf.d/octavia-worker']
+        step_4:
+          octavia_worker:
+            start_order: 2
+            image: *octavia_worker_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/octavia_worker.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+                  - /var/log/containers/octavia:/var/log/octavia
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/octavia
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable octavia_worker service
+          tags: step2
+          service: name=openstack-octavia-worker state=stopped enabled=no
diff --git a/docker/services/opendaylight-api.yaml b/docker/services/opendaylight-api.yaml
new file mode 100644 (file)
index 0000000..cb47668
--- /dev/null
@@ -0,0 +1,116 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized OpenDaylight API service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerOpendaylightApiImage:
+    description: image
+    default: 'centos-binary-opendaylight:latest'
+    type: string
+  DockerOpendaylightConfigImage:
+    description: image
+    default: 'centos-binary-opendaylight:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  OpenDaylightBase:
+    type: ../../puppet/services/opendaylight-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the OpenDaylight API role.
+    value:
+      service_name: {get_attr: [OpenDaylightBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [OpenDaylightBase, role_data, config_settings]
+      step_config: &step_config
+        list_join:
+          - "\n"
+          - - get_attr: [OpenDaylightBase, role_data, step_config]
+            - "include tripleo::profile::base::neutron::opendaylight::create_cluster"
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: opendaylight
+        # 'file,concat,file_line,augeas' are included by default
+        puppet_tags: odl_user,tripleo::profile::base::neutron::opendaylight::configure_cluster
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOpendaylightConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/opendaylight_api.json:
+          command: /opt/opendaylight/bin/karaf
+          permissions:
+            - path: /opt/opendaylight
+              owner: odl:odl
+              recurse: true
+      docker_config:
+        step_1:
+          opendaylight_api:
+            start_order: 0
+            image: &odl_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerOpendaylightApiImage} ]
+            privileged: false
+            net: host
+            detach: true
+            user: odl
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/opendaylight_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/opendaylight/opt/opendaylight/data/idmlight.db.mv.db:/opt/opendaylight/data/idmlight.db.mv.db
+                  - /var/lib/config-data/opendaylight/opt/opendaylight/configuration/initial/:/opt/opendaylight/configuration/initial/
+                  - /var/lib/config-data/opendaylight/opt/opendaylight/etc/jetty.xml:/opt/opendaylight/etc/jetty.xml:ro
+                  - /var/lib/config-data/opendaylight/opt/opendaylight/etc/org.apache.karaf.features.cfg:/opt/opendaylight/etc/org.apache.karaf.features.cfg:ro
+                  - /var/lib/config-data/opendaylight/opt/opendaylight/etc/org.ops4j.pax.logging.cfg:/opt/opendaylight/etc/org.ops4j.pax.logging.cfg:ro
+                  - /var/lib/config-data/opendaylight/opt/opendaylight/etc/org.opendaylight.groupbasedpolicy.neutron.vpp.mapper.startup.cfg:/opt/opendaylight/etc/org.opendaylight.groupbasedpolicy.neutron.vpp.mapper.startup.cfg:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+
+      upgrade_tasks:
+        - name: Stop and disable opendaylight_api service
+          tags: step2
+          service: name=opendaylight state=stopped enabled=no
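This containerized OpenDaylight API template still has to be mapped in a resource registry to be used; the docker.yaml changes later in this commit only add OS::TripleO::Services::OpenDaylightOvs to a role's service list. A minimal sketch of such an environment, assuming the relative path resolves from the environments/ directory (the mapping itself is not part of this change):

    resource_registry:
      OS::TripleO::Services::OpenDaylightApi: ../docker/services/opendaylight-api.yaml
    parameter_defaults:
      DockerOpendaylightApiImage: centos-binary-opendaylight:latest
      DockerOpendaylightConfigImage: centos-binary-opendaylight:latest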
diff --git a/docker/services/pacemaker/cinder-backup.yaml b/docker/services/pacemaker/cinder-backup.yaml
new file mode 100644 (file)
index 0000000..d15c920
--- /dev/null
@@ -0,0 +1,151 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Backup service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderBackupImage:
+    description: image
+    default: 'centos-binary-cinder-backup:latest'
+    type: string
+  DockerCinderConfigImage:
+    description: The container image to use for the cinder config_volume
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  CinderBackupBackend:
+    default: swift
+    description: The short name of the Cinder Backup backend to use.
+    type: string
+    constraints:
+    - allowed_values: ['swift', 'ceph']
+  CinderBackupRbdPoolName:
+    default: backups
+    type: string
+  CephClientUserName:
+    default: openstack
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  CinderBackupBase:
+    type: ../../../puppet/services/cinder-backup.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      CinderBackupBackend: {get_param: CinderBackupBackend}
+      CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
+      CephClientUserName: {get_param: CephClientUserName}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Backup role.
+    value:
+      service_name: {get_attr: [CinderBackupBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [CinderBackupBase, role_data, config_settings]
+          - tripleo::profile::pacemaker::cinder::backup_bundle::cinder_backup_docker_image: &cinder_backup_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderBackupImage} ]
+            cinder::backup::manage_service: false
+            cinder::backup::enabled: false
+      step_config: ""
+      service_config_settings: {get_attr: [CinderBackupBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: {get_attr: [CinderBackupBase, role_data, step_config]}
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_backup.json:
+          command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/lib/cinder
+              owner: cinder:cinder
+              recurse: true
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_3:
+          cinder_backup_init_logs:
+            start_order: 0
+            image: *cinder_backup_image
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_5:
+          cinder_backup_init_bundle:
+            start_order: 1
+            detach: false
+            net: host
+            user: root
+            command:
+              - '/bin/bash'
+              - '-c'
+              - str_replace:
+                  template:
+                    list_join:
+                      - '; '
+                      - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 5}' > /etc/puppet/hieradata/docker.json"
+                        - "FACTER_uuid=docker puppet apply --tags file_line,concat,augeas,TAGS --debug -v -e 'CONFIG'"
+                  params:
+                    TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location'
+                    CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::backup_bundle'
+            image: *cinder_backup_image
+            volumes:
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /etc/puppet:/tmp/puppet-etc:ro
+              - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+              - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+              - /dev/shm:/dev/shm:rw
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/lib/cinder
+            - /var/log/containers/cinder
+      upgrade_tasks:
+        - name: Stop and disable cinder_backup service
+          tags: step2
+          service: name=openstack-cinder-backup state=stopped enabled=no
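CinderBackupBackend, CinderBackupRbdPoolName and CephClientUserName are forwarded verbatim to the Puppet base service, so switching the bundle from the default swift driver to Ceph needs only parameter_defaults. A minimal sketch (the pool and user names simply restate the template defaults):

    parameter_defaults:
      CinderBackupBackend: ceph
      CinderBackupRbdPoolName: backups
      CephClientUserName: openstack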
index 987ebaf..07e5fc2 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-cinder-volume:latest'
     type: string
-  # we configure all cinder services in the same cinder base container
   DockerCinderConfigImage:
-    description: image
+    description: The container image to use for the cinder config_volume
     default: 'centos-binary-cinder-api:latest'
     type: string
   EndpointMap:
index bad2acf..9fd9402 100644 (file)
@@ -14,6 +14,10 @@ parameters:
     description: image
     default: 'centos-binary-mariadb:latest'
     type: string
+  DockerClustercheckConfigImage:
+    description: The container image to use for the clustercheck config_volume
+    default: 'centos-binary-mariadb:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,10 +67,10 @@ outputs:
         config_volume: clustercheck
         puppet_tags: file # set this even though file is the default
         step_config: "include ::tripleo::profile::pacemaker::clustercheck"
-        config_image: &clustercheck_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/clustercheck.json:
           command: /usr/sbin/xinetd -dontfork
@@ -87,7 +91,10 @@ outputs:
         step_2:
           clustercheck:
             start_order: 1
-            image: *clustercheck_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckImage} ]
             restart: always
             net: host
             volumes:
index d64845f..fb1400f 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-mariadb:latest'
     type: string
+  DockerMysqlConfigImage:
+    description: The container image to use for the mysql config_volume
+    default: 'centos-binary-mariadb:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -77,7 +81,10 @@ outputs:
             - - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }"
               - "exec {'wait-for-settle': command => '/bin/true' }"
               - "include ::tripleo::profile::pacemaker::database::mysql_bundle"
-        config_image: *mysql_image
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerMysqlConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/mysql.json:
           command: /usr/sbin/pacemaker_remoted
index ef27f7e..2ff15fe 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-redis:latest'
     type: string
+  DockerRedisConfigImage:
+    description: The container image to use for the redis config_volume
+    default: 'centos-binary-redis:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -74,7 +78,10 @@ outputs:
         puppet_tags: 'exec'
         step_config:
           get_attr: [RedisBase, role_data, step_config]
-        config_image: *redis_image
+        config_image: &redis_config_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerRedisConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/redis.json:
           command: /usr/sbin/pacemaker_remoted
@@ -113,7 +120,7 @@ outputs:
                   params:
                     TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
                     CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::redis_bundle'
-            image: *redis_image
+            image: *redis_config_image
             volumes:
               - /etc/hosts:/etc/hosts:ro
               - /etc/localtime:/etc/localtime:ro
index 7557afd..704ffab 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-haproxy:latest'
     type: string
+  DockerHAProxyConfigImage:
+    description: The container image to use for the haproxy config_volume
+    default: 'centos-binary-haproxy:latest'
+    type: string
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -73,7 +77,10 @@ outputs:
               - "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
               - "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
               - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
-        config_image: *haproxy_image
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/haproxy.json:
           command: haproxy -f /etc/haproxy/haproxy.cfg
index 7f6ac70..ab1a612 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-rabbitmq:latest'
     type: string
+  DockerRabbitmqConfigImage:
+    description: The container image to use for the rabbitmq config_volume
+    default: 'centos-binary-rabbitmq:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -72,7 +76,10 @@ outputs:
         config_volume: rabbitmq
         puppet_tags: file
         step_config: *step_config
-        config_image: *rabbitmq_image
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/rabbitmq.json:
           command: /usr/sbin/pacemaker_remoted
index 585148e..0f55606 100644 (file)
@@ -14,6 +14,10 @@ parameters:
     description: image
     default: 'centos-binary-panko-api:latest'
     type: string
+  DockerPankoConfigImage:
+    description: The container image to use for the panko config_volume
+    default: 'centos-binary-panko-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -75,10 +79,10 @@ outputs:
         config_volume: panko
         puppet_tags: panko_api_paste_ini,panko_config
         step_config: *step_config
-        config_image: &panko_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerPankoConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/panko_api.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -89,14 +93,17 @@ outputs:
       docker_config:
         step_2:
           panko_init_log:
-            image: *panko_image
+            image: &panko_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
             user: root
             volumes:
               - /var/log/containers/panko:/var/log/panko
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
         step_3:
           panko_db_sync:
-            image: *panko_image
+            image: *panko_api_image
             net: host
             detach: false
             privileged: false
@@ -111,7 +118,7 @@ outputs:
         step_4:
           panko_api:
             start_order: 2
-            image: *panko_image
+            image: *panko_api_image
             net: host
             privileged: false
             restart: always
index 06d663c..f42f2ed 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-rabbitmq:latest'
     type: string
+  DockerRabbitmqConfigImage:
+    description: The container image to use for the rabbitmq config_volume
+    default: 'centos-binary-rabbitmq:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -73,10 +77,10 @@ outputs:
       puppet_config:
         config_volume: rabbitmq
         step_config: *step_config
-        config_image: &rabbitmq_image
+        config_image: &rabbitmq_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/rabbitmq.json:
           command: /usr/lib/rabbitmq/bin/rabbitmq-server
@@ -90,7 +94,10 @@ outputs:
           rabbitmq_init_logs:
             start_order: 0
             detach: false
-            image: *rabbitmq_image
+            image: &rabbitmq_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
             privileged: false
             user: root
             volumes:
@@ -146,7 +153,7 @@ outputs:
           config_volume: 'rabbit_init_tasks'
           puppet_tags: 'rabbitmq_policy,rabbitmq_user'
           step_config: 'include ::tripleo::profile::base::rabbitmq'
-          config_image: *rabbitmq_image
+          config_image: *rabbitmq_config_image
           volumes:
             - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
             - /var/lib/rabbitmq:/var/lib/rabbitmq:ro
index 1067079..32d6458 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-sahara-api:latest'
     type: string
+  DockerSaharaConfigImage:
+    description: The container image to use for the sahara config_volume
+    default: 'centos-binary-sahara-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -64,10 +68,10 @@ outputs:
         config_volume: sahara
         puppet_tags: sahara_api_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
         step_config: *step_config
-        config_image: &sahara_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/sahara-api.json:
           command: /usr/bin/sahara-api --config-file /etc/sahara/sahara.conf
@@ -81,10 +85,14 @@ outputs:
       docker_config:
         step_3:
           sahara_db_sync:
-            image: *sahara_image
+            image: &sahara_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerSaharaApiImage} ]
             net: host
             privileged: false
             detach: false
+            user: root
             volumes: &sahara_volumes
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
@@ -97,7 +105,7 @@ outputs:
             command: "/usr/bin/bootstrap_host_exec sahara_api su sahara -s /bin/bash -c 'sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head'"
         step_4:
           sahara_api:
-            image: *sahara_image
+            image: *sahara_api_image
             net: host
             privileged: false
             restart: always
index 41b5790..99a51c9 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-sahara-engine:latest'
     type: string
+  DockerSaharaConfigImage:
+    description: The container image to use for the sahara config_volume
+    default: 'centos-binary-sahara-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -64,10 +68,10 @@ outputs:
         config_volume: sahara
         puppet_tags: sahara_engine_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
         step_config: *step_config
-        config_image: &sahara_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaEngineImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/sahara-engine.json:
           command: /usr/bin/sahara-engine --config-file /etc/sahara/sahara.conf
@@ -81,7 +85,10 @@ outputs:
       docker_config:
         step_4:
           sahara_engine:
-            image: *sahara_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerSaharaEngineImage} ]
             net: host
             privileged: false
             restart: always
index e6bdf15..42b0c57 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-sensu-client:latest'
     type: string
+  DockerSensuConfigImage:
+    description: The container image to use for the sensu config_volume
+    default: 'centos-binary-sensu-client:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -98,17 +102,24 @@ outputs:
         config_volume: sensu
         puppet_tags:  sensu_rabbitmq_config,sensu_client_config,sensu_check_config,sensu_check
         step_config: *step_config
-        config_image: &sensu_client_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerSensuClientImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerSensuConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/sensu-client.json:
-          command: /usr/bin/sensu-client -d /etc/sensu/conf.d/
+          command: /usr/bin/sensu-client -d /etc/sensu/conf.d/ -l /var/log/sensu/sensu-client.log
+          permissions:
+            - path: /var/log/sensu
+              owner: sensu:sensu
+              recurse: true
       docker_config:
         step_3:
           sensu_client:
-            image: *sensu_client_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerSensuClientImage} ]
             net: host
             privileged: true
             # NOTE(mmagr) kolla image changes the user to 'sensu', we need it
@@ -123,8 +134,14 @@ outputs:
                   - /var/run/docker.sock:/var/run/docker.sock:rw
                   - /var/lib/kolla/config_files/sensu-client.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/sensu/etc/sensu/:/etc/sensu/:ro
+                  - /var/log/containers/sensu:/var/log/sensu:rw
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/sensu
+            state: directory
       upgrade_tasks:
         - name: Stop and disable sensu-client service
           tags: step2
index f1d0da7..d7a7fe4 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-swift-proxy-server:latest'
     type: string
+  DockerSwiftConfigImage:
+    description: The container image to use for the swift config_volume
+    default: 'centos-binary-swift-proxy-server:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -70,10 +74,10 @@ outputs:
         config_volume: swift
         puppet_tags: swift_proxy_config
         step_config: *step_config
-        config_image: &swift_proxy_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/swift_proxy.json:
           command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
@@ -87,7 +91,10 @@ outputs:
         step_4:
           map_merge:
             - swift_proxy:
-                image: *swift_proxy_image
+                image: &swift_proxy_image
+                  list_join:
+                    - '/'
+                    - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
                 net: host
                 user: swift
                 restart: always
index 075d8d7..00a772d 100644 (file)
@@ -8,8 +8,8 @@ parameters:
     description: namespace
     default: 'tripleoupstream'
     type: string
-  DockerSwiftProxyImage:
-    description: image
+  DockerSwiftConfigImage:
+    description: The container image to use for the swift config_volume
     default: 'centos-binary-swift-proxy-server:latest'
     type: string
   ServiceNetMap:
@@ -98,6 +98,6 @@ outputs:
         config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftConfigImage} ]
       kolla_config: {}
       docker_config: {}
index 55aea20..f2b8c47 100644 (file)
@@ -24,6 +24,10 @@ parameters:
     description: image
     default: 'centos-binary-swift-object:latest'
     type: string
+  DockerSwiftConfigImage:
+    description: The container image to use for the swift config_volume
+    default: 'centos-binary-swift-proxy-server:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -82,12 +86,12 @@ outputs:
       # BEGIN DOCKER SETTINGS
       puppet_config:
         config_volume: swift
-        puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
+        puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config,rsync::server
         step_config: *step_config
-        config_image: &swift_proxy_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/swift_account_auditor.json:
           command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf
@@ -119,6 +123,8 @@ outputs:
             - path: /var/log/swift
               owner: swift:swift
               recurse: true
+        /var/lib/kolla/config_files/swift_xinetd_rsync.json:
+          command: /usr/sbin/xinetd -dontfork
       docker_config:
         step_3:
           # The puppet config sets this up but we don't have a way to mount the named
@@ -286,7 +292,10 @@ outputs:
                   - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_object_expirer:
-            image: *swift_proxy_image
+            image: &swift_proxy_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
             net: host
             user: swift
             restart: always
@@ -349,6 +358,24 @@ outputs:
                   - /dev:/dev
                   - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
+          swift_xinetd_rsync:
+            image: *swift_object_image
+            net: host
+            user: root
+            restart: always
+            privileged: true
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_xinetd_rsync.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc:/etc
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
+            environment: *kolla_env
+
       host_prep_tasks:
         - name: create persistent directories
           file:
index df9750c..84175c5 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-tacker:latest'
     type: string
   DockerTackerConfigImage:
-    description: image
+    description: The container image to use for the tacker config_volume
     default: 'centos-binary-tacker:latest'
     type: string
   EndpointMap:
index 5ce324b..17524e5 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-zaqar:latest'
     type: string
+  DockerZaqarConfigImage:
+    description: The container image to use for the zaqar config_volume
+    default: 'centos-binary-zaqar:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,10 +67,10 @@ outputs:
         config_volume: zaqar
         puppet_tags: zaqar_config
         step_config: *step_config
-        config_image: &zaqar_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerZaqarConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/zaqar.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -79,7 +83,10 @@ outputs:
       docker_config:
         step_4:
           zaqar:
-            image: *zaqar_image
+            image: &zaqar_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
             net: host
             privileged: false
             restart: always
diff --git a/environments/docker-centos-tripleoupstream.yaml b/environments/docker-centos-tripleoupstream.yaml
new file mode 100644 (file)
index 0000000..648c8c2
--- /dev/null
@@ -0,0 +1,109 @@
+parameter_defaults:
+  #DockerAodhApiImage: 'tripleoupstream/centos-binary-aodh-api:latest'
+  #DockerAodhConfigImage: 'tripleoupstream/centos-binary-aodh-api:latest'
+  #DockerAodhEvaluatorImage: 'tripleoupstream/centos-binary-aodh-evaluator:latest'
+  #DockerAodhListenerImage: 'tripleoupstream/centos-binary-aodh-listener:latest'
+  #DockerAodhNotifierImage: 'tripleoupstream/centos-binary-aodh-notifier:latest'
+  #DockerCeilometerCentralImage: 'tripleoupstream/centos-binary-ceilometer-central:latest'
+  #DockerCeilometerComputeImage: 'tripleoupstream/centos-binary-ceilometer-compute:latest'
+  #DockerCeilometerConfigImage: 'tripleoupstream/centos-binary-ceilometer-central:latest'
+  #DockerCeilometerIpmiImage: 'tripleoupstream/centos-binary-ceilometer-ipmi:latest'
+  #DockerCeilometerNotificationImage: 'tripleoupstream/centos-binary-ceilometer-notification:latest'
+  #DockerCinderApiImage: 'tripleoupstream/centos-binary-cinder-api:latest'
+  #DockerCinderBackupImage: 'tripleoupstream/centos-binary-cinder-backup:latest'
+  #DockerCinderConfigImage: 'tripleoupstream/centos-binary-cinder-api:latest'
+  #DockerCinderSchedulerImage: 'tripleoupstream/centos-binary-cinder-scheduler:latest'
+  #DockerCinderVolumeImage: 'tripleoupstream/centos-binary-cinder-volume:latest'
+  #DockerClustercheckConfigImage: 'tripleoupstream/centos-binary-mariadb:latest'
+  #DockerClustercheckImage: 'tripleoupstream/centos-binary-mariadb:latest'
+  #DockerCollectdConfigImage: 'tripleoupstream/centos-binary-collectd:latest'
+  #DockerCollectdImage: 'tripleoupstream/centos-binary-collectd:latest'
+  #DockerCongressApiImage: 'tripleoupstream/centos-binary-congress-api:latest'
+  #DockerCongressConfigImage: 'tripleoupstream/centos-binary-congress-api:latest'
+  #DockerEc2ApiConfigImage: 'tripleoupstream/centos-binary-ec2-api:latest'
+  #DockerEc2ApiImage: 'tripleoupstream/centos-binary-ec2-api:latest'
+  #DockerEtcdConfigImage: 'tripleoupstream/centos-binary-etcd:latest'
+  #DockerEtcdImage: 'tripleoupstream/centos-binary-etcd:latest'
+  #DockerGlanceApiConfigImage: 'tripleoupstream/centos-binary-glance-api:latest'
+  #DockerGlanceApiImage: 'tripleoupstream/centos-binary-glance-api:latest'
+  #DockerGnocchiApiImage: 'tripleoupstream/centos-binary-gnocchi-api:latest'
+  #DockerGnocchiConfigImage: 'tripleoupstream/centos-binary-gnocchi-api:latest'
+  #DockerGnocchiMetricdImage: 'tripleoupstream/centos-binary-gnocchi-metricd:latest'
+  #DockerGnocchiStatsdImage: 'tripleoupstream/centos-binary-gnocchi-statsd:latest'
+  #DockerHAProxyConfigImage: 'tripleoupstream/centos-binary-haproxy:latest'
+  #DockerHAProxyImage: 'tripleoupstream/centos-binary-haproxy:latest'
+  #DockerHeatApiCfnConfigImage: 'tripleoupstream/centos-binary-heat-api-cfn:latest'
+  #DockerHeatApiCfnImage: 'tripleoupstream/centos-binary-heat-api-cfn:latest'
+  #DockerHeatApiConfigImage: 'tripleoupstream/centos-binary-heat-api:latest'
+  #DockerHeatApiImage: 'tripleoupstream/centos-binary-heat-api:latest'
+  #DockerHeatConfigImage: 'tripleoupstream/centos-binary-heat-api:latest'
+  #DockerHeatEngineImage: 'tripleoupstream/centos-binary-heat-engine:latest'
+  #DockerHorizonConfigImage: 'tripleoupstream/centos-binary-horizon:latest'
+  #DockerHorizonImage: 'tripleoupstream/centos-binary-horizon:latest'
+  #DockerIronicApiImage: 'tripleoupstream/centos-binary-ironic-api:latest'
+  #DockerIronicConductorImage: 'tripleoupstream/centos-binary-ironic-conductor:latest'
+  #DockerIronicConfigImage: 'tripleoupstream/centos-binary-ironic-pxe:latest'
+  #DockerIronicPxeImage: 'tripleoupstream/centos-binary-ironic-pxe:latest'
+  #DockerIscsidConfigImage: 'tripleoupstream/centos-binary-iscsid:latest'
+  #DockerIscsidImage: 'tripleoupstream/centos-binary-iscsid:latest'
+  #DockerKeystoneConfigImage: 'tripleoupstream/centos-binary-keystone:latest'
+  #DockerKeystoneImage: 'tripleoupstream/centos-binary-keystone:latest'
+  #DockerManilaApiImage: 'tripleoupstream/centos-binary-manila-api:latest'
+  #DockerManilaConfigImage: 'tripleoupstream/centos-binary-manila-api:latest'
+  #DockerManilaSchedulerImage: 'tripleoupstream/centos-binary-manila-scheduler:latest'
+  #DockerMemcachedConfigImage: 'tripleoupstream/centos-binary-memcached:latest'
+  #DockerMemcachedImage: 'tripleoupstream/centos-binary-memcached:latest'
+  #DockerMistralApiImage: 'tripleoupstream/centos-binary-mistral-api:latest'
+  #DockerMistralConfigImage: 'tripleoupstream/centos-binary-mistral-api:latest'
+  #DockerMistralEngineImage: 'tripleoupstream/centos-binary-mistral-engine:latest'
+  #DockerMistralExecutorImage: 'tripleoupstream/centos-binary-mistral-executor:latest'
+  #DockerMongodbConfigImage: 'tripleoupstream/centos-binary-mongodb:latest'
+  #DockerMongodbImage: 'tripleoupstream/centos-binary-mongodb:latest'
+  #DockerMultipathdConfigImage: 'tripleoupstream/centos-binary-multipathd:latest'
+  #DockerMultipathdImage: 'tripleoupstream/centos-binary-multipathd:latest'
+  #DockerMysqlClientConfigImage: 'tripleoupstream/centos-binary-mariadb:latest'
+  #DockerMysqlConfigImage: 'tripleoupstream/centos-binary-mariadb:latest'
+  #DockerMysqlImage: 'tripleoupstream/centos-binary-mariadb:latest'
+  #DockerNeutronApiImage: 'tripleoupstream/centos-binary-neutron-server:latest'
+  #DockerNeutronConfigImage: 'tripleoupstream/centos-binary-neutron-server:latest'
+  #DockerNeutronDHCPImage: 'tripleoupstream/centos-binary-neutron-dhcp-agent:latest'
+  #DockerNeutronL3AgentImage: 'tripleoupstream/centos-binary-neutron-l3-agent:latest'
+  #DockerNeutronMetadataImage: 'tripleoupstream/centos-binary-neutron-metadata-agent:latest'
+  #DockerNovaApiImage: 'tripleoupstream/centos-binary-nova-api:latest'
+  #DockerNovaComputeImage: 'tripleoupstream/centos-binary-nova-compute:latest'
+  #DockerNovaComputeIronicImage: 'tripleoupstream/centos-binary-nova-compute-ironic:latest'
+  #DockerNovaConductorImage: 'tripleoupstream/centos-binary-nova-conductor:latest'
+  #DockerNovaConfigImage: 'tripleoupstream/centos-binary-nova-base:latest'
+  #DockerNovaConsoleauthImage: 'tripleoupstream/centos-binary-nova-consoleauth:latest'
+  #DockerNovaLibvirtConfigImage: 'tripleoupstream/centos-binary-nova-compute:latest'
+  #DockerNovaLibvirtImage: 'tripleoupstream/centos-binary-nova-libvirt:latest'
+  #DockerNovaPlacementConfigImage: 'tripleoupstream/centos-binary-nova-placement-api:latest'
+  #DockerNovaPlacementImage: 'tripleoupstream/centos-binary-nova-placement-api:latest'
+  #DockerNovaSchedulerImage: 'tripleoupstream/centos-binary-nova-scheduler:latest'
+  #DockerNovaVncProxyImage: 'tripleoupstream/centos-binary-nova-novncproxy:latest'
+  #DockerOctaviaApiImage: 'tripleoupstream/centos-binary-octavia-api:latest'
+  #DockerOctaviaConfigImage: 'tripleoupstream/centos-binary-octavia-api:latest'
+  #DockerOctaviaHealthManagerImage: 'tripleoupstream/centos-binary-octavia-health-manager:latest'
+  #DockerOctaviaHousekeepingImage: 'tripleoupstream/centos-binary-octavia-housekeeping:latest'
+  #DockerOctaviaWorkerImage: 'tripleoupstream/centos-binary-octavia-worker:latest'
+  #DockerOpenvswitchImage: 'tripleoupstream/centos-binary-neutron-openvswitch-agent:latest'
+  #DockerPankoApiImage: 'tripleoupstream/centos-binary-panko-api:latest'
+  #DockerPankoConfigImage: 'tripleoupstream/centos-binary-panko-api:latest'
+  #DockerRabbitmqConfigImage: 'tripleoupstream/centos-binary-rabbitmq:latest'
+  #DockerRabbitmqImage: 'tripleoupstream/centos-binary-rabbitmq:latest'
+  #DockerRedisConfigImage: 'tripleoupstream/centos-binary-redis:latest'
+  #DockerRedisImage: 'tripleoupstream/centos-binary-redis:latest'
+  #DockerSaharaApiImage: 'tripleoupstream/centos-binary-sahara-api:latest'
+  #DockerSaharaConfigImage: 'tripleoupstream/centos-binary-sahara-api:latest'
+  #DockerSaharaEngineImage: 'tripleoupstream/centos-binary-sahara-engine:latest'
+  #DockerSensuClientImage: 'tripleoupstream/centos-binary-sensu-client:latest'
+  #DockerSensuConfigImage: 'tripleoupstream/centos-binary-sensu-client:latest'
+  #DockerSwiftAccountImage: 'tripleoupstream/centos-binary-swift-account:latest'
+  #DockerSwiftConfigImage: 'tripleoupstream/centos-binary-swift-proxy-server:latest'
+  #DockerSwiftContainerImage: 'tripleoupstream/centos-binary-swift-container:latest'
+  #DockerSwiftObjectImage: 'tripleoupstream/centos-binary-swift-object:latest'
+  #DockerSwiftProxyImage: 'tripleoupstream/centos-binary-swift-proxy-server:latest'
+  #DockerTackerConfigImage: 'tripleoupstream/centos-binary-tacker:latest'
+  #DockerTackerImage: 'tripleoupstream/centos-binary-tacker:latest'
+  #DockerZaqarConfigImage: 'tripleoupstream/centos-binary-zaqar:latest'
+  #DockerZaqarImage: 'tripleoupstream/centos-binary-zaqar:latest'
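Every entry in this new environment file is deliberately commented out: the values shown are exactly what the templates compute from DockerNamespace plus the per-service defaults, so the file serves as a reference list of the Docker*Image and Docker*ConfigImage parameters rather than changing behaviour. Uncommenting a line pins that single image; a minimal sketch, with a hypothetical tag in place of latest:

    parameter_defaults:
      # hypothetical tag used for illustration only
      DockerKeystoneImage: 'tripleoupstream/centos-binary-keystone:pike-20170707'
      DockerKeystoneConfigImage: 'tripleoupstream/centos-binary-keystone:pike-20170707'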
diff --git a/environments/docker-ha.yaml b/environments/docker-ha.yaml
new file mode 100644 (file)
index 0000000..442262b
--- /dev/null
@@ -0,0 +1,22 @@
+# Environment file to deploy the HA services via docker
+# Add it *after* -e docker.yaml:
+# ...deploy..-e docker.yaml -e docker-ha.yaml
+resource_registry:
+  # Pacemaker runs on the host
+  OS::TripleO::Tasks::ControllerPreConfig: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostConfig: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+  OS::TripleO::Services::Pacemaker: ../puppet/services/pacemaker.yaml
+  OS::TripleO::Services::PacemakerRemote: ../puppet/services/pacemaker_remote.yaml
+
+  # Services that are disabled for HA deployments with pacemaker
+  OS::TripleO::Services::Keepalived: OS::Heat::None
+
+  # HA Containers managed by pacemaker
+  OS::TripleO::Services::CinderVolume: ../docker/services/pacemaker/cinder-volume.yaml
+  OS::TripleO::Services::CinderBackup: ../docker/services/pacemaker/cinder-backup.yaml
+  OS::TripleO::Services::Clustercheck: ../docker/services/pacemaker/clustercheck.yaml
+  OS::TripleO::Services::HAproxy: ../docker/services/pacemaker/haproxy.yaml
+  OS::TripleO::Services::MySQL: ../docker/services/pacemaker/database/mysql.yaml
+  OS::TripleO::Services::RabbitMQ: ../docker/services/pacemaker/rabbitmq.yaml
+  OS::TripleO::Services::Redis: ../docker/services/pacemaker/database/redis.yaml
index 6a5ec87..c32001a 100644 (file)
@@ -6,6 +6,8 @@ resource_registry:
   OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
 
   OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+  # Default Neutron ML2 puppet plugin to use when NeutronCorePlugin is set to ML2
+  OS::TripleO::Docker::NeutronMl2PluginBase: ../puppet/services/neutron-plugin-ml2.yaml
 
   #NOTE (dprince) add roles to be docker enabled as we support them
   OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
@@ -30,6 +32,7 @@ resource_registry:
   OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
   OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
   OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
+  OS::TripleO::Services::MySQLClient: ../docker/services/database/mysql-client.yaml
   OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
   OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
   OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
@@ -52,9 +55,9 @@ resource_registry:
   OS::TripleO::Services::Horizon: ../docker/services/horizon.yaml
   OS::TripleO::Services::Iscsid: ../docker/services/iscsid.yaml
   OS::TripleO::Services::Multipathd: ../docker/services/multipathd.yaml
-  OS::TripleO::Services::CinderApi: ../docker/services/cinder-api.yaml
-  OS::TripleO::Services::CinderScheduler: ../docker/services/cinder-scheduler.yaml
   # FIXME: Had to remove these to unblock containers CI. They should be put back when fixed.
+  # OS::TripleO::Services::CinderApi: ../docker/services/cinder-api.yaml
+  # OS::TripleO::Services::CinderScheduler: ../docker/services/cinder-scheduler.yaml
   # OS::TripleO::Services::CinderBackup: ../docker/services/cinder-backup.yaml
   # OS::TripleO::Services::CinderVolume: ../docker/services/cinder-volume.yaml
 
@@ -74,3 +77,4 @@ parameter_defaults:
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::CeilometerAgentCompute
     - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::OpenDaylightOvs
index 24eedf8..20340c7 100644 (file)
@@ -9,4 +9,6 @@ parameter_defaults:
   UpgradeLevelNovaCompute: auto
   UpgradeInitCommonCommand: |
     #!/bin/bash
+    set -eu
     # Ocata to Pike, put any needed host-level workarounds here
+    yum install -y ansible-pacemaker
index f5a0a39..2c87470 100644 (file)
@@ -2,7 +2,6 @@
 # a Cisco Neutron plugin.
 resource_registry:
   OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
-  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
 
 parameter_defaults:
diff --git a/environments/neutron-opendaylight-dpdk.yaml b/environments/neutron-opendaylight-dpdk.yaml
new file mode 100644 (file)
index 0000000..9ee4eb7
--- /dev/null
@@ -0,0 +1,37 @@
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR and DPDK
+resource_registry:
+  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+  OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
+  OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
+  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+
+parameter_defaults:
+  NeutronEnableForceMetadata: true
+  NeutronMechanismDrivers: 'opendaylight_v2'
+  NeutronServicePlugins: 'odl-router_v2'
+  NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+  ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+  ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+  ## This can be done using ComputeKernelArgs as shown below.
+  ComputeParameters:
+    #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+    ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+    ## due to CPU contention of DPDK PMD threads.
+    OvsEnableDpdk: True
+    ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on compute overcloud nodes and set the following parameters:
+    #OvsDpdkSocketMemory: ""       # Sets the amount of hugepage memory to assign per NUMA node.
+                                   # It is recommended to use the socket closest to the PCIe slot used for the
+                                   # desired DPDK NIC.  Format should be comma separated per socket string such as:
+                                   # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+    #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+    #OvsPmdCoreList: ""            # List or range of CPU cores for PMD threads to be pinned to.  Note, NIC
+                                   # location to cores on socket, number of hyper-threaded logical cores, and
+                                   # desired number of PMD threads can all play a role in configuring this setting.
+                                   # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+                                   # If using hyperthreading then specify both logical cores that would equal the
+                                   # physical core.  Also, specifying more than one core will trigger multiple PMD
+                                   # threads to be spawned, which may improve dataplane performance.
+    #NovaVcpuPinSet: ""            # Cores to pin Nova instances to.  For maximum performance, select cores
+                                   # on the same NUMA node(s) selected for previous settings.
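The commented block above only documents the knobs; a filled-in sketch of ComputeParameters is shown here for orientation. Every value is a placeholder that has to be derived from the NUMA layout, NIC placement and core count of the actual compute nodes, not a recommendation:

    parameter_defaults:
      ComputeParameters:
        ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
        OvsEnableDpdk: True
        OvsDpdkSocketMemory: "1024,0"   # MB of hugepages per NUMA socket
        OvsDpdkDriverType: "vfio-pci"
        OvsPmdCoreList: "2,22"          # placeholder cores on the DPDK NIC's socket
        NovaVcpuPinSet: "4-19,24-39"    # placeholder; keep instances off the PMD cores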
index 6706bcc..ecfd0fe 100644 (file)
@@ -1,18 +1,31 @@
-## A Heat environment that can be used to deploy DPDK with OVS
+# A Heat environment that can be used to deploy DPDK with OVS
+# Deploying DPDK requires enabling hugepages for the overcloud nodes
 resource_registry:
   OS::TripleO::Services::ComputeNeutronOvsAgent: ../puppet/services/neutron-ovs-dpdk-agent.yaml
 
 parameter_defaults:
-  ## NeutronDpdkCoreList and NeutronDpdkMemoryChannels are REQUIRED settings.
-  ## Attempting to deploy DPDK without appropriate values will cause deployment to fail or lead to unstable deployments.
-  #NeutronDpdkCoreList: ""
-  #NeutronDpdkMemoryChannels: ""
-
   NeutronDatapathType: "netdev"
   NeutronVhostuserSocketDir: "/var/lib/vhost_sockets"
-
-  #NeutronDpdkSocketMemory: ""
-  #NeutronDpdkDriverType: "vfio-pci"
-  #NovaReservedHostMemory: 4096
-  #NovaVcpuPinSet: ""
-
+  NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+  ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+  ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+  ## This can be done using ComputeKernelArgs as shown below.
+  #ComputeParameters:
+    #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+    ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+    ## due to CPU contention of DPDK PMD threads.
+    ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on overcloud compute nodes and set the following parameters:
+    #OvsDpdkSocketMemory: ""       # Sets the amount of hugepage memory to assign per NUMA node.
+                                   # It is recommended to use the socket closest to the PCIe slot used for the
+                                   # desired DPDK NIC.  Format should be comma separated per socket string such as:
+                                   # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+    #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+    #OvsPmdCoreList: ""            # List or range of CPU cores for PMD threads to be pinned to.  Note, NIC
+                                   # location to cores on socket, number of hyper-threaded logical cores, and
+                                   # desired number of PMD threads can all play a role in configuring this setting.
+                                   # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+                                   # If using hyperthreading then specify both logical cores that would equal the
+                                   # physical core.  Also, specifying more than one core will trigger multiple PMD
+                                   # threads to be spawned, which may improve dataplane performance.
+    #NovaVcpuPinSet: ""            # Cores to pin Nova instances to.  For maximum performance, select cores
+                                   # on the same NUMA node(s) selected for previous settings.
diff --git a/environments/nonha-arch.yaml b/environments/nonha-arch.yaml
new file mode 100644 (file)
index 0000000..7fdcc10
--- /dev/null
@@ -0,0 +1,16 @@
+# An environment which creates an Overcloud without the use of pacemaker
+# (i.e. only with keepalived and systemd for all resources)
+resource_registry:
+  OS::TripleO::Tasks::ControllerPreConfig:  OS::Heat::None
+  OS::TripleO::Tasks::ControllerPostConfig:  OS::Heat::None
+  OS::TripleO::Tasks::ControllerPostPuppetRestart:  OS::Heat::None
+
+  OS::TripleO::Services::CinderVolume: ../puppet/services/cinder-volume.yaml
+  OS::TripleO::Services::RabbitMQ: ../puppet/services/rabbitmq.yaml
+  OS::TripleO::Services::HAproxy: ../puppet/services/haproxy.yaml
+  OS::TripleO::Services::Redis: ../puppet/services/database/redis.yaml
+  OS::TripleO::Services::MySQL: ../puppet/services/database/mysql.yaml
+  OS::TripleO::Services::Keepalived: OS::Heat::None
+  OS::TripleO::Services::Pacemaker: OS::Heat::None
+  OS::TripleO::Services::PacemakerRemote: OS::Heat::None
+
diff --git a/environments/overcloud-baremetal.j2.yaml b/environments/overcloud-baremetal.j2.yaml
new file mode 100644 (file)
index 0000000..668e28d
--- /dev/null
@@ -0,0 +1,19 @@
+resource_registry:
+  OS::TripleO::AllNodes::SoftwareConfig: OS::Heat::None
+  OS::TripleO::PostDeploySteps: OS::Heat::None
+  OS::TripleO::DefaultPasswords: OS::Heat::None
+  OS::TripleO::RandomString: OS::Heat::None
+  OS::TripleO::AllNodesDeployment: OS::Heat::None
+
+parameter_defaults:
+  # Deploy no services
+{% for role in roles %}
+  {{role.name}}Services: []
+{% endfor %}
+
+  # Consistent Hostname format
+  ControllerHostnameFormat: overcloud-controller-%index%
+  ComputeHostnameFormat: overcloud-novacompute-%index%
+  ObjectStorageHostnameFormat: overcloud-objectstorage-%index%
+  CephStorageHostnameFormat: overcloud-cephstorage-%index%
+  BlockStorageHostnameFormat: overcloud-blockstorage-%index%
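Since the Jinja loop above expands once per role, the rendered environment carries one empty service list per role; a sketch of the output, assuming the default Controller and Compute roles:

    parameter_defaults:
      # Deploy no services
      ControllerServices: []
      ComputeServices: []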
diff --git a/environments/overcloud-services.yaml b/environments/overcloud-services.yaml
new file mode 100644 (file)
index 0000000..c409b89
--- /dev/null
@@ -0,0 +1,7 @@
+parameter_defaults:
+  # Consistent Hostname format
+  ControllerDeployedServerHostnameFormat: overcloud-controller-%index%
+  ComputeDeployedServerHostnameFormat: overcloud-novacompute-%index%
+  ObjectStorageDeployedServerHostnameFormat: overcloud-objectstorage-%index%
+  CephStorageDeployedServerHostnameFormat: overcloud-cephstorage-%index%
+  BlockStorageDeployedServerHostnameFormat: overcloud-blockstorage-%index%
diff --git a/environments/services-docker/neutron-opendaylight.yaml b/environments/services-docker/neutron-opendaylight.yaml
new file mode 100644 (file)
index 0000000..b749cb6
--- /dev/null
@@ -0,0 +1,16 @@
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR using Docker containers
+resource_registry:
+  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+  OS::TripleO::Services::OpenDaylightApi: ../../docker/services/opendaylight-api.yaml
+  OS::TripleO::Services::OpenDaylightOvs: ../../puppet/services/opendaylight-ovs.yaml
+  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+  OS::TripleO::Docker::NeutronMl2PluginBase: ../../puppet/services/neutron-plugin-ml2-odl.yaml
+
+parameter_defaults:
+  NeutronEnableForceMetadata: true
+  NeutronMechanismDrivers: 'opendaylight_v2'
+  NeutronServicePlugins: 'odl-router_v2,trunk'
+  DockerNeutronApiImage: 'centos-binary-neutron-server-opendaylight:latest'
+  DockerNeutronConfigImage: 'centos-binary-neutron-server-opendaylight:latest'
diff --git a/environments/services-docker/octavia.yaml b/environments/services-docker/octavia.yaml
new file mode 100644 (file)
index 0000000..b677a4f
--- /dev/null
@@ -0,0 +1,5 @@
+resource_registry:
+  OS::TripleO::Services::OctaviaApi: ../../docker/services/octavia-api.yaml
+  OS::TripleO::Services::OctaviaHousekeeping: ../../docker/services/octavia-housekeeping.yaml
+  OS::TripleO::Services::OctaviaHealthManager: ../../docker/services/octavia-health-manager.yaml
+  OS::TripleO::Services::OctaviaWorker: ../../docker/services/octavia-worker.yaml
index 8359f4a..b81b026 100644 (file)
@@ -2,3 +2,5 @@ resource_registry:
   OS::TripleO::Services::IronicApi: ../../puppet/services/ironic-api.yaml
   OS::TripleO::Services::IronicConductor: ../../puppet/services/ironic-conductor.yaml
   OS::TripleO::Services::NovaIronic: ../../puppet/services/nova-ironic.yaml
+parameter_defaults:
+  NovaSchedulerDiscoverHostsInCellsInterval: 15
diff --git a/environments/services/neutron-lbaasv2.yaml b/environments/services/neutron-lbaasv2.yaml
new file mode 100644 (file)
index 0000000..9dee74e
--- /dev/null
@@ -0,0 +1,17 @@
+# A Heat environment file that can be used to deploy the Neutron LBaaSv2 service
+#
+# Currently there are only two interface drivers for Neutron LBaaSv2.
+# The default option is the standard OVS driver; the other option is to be used
+# when Linux bridges are used instead of OVS.
+# In order to enable the other backend, replace the content of NeutronLbaasInterfaceDriver
+#
+# - OVS: neutron.agent.linux.interface.OVSInterfaceDriver
+# - LinuxBridges: neutron.agent.linux.interface.BridgeInterfaceDriver
+resource_registry:
+  OS::TripleO::Services::NeutronLbaas: ../puppet/services/neutron-lbaas.yaml
+
+parameter_defaults:
+  NeutronLbaasInterfaceDriver: "neutron.agent.linux.interface.OVSInterfaceDriver"
+  NeutronLbaasDeviceDriver: "neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver"
+  NeutronServiceProviders: ['LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default']
+
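Switching to the Linux bridge backend named in the header comments is then just an override of the same parameter; a minimal sketch:

    parameter_defaults:
      NeutronLbaasInterfaceDriver: "neutron.agent.linux.interface.BridgeInterfaceDriver"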
index 4cdba09..c118fe7 100644 (file)
@@ -34,6 +34,14 @@ parameter_defaults:
   # Type: string
   CinderNetappLogin: <None>
 
+  # 
+  # Type: string
+  CinderNetappNasSecureFileOperations: false
+
+  # 
+  # Type: string
+  CinderNetappNasSecureFilePermissions: false
+
   # 
   # Type: string
   CinderNetappNfsMountOptions: ''
index fb0d169..96632bc 100644 (file)
@@ -59,19 +59,6 @@ parameters:
     description: |
       When enabled, the system will perform a yum update after performing the
       RHEL Registration process.
-  deployment_actions:
-    default: ['CREATE', 'UPDATE']
-    type: comma_delimited_list
-    description: >
-      List of stack actions that will trigger any deployments in this
-      templates. The actions will be an empty list of the server is in the
-      toplevel DeploymentServerBlacklist parameter's value.
-
-conditions:
-  deployment_actions_empty:
-    equals:
-      - {get_param: deployment_actions}
-      - []
 
 resources:
 
@@ -149,11 +136,7 @@ resources:
       name: RHELUnregistrationDeployment
       server:  {get_param: server}
       config: {get_resource: RHELUnregistration}
-      actions:
-        if:
-          - deployment_actions_empty
-          - []
-          - ['DELETE'] # Only do this on DELETE
+      actions: ['DELETE'] # Only do this on DELETE
       input_values:
         REG_METHOD: {get_param: rhel_reg_method}
 
@@ -186,11 +169,7 @@ resources:
       name: UpdateDeploymentAfterRHELRegistration
       config: {get_resource: YumUpdateConfigurationAfterRHELRegistration}
       server:  {get_param: server}
-      actions:
-        if:
-          - deployment_actions_empty
-          - []
-          - ['CREATE'] # Only do this on CREATE
+      actions: ['CREATE'] # Only do this on CREATE
 
 outputs:
   deploy_stdout:
index f4f1a14..2d86261 100644 (file)
@@ -31,7 +31,7 @@
             line: 'isolated_cores={{ _TUNED_CORES_ }}'
           when: _TUNED_CORES_|default("") != ""
 
-        - name: Tune-d provile activation
+        - name: Tune-d profile activation
           shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }}
       become: true
       when: _TUNED_PROFILE_NAME_|default("") != ""
index 79cb7cb..48ba526 100644 (file)
@@ -7,19 +7,6 @@ description: >
 parameters:
   server:
     type: string
-  deployment_actions:
-    default: ['CREATE', 'UPDATE']
-    type: comma_delimited_list
-    description: >
-      List of stack actions that will trigger any deployments in this
-      templates. The actions will be an empty list of the server is in the
-      toplevel DeploymentServerBlacklist parameter's value.
-
-conditions:
-  deployment_actions_empty:
-    equals:
-      - {get_param: deployment_actions}
-      - []
 
 resources:
 
@@ -37,11 +24,6 @@ resources:
       name: SomeDeployment
       server:  {get_param: server}
       config: {get_resource: SomeConfig}
-      actions:
-        if:
-          - deployment_actions_empty
-          - []
-          - ['CREATE'] # Only do this on CREATE
       actions: ['CREATE'] # Only do this on CREATE
 
   RebootConfig:
@@ -62,9 +44,5 @@ resources:
       name: RebootDeployment
       server:  {get_param: server}
       config: {get_resource: RebootConfig}
-      actions:
-        if:
-          - deployment_actions_empty
-          - []
-          - ['CREATE'] # Only do this on CREATE
+      actions: ['CREATE'] # Only do this on CREATE
       signal_transport: NO_SIGNAL
index fe52ef7..41d8f4f 100644 (file)
@@ -19,13 +19,6 @@ parameters:
   {{role}}HostCpusList:
     type: string
     default: ""
-  deployment_actions:
-    default: ['CREATE', 'UPDATE']
-    type: comma_delimited_list
-    description: >
-      List of stack actions that will trigger any deployments in this
-      templates. The actions will be an empty list of the server is in the
-      toplevel DeploymentServerBlacklist parameter's value.
 
 parameter_group:
   - label: deprecated
@@ -45,10 +38,6 @@ conditions:
           equals:
           - get_param: {{role}}TunedProfileName
           - ""
-  deployment_actions_empty:
-    equals:
-      - {get_param: deployment_actions}
-      - []
 
 resources:
 
@@ -73,11 +62,7 @@ resources:
       name: HostParametersDeployment
       server:  {get_param: server}
       config: {get_resource: HostParametersConfig}
-      actions:
-        if:
-          - deployment_actions_empty
-          - []
-          - ['CREATE'] # Only do this on CREATE
+      actions: ['CREATE'] # Only do this on CREATE
       input_values:
         _KERNEL_ARGS_: {get_param: {{role}}KernelArgs}
         _TUNED_PROFILE_NAME_: {get_param: {{role}}TunedProfileName}
@@ -103,11 +88,7 @@ resources:
       name: RebootDeployment
       server:  {get_param: server}
       config: {get_resource: RebootConfig}
-      actions:
-        if:
-          - deployment_actions_empty
-          - []
-          - ['CREATE'] # Only do this on CREATE
+      actions: ['CREATE'] # Only do this on CREATE
       signal_transport: NO_SIGNAL
 
 outputs:
index 21309dd..009a087 100644 (file)
@@ -11,22 +11,136 @@ parameters:
     type: json
     description: Role Specific parameters
     default: {}
-  deployment_actions:
-    default: ['CREATE', 'UPDATE']
+  ServiceNames:
     type: comma_delimited_list
+    default: []
+  IsolCpusList:
+    default: "0"
+    description: List of cores to be isolated by tuned
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]+"
+  OvsEnableDpdk:
+    default: false
+    description: Whether or not to enable DPDK in OVS
+    type: boolean
+  OvsDpdkCoreList:
+    description: >
+      List of cores to be used for DPDK lcore threads.  Note, these threads
+      are used by the OVS control path for validation and handling functions.
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ""
+  OvsDpdkMemoryChannels:
+    description: Number of memory channels per socket to be used for DPDK
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9]*"
+    default: ""
+  OvsDpdkSocketMemory:
+    default: ""
+    description: >
+      Sets the amount of hugepage memory to assign per NUMA node. It is
+      recommended to use the socket closest to the PCIe slot used for the
+      desired DPDK NIC.  The format should be in "<socket 0 mem>, <socket 1
+      mem>, <socket n mem>", where the value is specified in MB.  For example:
+      "1024,0".
+    type: string
+  OvsDpdkDriverType:
+    default: "vfio-pci"
+    description: >
+      DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+      this UIO/PMD driver.
+    type: string
+  OvsPmdCoreList:
     description: >
-      List of stack actions that will trigger any deployments in this
-      templates. The actions will be an empty list of the server is in the
-      toplevel DeploymentServerBlacklist parameter's value.
+      A list or range of CPU cores for PMD threads to be pinned to.  Note, NIC
+      location to cores on socket, number of hyper-threaded logical cores, and
+      desired number of PMD threads can all play a role in configuring this
+      setting.  These cores should be on the same socket where
+      OvsDpdkSocketMemory is assigned.  If using hyperthreading then specify
+      both logical cores that would equal the physical core. Also, specifying
+      more than one core will trigger multiple PMD threads to be spawned which
+      may improve dataplane performance.
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    type: string
+    default: ""
+  # DEPRECATED: the following options are deprecated and are currently maintained
+  # for backwards compatibility. They will be removed in the Queens cycle.
+  HostCpusList:
+    description: List of cores to be used for host process
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]+"
+    default: '0'
+  NeutronDpdkCoreList:
+    description: List of cores to be used for DPDK Poll Mode Driver
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ''
+  NeutronDpdkMemoryChannels:
+    description: Number of memory channels to be used for DPDK
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9]*"
+    default: ''
+  NeutronDpdkSocketMemory:
+    default: ''
+    description: Memory allocated for each socket
+    type: string
+  NeutronDpdkDriverType:
+    default: "vfio-pci"
+    description: DPDK Driver type
+    type: string
 
 conditions:
   is_host_config_required: {not: {equals: [{get_param: [RoleParameters, KernelArgs]}, ""]}}
-  deployment_actions_empty:
-    equals:
-      - {get_param: deployment_actions}
-      - []
+  # YAQL is enabled in conditions with https://review.openstack.org/#/c/467506/
+  is_dpdk_config_required:
+    or:
+      - yaql:
+        expression: $.data.service_names.contains('neutron_ovs_dpdk_agent')
+        data:
+          service_names: {get_param: ServiceNames}
+      - {get_param: OvsEnableDpdk}
+      - {get_param: [RoleParameters, OvsEnableDpdk]}
+  is_reboot_config_required:
+    or:
+      - is_host_config_required
+      - is_dpdk_config_required
+  l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+  pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+  mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+  socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+  driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+  isol_cpus_empty: {equals: [{get_param: IsolCpusList}, '0']}
 
 resources:
+  RoleParametersValue:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+        map_replace:
+          - map_replace:
+            - IsolCpusList: IsolCpusList
+              OvsDpdkCoreList: OvsDpdkCoreList
+              OvsDpdkMemoryChannels: OvsDpdkMemoryChannels
+              OvsDpdkSocketMemory: OvsDpdkSocketMemory
+              OvsDpdkDriverType: OvsDpdkDriverType
+              OvsPmdCoreList: OvsPmdCoreList
+            - values: {get_param: [RoleParameters]}
+          - values:
+              IsolCpusList: {if: [isol_cpus_empty, {get_param: HostCpusList}, {get_param: IsolCpusList}]}
+              OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+              OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+              OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+              OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+              OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
   HostParametersConfig:
     type: OS::Heat::SoftwareConfig
     condition: is_host_config_required
@@ -48,19 +162,51 @@ resources:
       name: HostParametersDeployment
       server:  {get_param: server}
       config: {get_resource: HostParametersConfig}
-      actions:
-        if:
-          - deployment_actions_empty
-          - []
-          - ['CREATE'] # Only do this on CREATE
+      actions: ['CREATE'] # Only do this on CREATE
       input_values:
         _KERNEL_ARGS_: {get_param: [RoleParameters, KernelArgs]}
         _TUNED_PROFILE_NAME_: {get_param: [RoleParameters, TunedProfileName]}
-        _TUNED_CORES_: {get_param: [RoleParameters, HostIsolatedCoreList]}
+        _TUNED_CORES_: {get_param: [RoleParameters, IsolCpusList]}
+
+  EnableDpdkConfig:
+    type: OS::Heat::SoftwareConfig
+    condition: is_dpdk_config_required
+    properties:
+      group: script
+      config:
+        str_replace:
+          template: |
+            #!/bin/bash
+            set -x
+            # DO NOT use --detailed-exitcodes
+            puppet apply --logdest console \
+              --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+              -e '
+                class {"vswitch::dpdk":
+                  host_core_list  => "$HOST_CORES",
+                  pmd_core_list   => "$PMD_CORES",
+                  memory_channels => "$MEMORY_CHANNELS",
+                  socket_mem      => "$SOCKET_MEMORY",
+                }
+              '
+          params:
+            $HOST_CORES: {get_attr: [RoleParametersValue, value, OvsDpdkCoreList]}
+            $PMD_CORES: {get_attr: [RoleParametersValue, value, OvsPmdCoreList]}
+            $MEMORY_CHANNELS: {get_attr: [RoleParametersValue, value, OvsDpdkMemoryChannels]}
+            $SOCKET_MEMORY: {get_attr: [RoleParametersValue, value, OvsDpdkSocketMemory]}
+
+  EnableDpdkDeployment:
+    type: OS::Heat::SoftwareDeployment
+    condition: is_dpdk_config_required
+    properties:
+      name: EnableDpdkDeployment
+      server:  {get_param: server}
+      config: {get_resource: EnableDpdkConfig}
+      actions: ['CREATE'] # Only do this on CREATE
 
   RebootConfig:
     type: OS::Heat::SoftwareConfig
-    condition: is_host_config_required
+    condition: is_reboot_config_required
     properties:
       group: script
       config: |
@@ -73,16 +219,12 @@ resources:
   RebootDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: HostParametersDeployment
-    condition: is_host_config_required
+    condition: is_reboot_config_required
     properties:
       name: RebootDeployment
       server:  {get_param: server}
       config: {get_resource: RebootConfig}
-      actions:
-        if:
-          - deployment_actions_empty
-          - []
-          - ['CREATE'] # Only do this on CREATE
+      actions: ['CREATE'] # Only do this on CREATE
       signal_transport: NO_SIGNAL
 
 outputs:
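The nested map_replace above resolves each setting with the precedence role-specific value first, then the new global parameter, then the deprecated Neutron*/HostCpusList parameter when the new one is left at its default. A hypothetical illustration (all values invented):

    parameter_defaults:
      NeutronDpdkMemoryChannels: "4"    # deprecated name, still honoured while OvsDpdkMemoryChannels is empty
      OvsDpdkSocketMemory: "2048,2048"  # new name, overrides the deprecated NeutronDpdkSocketMemory
      ComputeParameters:
        OvsDpdkCoreList: "0,1"          # role-specific value, wins over both globals for Compute nodes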
index 02fdbf1..e4ba0cc 100644 (file)
@@ -7,13 +7,6 @@ parameters:
   server:
     description: ID of the node to apply this config to
     type: string
-  deployment_actions:
-    default: ['CREATE', 'UPDATE']
-    type: comma_delimited_list
-    description: >
-      List of stack actions that will trigger any deployments in this
-      templates. The actions will be an empty list of the server is in the
-      toplevel DeploymentServerBlacklist parameter's value.
 
 resources:
   SshHostPubKeyConfig:
@@ -35,7 +28,6 @@ resources:
     properties:
       config: {get_resource: SshHostPubKeyConfig}
       server: {get_param: server}
-      actions: {get_param: deployment_actions}
 
 
 outputs:
index f92f9a1..95b4745 100644 (file)
@@ -4,19 +4,14 @@ description: >
 parameters:
   BondInterfaceOvsOptions:
     default: ''
-    description: 'The ovs_options string for the bond interface. Set things like
-
-      lacp=active and/or bond_mode=balance-slb using this option.
-
-      '
+    description: The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
-      description: 'The balance-tcp bond mode is known to cause packet loss and
-
+      description: The balance-tcp bond mode is known to cause packet loss and
         should not be used in BondInterfaceOvsOptions.
-
-        '
   ControlPlaneIp:
     default: ''
     description: IP address/subnet on the ctlplane network
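The reworded description covers both bond flavours; as a sketch, using the option strings it names:

    parameter_defaults:
      # OVS bond:
      BondInterfaceOvsOptions: "bond_mode=balance-slb lacp=active"
      # Linux bond (when the NIC templates use a Linux bond instead of an OVS bond):
      #BondInterfaceOvsOptions: "mode=4"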
index 97177c4..9683456 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 5456c2c..3ad6d65 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 607d346..095c497 100644 (file)
@@ -32,8 +32,9 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
   ExternalNetworkVlanID:
     default: 10
index 1d62a5d..882402a 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 8ac5cda..4901f94 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 2579648..33c6fa6 100644 (file)
@@ -34,16 +34,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: bond_mode=active-backup
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index e4b3012..100821b 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: bond_mode=active-backup
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 6371ceb..0ede081 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
-      description: 'The balance-tcp bond mode is known to cause packet loss and
-
+      description: The balance-tcp bond mode is known to cause packet loss and
         should not be used in BondInterfaceOvsOptions.
-
-        '
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 386520c..bb54ca6 100644 (file)
@@ -34,7 +34,7 @@ parameters:
 
 resources:
   VipPort:
-    type: OS::Neutron::Port
+    type: OS::TripleO::Network::Ports::ControlPlaneVipPort
     properties:
       network: {get_param: ControlPlaneNetwork}
       name: {get_param: PortName}
index c3734af..a9111ed 100644 (file)
@@ -133,6 +133,20 @@ outputs:
                           SERVICE: {get_attr: [EnabledServicesValue, value]}
                   - values: {get_param: ServiceNetMap}
               - values: {get_attr: [NetIpMapValue, value]}
+  ctlplane_service_ips:
+    description: >
+      Map of enabled services to a list of their ctlplane IP addresses
+    value:
+      yaql:
+        expression: dict($.data.map.items().where(len($[1]) > 0))
+        data:
+          map:
+            map_merge:
+              repeat:
+                template:
+                  SERVICE_ctlplane_node_ips: {get_param: ControlPlaneIpList}
+                for_each:
+                  SERVICE: {get_attr: [EnabledServicesValue, value]}
   service_hostnames:
     description: >
       Map of enabled services to a list of hostnames where they're running
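For a rough idea of the yaql filter in the new ctlplane_service_ips output (service names and addresses invented): every enabled service on the role is mapped to the role's ControlPlaneIpList, and entries whose list is empty, as happens for a role scaled to zero nodes, are dropped:

    # before filtering:
    #   nova_compute_ctlplane_node_ips: [192.168.24.10, 192.168.24.11]
    #   swift_storage_ctlplane_node_ips: []
    # after dict($.data.map.items().where(len($[1]) > 0)):
    #   nova_compute_ctlplane_node_ips: [192.168.24.10, 192.168.24.11]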
index 8fe2d27..864da24 100755 (executable)
@@ -110,7 +110,7 @@ EOF_CAT
 }
 
 if [ -n '$network_config' ]; then
-    if [ -z "${disable_configure_safe_defaults:-''}" ]; then
+    if [ -z "${disable_configure_safe_defaults:-}" ]; then
         trap configure_safe_defaults EXIT
     fi
 
index d3d8cbd..ba8e556 100644 (file)
@@ -42,7 +42,7 @@ parameters:
       CinderApiNetwork: internal_api
       CinderIscsiNetwork: storage
       CongressApiNetwork: internal_api
-      GlanceApiNetwork: storage
+      GlanceApiNetwork: internal_api
       IronicApiNetwork: ctlplane
       IronicNetwork: ctlplane
       IronicInspectorNetwork: ctlplane
index 4aee571..a579ee9 100644 (file)
@@ -4,10 +4,12 @@ resource_registry:
   OS::TripleO::PostDeploySteps: puppet/post.yaml
   OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
   OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
+  OS::TripleO::AllNodesDeployment: OS::Heat::StructuredDeployments
   OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
   OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
   OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
   OS::TripleO::DefaultPasswords: default_passwords.yaml
+  OS::TripleO::RandomString: OS::Heat::RandomString
 
   # Tasks (for internal TripleO usage)
   OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
@@ -136,6 +138,7 @@ resource_registry:
   OS::TripleO::Services::NeutronL2gwApi: OS::Heat::None
   OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
   OS::TripleO::Services::NeutronL2gwAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronLbaasv2Agent: OS::Heat::None
   OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
   # FIXME(shardy) the duplicate NeutronServer line can be removed when we've updated
   # the multinode job ControllerServices after this patch merges
@@ -266,6 +269,7 @@ resource_registry:
   OS::TripleO::Services::Docker: OS::Heat::None
   OS::TripleO::Services::CertmongerUser: OS::Heat::None
   OS::TripleO::Services::Iscsid: OS::Heat::None
+  OS::TripleO::Services::Clustercheck: OS::Heat::None
 
 parameter_defaults:
   EnablePackageInstall: false
index e4c04b4..56a10a5 100644 (file)
@@ -242,15 +242,15 @@ resources:
                 HOST: {get_param: CloudNameStorageManagement}
 
   HeatAuthEncryptionKey:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
 
   PcsdPassword:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
     properties:
       length: 16
 
   HorizonSecret:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
     properties:
       length: 10
 
@@ -334,7 +334,7 @@ resources:
       servers: {get_attr: [{{role.name}}Servers, value]}
 
   {{role.name}}AllNodesDeployment:
-    type: OS::Heat::StructuredDeployments
+    type: OS::TripleO::AllNodesDeployment
     depends_on:
 {% for role_inner in roles %}
       - {{role_inner.name}}HostsDeployment
@@ -462,6 +462,21 @@ resources:
             servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
 {% endfor %}
 
+  # This is a different format to *Servers, as it creates a map of lists
+  # whereas *Servers creates a map of maps with keys of the nested resource names
+  ServerIdMap:
+    type: OS::Heat::Value
+    properties:
+      value:
+        server_ids:
+{% for role in roles %}
+          {{role.name}}: {get_attr: [{{role.name}}, nova_server_resource]}
+{% endfor %}
+        bootstrap_server_id:
+          yaql:
+            expression: coalesce($.data, []).first(null)
+            data: {get_attr: [{{primary_role_name}}, nova_server_resource]}
+
   # This resource just creates a dict out of the DeploymentServerBlacklist,
   # which is a list. The dict is used in the role templates to set a condition
   # on whether to create the deployment resources. We can't use the list
@@ -575,12 +590,12 @@ resources:
       UpdateIdentifier: {get_param: UpdateIdentifier}
 
   MysqlRootPassword:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
     properties:
       length: 10
 
   RabbitCookie:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
     properties:
       length: 20
       salt: {get_param: RabbitCookieSalt}
@@ -735,12 +750,34 @@ resources:
 {% for role in roles %}
         {{role.name}}: {get_attr: [{{role.name}}Servers, value]}
 {% endfor %}
+      stack_name: {get_param: 'OS::stack_name'}
       EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+      ctlplane_service_ips:
+        # Note (shardy) this somewhat complex yaql may be replaced
+        # with a map_deep_merge function in ocata.  It merges the
+        # list of maps, but appends to colliding lists when a service
+        # is deployed on more than one role
+        yaql:
+          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+          data:
+            l:
+{% for role in roles %}
+              - {get_attr: [{{role.name}}IpListMap, ctlplane_service_ips]}
+{% endfor %}
       role_data:
 {% for role in roles %}
         {{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
 {% endfor %}
 
+  ServerOsCollectConfigData:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+{% for role in roles %}
+        {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+
 outputs:
   ManagedEndpoints:
     description: Asserts that the keystone endpoints have been provisioned.
@@ -791,3 +828,18 @@ outputs:
 {% for role in roles %}
       {{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
 {% endfor %}
+  ServerOsCollectConfigData:
+    description: The os-collect-config configuration associated with each server resource
+    value:
+{% for role in roles %}
+      {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+  VipMap:
+    description: Mapping of each network to VIP addresses. Also includes the Redis VIP.
+    value:
+      map_merge:
+        - {get_attr: [VipMap, net_ip_map]}
+        - redis: {get_attr: [RedisVirtualIP, ip_address]}
+  ServerIdData:
+    description: Mapping of each role to a list of nova server IDs and the bootstrap ID
+    value: {get_attr: [ServerIdMap, value]}
index 95dcf0b..f28f606 100644 (file)
@@ -143,6 +143,25 @@ parameters:
     type: json
     description: Role Specific Parameters
     default: {}
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
 
 conditions:
   server_not_blacklisted:
@@ -150,6 +169,12 @@ conditions:
       equals:
         - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
         - 1
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
 
 resources:
   BlockStorage:
@@ -178,6 +203,12 @@ resources:
           - {get_param: BlockStorageServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: BlockStorageSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -377,7 +408,7 @@ resources:
     properties:
       server: {get_resource: BlockStorage}
       RoleParameters: {get_param: RoleParameters}
-      deployment_actions: {get_attr: [DeploymentActions, value]}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -486,9 +517,6 @@ resources:
   NodeExtraConfig:
     depends_on: NodeTLSCAData
     type: OS::TripleO::NodeExtraConfig
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
     properties:
         server: {get_resource: BlockStorage}
 
@@ -511,21 +539,11 @@ resources:
           - ['CREATE', 'UPDATE']
           - []
 
-  DeploymentActions:
-    type: OS::Heat::Value
-    properties:
-      value:
-        if:
-          - server_not_blacklisted
-          - ['CREATE', 'UPDATE']
-          - []
-
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
     depends_on: BlockStorageDeployment
     properties:
         server: {get_resource: BlockStorage}
-        deployment_actions: {get_attr: [DeploymentActions, value]}
 
 outputs:
   ip_address:
@@ -628,3 +646,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   management_ip_address:
     description: IP address of the server in the management network
     value: {get_attr: [ManagementPort, ip_address]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [BlockStorage, os_collect_config]}
index 6674a8a..85b276d 100644 (file)
@@ -149,6 +149,25 @@ parameters:
     type: json
     description: Role Specific Parameters
     default: {}
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
 
 conditions:
   server_not_blacklisted:
@@ -156,6 +175,12 @@ conditions:
       equals:
         - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
         - 1
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
 
 resources:
   CephStorage:
@@ -184,6 +209,12 @@ resources:
           - {get_param: CephStorageServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: CephStorageSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -383,7 +414,7 @@ resources:
     properties:
       server: {get_resource: CephStorage}
       RoleParameters: {get_param: RoleParameters}
-      deployment_actions: {get_attr: [DeploymentActions, value]}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -490,9 +521,6 @@ resources:
   CephStorageExtraConfigPre:
     depends_on: CephStorageDeployment
     type: OS::TripleO::CephStorageExtraConfigPre
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
     properties:
         server: {get_resource: CephStorage}
 
@@ -501,9 +529,6 @@ resources:
   NodeExtraConfig:
     depends_on: [CephStorageExtraConfigPre, NodeTLSCAData]
     type: OS::TripleO::NodeExtraConfig
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
     properties:
         server: {get_resource: CephStorage}
 
@@ -525,21 +550,11 @@ resources:
           - ['CREATE', 'UPDATE']
           - []
 
-  DeploymentActions:
-    type: OS::Heat::Value
-    properties:
-      value:
-        if:
-          - server_not_blacklisted
-          - ['CREATE', 'UPDATE']
-          - []
-
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
     depends_on: CephStorageDeployment
     properties:
         server: {get_resource: CephStorage}
-        deployment_actions: {get_attr: [DeploymentActions, value]}
 
 outputs:
   ip_address:
@@ -642,3 +657,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   management_ip_address:
     description: IP address of the server in the management network
     value: {get_attr: [ManagementPort, ip_address]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [CephStorage, os_collect_config]}
index 37eb98d..10d082c 100644 (file)
@@ -37,7 +37,7 @@ parameters:
     type: string
   NeutronPublicInterface:
     default: nic1
-    description: A port to add to the NeutronPhysicalBridge.
+    description: Which interface to add to the NeutronPhysicalBridge.
     type: string
   NodeIndex:
     type: number
@@ -161,8 +161,33 @@ parameters:
     type: json
     description: Role Specific Parameters
     default: {}
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
 
 conditions:
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
   server_not_blacklisted:
     not:
       equals:
@@ -198,6 +223,12 @@ resources:
           - {get_param: NovaComputeServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: NovaComputeSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -386,7 +417,7 @@ resources:
     properties:
       server: {get_resource: NovaCompute}
       RoleParameters: {get_param: RoleParameters}
-      deployment_actions: {get_attr: [DeploymentActions, value]}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkConfig:
     type: OS::TripleO::Compute::Net::SoftwareConfig
@@ -513,9 +544,6 @@ resources:
   ComputeExtraConfigPre:
     depends_on: NovaComputeDeployment
     type: OS::TripleO::ComputeExtraConfigPre
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
     properties:
         server: {get_resource: NovaCompute}
 
@@ -524,9 +552,6 @@ resources:
   NodeExtraConfig:
     depends_on: [ComputeExtraConfigPre, NodeTLSCAData]
     type: OS::TripleO::NodeExtraConfig
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
     properties:
         server: {get_resource: NovaCompute}
 
@@ -549,21 +574,11 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
-  DeploymentActions:
-    type: OS::Heat::Value
-    properties:
-      value:
-        if:
-          - server_not_blacklisted
-          - ['CREATE', 'UPDATE']
-          - []
-
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
     depends_on: NovaComputeDeployment
     properties:
         server: {get_resource: NovaCompute}
-        deployment_actions: {get_attr: [DeploymentActions, value]}
 
 outputs:
   ip_address:
@@ -668,3 +683,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     value:
       {get_resource: NovaCompute}
     condition: server_not_blacklisted
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [NovaCompute, os_collect_config]}
index 01f54df..ca08c65 100644 (file)
@@ -58,9 +58,13 @@ parameters:
     type: string
     constraints:
       - custom_constraint: nova.keypair
+  NeutronPhysicalBridge:
+    default: 'br-ex'
+    description: An OVS bridge to create for accessing external networks.
+    type: string
   NeutronPublicInterface:
     default: nic1
-    description: What interface to bridge onto br-ex for network nodes.
+    description: Which interface to add to the NeutronPhysicalBridge.
     type: string
   ServiceNetMap:
     default: {}
@@ -175,6 +179,25 @@ parameters:
     type: json
     description: Role Specific Parameters
     default: {}
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
 
 parameter_groups:
 - label: deprecated
@@ -188,7 +211,12 @@ conditions:
       equals:
         - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
         - 1
-
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
 
 resources:
 
@@ -218,6 +246,12 @@ resources:
           - {get_param: ControllerServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: ControllerSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -406,7 +440,7 @@ resources:
     properties:
       server: {get_resource: Controller}
       RoleParameters: {get_param: RoleParameters}
-      deployment_actions: {get_attr: [DeploymentActions, value]}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkConfig:
     type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -432,7 +466,7 @@ resources:
           - {get_param: NetworkDeploymentActions}
           - []
       input_values:
-        bridge_name: br-ex
+        bridge_name: {get_param: NeutronPhysicalBridge}
         interface_name: {get_param: NeutronPublicInterface}
 
   # Resource for site-specific injection of root certificate
@@ -553,9 +587,6 @@ resources:
   ControllerExtraConfigPre:
     depends_on: ControllerDeployment
     type: OS::TripleO::ControllerExtraConfigPre
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
     properties:
         server: {get_resource: Controller}
 
@@ -564,9 +595,6 @@ resources:
   NodeExtraConfig:
     depends_on: [ControllerExtraConfigPre, NodeTLSData]
     type: OS::TripleO::NodeExtraConfig
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
     properties:
         server: {get_resource: Controller}
 
@@ -589,21 +617,11 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
-  DeploymentActions:
-    type: OS::Heat::Value
-    properties:
-      value:
-        if:
-          - server_not_blacklisted
-          - ['CREATE', 'UPDATE']
-          - []
-
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
     depends_on: ControllerDeployment
     properties:
         server: {get_resource: Controller}
-        deployment_actions: {get_attr: [DeploymentActions, value]}
 
 outputs:
   ip_address:
@@ -714,3 +732,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   tls_cert_modulus_md5:
     description: MD5 checksum of the TLS Certificate Modulus
     value: {get_attr: [NodeTLSData, cert_modulus_md5]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [Controller, os_collect_config]}
index 4e1ad89..e4d20b4 100644 (file)
@@ -10,16 +10,20 @@ if [ -n "$artifact_urls" ]; then
   for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
     curl --globoff -o $TMP_DATA/file_data "$URL"
     if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
-      yum install -y $TMP_DATA/file_data
+      mv $TMP_DATA/file_data $TMP_DATA/file_data.rpm
+      yum install -y $TMP_DATA/file_data.rpm
+      rm $TMP_DATA/file_data.rpm
     elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
       pushd /
       tar xvzf $TMP_DATA/file_data
       popd
     else
-      echo "ERROR: Unsupported file format."
+      echo "ERROR: Unsupported file format: $URL"
       exit 1
     fi
-    rm $TMP_DATA/file_data
+    if [ -f $TMP_DATA/file_data ]; then
+      rm $TMP_DATA/file_data
+    fi
   done
 else
   echo "No artifact_urls was set. Skipping..."
index b44095b..574c41b 100644 (file)
@@ -8,11 +8,14 @@ description: 'Upgrade steps for all roles'
 parameters:
   servers:
     type: json
-
+  stack_name:
+    type: string
+    description: Name of the topmost stack
   role_data:
     type: json
     description: Mapping of Role name e.g Controller to the per-role data
-
+  ctlplane_service_ips:
+    type: json
   UpdateIdentifier:
     type: string
     description: >
@@ -206,7 +209,9 @@ resources:
 {%- endfor %}
     properties:
       servers: {get_param: servers}
+      stack_name: {get_param: stack_name}
       role_data: {get_param: role_data}
+      ctlplane_service_ips: {get_param: ctlplane_service_ips}
 
 outputs:
   # Output the config for each role, just use Step1 as the config should be
index e634842..4a1670f 100644 (file)
@@ -143,6 +143,25 @@ parameters:
     type: json
     description: Role Specific Parameters
     default: {}
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
 
 conditions:
   server_not_blacklisted:
@@ -150,6 +169,12 @@ conditions:
       equals:
         - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
         - 1
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
 
 resources:
 
@@ -178,6 +203,12 @@ resources:
           - {get_param: SwiftStorageServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: ObjectStorageSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -377,7 +408,7 @@ resources:
     properties:
       server: {get_resource: SwiftStorage}
       RoleParameters: {get_param: RoleParameters}
-      deployment_actions: {get_attr: [DeploymentActions, value]}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -486,9 +517,6 @@ resources:
   NodeExtraConfig:
     depends_on: NodeTLSCAData
     type: OS::TripleO::NodeExtraConfig
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
     properties:
         server: {get_resource: SwiftStorage}
 
@@ -510,21 +538,11 @@ resources:
           - ['CREATE', 'UPDATE']
           - []
 
-  DeploymentActions:
-    type: OS::Heat::Value
-    properties:
-      value:
-        if:
-          - server_not_blacklisted
-          - ['CREATE', 'UPDATE']
-          - []
-
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
     depends_on: SwiftStorageHieraDeploy
     properties:
         server: {get_resource: SwiftStorage}
-        deployment_actions: {get_attr: [DeploymentActions, value]}
 
 outputs:
   ip_address:
@@ -627,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   management_ip_address:
     description: IP address of the server in the management network
     value: {get_attr: [ManagementPort, ip_address]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [SwiftStorage, os_collect_config]}
index c51b6e1..bdd1e61 100644 (file)
@@ -8,17 +8,20 @@ parameters:
   servers:
     type: json
     description: Mapping of Role name e.g Controller to a list of servers
-
+  stack_name:
+    type: string
+    description: Name of the topmost stack
   role_data:
     type: json
     description: Mapping of Role name e.g Controller to the per-role data
-
   DeployIdentifier:
     default: ''
     type: string
     description: >
       Setting this to a unique value will re-run any deployment tasks which
       perform configuration on a Heat stack-update.
+  ctlplane_service_ips:
+    type: json
 
 resources:
 # Note the include here is the same as post.j2.yaml but the data used at
index 3a15cec..67e1ecf 100644 (file)
@@ -8,7 +8,9 @@ parameters:
   servers:
     type: json
     description: Mapping of Role name e.g Controller to a list of servers
-
+  stack_name:
+    type: string
+    description: Name of the topmost stack
   role_data:
     type: json
     description: Mapping of Role name e.g Controller to the per-role data
@@ -23,6 +25,7 @@ parameters:
     description: >
       Setting this to a unique value will re-run any deployment tasks which
       perform configuration on a Heat stack-update.
+  ctlplane_service_ips:
+    type: json
 
-resources:
 {% include 'puppet-steps.j2' %}
index 5567d65..82c6171 100644 (file)
@@ -1,3 +1,19 @@
+{% set deploy_steps_max = 6 %}
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+  WorkflowTasks_Step{{step}}_Enabled:
+    or:
+    {% for role in roles %}
+      - not:
+          equals:
+            - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+            - ''
+      - False
+    {% endfor %}
+{% endfor %}
+
+resources:
   # Post deployment steps for all roles
   # A single config is re-applied with an incrementing step number
 {% for role in roles %}
       StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
 
   # Step through a series of configuration steps
-{% for step in range(1, 6) %}
+{% for step in range(1, deploy_steps_max) %}
   {{role.name}}Deployment_Step{{step}}:
     type: OS::Heat::StructuredDeploymentGroup
-  {% if step == 1 %}
-    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
-  {% else %}
     depends_on:
+      - WorkflowTasks_Step{{step}}_Execution
+    # TODO(gfidente): the following if/else condition
+    # replicates what is already defined for the
+    # WorkflowTasks_StepX resource and can be remove
+    # WorkflowTasks_StepX resource and can be removed
+    # if https://bugs.launchpad.net/heat/+bug/1700569
+    # is fixed.
+    {% if step == 1 %}
+    {% for dep in roles %}
+      - {{dep.name}}PreConfig
+      - {{dep.name}}ArtifactsDeploy
+    {% endfor %}
+    {% else %}
     {% for dep in roles %}
       - {{dep.name}}Deployment_Step{{step -1}}
     {% endfor %}
-  {% endif %}
+    {% endif %}
     properties:
       name: {{role.name}}Deployment_Step{{step}}
       servers: {get_param: [servers, {{role.name}}]}
 
 
 {% endfor %}
+
+# BEGIN service_workflow_tasks handling
+{% for step in range(1, deploy_steps_max) %}
+  WorkflowTasks_Step{{step}}:
+    type: OS::Mistral::Workflow
+    condition: WorkflowTasks_Step{{step}}_Enabled
+    depends_on:
+    {% if step == 1 %}
+    {% for dep in roles %}
+      - {{dep.name}}PreConfig
+      - {{dep.name}}ArtifactsDeploy
+    {% endfor %}
+    {% else %}
+    {% for dep in roles %}
+      - {{dep.name}}Deployment_Step{{step -1}}
+    {% endfor %}
+    {% endif %}
+    properties:
+      name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+      type: direct
+      tasks:
+        yaql:
+          expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+          data:
+          {% for role in roles %}
+            - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+          {% endfor %}
+
+  WorkflowTasks_Step{{step}}_Execution:
+    type: OS::Mistral::ExternalResource
+    condition: WorkflowTasks_Step{{step}}_Enabled
+    depends_on: WorkflowTasks_Step{{step}}
+    properties:
+      actions:
+        CREATE:
+          workflow: { get_resource: WorkflowTasks_Step{{step}} }
+          params:
+            env:
+              service_ips: { get_param: ctlplane_service_ips }
+        UPDATE:
+          workflow: { get_resource: WorkflowTasks_Step{{step}} }
+          params:
+            env:
+              service_ips: { get_param: ctlplane_service_ips }
+      always_update: true
+{% endfor %}
+# END service_workflow_tasks handling
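
Editor's note: for context, a sketch of the role_data a composable service would need to expose for the WorkflowTasks_Step machinery above to pick it up; the service name and the std.echo action are illustrative only, the real contract is just that service_workflow_tasks maps stepN keys to lists of Mistral task definitions, for example::

  outputs:
    role_data:
      description: Role data for the example service.
      value:
        service_name: example_service
        service_workflow_tasks:
          step2:
            - name: example_task
              action: std.echo output="runs before the step2 configuration"
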
index 4911fbe..e1a23ab 100644 (file)
@@ -1,18 +1,18 @@
 heat_template_version: pike
-description: 'OpenStack {{role}} node configured by Puppet'
+description: 'OpenStack {{role.name}} node configured by Puppet'
 parameters:
-  Overcloud{{role}}Flavor:
-    description: Flavor for the {{role}} node.
+  Overcloud{{role.name}}Flavor:
+    description: Flavor for the {{role.name}} node.
     default: baremetal
     type: string
-{% if disable_constraints is not defined %}
+{% if role.disable_constraints is not defined %}
     constraints:
       - custom_constraint: nova.flavor
 {% endif %}
-  {{role}}Image:
+  {{role.name}}Image:
     type: string
     default: overcloud-full
-{% if disable_constraints is not defined %}
+{% if role.disable_constraints is not defined %}
     constraints:
       - custom_constraint: glance.image
 {% endif %}
@@ -24,13 +24,17 @@ parameters:
     description: Name of an existing Nova key pair to enable SSH access to the instances
     type: string
     default: default
-{% if disable_constraints is not defined %}
+{% if role.disable_constraints is not defined %}
     constraints:
       - custom_constraint: nova.keypair
 {% endif %}
+  NeutronPhysicalBridge:
+    default: 'br-ex'
+    description: An OVS bridge to create for accessing tenant networks.
+    type: string
   NeutronPublicInterface:
     default: nic1
-    description: What interface to bridge onto br-ex for network nodes.
+    description: Which interface to add to the NeutronPhysicalBridge.
     type: string
   ServiceNetMap:
     default: {}
@@ -59,14 +63,14 @@ parameters:
     default: {}
     description: |
       Additional hiera configuration to inject into the cluster. Note
-      that {{role}}ExtraConfig takes precedence over ExtraConfig.
+      that {{role.name}}ExtraConfig takes precedence over ExtraConfig.
     type: json
-  {{role}}ExtraConfig:
+  {{role.name}}ExtraConfig:
     default: {}
     description: |
       Role specific additional hiera configuration to inject into the cluster.
     type: json
-  {{role}}IPs:
+  {{role.name}}IPs:
     default: {}
     type: json
   NetworkDeploymentActions:
@@ -87,7 +91,7 @@ parameters:
     description: >
       The DNS domain used for the hosts. This must match the
       overcloud_domain_name configured on the undercloud.
-  {{role}}ServerMetadata:
+  {{role.name}}ServerMetadata:
     default: {}
     description: >
       Extra properties or metadata passed to Nova for the created nodes in
@@ -102,7 +106,7 @@ parameters:
       the overcloud. It's accessible via the Nova metadata API. This applies to
       all roles and is merged with a role-specific metadata parameter.
     type: json
-  {{role}}SchedulerHints:
+  {{role.name}}SchedulerHints:
     type: json
     description: Optional scheduler hints to pass to nova
     default: {}
@@ -165,6 +169,25 @@ parameters:
     type: json
     description: Role Specific Parameters
     default: {}
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
 
 conditions:
   server_not_blacklisted:
@@ -172,18 +195,24 @@ conditions:
       equals:
         - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
         - 1
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
 
 resources:
-  {{role}}:
+  {{role.name}}:
     type: OS::TripleO::{{role.name}}Server
     metadata:
       os-collect-config:
         command: {get_param: ConfigCommand}
         splay: {get_param: ConfigCollectSplay}
     properties:
-      image: {get_param: {{role}}Image}
+      image: {get_param: {{role.name}}Image}
       image_update_policy: {get_param: ImageUpdatePolicy}
-      flavor: {get_param: Overcloud{{role}}Flavor}
+      flavor: {get_param: Overcloud{{role.name}}Flavor}
       key_name: {get_param: KeyName}
       networks:
         - network: ctlplane
@@ -197,9 +226,15 @@ resources:
       metadata:
         map_merge:
           - {get_param: ServerMetadata}
-          - {get_param: {{role}}ServerMetadata}
+          - {get_param: {{role.name}}ServerMetadata}
           - {get_param: ServiceMetadataSettings}
-      scheduler_hints: {get_param: {{role}}SchedulerHints}
+      scheduler_hints: {get_param: {{role.name}}SchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -226,54 +261,54 @@ resources:
   # For optional operator role-specific userdata
   # Should return a OS::Heat::MultipartMime reference via OS::stack_id
   RoleUserData:
-    type: OS::TripleO::{{role}}::NodeUserData
+    type: OS::TripleO::{{role.name}}::NodeUserData
 
   ExternalPort:
-    type: OS::TripleO::{{role}}::Ports::ExternalPort
+    type: OS::TripleO::{{role.name}}::Ports::ExternalPort
     properties:
-      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
-      IPPool: {get_param: {{role}}IPs}
+      ControlPlaneIP: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role.name}}IPs}
       NodeIndex: {get_param: NodeIndex}
 
   InternalApiPort:
-    type: OS::TripleO::{{role}}::Ports::InternalApiPort
+    type: OS::TripleO::{{role.name}}::Ports::InternalApiPort
     properties:
-      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
-      IPPool: {get_param: {{role}}IPs}
+      ControlPlaneIP: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role.name}}IPs}
       NodeIndex: {get_param: NodeIndex}
 
   StoragePort:
-    type: OS::TripleO::{{role}}::Ports::StoragePort
+    type: OS::TripleO::{{role.name}}::Ports::StoragePort
     properties:
-      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
-      IPPool: {get_param: {{role}}IPs}
+      ControlPlaneIP: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role.name}}IPs}
       NodeIndex: {get_param: NodeIndex}
 
   StorageMgmtPort:
-    type: OS::TripleO::{{role}}::Ports::StorageMgmtPort
+    type: OS::TripleO::{{role.name}}::Ports::StorageMgmtPort
     properties:
-      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
-      IPPool: {get_param: {{role}}IPs}
+      ControlPlaneIP: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role.name}}IPs}
       NodeIndex: {get_param: NodeIndex}
 
   TenantPort:
-    type: OS::TripleO::{{role}}::Ports::TenantPort
+    type: OS::TripleO::{{role.name}}::Ports::TenantPort
     properties:
-      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
-      IPPool: {get_param: {{role}}IPs}
+      ControlPlaneIP: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role.name}}IPs}
       NodeIndex: {get_param: NodeIndex}
 
   ManagementPort:
-    type: OS::TripleO::{{role}}::Ports::ManagementPort
+    type: OS::TripleO::{{role.name}}::Ports::ManagementPort
     properties:
-      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
-      IPPool: {get_param: {{role}}IPs}
+      ControlPlaneIP: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role.name}}IPs}
       NodeIndex: {get_param: NodeIndex}
 
   NetworkConfig:
-    type: OS::TripleO::{{role}}::Net::SoftwareConfig
+    type: OS::TripleO::{{role.name}}::Net::SoftwareConfig
     properties:
-      ControlPlaneIp: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      ControlPlaneIp: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
       ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
       StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
@@ -284,7 +319,7 @@ resources:
   NetIpMap:
     type: OS::TripleO::Network::Ports::NetIpMap
     properties:
-      ControlPlaneIp: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      ControlPlaneIp: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
       ExternalIp: {get_attr: [ExternalPort, ip_address]}
       ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
       ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
@@ -313,93 +348,93 @@ resources:
           fqdn:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - external
               - {get_param: CloudDomain}
           short:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - external
         internal_api:
           fqdn:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - internalapi
               - {get_param: CloudDomain}
           short:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - internalapi
         storage:
           fqdn:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - storage
               - {get_param: CloudDomain}
           short:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - storage
         storage_mgmt:
           fqdn:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - storagemgmt
               - {get_param: CloudDomain}
           short:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - storagemgmt
         tenant:
           fqdn:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - tenant
               - {get_param: CloudDomain}
           short:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - tenant
         management:
           fqdn:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - management
               - {get_param: CloudDomain}
           short:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - management
         ctlplane:
           fqdn:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - ctlplane
               - {get_param: CloudDomain}
           short:
             list_join:
             - '.'
-            - - {get_attr: [{{role}}, name]}
+            - - {get_attr: [{{role.name}}, name]}
               - ctlplane
 
   PreNetworkConfig:
-    type: OS::TripleO::{{role}}::PreNetworkConfig
+    type: OS::TripleO::{{role.name}}::PreNetworkConfig
     properties:
-      server: {get_resource: {{role}}}
+      server: {get_resource: {{role.name}}}
       RoleParameters: {get_param: RoleParameters}
-      deployment_actions: {get_attr: [DeploymentActions, value]}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -407,10 +442,10 @@ resources:
     properties:
       name: NetworkDeployment
       config: {get_resource: NetworkConfig}
-      server: {get_resource: {{role}}}
+      server: {get_resource: {{role.name}}}
       actions: {get_param: NetworkDeploymentActions}
       input_values:
-        bridge_name: br-ex
+        bridge_name: {get_param: NeutronPhysicalBridge}
         interface_name: {get_param: NeutronPublicInterface}
       actions:
         if:
@@ -418,7 +453,7 @@ resources:
           - {get_param: NetworkDeploymentActions}
           - []
 
-  {{role}}UpgradeInitConfig:
+  {{role.name}}UpgradeInitConfig:
     type: OS::Heat::SoftwareConfig
     properties:
       group: script
@@ -432,26 +467,26 @@ resources:
 
   # Note we may be able to make this conditional on UpgradeInitCommandNotEmpty
   # but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
-  {{role}}UpgradeInitDeployment:
+  {{role.name}}UpgradeInitDeployment:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
     properties:
-      name: {{role}}UpgradeInitDeployment
-      server: {get_resource: {{role}}}
-      config: {get_resource: {{role}}UpgradeInitConfig}
+      name: {{role.name}}UpgradeInitDeployment
+      server: {get_resource: {{role.name}}}
+      config: {get_resource: {{role.name}}UpgradeInitConfig}
       actions:
         if:
           - server_not_blacklisted
           - ['CREATE', 'UPDATE']
           - []
 
-  {{role}}Deployment:
+  {{role.name}}Deployment:
     type: OS::Heat::StructuredDeployment
-    depends_on: {{role}}UpgradeInitDeployment
+    depends_on: {{role.name}}UpgradeInitDeployment
     properties:
-      name: {{role}}Deployment
-      config: {get_resource: {{role}}Config}
-      server: {get_resource: {{role}}}
+      name: {{role.name}}Deployment
+      config: {get_resource: {{role.name}}Config}
+      server: {get_resource: {{role.name}}}
       input_values:
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
       actions:
@@ -460,7 +495,7 @@ resources:
           - ['CREATE', 'UPDATE']
           - []
 
-  {{role}}Config:
+  {{role.name}}Config:
     type: OS::Heat::StructuredConfig
     properties:
       group: hiera
@@ -469,11 +504,11 @@ resources:
           - '"%{::uuid}"'
           - heat_config_%{::deploy_config_name}
           - config_step
-          - {{role.lower()}}_extraconfig
+          - {{role.name.lower()}}_extraconfig
           - extraconfig
           - service_names
           - service_configs
-          - {{role.lower()}}
+          - {{role.name.lower()}}
           - bootstrap_node # provided by allNodesConfig
           - all_nodes # provided by allNodesConfig
           - vip_data # provided by allNodesConfig
@@ -487,9 +522,9 @@ resources:
             map_replace:
               - {get_param: ServiceConfigSettings}
               - values: {get_attr: [NetIpMap, net_ip_map]}
-          {{role.lower()}}_extraconfig: {get_param: {{role}}ExtraConfig}
+          {{role.name.lower()}}_extraconfig: {get_param: {{role.name}}ExtraConfig}
           extraconfig: {get_param: ExtraConfig}
-          {{role.lower()}}:
+          {{role.name.lower()}}:
             tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
             tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: LoggingSources}
             tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: LoggingGroups}
@@ -503,31 +538,25 @@ resources:
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
-    depends_on: {{role}}Deployment
+    depends_on: {{role.name}}Deployment
     type: OS::TripleO::NodeTLSCAData
     properties:
-      server: {get_resource: {{role}}}
+      server: {get_resource: {{role.name}}}
 
   # Hook for site-specific additional pre-deployment config, e.g extra hieradata
-  {{role}}ExtraConfigPre:
-    depends_on: {{role}}Deployment
-    type: OS::TripleO::{{role}}ExtraConfigPre
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
+  {{role.name}}ExtraConfigPre:
+    depends_on: {{role.name}}Deployment
+    type: OS::TripleO::{{role.name}}ExtraConfigPre
     properties:
-        server: {get_resource: {{role}}}
+        server: {get_resource: {{role.name}}}
 
   # Hook for site-specific additional pre-deployment config,
   # applying to all nodes, e.g node registration/unregistration
   NodeExtraConfig:
-    depends_on: [{{role}}ExtraConfigPre, NodeTLSCAData]
+    depends_on: [{{role.name}}ExtraConfigPre, NodeTLSCAData]
     type: OS::TripleO::NodeExtraConfig
-    # We have to use conditions here so that we don't break backwards
-    # compatibility with templates everywhere
-    condition: server_not_blacklisted
     properties:
-        server: {get_resource: {{role}}}
+        server: {get_resource: {{role.name}}}
 
   UpdateConfig:
     type: OS::TripleO::Tasks::PackageUpdate
@@ -538,7 +567,7 @@ resources:
     properties:
       name: UpdateDeployment
       config: {get_resource: UpdateConfig}
-      server: {get_resource: {{role}}}
+      server: {get_resource: {{role.name}}}
       input_values:
         update_identifier:
           get_param: UpdateIdentifier
@@ -548,29 +577,19 @@ resources:
           - ['CREATE', 'UPDATE']
           - []
 
-  DeploymentActions:
-    type: OS::Heat::Value
-    properties:
-      value:
-        if:
-          - server_not_blacklisted
-          - ['CREATE', 'UPDATE']
-          - []
-
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
-    depends_on: {{role}}Deployment
+    depends_on: {{role.name}}Deployment
     properties:
-        server: {get_resource: {{role}}}
-        deployment_actions: {get_attr: [DeploymentActions, value]}
+        server: {get_resource: {{role.name}}}
 
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
-    value: {get_attr: [{{role}}, networks, ctlplane, 0]}
+    value: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
   hostname:
     description: Hostname of the server
-    value: {get_attr: [{{role}}, name]}
+    value: {get_attr: [{{role.name}}, name]}
   hostname_map:
     description: Mapping of network names to hostnames
     value:
@@ -594,9 +613,9 @@ outputs:
           MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
           CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST
         params:
-          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role.name}}HostnameResolveNetwork]}]}
           DOMAIN: {get_param: CloudDomain}
-          PRIMARYHOST: {get_attr: [{{role}}, name]}
+          PRIMARYHOST: {get_attr: [{{role.name}}, name]}
           EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
           EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
           INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
@@ -609,7 +628,7 @@ outputs:
           TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
           MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
-          CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+          CTLPLANEIP: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
   known_hosts_entry:
     description: Entry for ssh known hosts
@@ -624,9 +643,9 @@ TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
 MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
 CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
         params:
-          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role.name}}HostnameResolveNetwork]}]}
           DOMAIN: {get_param: CloudDomain}
-          PRIMARYHOST: {get_attr: [{{role}}, name]}
+          PRIMARYHOST: {get_attr: [{{role.name}}, name]}
           EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
           EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
           INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
@@ -639,13 +658,13 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
           TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
           MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
-          CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+          CTLPLANEIP: {get_attr: [{{role.name}}, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
           HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
-    description: Heat resource handle for {{role}} server
+    description: Heat resource handle for {{role.name}} server
     value:
-      {get_resource: {{role}}}
+      {get_resource: {{role.name}}}
     condition: server_not_blacklisted
   external_ip_address:
     description: IP address of the server in the external network
@@ -665,3 +684,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   management_ip_address:
     description: IP address of the server in the management network
     value: {get_attr: [ManagementPort, ip_address]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [{{role.name}}, os_collect_config]}
index 7a18ef0..d55414b 100644 (file)
@@ -95,6 +95,30 @@ are re-asserted when applying latter ones.
 
    5) Service activation (Pacemaker)
 
+It is also possible to use Mistral actions or workflows together with
+a deployment step; these are executed before the main configuration run.
+To describe actions or workflows from within a service, use:
+
+  * service_workflow_tasks: One or more workflow task properties
+
+which expects a map where the key is the step and the value is a list of
+dictionaries, each describing a workflow task, for example::
+
+  service_workflow_tasks:
+    step2:
+      - name: echo
+        action: std.echo output=Hello
+    step3:
+      - name: external
+        workflow: my-pre-existing-workflow-name
+        input:
+          workflow_param1: value
+          workflow_param2: value
+
+The Heat guide for the `OS::Mistral::Workflow task property
+<https://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Mistral::Workflow-prop-tasks>`_
+has more details about the expected dictionary.
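
Editor's note: the execution created for each step passes ctlplane_service_ips through the workflow environment (see the WorkflowTasks_Step{{step}}_Execution resources in docker-steps.j2), so a task can read those IPs back with Mistral's env() expression function. A minimal, hedged sketch, where the task name and std.echo action are illustrative only::

  service_workflow_tasks:
    step2:
      - name: show_service_ips
        action: std.echo output=<% env().service_ips %>
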
+
 Batch Upgrade Steps
 -------------------
 
index e12c55e..48d9599 100644 (file)
@@ -29,20 +29,9 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
-  GlanceBackend:
-    default: swift
-    description: The short name of the Glance backend to use. Should be one
-      of swift, rbd, or file
-    type: string
-    constraints:
-    - allowed_values: ['swift', 'file', 'rbd']
   GnocchiRbdPoolName:
     default: metrics
     type: string
-  NovaEnableRbdBackend:
-    default: false
-    description: Whether to enable or not the Rbd backend for Nova
-    type: boolean
   NovaRbdPoolName:
     default: vms
     type: string
@@ -82,16 +71,6 @@ parameter_groups:
   parameters:
   - ControllerEnableCephStorage
 
-conditions:
-  glance_multiple_locations:
-    and:
-    - equals:
-      - get_param: GlanceBackend
-      - rbd
-    - equals:
-      - get_param: NovaEnableRbdBackend
-      - true
-
 outputs:
   role_data:
     description: Role data for the Ceph base service.
@@ -153,6 +132,3 @@ outputs:
             - keys:
                 CEPH_CLIENT_KEY:
                   list_join: ['.', ['client', {get_param: CephClientUserName}]]
-      service_config_settings:
-        glance_api:
-          glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
index 599532c..65e6ea8 100644 (file)
@@ -27,20 +27,9 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
-  GlanceBackend:
-    default: swift
-    description: The short name of the Glance backend to use. Should be one
-      of swift, rbd, or file
-    type: string
-    constraints:
-    - allowed_values: ['swift', 'file', 'rbd']
   GnocchiRbdPoolName:
     default: metrics
     type: string
-  NovaEnableRbdBackend:
-    default: false
-    description: Whether to enable or not the Rbd backend for Nova
-    type: boolean
   NovaRbdPoolName:
     default: vms
     type: string
@@ -76,16 +65,6 @@ parameters:
                  clients using older Ceph servers.
     type: string
 
-conditions:
-  glance_multiple_locations:
-    and:
-    - equals:
-      - get_param: GlanceBackend
-      - rbd
-    - equals:
-      - get_param: NovaEnableRbdBackend
-      - true
-
 outputs:
   role_data:
     description: Role data for the Ceph External service.
@@ -122,8 +101,5 @@ outputs:
           - ceph-base
           - ceph-mon
           - ceph-osd
-      service_config_settings:
-        glance_api:
-          glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
       step_config: |
         include ::tripleo::profile::base::ceph::client
index 2bde903..882ba29 100644 (file)
@@ -118,6 +118,16 @@ outputs:
                   template: "%{hiera('cloud_name_NETWORK')}"
                   params:
                     NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+              dnsnames:
+                - str_replace:
+                    template: "%{hiera('cloud_name_NETWORK')}"
+                    params:
+                      NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+                - str_replace:
+                    template:
+                      "%{hiera('fqdn_$NETWORK')}"
+                    params:
+                      $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
               principal:
                 str_replace:
                   template: "mysql/%{hiera('cloud_name_NETWORK')}"
@@ -132,6 +142,9 @@ outputs:
             - service: mysql
               network: {get_param: [ServiceNetMap, MysqlNetwork]}
               type: vip
+            - service: mysql
+              network: {get_param: [ServiceNetMap, MysqlNetwork]}
+              type: node
           - null
       upgrade_tasks:
         - name: Check for galera root password
index 7be394b..7110afa 100644 (file)
@@ -36,3 +36,6 @@ outputs:
         - name: Remove ceilometer expirer cron tab on upgrade
           tags: step1
           shell: '/usr/bin/crontab -u ceilometer -r'
+          register: remove_ceilometer_expirer_crontab
+          failed_when: remove_ceilometer_expirer_crontab.rc != 0 and remove_ceilometer_expirer_crontab.stderr != "no crontab for ceilometer"
+          changed_when: remove_ceilometer_expirer_crontab.stderr != "no crontab for ceilometer"
index 7812c8e..a3d5a79 100644 (file)
@@ -96,6 +96,10 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
+  NovaEnableRbdBackend:
+    default: false
+    description: Whether to enable or not the Rbd backend for Nova
+    type: boolean
   RabbitPassword:
     description: The password for RabbitMQ
     type: string
@@ -129,6 +133,14 @@ conditions:
   use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
   glance_workers_unset: {equals : [{get_param: GlanceWorkers}, '']}
   service_debug_unset: {equals : [{get_param: GlanceDebug}, '']}
+  glance_multiple_locations:
+    and:
+    - equals:
+      - get_param: GlanceBackend
+      - rbd
+    - equals:
+      - get_param: NovaEnableRbdBackend
+      - true
 
 resources:
 
@@ -187,6 +199,7 @@ outputs:
             glance::keystone::authtoken::project_domain_name: 'Default'
             glance::api::pipeline: 'keystone'
             glance::api::show_image_direct_url: true
+            glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
             # NOTE: bind IP is found in Heat replacing the network name with the
             # local node IP for the given network; replacement examples
             # (eg. for internal_api):
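
Editor's note: a short example of the parameter_defaults that make the glance_multiple_locations condition above evaluate to true, so glance::api::show_multiple_locations is enabled; only the two relevant values are shown, and any other RBD/Ceph settings the deployment needs are assumed to be provided elsewhere::

  parameter_defaults:
    GlanceBackend: rbd
    NovaEnableRbdBackend: true
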
index 619cf13..5bdc3b8 100644 (file)
@@ -38,6 +38,10 @@ parameters:
     default: /dev/log
     description: Syslog address where HAproxy will send its log
     type: string
+  HAProxyStatsEnabled:
+    default: true
+    description: Whether or not to enable the HAProxy stats interface.
+    type: boolean
   RedisPassword:
     description: The password for Redis
     type: string
@@ -95,6 +99,7 @@ outputs:
             tripleo::haproxy::redis_password: {get_param: RedisPassword}
             tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
             tripleo::haproxy::crl_file: {get_param: InternalTLSCRLPEMFile}
+            tripleo::haproxy::haproxy_stats: {get_param: HAProxyStatsEnabled}
             tripleo::profile::base::haproxy::certificates_specs:
               map_merge:
                 - get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
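
Editor's note: the new HAProxyStatsEnabled parameter defaults to true, so opting out is a one-line override; a minimal sketch::

  parameter_defaults:
    HAProxyStatsEnabled: false
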
index 092d072..1f97b8b 100644 (file)
@@ -89,7 +89,6 @@ outputs:
           horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
           horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
           horizon::vhost_extra_params:
-            add_listen: false
             priority: 10
             access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
             options: ['FollowSymLinks','MultiViews']
index 945033a..0e8eacf 100644 (file)
@@ -43,8 +43,21 @@ parameters:
       e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
     default: {}
     type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
 
 resources:
+  ApacheServiceBase:
+    type: ./apache.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
+
   IronicBase:
     type: ./ironic-base.yaml
     properties:
@@ -63,6 +76,7 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [IronicBase, role_data, config_settings]
+          - get_attr: [ApacheServiceBase, role_data, config_settings]
           - ironic::api::authtoken::password: {get_param: IronicPassword}
             ironic::api::authtoken::project_name: 'service'
             ironic::api::authtoken::user_domain_name: 'Default'
@@ -80,7 +94,17 @@ outputs:
             ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
             # This is used to build links in responses
             ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+            ironic::api::service_name: 'httpd'
             ironic::policy::policies: {get_param: IronicApiPolicies}
+            ironic::wsgi::apache::bind_host: {get_param: [ServiceNetMap, IronicApiNetwork]}
+            ironic::wsgi::apache::port: {get_param: [EndpointMap, IronicInternal, port]}
+            ironic::wsgi::apache::servername:
+              str_replace:
+                template:
+                  "%{hiera('fqdn_$NETWORK')}"
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, IronicApiNetwork]}
+            ironic::wsgi::apache::ssl: {get_param: EnableInternalTLS}
             tripleo.ironic_api.firewall_rules:
               '133 ironic api':
                 dport:
@@ -106,6 +130,9 @@ outputs:
             - '%'
             - "%{hiera('mysql_bind_host')}"
       upgrade_tasks:
-        - name: Stop ironic_api service
+        - name: Stop ironic_api service (before httpd support)
+          tags: step1
+          service: name=openstack-ironic-api state=stopped enabled=no
+        - name: Stop ironic_api service (running under httpd)
           tags: step1
-          service: name=openstack-ironic-api state=stopped
+          service: name=httpd state=stopped
diff --git a/puppet/services/neutron-lbaas.yaml b/puppet/services/neutron-lbaas.yaml
new file mode 100644 (file)
index 0000000..5529db9
--- /dev/null
@@ -0,0 +1,70 @@
+heat_template_version: pike
+
+description: >
+  Neutron LBaaS service configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  NeutronLbaasInterfaceDriver:
+    default: 'neutron.agent.linux.interface.OVSInterfaceDriver'
+    type: string
+  NeutronLbaasDeviceDriver:
+    default: 'neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver'
+    type: string
+  NeutronServiceProviders:
+    default: 'LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default'
+    description: Global list of service providers used by neutron. This
+                 list should be passed in to ensure all service
+                 providers desired by the user are included. The
+                 provided default only sets the provider for the LBaaSv2
+                 subsystem. This is currently incompatible with enabling
+                 octavia-api, as the two services use conflicting defaults and one or the other will break.
+    type: comma_delimited_list
+
+resources:
+
+  NeutronBase:
+    type: ./neutron-base.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron LBaaS role.
+    value:
+      service_name: neutron_lbaas
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronBase, role_data, config_settings]
+          - neutron::agents::lbaas::interface_driver: {get_param: NeutronLbaasInterfaceDriver}
+            neutron::agents::lbaas::device_driver: {get_param: NeutronLbaasDeviceDriver}
+      step_config: |
+        include ::tripleo::profile::base::neutron::lbaas
+      service_config_settings:
+        neutron_api:
+          neutron::server::service_providers: {get_param: NeutronServiceProviders}
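
Editor's note: to actually deploy the new service it still has to be mapped in the resource registry and included in a role's services; a hedged sketch of an environment file follows. The OS::TripleO::Services::NeutronLbaasv2Agent key and the relative path are assumptions about how this service would be registered and may differ::

  resource_registry:
    OS::TripleO::Services::NeutronLbaasv2Agent: ../puppet/services/neutron-lbaas.yaml

  parameter_defaults:
    NeutronServiceProviders: 'LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default'
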
index 76d5c26..4493721 100644 (file)
@@ -92,8 +92,12 @@ resources:
       RoleName: {get_param: RoleName}
       RoleParameters: {get_param: RoleParameters}
 
-  OpenVswitchUpgrade:
-    type: ./openvswitch-upgrade.yaml
+  Ovs:
+    type: ./openvswitch.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
 
 outputs:
   role_data:
@@ -138,7 +142,7 @@ outputs:
           expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
           data:
             ovs_upgrade:
-              get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+              get_attr: [Ovs, role_data, upgrade_tasks]
             neutron_ovs_upgrade:
               - name: Check if neutron_ovs_agent is deployed
                 command: systemctl is-enabled neutron-openvswitch-agent
index 29c1046..da7a4d6 100644 (file)
@@ -26,32 +26,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  HostCpusList:
-    default: "0"
-    description: List of cores to be used for host process
-    type: string
-    constraints:
-      - allowed_pattern: "[0-9,-]+"
-  NeutronDpdkCoreList:
-    default: ""
-    description: List of cores to be used for DPDK Poll Mode Driver
-    type: string
-    constraints:
-      - allowed_pattern: "[0-9,-]*"
-  NeutronDpdkMemoryChannels:
-    default: ""
-    description: Number of memory channels to be used for DPDK
-    type: string
-    constraints:
-      - allowed_pattern: "[0-9]*"
-  NeutronDpdkSocketMemory:
-    default: ""
-    description: Memory allocated for each socket
-    type: string
-  NeutronDpdkDriverType:
-    default: "vfio-pci"
-    description: DPDK Driver type
-    type: string
   # below parameters has to be set in neutron agent only for compute nodes.
   # as of now there is no other usecase for these parameters except dpdk.
   # should be moved to compute only ovs agent in case of any other usecases.
@@ -75,9 +49,6 @@ resources:
       RoleName: {get_param: RoleName}
       RoleParameters: {get_param: RoleParameters}
 
-  OpenVswitchUpgrade:
-    type: ./openvswitch-upgrade.yaml
-
   # Merging role-specific parameters (RoleParameters) with the default parameters.
   # RoleParameters will have the precedence over the default parameters.
   RoleParametersValue:
@@ -89,20 +60,19 @@ resources:
           - map_replace:
             - neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
               neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
-              vswitch::dpdk::driver_type: NeutronDpdkDriverType
-              vswitch::dpdk::host_core_list: HostCpusList
-              vswitch::dpdk::pmd_core_list: NeutronDpdkCoreList
-              vswitch::dpdk::memory_channels: NeutronDpdkMemoryChannels
-              vswitch::dpdk::socket_mem: NeutronDpdkSocketMemory
             - values: {get_param: [RoleParameters]}
           - values:
               NeutronDatapathType: {get_param: NeutronDatapathType}
               NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
-              NeutronDpdkDriverType: {get_param: NeutronDpdkDriverType}
-              HostCpusList: {get_param: HostCpusList}
-              NeutronDpdkCoreList: {get_param: NeutronDpdkCoreList}
-              NeutronDpdkMemoryChannels: {get_param: NeutronDpdkMemoryChannels}
-              NeutronDpdkSocketMemory: {get_param: NeutronDpdkSocketMemory}
+
+  Ovs:
+    type: ./openvswitch.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -116,7 +86,8 @@ outputs:
             - keys:
                 tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
           - neutron::agents::ml2::ovs::enable_dpdk: true
+          - get_attr: [Ovs, role_data, config_settings]
           - get_attr: [RoleParametersValue, value]
       step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
       upgrade_tasks:
-        get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+        get_attr: [Ovs, role_data, upgrade_tasks]
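
Editor's note: with the map_replace above, NeutronDatapathType and NeutronVhostuserSocketDir are now taken from the role-specific RoleParameters. A hedged sketch of role-specific overrides, assuming the usual <RoleName>Parameters convention and a role named ComputeOvsDpdk; both the role name and the socket directory value are assumptions, not taken from this change::

  parameter_defaults:
    ComputeOvsDpdkParameters:
      NeutronDatapathType: netdev
      NeutronVhostuserSocketDir: /var/lib/vhost_sockets
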
index fe2f294..4ce5316 100644 (file)
@@ -28,7 +28,7 @@ parameters:
     type: json
   NovaWorkers:
     default: 0
-    description: Number of workers for Nova API service.
+    description: Number of workers for Nova services.
     type: number
   NovaPassword:
     description: The password for the nova service and db account, used by nova-api.
@@ -81,17 +81,15 @@ conditions:
   nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
 
 resources:
-  # Temporarily disable Nova API deployed in WSGI
-  # https://bugs.launchpad.net/nova/+bug/1661360
-  # ApacheServiceBase:
-  #   type: ./apache.yaml
-  #   properties:
-  #     ServiceNetMap: {get_param: ServiceNetMap}
-  #     DefaultPasswords: {get_param: DefaultPasswords}
-  #     EndpointMap: {get_param: EndpointMap}
-  #     RoleName: {get_param: RoleName}
-  #     RoleParameters: {get_param: RoleParameters}
-  #     EnableInternalTLS: {get_param: EnableInternalTLS}
+  ApacheServiceBase:
+    type: ./apache.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
 
   NovaBase:
     type: ./nova-base.yaml
@@ -114,9 +112,7 @@ outputs:
       config_settings:
         map_merge:
         - get_attr: [NovaBase, role_data, config_settings]
-        # Temporarily disable Nova API deployed in WSGI
-        # https://bugs.launchpad.net/nova/+bug/1661360
-        # - get_attr: [ApacheServiceBase, role_data, config_settings]
+        - get_attr: [ApacheServiceBase, role_data, config_settings]
         - nova::cron::archive_deleted_rows::hour: '*/12'
           nova::cron::archive_deleted_rows::destination: '/dev/null'
           tripleo.nova_api.firewall_rules:
@@ -143,23 +139,21 @@ outputs:
                 "%{hiera('fqdn_$NETWORK')}"
               params:
                 $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
-          # Temporarily disable Nova API deployed in WSGI
-          # https://bugs.launchpad.net/nova/+bug/1661360
-          nova_wsgi_enabled: false
-          # nova::api::service_name: 'httpd'
-          # nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+          nova_wsgi_enabled: true
+          nova::api::service_name: 'httpd'
+          nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
           # NOTE: bind IP is found in Heat replacing the network name with the local node IP
           # for the given network; replacement examples (eg. for internal_api):
           # internal_api -> IP
           # internal_api_uri -> [IP]
           # internal_api_subnet - > IP/CIDR
-          nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
-          nova::wsgi::apache_api::servername:
-            str_replace:
-              template:
-                "%{hiera('fqdn_$NETWORK')}"
-              params:
-                $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+          nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+          nova::wsgi::apache_api::servername:
+            str_replace:
+              template:
+                "%{hiera('fqdn_$NETWORK')}"
+              params:
+                $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
           nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
           nova::api::instance_name_template: {get_param: InstanceNameTemplate}
           nova_enable_db_purge: {get_param: NovaEnableDBPurge}
@@ -169,9 +163,7 @@ outputs:
           - nova_workers_zero
           - {}
           - nova::api::osapi_compute_workers: {get_param: NovaWorkers}
-          # Temporarily disable Nova API deployed in WSGI
-          # https://bugs.launchpad.net/nova/+bug/1661360
-          # nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
+            nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
       step_config: |
         include tripleo::profile::base::nova::api
       service_config_settings:
@@ -199,87 +191,91 @@ outputs:
           nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
           nova::keystone::auth::password: {get_param: NovaPassword}
           nova::keystone::auth::region: {get_param: KeystoneRegion}
-      # Temporarily disable Nova API deployed in WSGI
-      # https://bugs.launchpad.net/nova/+bug/1661360
-      # metadata_settings:
-      #   get_attr: [ApacheServiceBase, role_data, metadata_settings]
+      metadata_settings:
+        get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: get bootstrap nodeid
-          tags: common
-          command: hiera bootstrap_nodeid
-          register: bootstrap_node
-        - name: set is_bootstrap_node fact
-          tags: common
-          set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
-        - name: Extra migration for nova tripleo/+bug/1656791
-          tags: step0,pre-upgrade
-          when: is_bootstrap_node
-          command: nova-manage db online_data_migrations
-        - name: Stop and disable nova_api service (pre-upgrade not under httpd)
-          tags: step2
-          service: name=openstack-nova-api state=stopped enabled=no
-        - name: Create puppet manifest to set transport_url in nova.conf
-          tags: step5
-          when: is_bootstrap_node
-          copy:
-            dest: /root/nova-api_upgrade_manifest.pp
-            mode: 0600
-            content: >
-              $transport_url = os_transport_url({
-                'transport' => hiera('messaging_service_name', 'rabbit'),
-                'hosts'     => any2array(hiera('rabbitmq_node_names', undef)),
-                'port'      => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
-                'username'  => hiera('nova::rabbit_userid', 'guest'),
-                'password'  => hiera('nova::rabbit_password'),
-                'ssl'       => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
-              })
-              oslo::messaging::default { 'nova_config':
-                transport_url => $transport_url
-              }
-        - name: Run puppet apply to set tranport_url in nova.conf
-          tags: step5
-          when: is_bootstrap_node
-          command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
-          register: puppet_apply_nova_api_upgrade
-          failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
-          changed_when: puppet_apply_nova_api_upgrade.rc == 2
-        - name: Setup cell_v2 (map cell0)
-          tags: step5
-          when: is_bootstrap_node
-          shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
-        - name: Setup cell_v2 (create default cell)
-          tags: step5
-          when: is_bootstrap_node
-          # (owalsh) puppet-nova expects the cell name 'default'
-          # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
-          shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
-          register: nova_api_create_cell
-          failed_when: nova_api_create_cell.rc not in [0,2]
-          changed_when: nova_api_create_cell.rc == 0
-        - name: Setup cell_v2 (sync nova/cell DB)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage db sync
-          async: {get_param: NovaDbSyncTimeout}
-          poll: 10
-        - name: Setup cell_v2 (get cell uuid)
-          tags: step5
-          when: is_bootstrap_node
-          shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
-          register: nova_api_cell_uuid
-        - name: Setup cell_v2 (migrate hosts)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
-        - name: Setup cell_v2 (migrate instances)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
-        - name: Sync nova_api DB
-          tags: step5
-          command: nova-manage api_db sync
-          when: is_bootstrap_node
-        - name: Online data migration for nova
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage db online_data_migrations
+        yaql:
+          expression: $.data.apache_upgrade + $.data.nova_api_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            nova_api_upgrade:
+              - name: get bootstrap nodeid
+                tags: common
+                command: hiera bootstrap_nodeid
+                register: bootstrap_node
+              - name: set is_bootstrap_node fact
+                tags: common
+                set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+              - name: Extra migration for nova tripleo/+bug/1656791
+                tags: step0,pre-upgrade
+                when: is_bootstrap_node
+                command: nova-manage db online_data_migrations
+              - name: Stop and disable nova_api service (pre-upgrade not under httpd)
+                tags: step2
+                service: name=openstack-nova-api state=stopped enabled=no
+              - name: Create puppet manifest to set transport_url in nova.conf
+                tags: step5
+                when: is_bootstrap_node
+                copy:
+                  dest: /root/nova-api_upgrade_manifest.pp
+                  mode: 0600
+                  content: >
+                    $transport_url = os_transport_url({
+                      'transport' => hiera('messaging_service_name', 'rabbit'),
+                      'hosts'     => any2array(hiera('rabbitmq_node_names', undef)),
+                      'port'      => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
+                      'username'  => hiera('nova::rabbit_userid', 'guest'),
+                      'password'  => hiera('nova::rabbit_password'),
+                      'ssl'       => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
+                    })
+                    oslo::messaging::default { 'nova_config':
+                      transport_url => $transport_url
+                    }
+              - name: Run puppet apply to set tranport_url in nova.conf
+                tags: step5
+                when: is_bootstrap_node
+                command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+                register: puppet_apply_nova_api_upgrade
+                failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
+                changed_when: puppet_apply_nova_api_upgrade.rc == 2
+              - name: Setup cell_v2 (map cell0)
+                tags: step5
+                when: is_bootstrap_node
+                shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
+              - name: Setup cell_v2 (create default cell)
+                tags: step5
+                when: is_bootstrap_node
+                # (owalsh) puppet-nova expects the cell name 'default'
+                # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
+                shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
+                register: nova_api_create_cell
+                failed_when: nova_api_create_cell.rc not in [0,2]
+                changed_when: nova_api_create_cell.rc == 0
+              - name: Setup cell_v2 (sync nova/cell DB)
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage db sync
+                async: {get_param: NovaDbSyncTimeout}
+                poll: 10
+              - name: Setup cell_v2 (get cell uuid)
+                tags: step5
+                when: is_bootstrap_node
+                shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
+                register: nova_api_cell_uuid
+              - name: Setup cell_v2 (migrate hosts)
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
+              - name: Setup cell_v2 (migrate instances)
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
+              - name: Sync nova_api DB
+                tags: step5
+                command: nova-manage api_db sync
+                when: is_bootstrap_node
+              - name: Online data migration for nova
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage db online_data_migrations
index 68a71e4..33b07de 100644 (file)
@@ -32,6 +32,13 @@ parameters:
   CephClientUserName:
     default: openstack
     type: string
+  CephClientKey:
+    description: The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+    type: string
+    hidden: true
+  CephClusterFSID:
+    type: string
+    description: The Ceph cluster FSID. Must be a UUID.
   CinderEnableNfsBackend:
     default: false
     description: Whether to enable or not the NFS backend for Cinder
@@ -159,12 +166,8 @@ outputs:
             nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
             tripleo::profile::base::nova::compute::cinder_nfs_backend: {get_param: CinderEnableNfsBackend}
             rbd_persistent_storage: {get_param: CinderEnableRbdBackend}
-            nova::compute::rbd::rbd_keyring:
-              list_join:
-              - '.'
-              - - 'client'
-                - {get_param: CephClientUserName}
-            nova::compute::rbd::libvirt_rbd_secret_uuid: "%{hiera('ceph::profile::params::fsid')}"
+            nova::compute::rbd::libvirt_rbd_secret_key: {get_param: CephClientKey}
+            nova::compute::rbd::libvirt_rbd_secret_uuid: {get_param: CephClusterFSID}
             nova::compute::instance_usage_audit: true
             nova::compute::instance_usage_audit_period: 'hour'
             nova::compute::rbd::ephemeral_storage: {get_param: NovaEnableRbdBackend}
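
For illustration only (not part of the patch above), the new CephClientKey and CephClusterFSID parameters might be supplied for an external Ceph deployment roughly as follows; the values are placeholders, not real credentials:

    parameter_defaults:
      CephClientKey: '<output of ceph-authtool --gen-print-key>'
      CephClusterFSID: '<FSID of the external Ceph cluster, a UUID>'
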
index 30eb127..b83b985 100644 (file)
@@ -28,7 +28,7 @@ parameters:
     type: json
   NovaWorkers:
     default: 0
-    description: Number of workers for Nova Conductor service.
+    description: Number of workers for Nova services.
     type: number
   MonitoringSubscriptionNovaConductor:
     default: 'overcloud-nova-conductor'
index 335b2c2..bc7dc1b 100644 (file)
@@ -28,7 +28,7 @@ parameters:
     type: json
   NovaWorkers:
     default: 0
-    description: Number of workers for Nova API service.
+    description: Number of workers for Nova services.
     type: number
 
 conditions:
index 86aa079..aaa7ef5 100644 (file)
@@ -28,7 +28,7 @@ parameters:
     type: json
   NovaWorkers:
     default: 0
-    description: Number of workers for Nova Placement API service.
+    description: Number of workers for Nova services.
     type: number
   NovaPassword:
     description: The password for the nova service and db account, used by nova-placement.
index 5da6d43..72a1fce 100644 (file)
@@ -45,6 +45,14 @@ parameters:
     default:
       tag: openstack.nova.scheduler
       path: /var/log/nova/nova-scheduler.log
+  NovaSchedulerDiscoverHostsInCellsInterval:
+    type: number
+    default: -1
+    description: >
+      This value controls how often (in seconds) the scheduler should
+      attempt to discover new hosts that have been added to cells.
+      The default value of -1 disables the periodic task completely.
+      It is recommended to set this parameter for deployments using Ironic.
 
 resources:
   NovaBase:
@@ -71,6 +79,7 @@ outputs:
           - nova::ram_allocation_ratio: '1.0'
             nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters}
             nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
+            nova::scheduler::discover_hosts_in_cells_interval: {get_param: NovaSchedulerDiscoverHostsInCellsInterval}
       step_config: |
         include tripleo::profile::base::nova::scheduler
       upgrade_tasks:
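
For Ironic deployments, the NovaSchedulerDiscoverHostsInCellsInterval parameter added above could be set through an environment file along these lines; the 300-second interval is only an example value:

    parameter_defaults:
      NovaSchedulerDiscoverHostsInCellsInterval: 300
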
index 0d859be..1a8754a 100644 (file)
@@ -57,8 +57,14 @@ parameters:
     type: json
 
 resources:
-  OpenVswitchUpgrade:
-    type: ./openvswitch-upgrade.yaml
+  Ovs:
+    type: ./openvswitch.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -66,19 +72,21 @@ outputs:
     value:
       service_name: opendaylight_ovs
       config_settings:
-        opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
-        opendaylight::username: {get_param: OpenDaylightUsername}
-        opendaylight::password: {get_param: OpenDaylightPassword}
-        opendaylight_check_url: {get_param: OpenDaylightCheckURL}
-        opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
-        neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
-        neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
-        tripleo.opendaylight_ovs.firewall_rules:
-          '118 neutron vxlan networks':
-             proto: 'udp'
-             dport: 4789
-          '136 neutron gre networks':
-             proto: 'gre'
+        map_merge:
+          - opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+            opendaylight::username: {get_param: OpenDaylightUsername}
+            opendaylight::password: {get_param: OpenDaylightPassword}
+            opendaylight_check_url: {get_param: OpenDaylightCheckURL}
+            opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
+            neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+            neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
+            tripleo.opendaylight_ovs.firewall_rules:
+              '118 neutron vxlan networks':
+               proto: 'udp'
+               dport: 4789
+              '136 neutron gre networks':
+               proto: 'gre'
+          - get_attr: [Ovs, role_data, config_settings]
       step_config: |
         include tripleo::profile::base::neutron::plugins::ovs::opendaylight
       upgrade_tasks:
@@ -86,7 +94,7 @@ outputs:
           expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
           data:
             ovs_upgrade:
-              get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+              get_attr: [Ovs, role_data, upgrade_tasks]
             opendaylight_upgrade:
               - name: Check if openvswitch is deployed
                 command: systemctl is-enabled openvswitch
diff --git a/puppet/services/openvswitch-upgrade.yaml b/puppet/services/openvswitch-upgrade.yaml
deleted file mode 100644 (file)
index f6e7846..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-heat_template_version: pike
-
-description: >
-  Openvswitch package special handling for upgrade.
-
-outputs:
-  role_data:
-    description: Upgrade task for special handling of Openvswitch (OVS) upgrade.
-    value:
-      service_name: openvswitch_upgrade
-      upgrade_tasks:
-        - name: Check openvswitch version.
-          tags: step2
-          register: ovs_version
-          ignore_errors: true
-          shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
-        - name: Check openvswitch packaging.
-          tags: step2
-          shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
-          register: ovs_packaging_issue
-          ignore_errors: true
-        - block:
-            - name: "Ensure empty directory: emptying."
-              file:
-                state: absent
-                path: /root/OVS_UPGRADE
-            - name: "Ensure empty directory: creating."
-              file:
-                state: directory
-                path: /root/OVS_UPGRADE
-                owner: root
-                group: root
-                mode: 0750
-            - name: Download OVS packages.
-              command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
-            - name: Get rpm list for manual upgrade of OVS.
-              shell: ls -1 /root/OVS_UPGRADE/*.rpm
-              register: ovs_list_of_rpms
-            - name: Manual upgrade of OVS
-              shell: |
-                rpm -U --test {{item}} 2>&1 | grep "already installed" || \
-                rpm -U --replacepkgs --notriggerun --nopostun {{item}};
-              args:
-                chdir: /root/OVS_UPGRADE
-              with_items:
-                - "{{ovs_list_of_rpms.stdout_lines}}"
-          tags: step2
-          when: "'2.5.0-14' in '{{ovs_version.stdout}}'
-                or
-                ovs_packaging_issue|succeeded"
diff --git a/puppet/services/openvswitch.yaml b/puppet/services/openvswitch.yaml
new file mode 100644 (file)
index 0000000..36aa5db
--- /dev/null
@@ -0,0 +1,178 @@
+heat_template_version: pike
+
+description: >
+  Open vSwitch Configuration
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  OvsDpdkCoreList:
+    description: >
+      List of cores to be used for DPDK lcore threads.  Note, these threads
+      are used by the OVS control path for validator and handling functions.
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ""
+  OvsDpdkMemoryChannels:
+    description: Number of memory channels per socket to be used for DPDK
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9]*"
+    default: ""
+  OvsDpdkSocketMemory:
+    default: ""
+    description: >
+      Sets the amount of hugepage memory to assign per NUMA node. It is
+      recommended to use the socket closest to the PCIe slot used for the
+      desired DPDK NIC.  The format should be in "<socket 0 mem>, <socket 1
+      mem>, <socket n mem>", where the value is specified in MB.  For example:
+      "1024,0".
+    type: string
+  OvsDpdkDriverType:
+    default: "vfio-pci"
+    description: >
+      DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+      this UIO/PMD driver.
+    type: string
+  OvsPmdCoreList:
+    description: >
+      A list or range of CPU cores for PMD threads to be pinned to.  Note that
+      NIC placement relative to the socket, the number of hyper-threaded
+      logical cores, and the desired number of PMD threads can all play a role
+      in configuring this setting.  These cores should be on the same socket
+      where OvsDpdkSocketMemory is assigned.  If hyper-threading is used,
+      specify both sibling logical cores of each physical core.  Specifying
+      more than one core spawns multiple PMD threads, which may improve
+      dataplane performance.
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    type: string
+    default: ""
+  # DEPRECATED: the following options are deprecated and are currently maintained
+  # for backwards compatibility. They will be removed in the Queens cycle.
+  HostCpusList:
+    description: List of cores to be used for host process
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ''
+  NeutronDpdkCoreList:
+    description: List of cores to be used for DPDK Poll Mode Driver
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ''
+  NeutronDpdkMemoryChannels:
+    description: Number of memory channels to be used for DPDK
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9]*"
+    default: ''
+  NeutronDpdkSocketMemory:
+    default: ''
+    description: Memory allocated for each socket
+    type: string
+  NeutronDpdkDriverType:
+    default: "vfio-pci"
+    description: DPDK Driver type
+    type: string
+
+parameter_groups:
+- label: deprecated
+  description: Do not use deprecated params, they will be removed.
+  parameters:
+    - HostCpusList
+    - NeutronDpdkCoreList
+    - NeutronDpdkMemoryChannels
+    - NeutronDpdkSocketMemory
+    - NeutronDpdkDriverType
+
+conditions:
+  l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+  pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+  mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+  socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+  driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+
+outputs:
+  role_data:
+    description: Role data for the Open vSwitch service.
+    value:
+      service_name: openvswitch
+      config_settings:
+        map_replace:
+          - map_replace:
+            - vswitch::dpdk::driver_type: OvsDpdkDriverType
+              vswitch::dpdk::host_core_list: OvsDpdkCoreList
+              vswitch::dpdk::pmd_core_list: OvsPmdCoreList
+              vswitch::dpdk::memory_channels: OvsDpdkMemoryChannels
+              vswitch::dpdk::socket_mem: OvsDpdkSocketMemory
+            - values: {get_param: [RoleParameters]}
+          - values:
+              OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+              OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+              OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+              OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+              OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+      upgrade_tasks:
+        - name: Check openvswitch version.
+          tags: step2
+          register: ovs_version
+          ignore_errors: true
+          shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+        - name: Check openvswitch packaging.
+          tags: step2
+          shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+          register: ovs_packaging_issue
+          ignore_errors: true
+        - block:
+            - name: "Ensure empty directory: emptying."
+              file:
+                state: absent
+                path: /root/OVS_UPGRADE
+            - name: "Ensure empty directory: creating."
+              file:
+                state: directory
+                path: /root/OVS_UPGRADE
+                owner: root
+                group: root
+                mode: 0750
+            - name: Download OVS packages.
+              command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+            - name: Get rpm list for manual upgrade of OVS.
+              shell: ls -1 /root/OVS_UPGRADE/*.rpm
+              register: ovs_list_of_rpms
+            - name: Manual upgrade of OVS
+              shell: |
+                rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+                rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+              args:
+                chdir: /root/OVS_UPGRADE
+              with_items:
+                - "{{ovs_list_of_rpms.stdout_lines}}"
+          tags: step2
+          when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+                or
+                ovs_packaging_issue|succeeded"
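
A minimal sketch of setting the non-deprecated OVS-DPDK parameters defined above in an environment file; the core lists, channel count and socket memory are placeholders that depend on the host's NUMA layout:

    parameter_defaults:
      OvsDpdkCoreList: "0,16"
      OvsPmdCoreList: "2,18,3,19"
      OvsDpdkMemoryChannels: "4"
      OvsDpdkSocketMemory: "1024,0"
      OvsDpdkDriverType: "vfio-pci"
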
index d8e942d..0a7659e 100644 (file)
@@ -27,6 +27,11 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
 
 resources:
 
@@ -61,6 +66,8 @@ outputs:
             # internal_api_subnet - > IP/CIDR
             tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr:
               get_param: [ServiceNetMap, MysqlNetwork]
+            tripleo::profile::pacemaker::database::mysql::ca_file:
+              get_param: InternalTLSCAFile
       step_config: |
         include ::tripleo::profile::pacemaker::database::mysql
       metadata_settings:
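
If the internal CA certificate lives somewhere other than the default, the new InternalTLSCAFile parameter could be overridden as follows; the path shown is illustrative:

    parameter_defaults:
      InternalTLSCAFile: '/etc/pki/ca-trust/source/anchors/internal-ca.crt'
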
index 9a304ed..c707efb 100644 (file)
@@ -59,10 +59,10 @@ parameters:
     type: string
   SwiftCeilometerPipelineEnabled:
     description: Set to False to disable the swift proxy ceilometer pipeline.
-    default: True
+    default: false
     type: boolean
   SwiftCeilometerIgnoreProjects:
-    default: ['services']
+    default: ['service']
     description: Comma-separated list of project names to ignore.
     type: comma_delimited_list
   RabbitClientPort:
@@ -81,7 +81,7 @@ parameters:
 
 conditions:
 
-  ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
+  ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, true]}
   use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
 
 resources:
@@ -118,14 +118,20 @@ outputs:
             swift::proxy::authtoken::project_name: 'service'
             swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
             swift::proxy::workers: {get_param: SwiftWorkers}
-            swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
-            swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
-            swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
-            swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
-            swift::proxy::ceilometer::password: {get_param: SwiftPassword}
-            swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
-            swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
-            swift::proxy::ceilometer::nonblocking_notify: true
+          -
+            if:
+            - ceilometer_pipeline_enabled
+            -
+              swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
+              swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+              swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+              swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+              swift::proxy::ceilometer::password: {get_param: SwiftPassword}
+              swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
+              swift::proxy::ceilometer::nonblocking_notify: true
+              swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+            - {}
+          - swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
             tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
             tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL}
             tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
@@ -168,7 +174,6 @@ outputs:
                     - ''
                   - 'proxy-logging'
                   - 'proxy-server'
-            swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
             swift::proxy::account_autocreate: true
             # NOTE: bind IP is found in Heat replacing the network name with the
             # local node IP for the given network; replacement examples
diff --git a/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml b/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml
new file mode 100644 (file)
index 0000000..193154d
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - The HAProxy stats interface can now be enabled/disabled with the
+    HAProxyStatsEnabled flag. Note that it's still enabled by default.
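
A minimal sketch, assuming the flag is consumed via parameter_defaults like other service toggles, of disabling the stats interface:

    parameter_defaults:
      HAProxyStatsEnabled: false
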
diff --git a/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml b/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml
new file mode 100644 (file)
index 0000000..67a55cd
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - Added a new DeploymentSwiftDataMap parameter, which is used to set the
+    deployment_swift_data property on the Server resources. The parameter is a
+    map where the keys are the Heat-assigned hostnames and the values are maps
+    of the container/object names in Swift.
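
A hypothetical sketch of that map, assuming per-server 'container' and 'object' keys matching the server resource's deployment_swift_data property; hostnames and values are illustrative:

    parameter_defaults:
      DeploymentSwiftDataMap:
        overcloud-controller-0:
          container: overcloud-controller-0
          object: 0
        overcloud-novacompute-0:
          container: overcloud-novacompute-0
          object: 0
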
diff --git a/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml b/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml
new file mode 100644 (file)
index 0000000..cd352ac
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - Adds a new output, ServerOsCollectConfigData, which is the
+    os-collect-config configuration associated with each server resource.
+    This can be used to [pre]configure the os-collect-config agents on
+    deployed servers.
diff --git a/releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml b/releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml
new file mode 100644 (file)
index 0000000..98ba86d
--- /dev/null
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    When ``environments/services/ironic.yaml`` is used, enable the periodic
+    task in nova-scheduler to automatically discover new nodes. Otherwise a
+    user has to run the nova management command on the controllers each time
+    a node is added.
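
For reference, the manual step the periodic task replaces is the same cell_v2 host discovery used in the nova-api upgrade tasks earlier in this change; an Ansible-style sketch:

    - name: Discover new baremetal hosts manually (illustrative)
      command: nova-manage cell_v2 discover_hosts --verbose
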
diff --git a/releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml b/releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml
new file mode 100644 (file)
index 0000000..28dac8b
--- /dev/null
@@ -0,0 +1,5 @@
+---
+fixes:
+  - Disable ceilometer in the swift proxy middleware pipeline by default.
+    With the gnocchi and swift backends it generates a large number of events
+    and causes heavy load. It can easily be re-enabled if needed.
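
Re-enabling it is a one-parameter change, using the SwiftCeilometerPipelineEnabled parameter whose default is flipped above:

    parameter_defaults:
      SwiftCeilometerPipelineEnabled: true
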
diff --git a/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml b/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml
new file mode 100644 (file)
index 0000000..4cb9b80
--- /dev/null
@@ -0,0 +1,8 @@
+---
+features:
+  - DPDK is enabled in OvS before the NetworkDeployment to ensure DPDK
+    is ready to handle new port additions.
+upgrade:
+  - A new parameter ServiceNames is added to the PreNetworkConfig resource.
+    All templates associated with PreNetworkConfig should add this new
+    parameter during the upgrade.
diff --git a/releasenotes/notes/enable-neutron-lbaas-integration-b72126f2c7e71cee.yaml b/releasenotes/notes/enable-neutron-lbaas-integration-b72126f2c7e71cee.yaml
new file mode 100644 (file)
index 0000000..490dc24
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - Allows the configuration of the Neutron LBaaS
+    agent.
\ No newline at end of file
diff --git a/releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml b/releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml
new file mode 100644 (file)
index 0000000..18474cf
--- /dev/null
@@ -0,0 +1,3 @@
+---
+fixes:
+  - Incorrect network used for Glance API service.
diff --git a/releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml b/releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml
new file mode 100644 (file)
index 0000000..25016e8
--- /dev/null
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    Fix support for RPMs to be installed via DeployArtifactURLs. LP#1697102
diff --git a/releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml b/releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml
new file mode 100644 (file)
index 0000000..0721334
--- /dev/null
@@ -0,0 +1,21 @@
+---
+features:
+  - |
+    There is now a tool in tripleo-heat-templates, similar to the
+    oslo-config-generator, that can be used to programmatically generate
+    sample environment files based directly on the contents of the templates
+    themselves.  This ensures consistency in the sample environments, as well
+    as making it easier to update environments to reflect changes to the
+    templates.
+upgrade:
+  - |
+    Some sample environment files will be moving as part of the work to
+    generate them programmatically.  The old versions will be left in place for
+    one cycle to allow a smooth upgrade process.  When upgrading, if any of the
+    environment files in use for the deployment have been deprecated they
+    should be replaced with the new generated versions.
+deprecations:
+  - |
+    Where a generated sample environment replaces an existing one, the existing
+    environment is deprecated.  This will be noted in a comment at the top of
+    the file.
diff --git a/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml b/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml
new file mode 100644 (file)
index 0000000..1e44d92
--- /dev/null
@@ -0,0 +1,23 @@
+---
+features:
+  - Adds a common openvswitch service template to be
+    inherited by other services.
+  - Adds an environment file to be used for deploying
+    OpenDaylight + OVS DPDK.
+  - Adds first-boot and OVS configuration scripts.
+deprecations:
+  - The ``HostCpusList`` parameter is deprecated in
+    favor of ``OvsDpdkCoreList`` and will be removed
+    in a future release.
+  - The ``NeutronDpdkCoreList`` parameter is deprecated in
+    favor of ``OvsPmdCoreList`` and will be removed
+    in a future release.
+  - The ``NeutronDpdkMemoryChannels`` parameter is deprecated in
+    favor of ``OvsDpdkMemoryChannels`` and will be removed
+    in a future release.
+  - The ``NeutronDpdkSocketMemory`` parameter is deprecated in
+    favor of ``OvsDpdkSocketMemory`` and will be removed
+    in a future release.
+  - The ``NeutronDpdkDriverType`` parameter is deprecated in
+    favor of ``OvsDpdkDriverType`` and will be removed
+    in a future release.
diff --git a/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml b/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml
new file mode 100644 (file)
index 0000000..cf99ec5
--- /dev/null
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    It is now possible to trigger Mistral workflows or workflow actions
+    before a deployment step is applied. This can be defined within the
+    scope of a service template and is described as a task property
+    for the Heat OS::Mistral::Workflow resource; for more details see
+    the puppet/services/README.rst file.
\ No newline at end of file
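
A hypothetical sketch of what such a definition could look like inside a service template's role_data output; the step key and the std.echo action are assumptions for illustration, and the authoritative format is documented in puppet/services/README.rst:

      service_workflow_tasks:
        step2:
          - name: echo
            action: std.echo output=Hello
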
diff --git a/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml b/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml
new file mode 100644 (file)
index 0000000..1bc9937
--- /dev/null
@@ -0,0 +1,7 @@
+---
+features:
+  - Add 2 new example environments to facilitate deploying split-stack,
+    environments/overcloud-baremetal.j2.yaml and
+    environments/overcloud-services.yaml. The environments are used to deploy two
+    separate Heat stacks, one for just the baremetal+network configuration and one
+    for the service configuration.
diff --git a/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml b/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml
new file mode 100644 (file)
index 0000000..1f49bac
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - Add VipMap output to the top level stack output. VipMap is a mapping from
+    each network to the VIP address on that network. Also includes the Redis
+    VIP.
index b0a1313..40e51aa 100644 (file)
@@ -46,6 +46,7 @@
     - OS::TripleO::Services::CinderVolume
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Congress
+    - OS::TripleO::Services::Clustercheck
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::Etcd
@@ -84,6 +85,7 @@
     - OS::TripleO::Services::NeutronL2gwAgent
     - OS::TripleO::Services::NeutronL2gwApi
     - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronLbaasv2Agent
     - OS::TripleO::Services::NeutronLinuxbridgeAgent
     - OS::TripleO::Services::NeutronMetadataAgent
     - OS::TripleO::Services::NeutronML2FujitsuCfab
index 6cf2120..4ad405a 100644 (file)
@@ -40,6 +40,7 @@
     - OS::TripleO::Services::CinderVolume
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Congress
+    - OS::TripleO::Services::Clustercheck
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::Etcd
index a28eaa6..635c430 100644 (file)
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::NeutronApi
-    - OS::TripleO::Services::NeutronBgpvpnApi
+    - OS::TripleO::Services::NeutronBgpVpnApi
     - OS::TripleO::Services::NeutronCorePlugin
     - OS::TripleO::Services::NeutronDhcpAgent
     - OS::TripleO::Services::NeutronL2gwAgent
     - OS::TripleO::Services::NeutronL2gwApi
     - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronLbaasv2Agent
     - OS::TripleO::Services::NeutronMetadataAgent
     - OS::TripleO::Services::NeutronML2FujitsuCfab
     - OS::TripleO::Services::NeutronML2FujitsuFossw
index f96e562..7bb87c2 100644 (file)
@@ -47,6 +47,7 @@
     - OS::TripleO::Services::CinderHPELeftHandISCSI
     - OS::TripleO::Services::CinderScheduler
     - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Clustercheck
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Congress
     - OS::TripleO::Services::Docker
@@ -87,6 +88,7 @@
     - OS::TripleO::Services::NeutronL2gwAgent
     - OS::TripleO::Services::NeutronL2gwApi
     - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronLbaasv2Agent
     - OS::TripleO::Services::NeutronLinuxbridgeAgent
     - OS::TripleO::Services::NeutronMetadataAgent
     - OS::TripleO::Services::NeutronML2FujitsuCfab
index 724727b..4d3ca8d 100644 (file)
@@ -116,6 +116,10 @@ outputs:
         yaql:
           expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
           data: {role_data: {get_attr: [ServiceChain, role_data]}}
+      service_workflow_tasks:
+        yaql:
+          expression: $.data.role_data.where($ != null).select($.get('service_workflow_tasks')).where($ != null).reduce($1.mergeWith($2), {})
+          data: {role_data: {get_attr: [ServiceChain, role_data]}}
       step_config: {get_attr: [ServiceChain, role_data, step_config]}
       upgrade_tasks:
         yaql:
index 8113635..6e0eea3 100644 (file)
@@ -4,7 +4,7 @@
 PyYAML>=3.10.0 # MIT
 Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
 six>=1.9.0 # MIT
-sphinx!=1.6.1,>=1.5.1 # BSD
+sphinx>=1.6.2 # BSD
 oslosphinx>=4.7.0 # Apache-2.0
 reno!=2.3.1,>=1.8.0 # Apache-2.0
 coverage!=4.4,>=4.0 # Apache-2.0
index 69ed96a..c7d5ed9 100755 (executable)
@@ -138,19 +138,31 @@ def process_templates(template_path, role_data_path, output_dir,
                         print("jinja2 rendering roles %s" % ","
                               .join(role_names))
                         for role in role_names:
-                            j2_data = {'role': role}
-                            # (dprince) For the undercloud installer we don't
-                            # want to have heat check nova/glance API's
-                            if r_map[role].get('disable_constraints', False):
-                                j2_data['disable_constraints'] = True
+                            j2_data = {'role': r_map[role]}
                             out_f = "-".join(
                                 [role.lower(),
                                  os.path.basename(f).replace('.role.j2.yaml',
                                                              '.yaml')])
                             out_f_path = os.path.join(out_dir, out_f)
                             if not (out_f_path in excl_templates):
-                                _j2_render_to_file(template_data, j2_data,
-                                                   out_f_path, overwrite)
+                                if '{{role.name}}' in template_data:
+                                    j2_data = {'role': r_map[role]}
+                                    _j2_render_to_file(template_data, j2_data,
+                                                       out_f_path, overwrite)
+                                else:
+                                    # Backwards compatibility with templates
+                                    # that specify {{role}} vs {{role.name}}
+                                    j2_data = {'role': role}
+                                    # (dprince) For the undercloud installer we
+                                    # don't want to have heat check nova/glance
+                                    # API's
+                                    if r_map[role].get('disable_constraints',
+                                                       False):
+                                        j2_data['disable_constraints'] = True
+                                    _j2_render_to_file(
+                                        template_data, j2_data,
+                                        out_f_path, overwrite)
+
                             else:
                                 print('skipping rendering of %s' % out_f_path)
                 elif f.endswith('.j2.yaml'):
index ff215fb..f9e89db 100755 (executable)
@@ -38,7 +38,25 @@ OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
                             'metadata_settings', 'kolla_config']
 REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
                                           'config_image']
-OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags' ]
+OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags', 'volumes' ]
+# Mapping of parameter names to a list of the fields we should _not_ enforce
+# consistency across files on.  This should only contain parameters whose
+# definition we cannot change for backwards compatibility reasons.  New
+# parameters to the templates should not be added to this list.
+PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
+                                   'ManagementAllocationPools': ['default'],
+                                   'ExternalNetCidr': ['default'],
+                                   'ExternalAllocationPools': ['default'],
+                                   'StorageNetCidr': ['default'],
+                                   'StorageAllocationPools': ['default'],
+                                   'StorageMgmtNetCidr': ['default'],
+                                   'StorageMgmtAllocationPools': ['default'],
+                                   }
+
+PREFERRED_CAMEL_CASE = {
+    'ec2api': 'Ec2Api',
+    'haproxy': 'HAProxy',
+}
 
 
 def exit_usage():
@@ -46,6 +64,11 @@ def exit_usage():
     sys.exit(1)
 
 
+def to_camel_case(string):
+    return PREFERRED_CAMEL_CASE.get(string, ''.join(s.capitalize() or '_' for
+                                                    s in string.split('_')))
+
+
 def get_base_endpoint_map(filename):
     try:
         tpl = yaml.load(open(filename).read())
@@ -170,6 +193,30 @@ def validate_docker_service(filename, tpl):
                         % (key, filename))
                   return 1
 
+            config_volume = puppet_config.get('config_volume')
+            expected_config_image_parameter = "Docker%sConfigImage" % to_camel_case(config_volume)
+            if config_volume and not expected_config_image_parameter in tpl.get('parameters', []):
+                print('ERROR: Missing %s heat parameter for %s config_volume.'
+                      % (expected_config_image_parameter, config_volume))
+                return 1
+
+        if 'docker_config' in role_data:
+            docker_config = role_data['docker_config']
+            for _, step in docker_config.items():
+                for _, container in step.items():
+                    if not isinstance(container, dict):
+                        # NOTE(mandre) this skips everything that is not a dict
+                        # so we may ignore some container definitions if they
+                        # are in a map_merge for example
+                        continue
+                    command = container.get('command', '')
+                    if isinstance(command, list):
+                        command = ' '.join(map(str, command))
+                    if 'bootstrap_host_exec' in command \
+                            and container.get('user') != 'root':
+                      print('ERROR: bootstrap_host_exec needs to run as the root user.')
+                      return 1
+
     if 'parameters' in tpl:
         for param in required_params:
             if param not in tpl['parameters']:
@@ -211,7 +258,30 @@ def validate_service(filename, tpl):
     return 0
 
 
-def validate(filename):
+def validate(filename, param_map):
+    """Validate a Heat template
+
+    :param filename: The path to the file to validate
+    :param param_map: A dict which will be populated with the details of the
+                      parameters in the template.  The dict will have the
+                      following structure:
+
+                          {'ParameterName': [
+                               {'filename': ./file1.yaml,
+                                'data': {'description': '',
+                                         'type': string,
+                                         'default': '',
+                                         ...}
+                                },
+                               {'filename': ./file2.yaml,
+                                'data': {'description': '',
+                                         'type': string,
+                                         'default': '',
+                                         ...}
+                                },
+                                ...
+                           ]}
+    """
     print('Validating %s' % filename)
     retval = 0
     try:
@@ -240,7 +310,9 @@ def validate(filename):
         return 1
     # yaml is OK, now walk the parameters and output a warning for unused ones
     if 'heat_template_version' in tpl:
-        for p in tpl.get('parameters', {}):
+        for p, data in tpl.get('parameters', {}).items():
+            definition = {'data': data, 'filename': filename}
+            param_map.setdefault(p, []).append(definition)
             if p in required_params:
                 continue
             str_p = '\'%s\'' % p
@@ -260,6 +332,7 @@ exit_val = 0
 failed_files = []
 base_endpoint_map = None
 env_endpoint_maps = list()
+param_map = {}
 
 for base_path in path_args:
     if os.path.isdir(base_path):
@@ -267,7 +340,7 @@ for base_path in path_args:
             for f in files:
                 if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
                     file_path = os.path.join(subdir, f)
-                    failed = validate(file_path)
+                    failed = validate(file_path, param_map)
                     if failed:
                         failed_files.append(file_path)
                     exit_val |= failed
@@ -278,7 +351,7 @@ for base_path in path_args:
                         if env_endpoint_map:
                             env_endpoint_maps.append(env_endpoint_map)
     elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
-        failed = validate(base_path)
+        failed = validate(base_path, param_map)
         if failed:
             failed_files.append(base_path)
         exit_val |= failed
@@ -310,6 +383,34 @@ else:
         failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
     exit_val |= 1
 
+# Validate that duplicate parameters defined in multiple files all have the
+# same definition.
+mismatch_count = 0
+for p, defs in param_map.items():
+    # Nothing to validate if the parameter is only defined once
+    if len(defs) == 1:
+        continue
+    check_data = [d['data'] for d in defs]
+    # Override excluded fields so they don't affect the result
+    exclusions = PARAMETER_DEFINITION_EXCLUSIONS.get(p, [])
+    ex_dict = {}
+    for field in exclusions:
+        ex_dict[field] = 'IGNORED'
+    for d in check_data:
+        d.update(ex_dict)
+    # If all items in the list are not == the first, then the check fails
+    if check_data.count(check_data[0]) != len(check_data):
+        mismatch_count += 1
+        # TODO(bnemec): Make this a hard failure once all the templates have
+        #               been fixed.
+        #exit_val |= 1
+        #failed_files.extend([d['filename'] for d in defs])
+        print('Mismatched parameter definitions found for "%s"' % p)
+        print('Definitions found:')
+        for d in defs:
+            print('  %s:\n    %s' % (d['filename'], d['data']))
+print('Mismatched parameter definitions: %d' % mismatch_count)
+
 if failed_files:
     print('Validation failed on:')
     for f in failed_files: