Merge "Add detach to docker-toool"
author    Jenkins <jenkins@review.openstack.org>
          Fri, 30 Jun 2017 03:52:38 +0000 (03:52 +0000)
committer Gerrit Code Review <review@openstack.org>
          Fri, 30 Jun 2017 03:52:38 +0000 (03:52 +0000)
290 files changed:
.gitignore
.testr.conf [new file with mode: 0644]
README.rst
ci/environments/README.rst [new file with mode: 0644]
ci/environments/ceph-min-osds.yaml [new file with mode: 0644]
ci/environments/multinode.yaml
ci/environments/scenario001-multinode-containers.yaml [new file with mode: 0644]
ci/environments/scenario002-multinode-containers.yaml [moved from ci/environments/multinode-container-upgrade.yaml with 71% similarity]
ci/environments/scenario003-multinode-containers.yaml [new file with mode: 0644]
ci/environments/scenario004-multinode-containers.yaml [new file with mode: 0644]
common/README [new file with mode: 0644]
deployed-server/deployed-server.yaml
docker/deploy-steps-playbook.yaml [new file with mode: 0644]
docker/docker-puppet.py
docker/docker-steps.j2
docker/docker-toool
docker/firstboot/setup_docker_host.yaml
docker/services/aodh-api.yaml
docker/services/aodh-evaluator.yaml
docker/services/aodh-listener.yaml
docker/services/aodh-notifier.yaml
docker/services/ceilometer-agent-central.yaml
docker/services/ceilometer-agent-compute.yaml
docker/services/ceilometer-agent-ipmi.yaml [new file with mode: 0644]
docker/services/ceilometer-agent-notification.yaml
docker/services/cinder-api.yaml [new file with mode: 0644]
docker/services/cinder-backup.yaml [new file with mode: 0644]
docker/services/cinder-scheduler.yaml [new file with mode: 0644]
docker/services/cinder-volume.yaml [new file with mode: 0644]
docker/services/collectd.yaml [new file with mode: 0644]
docker/services/congress-api.yaml [new file with mode: 0644]
docker/services/containers-common.yaml
docker/services/database/mongodb.yaml
docker/services/database/mysql-client.yaml [new file with mode: 0644]
docker/services/database/mysql.yaml
docker/services/database/redis.yaml
docker/services/ec2-api.yaml [new file with mode: 0644]
docker/services/etcd.yaml
docker/services/glance-api.yaml
docker/services/gnocchi-api.yaml
docker/services/gnocchi-metricd.yaml
docker/services/gnocchi-statsd.yaml
docker/services/haproxy.yaml [new file with mode: 0644]
docker/services/heat-api-cfn.yaml
docker/services/heat-api.yaml
docker/services/heat-engine.yaml
docker/services/horizon.yaml [new file with mode: 0644]
docker/services/ironic-api.yaml
docker/services/ironic-conductor.yaml
docker/services/ironic-pxe.yaml
docker/services/iscsid.yaml [new file with mode: 0644]
docker/services/keystone.yaml
docker/services/manila-api.yaml [new file with mode: 0644]
docker/services/manila-scheduler.yaml [new file with mode: 0644]
docker/services/memcached.yaml
docker/services/mistral-api.yaml
docker/services/mistral-engine.yaml
docker/services/mistral-executor.yaml
docker/services/multipathd.yaml [new file with mode: 0644]
docker/services/neutron-api.yaml
docker/services/neutron-dhcp.yaml
docker/services/neutron-l3.yaml
docker/services/neutron-metadata.yaml
docker/services/neutron-ovs-agent.yaml
docker/services/neutron-plugin-ml2.yaml
docker/services/nova-api.yaml
docker/services/nova-compute.yaml
docker/services/nova-conductor.yaml
docker/services/nova-consoleauth.yaml [new file with mode: 0644]
docker/services/nova-ironic.yaml
docker/services/nova-libvirt.yaml
docker/services/nova-placement.yaml
docker/services/nova-scheduler.yaml
docker/services/nova-vnc-proxy.yaml [new file with mode: 0644]
docker/services/octavia-api.yaml [new file with mode: 0644]
docker/services/octavia-health-manager.yaml [new file with mode: 0644]
docker/services/octavia-housekeeping.yaml [new file with mode: 0644]
docker/services/octavia-worker.yaml [new file with mode: 0644]
docker/services/pacemaker/cinder-backup.yaml [new file with mode: 0644]
docker/services/pacemaker/cinder-volume.yaml [new file with mode: 0644]
docker/services/pacemaker/clustercheck.yaml [new file with mode: 0644]
docker/services/pacemaker/database/mysql.yaml [new file with mode: 0644]
docker/services/pacemaker/database/redis.yaml [new file with mode: 0644]
docker/services/pacemaker/haproxy.yaml [new file with mode: 0644]
docker/services/pacemaker/rabbitmq.yaml [new file with mode: 0644]
docker/services/panko-api.yaml
docker/services/rabbitmq.yaml
docker/services/sahara-api.yaml [new file with mode: 0644]
docker/services/sahara-engine.yaml [new file with mode: 0644]
docker/services/sensu-client.yaml [new file with mode: 0644]
docker/services/services.yaml [deleted file]
docker/services/swift-proxy.yaml
docker/services/swift-ringbuilder.yaml
docker/services/swift-storage.yaml
docker/services/tacker.yaml [new file with mode: 0644]
docker/services/zaqar.yaml
environments/cinder-dellsc-config.yaml
environments/cinder-netapp-config.yaml
environments/docker-services-tls-everywhere.yaml
environments/docker.yaml
environments/enable-tls.yaml
environments/host-config-and-reboot.j2.yaml [new file with mode: 0644]
environments/host-config-pre-network.j2.yaml [deleted file]
environments/hyperconverged-ceph.yaml
environments/inject-trust-anchor-hiera.yaml
environments/inject-trust-anchor.yaml
environments/network-isolation.j2.yaml [new file with mode: 0644]
environments/network-isolation.yaml [deleted file]
environments/networking/neutron-midonet.yaml [new file with mode: 0644]
environments/neutron-bgpvpn-opendaylight.yaml [new file with mode: 0644]
environments/neutron-linuxbridge.yaml [new file with mode: 0644]
environments/neutron-midonet.yaml
environments/neutron-ml2-ovn-ha.yaml [new file with mode: 0644]
environments/neutron-opendaylight-dpdk.yaml [new file with mode: 0644]
environments/neutron-ovs-dpdk.yaml
environments/nonha-arch.yaml [new file with mode: 0644]
environments/overcloud-baremetal.j2.yaml [new file with mode: 0644]
environments/overcloud-services.yaml [new file with mode: 0644]
environments/predictable-placement/custom-hostnames.yaml [new file with mode: 0644]
environments/puppet-ceph-devel.yaml
environments/puppet-ceph-external.yaml
environments/services-docker/collectd.yaml [new file with mode: 0644]
environments/services-docker/congress.yaml [new file with mode: 0644]
environments/services-docker/ec2-api.yaml [new file with mode: 0644]
environments/services-docker/manila.yaml [new file with mode: 0644]
environments/services-docker/octavia.yaml [new file with mode: 0644]
environments/services-docker/sahara.yaml [new file with mode: 0644]
environments/services-docker/sensu-client.yaml [new file with mode: 0644]
environments/services-docker/tacker.yaml [new file with mode: 0644]
environments/services-docker/undercloud-ceilometer.yaml
environments/services/ironic.yaml
environments/ssl/enable-tls.yaml [new file with mode: 0644]
environments/ssl/inject-trust-anchor-hiera.yaml [new file with mode: 0644]
environments/ssl/inject-trust-anchor.yaml [new file with mode: 0644]
environments/ssl/tls-endpoints-public-dns.yaml [new file with mode: 0644]
environments/ssl/tls-endpoints-public-ip.yaml [new file with mode: 0644]
environments/ssl/tls-everywhere-endpoints-dns.yaml [new file with mode: 0644]
environments/storage/cinder-netapp-config.yaml [new file with mode: 0644]
environments/storage/cinder-nfs.yaml [new file with mode: 0644]
environments/storage/enable-ceph.yaml [new file with mode: 0644]
environments/storage/external-ceph.yaml [new file with mode: 0644]
environments/storage/glance-nfs.yaml [new file with mode: 0644]
environments/tls-endpoints-public-dns.yaml
environments/tls-endpoints-public-ip.yaml
environments/tls-everywhere-endpoints-dns.yaml
environments/undercloud.yaml
extraconfig/post_deploy/undercloud_post.sh
extraconfig/pre_network/ansible_host_config.yaml [moved from extraconfig/pre_network/ansible_host_config.ansible with 89% similarity]
extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
extraconfig/pre_network/host_config_and_reboot.yaml [new file with mode: 0644]
extraconfig/tasks/pacemaker_common_functions.sh
extraconfig/tasks/post_puppet_pacemaker.j2.yaml
extraconfig/tasks/yum_update.sh
net-config-bond.yaml
network/config/bond-with-vlans/ceph-storage.yaml
network/config/bond-with-vlans/cinder-storage.yaml
network/config/bond-with-vlans/compute-dpdk.yaml
network/config/bond-with-vlans/compute.yaml
network/config/bond-with-vlans/controller-no-external.yaml
network/config/bond-with-vlans/controller-v6.yaml
network/config/bond-with-vlans/controller.yaml
network/config/bond-with-vlans/swift-storage.yaml
network/endpoints/endpoint_data.yaml
network/endpoints/endpoint_map.yaml
network/ports/ctlplane_vip.yaml
network/ports/net_ip_list_map.yaml
network/scripts/run-os-net-config.sh
network/service_net_map.j2.yaml
overcloud-resource-registry-puppet.j2.yaml
overcloud.j2.yaml
plan-samples/README.rst [new file with mode: 0644]
plan-samples/plan-environment-derived-params.yaml [new file with mode: 0644]
puppet/all-nodes-config.yaml
puppet/blockstorage-role.yaml
puppet/cephstorage-role.yaml
puppet/compute-role.yaml
puppet/controller-role.yaml
puppet/deploy-artifacts.sh
puppet/major_upgrade_steps.j2.yaml
puppet/objectstorage-role.yaml
puppet/post.j2.yaml
puppet/puppet-steps.j2
puppet/role.role.j2.yaml
puppet/services/README.rst
puppet/services/aodh-base.yaml
puppet/services/barbican-api.yaml
puppet/services/ceilometer-base.yaml
puppet/services/certmonger-user.yaml
puppet/services/cinder-backend-dellsc.yaml
puppet/services/cinder-backend-netapp.yaml
puppet/services/cinder-base.yaml
puppet/services/cinder-volume.yaml
puppet/services/congress.yaml
puppet/services/database/mysql.yaml
puppet/services/database/redis.yaml
puppet/services/disabled/ceilometer-expirer-disabled.yaml
puppet/services/glance-api.yaml
puppet/services/gnocchi-base.yaml
puppet/services/haproxy.yaml
puppet/services/heat-base.yaml
puppet/services/horizon.yaml
puppet/services/ironic-api.yaml
puppet/services/ironic-base.yaml
puppet/services/ironic-conductor.yaml
puppet/services/ironic-inspector.yaml [new file with mode: 0644]
puppet/services/keystone.yaml
puppet/services/manila-base.yaml
puppet/services/mistral-base.yaml
puppet/services/neutron-base.yaml
puppet/services/neutron-linuxbridge-agent.yaml [new file with mode: 0644]
puppet/services/neutron-ovs-agent.yaml
puppet/services/neutron-ovs-dpdk-agent.yaml
puppet/services/neutron-sriov-agent.yaml
puppet/services/nova-api.yaml
puppet/services/nova-base.yaml
puppet/services/nova-compute.yaml
puppet/services/nova-scheduler.yaml
puppet/services/octavia-base.yaml
puppet/services/opendaylight-ovs.yaml
puppet/services/openvswitch-upgrade.yaml [deleted file]
puppet/services/openvswitch.yaml [new file with mode: 0644]
puppet/services/ovn-dbs.yaml
puppet/services/pacemaker/database/mysql.yaml
puppet/services/pacemaker/ovn-dbs.yaml [new file with mode: 0644]
puppet/services/pacemaker_remote.yaml
puppet/services/panko-api.yaml
puppet/services/panko-base.yaml
puppet/services/sahara-base.yaml
puppet/services/swift-proxy.yaml
puppet/services/tacker.yaml
puppet/services/zaqar.yaml
releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml [new file with mode: 0644]
releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml [new file with mode: 0644]
releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml [new file with mode: 0644]
releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml [new file with mode: 0644]
releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml [new file with mode: 0644]
releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml [new file with mode: 0644]
releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml [new file with mode: 0644]
releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml [new file with mode: 0644]
releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml [new file with mode: 0644]
releasenotes/notes/debug_per_service-54a260917c4a7e3a.yaml [new file with mode: 0644]
releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml [new file with mode: 0644]
releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml [new file with mode: 0644]
releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml [new file with mode: 0644]
releasenotes/notes/example-roles-d27c748090f6a154.yaml [new file with mode: 0644]
releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml [new file with mode: 0644]
releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml [new file with mode: 0644]
releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml [new file with mode: 0644]
releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml [new file with mode: 0644]
releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml [new file with mode: 0644]
releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml [new file with mode: 0644]
releasenotes/notes/ovn-ha-c0139ac519680872.yaml [new file with mode: 0644]
releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml [new file with mode: 0644]
releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml [new file with mode: 0644]
releasenotes/notes/remove-ceilometer-cron-85362e197ba245a0.yaml [new file with mode: 0644]
releasenotes/notes/server-blacklist-support-370c1a1f15a28a41.yaml [new file with mode: 0644]
releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml [new file with mode: 0644]
releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml [new file with mode: 0644]
releasenotes/notes/update-metric-delay-default-963d073026e2cc15.yaml [new file with mode: 0644]
releasenotes/notes/vhost_default_dir-cac327a0ac05df90.yaml [new file with mode: 0644]
releasenotes/notes/vipmap-output-4a9ce99930960346.yaml [new file with mode: 0644]
releasenotes/source/conf.py
roles/BlockStorage.yaml [new file with mode: 0644]
roles/CephStorage.yaml [new file with mode: 0644]
roles/Compute.yaml [new file with mode: 0644]
roles/Controller.yaml [new file with mode: 0644]
roles/ControllerOpenstack.yaml [new file with mode: 0644]
roles/Database.yaml [new file with mode: 0644]
roles/IronicConductor.yaml [new file with mode: 0644]
roles/Messaging.yaml [new file with mode: 0644]
roles/Networker.yaml [new file with mode: 0644]
roles/ObjectStorage.yaml [new file with mode: 0644]
roles/README.rst [new file with mode: 0644]
roles/Telemetry.yaml [new file with mode: 0644]
roles/Undercloud.yaml [new file with mode: 0644]
roles_data.yaml
roles_data_undercloud.yaml
sample-env-generator/README.rst [new file with mode: 0644]
sample-env-generator/networking.yaml [new file with mode: 0644]
sample-env-generator/predictable-placement.yaml [new file with mode: 0644]
sample-env-generator/ssl.yaml [new file with mode: 0644]
sample-env-generator/storage.yaml [new file with mode: 0644]
services.yaml [moved from puppet/services/services.yaml with 84% similarity]
test-requirements.txt
tools/yaml-validate.py
tox.ini
tripleo_heat_templates/__init__.py [new file with mode: 0644]
tripleo_heat_templates/environment_generator.py [new file with mode: 0755]
tripleo_heat_templates/tests/__init__.py [new file with mode: 0644]
tripleo_heat_templates/tests/test_environment_generator.py [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
index cea6064..2d06721 100644 (file)
@@ -22,8 +22,10 @@ lib64
 pip-log.txt
 
 # Unit test / coverage reports
+cover
 .coverage
 .tox
+.testrepository
 nosetests.xml
 
 # Translations
diff --git a/.testr.conf b/.testr.conf
new file mode 100644 (file)
index 0000000..5837838
--- /dev/null
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./tripleo_heat_templates ./tripleo_heat_templates $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
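
With this .testr.conf in place (together with the test-requirements.txt and tox.ini changes further down), the new tripleo_heat_templates unit tests can be run locally. A minimal sketch of the usual testrepository workflow; the exact tox environment name is an assumption, not something defined by this change:

    $ pip install -r test-requirements.txt   # brings in testrepository/subunit
    $ testr init                             # create the local .testrepository store
    $ testr run                              # runs the discover command configured above
    $ tox -e py27                            # alternative, if tox.ini defines a py27 env
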
diff --git a/README.rst b/README.rst
index 6a753c0..988a0d8 100644 (file)
@@ -54,6 +54,9 @@ A description of the directory layout in TripleO Heat Templates.
  * validation-scripts: validation scripts useful to all deployment
                        configurations
 
+ * roles: example roles that can be used with the tripleoclient to generate
+          a roles_data.yaml for a deployment. See the
+          `roles/README.rst <roles/README.rst>`_ for additional details.
 
 Service testing matrix
 ----------------------
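
The roles bullet added above refers to the example role files introduced under roles/ by this change. A hedged sketch of turning them into a deployment roles_data.yaml with python-tripleoclient, assuming the "openstack overcloud roles generate" command and its --roles-path option are available in this release:

    $ openstack overcloud roles generate \
        --roles-path /usr/share/openstack-tripleo-heat-templates/roles \
        -o roles_data.yaml Controller Compute
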
diff --git a/ci/environments/README.rst b/ci/environments/README.rst
new file mode 100644 (file)
index 0000000..4a3cb9d
--- /dev/null
@@ -0,0 +1,4 @@
+This directory contains environments that are used in tripleo-ci.  They may change from
+release to release or within a release, and should not be relied upon in a production
+environment.  The top-level ``environments`` directory in tripleo-heat-templates
+contains the production-ready environment files.
diff --git a/ci/environments/ceph-min-osds.yaml b/ci/environments/ceph-min-osds.yaml
new file mode 100644 (file)
index 0000000..4e72d31
--- /dev/null
@@ -0,0 +1,2 @@
+parameter_defaults:
+  CephPoolDefaultSize: 1
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index 20e37e3..102787a 100644 (file)
@@ -52,6 +52,8 @@ parameter_defaults:
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::Horizon
+    - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::Sshd
   ControllerExtraConfig:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario001-multinode-containers.yaml b/ci/environments/scenario001-multinode-containers.yaml
new file mode 100644 (file)
index 0000000..c142922
--- /dev/null
@@ -0,0 +1,134 @@
+# NOTE: This is an environment specific for containers CI. Mainly we
+# deploy non-pacemakerized overcloud. Once we are able to deploy and
+# upgrade pacemakerized and containerized overcloud, we should remove
+# this file and use normal CI multinode environments/scenarios.
+
+resource_registry:
+  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
+  OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
+  OS::TripleO::Services::CephClient: ../../puppet/services/ceph-client.yaml
+  OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
+  OS::TripleO::Services::Collectd: ../../puppet/services/metrics/collectd.yaml
+  OS::TripleO::Services::Tacker: ../../puppet/services/tacker.yaml
+  OS::TripleO::Services::Congress: ../../puppet/services/congress.yaml
+  OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
+  OS::TripleO::Services::SensuClient: ../../puppet/services/monitoring/sensu-client.yaml
+  # NOTE: This is needed because of upgrades from Ocata to Pike. We
+  # deploy the initial environment with Ocata templates, and
+  # overcloud-resource-registry.yaml there doesn't have this Docker
+  # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
+  # remove this.
+  OS::TripleO::Services::Docker: OS::Heat::None
+
+parameter_defaults:
+  ControllerServices:
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::HeatApi
+    - OS::TripleO::Services::HeatApiCfn
+    - OS::TripleO::Services::HeatApiCloudwatch
+    - OS::TripleO::Services::HeatEngine
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronMetadataAgent
+    - OS::TripleO::Services::NeutronServer
+    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::HAproxy
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::NovaCompute
+    - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::MongoDb
+    - OS::TripleO::Services::Redis
+    - OS::TripleO::Services::AodhApi
+    - OS::TripleO::Services::AodhEvaluator
+    - OS::TripleO::Services::AodhNotifier
+    - OS::TripleO::Services::AodhListener
+    - OS::TripleO::Services::CeilometerAgentCentral
+    - OS::TripleO::Services::CeilometerAgentIpmi
+    - OS::TripleO::Services::CeilometerAgentNotification
+    - OS::TripleO::Services::GnocchiApi
+    - OS::TripleO::Services::GnocchiMetricd
+    - OS::TripleO::Services::GnocchiStatsd
+    - OS::TripleO::Services::PankoApi
+    - OS::TripleO::Services::CephMon
+    - OS::TripleO::Services::CephOSD
+    - OS::TripleO::Services::CephClient
+    - OS::TripleO::Services::CinderApi
+    - OS::TripleO::Services::CinderBackup
+    - OS::TripleO::Services::CinderScheduler
+    - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Tacker
+    - OS::TripleO::Services::Congress
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::SensuClient
+
+  ControllerExtraConfig:
+    nova::compute::libvirt::services::libvirt_virt_type: qemu
+    nova::compute::libvirt::libvirt_virt_type: qemu
+  Debug: true
+  #NOTE(gfidente): not great but we need this to deploy on ext4
+  #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+  ExtraConfig:
+    ceph::profile::params::osd_max_object_name_len: 256
+    ceph::profile::params::osd_max_object_namespace_len: 64
+  #NOTE: These ID's and keys should be regenerated for
+  # a production deployment. What is here is suitable for
+  # developer and CI testing only.
+  CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
+  CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
+  CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
+  CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  NovaEnableRbdBackend: true
+  CinderEnableRbdBackend: true
+  CinderBackupBackend: ceph
+  GlanceBackend: rbd
+  GnocchiBackend: rbd
+  CinderEnableIscsiBackend: false
+  BannerText: |
+    ******************************************************************
+    * This system is for the use of authorized users only. Usage of  *
+    * this system may be monitored and recorded by system personnel. *
+    * Anyone using this system expressly consents to such monitoring *
+    * and is advised that if such monitoring reveals possible        *
+    * evidence of criminal activity, system personnel may provide    *
+    * the evidence from such monitoring to law enforcement officials.*
+    ******************************************************************
+  CollectdExtraPlugins:
+    - rrdtool
+  LoggingServers:
+    - host: 127.0.0.1
+      port: 24224
+  MonitoringRabbitHost: 127.0.0.1
+  MonitoringRabbitPort: 5676
+  MonitoringRabbitPassword: sensu
+  TtyValues:
+    - console
+    - tty1
+    - tty2
+    - tty3
+    - tty4
+    - tty5
+    - tty6
diff --git a/ci/environments/multinode-container-upgrade.yaml b/ci/environments/scenario002-multinode-containers.yaml
similarity index 71%
rename from ci/environments/multinode-container-upgrade.yaml
rename to ci/environments/scenario002-multinode-containers.yaml
@@ -1,12 +1,15 @@
-# NOTE: This is an environment specific for containers upgrade
-# CI. Mainly we deploy non-pacemakerized overcloud, as at the time
-# being containerization of services managed by pacemaker is not
-# complete, so we deploy and upgrade the non-HA services for now.
+# NOTE: This is an environment specific for containers CI. Mainly we
+# deploy non-pacemakerized overcloud. Once we are able to deploy and
+# upgrade pacemakerized and containerized overcloud, we should remove
+# this file and use normal CI multinode environments/scenarios.
 
 resource_registry:
-  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
-  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
-
+  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  # TODO: Barbican is not yet containerized: https://review.openstack.org/#/c/474327
+  # OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
+  OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
+  OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
   # NOTE: This is needed because of upgrades from Ocata to Pike. We
   # deploy the initial environment with Ocata templates, and
   # overcloud-resource-registry.yaml there doesn't have this Docker
@@ -16,11 +19,6 @@ resource_registry:
 
 parameter_defaults:
   ControllerServices:
-    - OS::TripleO::Services::CephMon
-    - OS::TripleO::Services::CephOSD
-    - OS::TripleO::Services::CinderApi
-    - OS::TripleO::Services::CinderScheduler
-    - OS::TripleO::Services::CinderVolume
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
@@ -48,23 +46,26 @@ parameter_defaults:
     - OS::TripleO::Services::NovaMetadata
     - OS::TripleO::Services::NovaScheduler
     - OS::TripleO::Services::Ntp
-    - OS::TripleO::Services::SwiftProxy
-    - OS::TripleO::Services::SwiftStorage
-    - OS::TripleO::Services::SwiftRingBuilder
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Timezone
-    - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::CinderApi
+    - OS::TripleO::Services::CinderBackup
+    - OS::TripleO::Services::CinderScheduler
+    - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::SwiftStorage
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::BarbicanApi
+    - OS::TripleO::Services::MongoDb
+    - OS::TripleO::Services::Zaqar
+    - OS::TripleO::Services::Ec2Api
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::Sshd
   ControllerExtraConfig:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
-    # Required for Centos 7.3 and Qemu 2.6.0
-    nova::compute::libvirt::libvirt_cpu_mode: 'none'
-    #NOTE(gfidente): not great but we need this to deploy on ext4
-    #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
-    ceph::profile::params::osd_max_object_name_len: 256
-    ceph::profile::params::osd_max_object_namespace_len: 64
-  SwiftCeilometerPipelineEnabled: False
-  Debug: True
+  Debug: true
+  SwiftCeilometerPipelineEnabled: false
diff --git a/ci/environments/scenario003-multinode-containers.yaml b/ci/environments/scenario003-multinode-containers.yaml
new file mode 100644 (file)
index 0000000..7b917ae
--- /dev/null
@@ -0,0 +1,69 @@
+# NOTE: This is an environment specific for containers CI. Mainly we
+# deploy non-pacemakerized overcloud. Once we are able to deploy and
+# upgrade pacemakerized and containerized overcloud, we should remove
+# this file and use normal CI multinode environments/scenarios.
+
+resource_registry:
+  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+  OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
+  OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+  OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+  OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
+  # NOTE: This is needed because of upgrades from Ocata to Pike. We
+  # deploy the initial environment with Ocata templates, and
+  # overcloud-resource-registry.yaml there doesn't have this Docker
+  # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
+  # remove this.
+  OS::TripleO::Services::Docker: OS::Heat::None
+
+parameter_defaults:
+  ControllerServices:
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::HeatApi
+    - OS::TripleO::Services::HeatApiCfn
+    - OS::TripleO::Services::HeatApiCloudwatch
+    - OS::TripleO::Services::HeatEngine
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronMetadataAgent
+    - OS::TripleO::Services::NeutronServer
+    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::HAproxy
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::NovaCompute
+    - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::SaharaApi
+    - OS::TripleO::Services::SaharaEngine
+    - OS::TripleO::Services::MistralApi
+    - OS::TripleO::Services::MistralEngine
+    - OS::TripleO::Services::MistralExecutor
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::Sshd
+  ControllerExtraConfig:
+    nova::compute::libvirt::services::libvirt_virt_type: qemu
+    nova::compute::libvirt::libvirt_virt_type: qemu
+  Debug: true
+  # we don't deploy Swift so we switch to file backend.
+  GlanceBackend: 'file'
+  KeystoneTokenProvider: 'fernet'
+  SwiftCeilometerPipelineEnabled: false
diff --git a/ci/environments/scenario004-multinode-containers.yaml b/ci/environments/scenario004-multinode-containers.yaml
new file mode 100644 (file)
index 0000000..1d6d591
--- /dev/null
@@ -0,0 +1,95 @@
+# NOTE: This is an environment specific for containers CI. Mainly we
+# deploy non-pacemakerized overcloud. Once we are able to deploy and
+# upgrade pacemakerized and containerized overcloud, we should remove
+# this file and use normal CI multinode environments/scenarios.
+
+resource_registry:
+  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  # TODO deploy ceph with ceph-ansible: https://review.openstack.org/#/c/465066/
+  OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
+  OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
+  OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
+  OS::TripleO::Services::CephRgw: ../../puppet/services/ceph-rgw.yaml
+  OS::TripleO::Services::SwiftProxy: OS::Heat::None
+  OS::TripleO::Services::SwiftStorage: OS::Heat::None
+  OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+  OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
+  OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
+  # NOTE: being containerized here: https://review.openstack.org/#/c/471527/
+  OS::TripleO::Services::ManilaShare: ../../puppet/services/manila-share.yaml
+  OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+  # TODO: containerize NeutronBgpVpnApi
+  OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
+  # NOTE: This is needed because of upgrades from Ocata to Pike. We
+  # deploy the initial environment with Ocata templates, and
+  # overcloud-resource-registry.yaml there doesn't have this Docker
+  # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
+  # remove this.
+  OS::TripleO::Services::Docker: OS::Heat::None
+
+
+parameter_defaults:
+  ControllerServices:
+    - OS::TripleO::Services::CephMds
+    - OS::TripleO::Services::CephMon
+    - OS::TripleO::Services::CephOSD
+    - OS::TripleO::Services::CephRgw
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::HeatApi
+    - OS::TripleO::Services::HeatApiCfn
+    - OS::TripleO::Services::HeatApiCloudwatch
+    - OS::TripleO::Services::HeatEngine
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronBgpVpnApi
+    - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronMetadataAgent
+    - OS::TripleO::Services::NeutronServer
+    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::HAproxy
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::ManilaApi
+    - OS::TripleO::Services::ManilaScheduler
+    - OS::TripleO::Services::ManilaBackendCephFs
+    - OS::TripleO::Services::ManilaShare
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::NovaCompute
+    - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::Sshd
+  ControllerExtraConfig:
+    nova::compute::libvirt::services::libvirt_virt_type: qemu
+    nova::compute::libvirt::libvirt_virt_type: qemu
+  Debug: true
+  #NOTE(gfidente): not great but we need this to deploy on ext4
+  #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+  ExtraConfig:
+    ceph::profile::params::osd_max_object_name_len: 256
+    ceph::profile::params::osd_max_object_namespace_len: 64
+  #NOTE: These ID's and keys should be regenerated for
+  # a production deployment. What is here is suitable for
+  # developer and CI testing only.
+  CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
+  CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
+  CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
+  CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+  SwiftCeilometerPipelineEnabled: false
+  NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+  BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
diff --git a/common/README b/common/README
new file mode 100644 (file)
index 0000000..6a52311
--- /dev/null
@@ -0,0 +1 @@
+This will contain some common templates but it needs to be added to the RPM spec first
diff --git a/deployed-server/deployed-server.yaml b/deployed-server/deployed-server.yaml
index 0847bfb..16deb7d 100644 (file)
@@ -44,6 +44,9 @@ parameters:
       Command or script snippet to run on all overcloud nodes to
       initialize the upgrade process. E.g. a repository switch.
     default: ''
+  deployment_swift_data:
+    type: json
+    default: {}
 
 resources:
   deployed-server:
@@ -51,6 +54,7 @@ resources:
     properties:
       name: {get_param: name}
       software_config_transport: {get_param: software_config_transport}
+      deployment_swift_data: {get_param: deployment_swift_data}
 
   UpgradeInitConfig:
     type: OS::Heat::SoftwareConfig
@@ -133,3 +137,5 @@ outputs:
         - {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
   name:
     value: {get_attr: [HostsEntryDeployment, hostname]}
+  os_collect_config:
+    value: {get_attr: [deployed-server, os_collect_config]}
diff --git a/docker/deploy-steps-playbook.yaml b/docker/deploy-steps-playbook.yaml
new file mode 100644 (file)
index 0000000..87587a4
--- /dev/null
@@ -0,0 +1,51 @@
+- hosts: localhost
+  connection: local
+  tasks:
+    #####################################################
+    # Per step puppet configuration of the baremetal host
+    #####################################################
+    - name: Write the config_step hieradata
+      copy: content="{{dict(step=step|int)|to_json}}" dest=/etc/puppet/hieradata/config_step.json force=true
+    - name: Run puppet host configuration for step {{step}}
+      # FIXME: modulepath requires ansible 2.4, our builds currently only have 2.3
+      # puppet: manifest=/var/lib/tripleo-config/puppet_step_config.pp modulepath=/etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
+      puppet: manifest=/var/lib/tripleo-config/puppet_step_config.pp
+    ######################################
+    # Generate config via docker-puppet.py
+    ######################################
+    - name: Run docker-puppet tasks (generate config)
+      shell: python /var/lib/docker-puppet/docker-puppet.py
+      environment:
+        NET_HOST: 'true'
+        DEBUG: '{{docker_puppet_debug}}'
+      when: step == "1"
+      changed_when: false
+      check_mode: no
+    ##################################################
+    # Per step starting of the containers using paunch
+    ##################################################
+    - name: Check if /var/lib/hashed-tripleo-config/docker-container-startup-config-step_{{step}}.json exists
+      stat:
+        path: /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json
+      register: docker_config_json
+    # Note docker-puppet.py generates the hashed-*.json file, which is a copy of
+    # the *step_n.json with a hash of the generated external config added
+    # This acts as a salt to enable restarting the container if config changes
+    - name: Start containers for step {{step}}
+      command: paunch --debug apply --file /var/lib/tripleo-config/hashed-docker-container-startup-config-step_{{step}}.json --config-id tripleo_step{{step}} --managed-by tripleo-{{role_name}}
+      when: docker_config_json.stat.exists
+      changed_when: false
+      check_mode: no
+    ########################################################
+    # Bootstrap tasks, only performed on bootstrap_server_id
+    ########################################################
+    - name: Run docker-puppet tasks (bootstrap tasks)
+      shell: python /var/lib/docker-puppet/docker-puppet.py
+      environment:
+        CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
+        NET_HOST: "true"
+        NO_ARCHIVE: "true"
+        STEP: "{{step}}"
+      when: deploy_server_id == bootstrap_server_id
+      changed_when: false
+      check_mode: no
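
During a deployment this playbook is fed per step and per role through the "group: ansible" RoleConfig resource defined below in docker-steps.j2, which supplies the step, role_name, update_identifier, bootstrap_server_id and docker_puppet_debug inputs (deploy_server_id is provided by the Heat agent). A hedged sketch of exercising it by hand on a node for debugging; the path and values are illustrative only:

    $ ansible-playbook -v deploy-steps-playbook.yaml \
        -e step=1 \
        -e role_name=Controller \
        -e docker_puppet_debug=True \
        -e bootstrap_server_id=<server-uuid> \
        -e deploy_server_id=<server-uuid>
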
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index 49dd00c..430aa88 100755 (executable)
 # that can be used to generate config files or run ad-hoc puppet modules
 # inside of a container.
 
+import glob
 import json
 import logging
 import os
+import sys
 import subprocess
 import sys
 import tempfile
 import multiprocessing
 
 log = logging.getLogger()
-log.setLevel(logging.DEBUG)
 ch = logging.StreamHandler(sys.stdout)
-ch.setLevel(logging.DEBUG)
+if os.environ.get('DEBUG', False):
+    log.setLevel(logging.DEBUG)
+    ch.setLevel(logging.DEBUG)
+else:
+    log.setLevel(logging.INFO)
+    ch.setLevel(logging.INFO)
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 ch.setFormatter(formatter)
 log.addHandler(ch)
@@ -55,6 +61,28 @@ def pull_image(name):
         log.debug(cmd_stderr)
 
 
+def match_config_volume(prefix, config):
+    # Match the mounted config volume - we can't just use the
+    # key as e.g. "novacompute" consumes config-data/nova
+    volumes = config.get('volumes', [])
+    config_volume=None
+    for v in volumes:
+        if v.startswith(prefix):
+            config_volume =  os.path.relpath(
+                v.split(":")[0], prefix).split("/")[0]
+            break
+    return config_volume
+
+
+def get_config_hash(prefix, config_volume):
+    hashfile = os.path.join(prefix, "%s.md5sum" % config_volume)
+    hash_data = None
+    if os.path.isfile(hashfile):
+        with open(hashfile) as f:
+            hash_data = f.read().rstrip()
+    return hash_data
+
+
 def rm_container(name):
     if os.environ.get('SHOW_DIFF', None):
         log.info('Diffing container: %s' % name)
@@ -121,11 +149,11 @@ for service in (json_data or []):
     if not manifest or not config_image:
         continue
 
-    log.debug('config_volume %s' % config_volume)
-    log.debug('puppet_tags %s' % puppet_tags)
-    log.debug('manifest %s' % manifest)
-    log.debug('config_image %s' % config_image)
-    log.debug('volumes %s' % volumes)
+    log.info('config_volume %s' % config_volume)
+    log.info('puppet_tags %s' % puppet_tags)
+    log.info('manifest %s' % manifest)
+    log.info('config_image %s' % config_image)
+    log.info('volumes %s' % volumes)
     # We key off of config volume for all configs.
     if config_volume in configs:
         # Append puppet tags and manifest.
@@ -166,33 +194,34 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         if [ -n "$PUPPET_TAGS" ]; then
             TAGS="--tags \"$PUPPET_TAGS\""
         fi
+
+        # workaround LP1696283
+        mkdir -p /etc/ssh
+        touch /etc/ssh/ssh_known_hosts
+
         FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
 
         # Disables archiving
         if [ -z "$NO_ARCHIVE" ]; then
-            rm -Rf /var/lib/config-data/${NAME}
-
-            # copying etc should be enough for most services
-            mkdir -p /var/lib/config-data/${NAME}/etc
-            cp -a /etc/* /var/lib/config-data/${NAME}/etc/
-
-            if [ -d /root/ ]; then
-              cp -a /root/ /var/lib/config-data/${NAME}/root/
-            fi
-            if [ -d /var/lib/ironic/tftpboot/ ]; then
-              mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
-              cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/tftpboot/
-            fi
-            if [ -d /var/lib/ironic/httpboot/ ]; then
-              mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
-              cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/httpboot/
-            fi
-
-            # apache services may files placed in /var/www/
-            if [ -d /var/www/ ]; then
-             mkdir -p /var/lib/config-data/${NAME}/var/www
-             cp -a /var/www/* /var/lib/config-data/${NAME}/var/www/
-            fi
+            archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
+            rsync_srcs=""
+            for d in "${archivedirs[@]}"; do
+                if [ -d "$d" ]; then
+                    rsync_srcs+=" $d"
+                fi
+            done
+            rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}
+
+            # Also make a copy of files modified during puppet run
+            # This is useful for debugging
+            mkdir -p /var/lib/config-data/puppet-generated/${NAME}
+            rsync -a -R -0 --delay-updates --delete-after \
+                          --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \
+                          / /var/lib/config-data/puppet-generated/${NAME}
+
+            # Write a checksum of the config-data dir, this is used as a
+            # salt to trigger container restart when the config changes
+            tar cf - /var/lib/config-data/${NAME} | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
         fi
         """)
 
@@ -247,13 +276,17 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, env=env)
         cmd_stdout, cmd_stderr = subproc.communicate()
-        if cmd_stdout:
-            log.debug(cmd_stdout)
-        if cmd_stderr:
-            log.debug(cmd_stderr)
         if subproc.returncode != 0:
             log.error('Failed running docker-puppet.py for %s' % config_volume)
+            if cmd_stdout:
+                log.error(cmd_stdout)
+            if cmd_stderr:
+                log.error(cmd_stderr)
         else:
+            if cmd_stdout:
+                log.debug(cmd_stdout)
+            if cmd_stderr:
+                log.debug(cmd_stderr)
             # only delete successful runs, for debugging
             rm_container('docker-puppet-%s' % config_volume)
         return subproc.returncode
@@ -293,5 +326,30 @@ for returncode, config_volume in zip(returncodes, config_volumes):
         log.error('ERROR configuring %s' % config_volume)
         success = False
 
+
+# Update the startup configs with the config hash we generated above
+config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data')
+log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix)
+startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
+log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs)
+infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
+for infile in infiles:
+    with open(infile) as f:
+        infile_data = json.load(f)
+
+    for k, v in infile_data.iteritems():
+        config_volume = match_config_volume(config_volume_prefix, v)
+        if config_volume:
+            config_hash = get_config_hash(config_volume_prefix, config_volume)
+            if config_hash:
+                env = v.get('environment', [])
+                env.append("TRIPLEO_CONFIG_HASH=%s" % config_hash)
+                log.debug("Updating config hash for %s, config_volume=%s hash=%s" % (k, config_volume, config_hash))
+                infile_data[k]['environment'] = env
+
+    outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile))
+    with open(outfile, 'w') as out_f:
+        json.dump(infile_data, out_f)
+
 if not success:
     sys.exit(1)
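
The block above gives every per-step startup config a "hashed-" twin in which containers whose config volume produced an md5sum file gain a TRIPLEO_CONFIG_HASH environment entry; per the comments in the change, that hash is the salt that triggers a container restart when its config changes, and the hashed file is what paunch consumes in deploy-steps-playbook.yaml. An illustrative check on a deployed node; the "keystone" volume name, the step number and the hash value are hypothetical:

    $ cat /var/lib/config-data/keystone.md5sum
    0f3a6bf1c6...
    $ grep -o 'TRIPLEO_CONFIG_HASH=[0-9a-f]*' \
        /var/lib/tripleo-config/hashed-docker-container-startup-config-step_3.json
    TRIPLEO_CONFIG_HASH=0f3a6bf1c6...
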
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
index 86811b8..73a3cb7 100644 (file)
@@ -21,6 +21,9 @@ parameters:
   servers:
     type: json
     description: Mapping of Role name e.g Controller to a list of servers
+  stack_name:
+    type: string
+    description: Name of the topmost stack
   role_data:
     type: json
     description: Mapping of Role name e.g Controller to the per-role data
@@ -35,6 +38,25 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  DockerPuppetDebug:
+    type: string
+    default: ''
+    description: Set to True to enable debug logging with docker-puppet.py
+  ctlplane_service_ips:
+    type: json
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+  WorkflowTasks_Step{{step}}_Enabled:
+    or:
+    {% for role in roles %}
+      - not:
+          equals:
+            - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+            - ''
+      - False
+    {% endfor %}
+{% endfor %}
 
 resources:
 
@@ -55,39 +77,66 @@ resources:
               step_{{step}}: {}
 {%- endfor %}
 
-# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
-{% for step in range(1, deploy_steps_max) %}
-
-  {{primary_role_name}}DockerPuppetTasksConfig{{step}}:
+  RoleConfig:
     type: OS::Heat::SoftwareConfig
     properties:
-      group: script
-      config: {get_file: docker-puppet.py}
+      group: ansible
+      options:
+        modulepath: /usr/share/ansible-modules
       inputs:
-        - name: CONFIG
-        - name: NET_HOST
-        - name: NO_ARCHIVE
-        - name: STEP
+        - name: step
+        - name: role_name
+        - name: update_identifier
+        - name: bootstrap_server_id
+        - name: docker_puppet_debug
+      config: {get_file: deploy-steps-playbook.yaml}
 
-  {{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
-    type: OS::Heat::SoftwareDeployment
+{%- for step in range(1, deploy_steps_max) %}
+# BEGIN service_workflow_tasks handling
+  WorkflowTasks_Step{{step}}:
+    type: OS::Mistral::Workflow
+    condition: WorkflowTasks_Step{{step}}_Enabled
     depends_on:
-      {% for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step}}
-      - {{dep.name}}ContainersDeployment_Step{{step}}
-      {% endfor %}
+    {% if step == 1 %}
+    {% for dep in roles %}
+      - {{dep.name}}PreConfig
+      - {{dep.name}}ArtifactsDeploy
+    {% endfor %}
+    {% else %}
+    {% for dep in roles %}
+      - {{dep.name}}Deployment_Step{{step -1}}
+    {% endfor %}
+    {% endif %}
     properties:
-      name: {{primary_role_name}}DockerPuppetTasksDeployment{{step}}
-      server: {get_param: [servers, {{primary_role_name}}, '0']}
-      config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
-      input_values:
-        CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
-        NET_HOST: 'true'
-        NO_ARCHIVE: 'true'
-        STEP: {{step}}
+      name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+      type: direct
+      tasks:
+        yaql:
+          expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+          data:
+          {% for role in roles %}
+            - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+          {% endfor %}
 
+  WorkflowTasks_Step{{step}}_Execution:
+    type: OS::Mistral::ExternalResource
+    condition: WorkflowTasks_Step{{step}}_Enabled
+    depends_on: WorkflowTasks_Step{{step}}
+    properties:
+      actions:
+        CREATE:
+          workflow: { get_resource: WorkflowTasks_Step{{step}} }
+          params:
+            env:
+              service_ips: { get_param: ctlplane_service_ips }
+        UPDATE:
+          workflow: { get_resource: WorkflowTasks_Step{{step}} }
+          params:
+            env:
+              service_ips: { get_param: ctlplane_service_ips }
+      always_update: true
+# END service_workflow_tasks handling
 {% endfor %}
-# END primary_role_name docker-puppet-tasks
 
 {% for role in roles %}
   # Post deployment steps for all roles
@@ -122,6 +171,7 @@ resources:
                   docker_startup_configs: {get_attr: [{{role.name}}DockerConfig, value]}
                   kolla_config: {get_param: [role_data, {{role.name}}, kolla_config]}
                   bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
+                  puppet_step_config: {get_attr: [{{role.name}}PuppetStepConfig, value]}
                 tasks:
                   # Join host_prep_tasks with the other per-host configuration
                   yaql:
@@ -130,9 +180,11 @@ resources:
                       host_prep_tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
                       template_tasks:
 {%- raw %}
-                        # This is where we stack puppet configuration (for now)...
-                        - name: Create /var/lib/config-data
-                          file: path=/var/lib/config-data state=directory
+                        # Write the manifest for baremetal puppet configuration
+                        - name: Create /var/lib/tripleo-config directory
+                          file: path=/var/lib/tripleo-config state=directory
+                        - name: Write the puppet step_config manifest
+                          copy: content="{{puppet_step_config}}" dest=/var/lib/tripleo-config/puppet_step_config.pp force=yes
                         # This is the docker-puppet configs end in
                         - name: Create /var/lib/docker-puppet
                           file: path=/var/lib/docker-puppet state=directory
@@ -145,8 +197,13 @@ resources:
                         # Here we are dumping all the docker container startup configuration data
                         # so that we can have access to how they are started outside of heat
                         # and docker-cmd.  This lets us create command line tools to test containers.
+                        # FIXME do we need the docker-container-startup-configs.json or is the new per-step
+                        # data consumed by paunch enough?
                         - name: Write docker-container-startup-configs
                           copy: content="{{docker_startup_configs | to_json}}" dest=/var/lib/docker-container-startup-configs.json force=yes
+                        - name: Write per-step docker-container-startup-configs
+                          copy: content="{{item.value|to_json}}" dest="/var/lib/tripleo-config/docker-container-startup-config-{{item.key}}.json" force=yes
+                          with_dict: "{{docker_startup_configs}}"
                         - name: Create /var/lib/kolla/config_files directory
                           file: path=/var/lib/kolla/config_files state=directory
                         - name: Write kolla config json files
@@ -167,24 +224,6 @@ resources:
       servers: {get_param: [servers, {{role.name}}]}
       config: {get_resource: {{role.name}}HostPrepConfig}
 
-  {{role.name}}GenerateConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: docker-puppet.py}
-      inputs:
-        - name: NET_HOST
-
-  {{role.name}}GenerateConfigDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: [{{role.name}}ArtifactsDeploy, {{role.name}}HostPrepDeployment]
-    properties:
-      name: {{role.name}}GenerateConfigDeployment
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}GenerateConfig}
-      input_values:
-        NET_HOST: 'true'
-
   {{role.name}}PuppetStepConfig:
     type: OS::Heat::Value
     properties:
@@ -212,103 +251,76 @@ resources:
             service_names: {get_param: [role_data, {{role.name}}, service_names]}
             docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
 
-  # BEGIN BAREMETAL CONFIG STEPS
+  # BEGIN CONFIG STEPS
 
   {{role.name}}PreConfig:
     type: OS::TripleO::Tasks::{{role.name}}PreConfig
+    depends_on: {{role.name}}HostPrepDeployment
     properties:
       servers: {get_param: [servers, {{role.name}}]}
       input_values:
         update_identifier: {get_param: DeployIdentifier}
 
-  {{role.name}}Config:
-    type: OS::TripleO::{{role.name}}Config
-    properties:
-      StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
-
   {% for step in range(1, deploy_steps_max) %}
 
   {{role.name}}Deployment_Step{{step}}:
     type: OS::Heat::StructuredDeploymentGroup
-  {% if step == 1 %}
-    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
-  {% else %}
     depends_on:
-      {% for dep in roles %}
+      - WorkflowTasks_Step{{step}}_Execution
+    # TODO(gfidente): the following if/else condition
+    # replicates what is already defined for the
+    # WorkflowTasks_StepX resource and can be remove
+    # if https://bugs.launchpad.net/heat/+bug/1700569
+    # is fixed.
+    {% if step == 1 %}
+    {% for dep in roles %}
+      - {{dep.name}}PreConfig
+      - {{dep.name}}ArtifactsDeploy
+    {% endfor %}
+    {% else %}
+    {% for dep in roles %}
       - {{dep.name}}Deployment_Step{{step -1}}
-      - {{dep.name}}ContainersDeployment_Step{{step -1}}
-      {% endfor %}
-      - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
-  {% endif %}
+    {% endfor %}
+    {% endif %}
     properties:
       name: {{role.name}}Deployment_Step{{step}}
       servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}Config}
+      config: {get_resource: RoleConfig}
       input_values:
         step: {{step}}
+        role_name: {{role.name}}
         update_identifier: {get_param: DeployIdentifier}
+        bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
+        docker_puppet_debug: {get_param: DockerPuppetDebug}
 
   {% endfor %}
-  # END BAREMETAL CONFIG STEPS
-
-  # BEGIN CONTAINER CONFIG STEPS
-  {% for step in range(1, deploy_steps_max) %}
-
-  {{role.name}}ContainersConfig_Step{{step}}:
-    type: OS::Heat::StructuredConfig
-    properties:
-      group: docker-cmd
-      config:
-        {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]}
+  # END CONFIG STEPS
 
-  {{role.name}}ContainersDeployment_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-  {% if step == 1 %}
-    depends_on:
-        {%- for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
-        {%- endfor %}
-      - {{role.name}}PreConfig
-      - {{role.name}}HostPrepDeployment
-      - {{role.name}}GenerateConfigDeployment
-  {% else %}
+  # Note, this should be the last step to execute configuration changes.
+  # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+  # after all the previous deployment steps.
+  {{role.name}}ExtraConfigPost:
     depends_on:
-        {% for dep in roles %}
-        - {{dep.name}}ContainersDeployment_Step{{step -1}}
-        - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
-        - {{dep.name}}Deployment_Step{{step -1}}
-        {% endfor %}
-        - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
-  {% endif %}
-    properties:
-      name: {{role.name}}ContainersDeployment_Step{{step}}
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}}
-
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step5
   {% endfor %}
-  # END CONTAINER CONFIG STEPS
+    type: OS::TripleO::NodeExtraConfigPost
+    properties:
+        servers: {get_param: [servers, {{role.name}}]}
 
+  # The {{role.name}}PostConfig steps are in charge of
+  # quiescing all services; in the Controller case, for
+  # example, we run a full service reload.
   {{role.name}}PostConfig:
     type: OS::TripleO::Tasks::{{role.name}}PostConfig
     depends_on:
   {% for dep in roles %}
-      - {{dep.name}}Deployment_Step5
-      - {{primary_role_name}}DockerPuppetTasksDeployment5
+      - {{dep.name}}ExtraConfigPost
   {% endfor %}
     properties:
       servers:  {get_param: servers}
       input_values:
         update_identifier: {get_param: DeployIdentifier}
 
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  {{role.name}}ExtraConfigPost:
-    depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}PostConfig
-  {% endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-        servers: {get_param: [servers, {{role.name}}]}
 
 {% endfor %}
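
For illustration, a hand-rendered sketch of what the per-step resource above produces for a hypothetical deployment containing only the Controller role; it shows the if/else the TODO refers to: step 1 hangs off each role's PreConfig and ArtifactsDeploy, while every later step chains to the previous step of all roles.

    # Hypothetical Jinja rendering, single Controller role, deploy_steps_max > 2
    ControllerDeployment_Step1:
      type: OS::Heat::StructuredDeploymentGroup
      depends_on:
        - WorkflowTasks_Step1_Execution
        - ControllerPreConfig
        - ControllerArtifactsDeploy
      properties:
        name: ControllerDeployment_Step1
        servers: {get_param: [servers, Controller]}
        config: {get_resource: RoleConfig}
        input_values:
          step: 1
          role_name: Controller
          update_identifier: {get_param: DeployIdentifier}
          bootstrap_server_id: {get_param: [servers, Controller, '0']}
          docker_puppet_debug: {get_param: DockerPuppetDebug}

    ControllerDeployment_Step2:
      type: OS::Heat::StructuredDeploymentGroup
      depends_on:
        - WorkflowTasks_Step2_Execution
        - ControllerDeployment_Step1
      # properties are identical apart from step: 2
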
index 3132f8e..a1ffe34 100755 (executable)
@@ -80,6 +80,9 @@ def parse_opts(argv):
 
 def docker_arg_map(key, value):
     value = str(value).encode('ascii', 'ignore')
+    if len(value) == 0:
+        return ''
+
     return {
         'environment': "--env=%s" % value,
         # 'image': value,
index 4b061e1..41b036d 100644 (file)
@@ -4,6 +4,7 @@ parameters:
   DockerNamespace:
     type: string
     default: tripleoupstream
+    description: namespace
   DockerNamespaceIsRegistry:
     type: boolean
     default: false
index f802e4e..3be0f18 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-aodh-api:latest'
     type: string
+  DockerAodhConfigImage:
+    description: The container image to use for the aodh config_volume
+    default: 'centos-binary-aodh-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -73,12 +77,12 @@ outputs:
         config_volume: aodh
         puppet_tags: aodh_api_paste_ini,aodh_config
         step_config: *step_config
-        config_image: &aodh_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/aodh-api.json:
+        /var/lib/kolla/config_files/aodh_api.json:
           command: /usr/sbin/httpd -DFOREGROUND
           permissions:
             - path: /var/log/aodh
@@ -86,17 +90,19 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           aodh_init_log:
-            start_order: 0
-            image: *aodh_image
+            image: &aodh_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
             user: root
             volumes:
               - /var/log/containers/aodh:/var/log/aodh
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
+        step_3:
           aodh_db_sync:
-            start_order: 1
-            image: *aodh_image
+            image: *aodh_api_image
             net: host
             privileged: false
             detach: false
@@ -110,7 +116,7 @@ outputs:
             command: "/usr/bin/bootstrap_host_exec aodh_api su aodh -s /bin/bash -c /usr/bin/aodh-dbsync"
         step_4:
           aodh_api:
-            image: *aodh_image
+            image: *aodh_api_image
             net: host
             privileged: false
             restart: always
@@ -118,9 +124,11 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/aodh_api.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
-                  - /var/lib/config-data/aodh/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/aodh/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/aodh/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/aodh/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                   - /var/lib/config-data/aodh/var/www/:/var/www/:ro
                   - /var/log/containers/aodh:/var/log/aodh
                   -
index 9d514d0..108a552 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-aodh-evaluator:latest'
     type: string
+  DockerAodhConfigImage:
+    description: The container image to use for the aodh config_volume
+    default: 'centos-binary-aodh-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -65,12 +69,12 @@ outputs:
         config_volume: aodh
         puppet_tags: aodh_config
         step_config: *step_config
-        config_image: &aodh_evaluator_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/aodh-evaluator.json:
+        /var/lib/kolla/config_files/aodh_evaluator.json:
           command: /usr/bin/aodh-evaluator
           permissions:
             - path: /var/log/aodh
@@ -79,7 +83,10 @@ outputs:
       docker_config:
         step_4:
           aodh_evaluator:
-            image: *aodh_evaluator_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
             net: host
             privileged: false
             restart: always
@@ -87,7 +94,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/aodh_evaluator.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
                   - /var/log/containers/aodh:/var/log/aodh
             environment:
index dac6108..d78af5b 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-aodh-listener:latest'
     type: string
+  DockerAodhConfigImage:
+    description: The container image to use for the aodh config_volume
+    default: 'centos-binary-aodh-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -65,12 +69,12 @@ outputs:
         config_volume: aodh
         puppet_tags: aodh_config
         step_config: *step_config
-        config_image: &aodh_listener_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/aodh-listener.json:
+        /var/lib/kolla/config_files/aodh_listener.json:
           command: /usr/bin/aodh-listener
           permissions:
             - path: /var/log/aodh
@@ -79,7 +83,10 @@ outputs:
       docker_config:
         step_4:
           aodh_listener:
-            image: *aodh_listener_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
             net: host
             privileged: false
             restart: always
@@ -87,7 +94,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/aodh-listener.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/aodh_listener.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
                   - /var/log/containers/aodh:/var/log/aodh
             environment:
index a22ae85..abfb374 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-aodh-notifier:latest'
     type: string
+  DockerAodhConfigImage:
+    description: The container image to use for the aodh config_volume
+    default: 'centos-binary-aodh-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -65,12 +69,12 @@ outputs:
         config_volume: aodh
         puppet_tags: aodh_config
         step_config: *step_config
-        config_image: &aodh_notifier_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/aodh-notifier.json:
+        /var/lib/kolla/config_files/aodh_notifier.json:
           command: /usr/bin/aodh-notifier
           permissions:
             - path: /var/log/aodh
@@ -79,7 +83,10 @@ outputs:
       docker_config:
         step_4:
           aodh_notifier:
-            image: *aodh_notifier_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
             net: host
             privileged: false
             restart: always
@@ -87,7 +94,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/aodh-notifier.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/aodh_notifier.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
                   - /var/log/containers/aodh:/var/log/aodh
             environment:
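
All four aodh templates above now build the config_volume image from a dedicated DockerAodhConfigImage parameter while keeping per-service run-time images. A minimal environment-file sketch for overriding them through parameter_defaults; the registry address is hypothetical:

    parameter_defaults:
      DockerNamespace: 192.0.2.1:8787/tripleoupstream   # hypothetical local registry/namespace
      DockerNamespaceIsRegistry: true
      DockerAodhConfigImage: centos-binary-aodh-api:latest   # shared aodh config_volume image
      DockerAodhApiImage: centos-binary-aodh-api:latest
      DockerAodhEvaluatorImage: centos-binary-aodh-evaluator:latest
      DockerAodhListenerImage: centos-binary-aodh-listener:latest
      DockerAodhNotifierImage: centos-binary-aodh-notifier:latest
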
index 94caded..af1f47a 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-ceilometer-central:latest'
     type: string
+  DockerCeilometerConfigImage:
+    description: The container image to use for the ceilometer config_volume
+    default: 'centos-binary-ceilometer-central:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,18 +67,21 @@ outputs:
         config_volume: ceilometer
         puppet_tags: ceilometer_config
         step_config: *step_config
-        config_image: &ceilometer_agent_central_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerCentralImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/ceilometer-agent-central.json:
+        /var/lib/kolla/config_files/ceilometer_agent_central.json:
           command: /usr/bin/ceilometer-polling --polling-namespaces central
       docker_config:
         step_3:
           ceilometer_init_log:
             start_order: 0
-            image: *ceilometer_agent_central_image
+            image: &ceilometer_agent_central_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerCentralImage} ]
             user: root
             command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
             volumes:
@@ -89,7 +96,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/ceilometer-agent-central.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/ceilometer_agent_central.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
@@ -110,4 +117,4 @@ outputs:
       upgrade_tasks:
         - name: Stop and disable ceilometer agent central service
           tags: step2
-          service: name=openstack-ceilometer-agent-central state=stopped enabled=no
+          service: name=openstack-ceilometer-central state=stopped enabled=no
index 9033cf4..3cc440b 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-ceilometer-compute:latest'
     type: string
+  DockerCeilometerConfigImage:
+    description: The container image to use for the ceilometer config_volume
+    default: 'centos-binary-ceilometer-central:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,17 +67,20 @@ outputs:
         config_volume: ceilometer
         puppet_tags: ceilometer_config
         step_config: *step_config
-        config_image: &ceilometer_agent_compute_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerComputeImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/ceilometer-agent-compute.json:
+        /var/lib/kolla/config_files/ceilometer_agent_compute.json:
           command: /usr/bin/ceilometer-polling --polling-namespaces compute
       docker_config:
         step_4:
-          ceilometer_agent-compute:
-            image: *ceilometer_agent_compute_image
+          ceilometer_agent_compute:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerComputeImage} ]
             net: host
             privileged: false
             restart: always
@@ -81,11 +88,12 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/ceilometer_agent_compute.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+                  - /var/run/libvirt:/var/run/libvirt:ro
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
       upgrade_tasks:
         - name: Stop and disable ceilometer-agent-compute service
           tags: step2
-          service: name=openstack-ceilometer-agent-compute state=stopped enabled=no
+          service: name=openstack-ceilometer-compute state=stopped enabled=no
diff --git a/docker/services/ceilometer-agent-ipmi.yaml b/docker/services/ceilometer-agent-ipmi.yaml
new file mode 100644 (file)
index 0000000..7d02939
--- /dev/null
@@ -0,0 +1,106 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Ceilometer Agent IPMI service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCeilometerIpmiImage:
+    description: image
+    default: 'centos-binary-ceilometer-ipmi:latest'
+    type: string
+  DockerCeilometerConfigImage:
+    description: The container image to use for the ceilometer config_volume
+    default: 'centos-binary-ceilometer-central:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CeilometerAgentIpmiBase:
+    type: ../../puppet/services/ceilometer-agent-ipmi.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Ceilometer Agent IPMI role.
+    value:
+      service_name: {get_attr: [CeilometerAgentIpmiBase, role_data, service_name]}
+      config_settings: {get_attr: [CeilometerAgentIpmiBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CeilometerAgentIpmiBase, role_data, step_config]
+      service_config_settings: {get_attr: [CeilometerAgentIpmiBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: ceilometer
+        puppet_tags: ceilometer_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/ceilometer-agent-ipmi.json:
+          command: /usr/bin/ceilometer-polling --polling-namespaces ipmi
+      docker_config:
+        step_3:
+          ceilometer_init_log:
+            start_order: 0
+            image: &ceilometer_agent_ipmi_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerIpmiImage} ]
+            user: root
+            command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+            volumes:
+              - /var/log/containers/ceilometer:/var/log/ceilometer
+        step_4:
+          ceilometer_agent_ipmi:
+            image: *ceilometer_agent_ipmi_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      upgrade_tasks:
+        - name: Stop and disable ceilometer agent ipmi service
+          tags: step2
+          service: name=openstack-ceilometer-agent-ipmi state=stopped enabled=no
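
Like the other containerized services, the new ceilometer-agent-ipmi template only takes effect once it is mapped in an environment's resource_registry. A minimal sketch; the OS::TripleO::Services::CeilometerAgentIpmi key is assumed to match the existing puppet service naming and is not shown in this change:

    resource_registry:
      # assumed service key; confirm against the docker services environment file
      OS::TripleO::Services::CeilometerAgentIpmi: ../docker/services/ceilometer-agent-ipmi.yaml
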
index 79df330..b2e85bb 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-ceilometer-notification:latest'
     type: string
+  DockerCeilometerConfigImage:
+    description: The container image to use for the ceilometer config_volume
+    default: 'centos-binary-ceilometer-central:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,24 +67,27 @@ outputs:
         config_volume: ceilometer
         puppet_tags: ceilometer_config
         step_config: *step_config
-        config_image: &ceilometer_agent_notification_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerNotificationImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/ceilometer-agent-notification.json:
+        /var/lib/kolla/config_files/ceilometer_agent_notification.json:
           command: /usr/bin/ceilometer-agent-notification
       docker_config:
         step_3:
           ceilometer_init_log:
             start_order: 0
-            image: *ceilometer_agent_notification_image
+            image: &ceilometer_agent_notification_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerNotificationImage} ]
             user: root
             command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
             volumes:
               - /var/log/containers/ceilometer:/var/log/ceilometer
         step_4:
-          ceilometer_agent-notification:
+          ceilometer_agent_notification:
             image: *ceilometer_agent_notification_image
             net: host
             privileged: false
@@ -89,24 +96,10 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/ceilometer-agent-notification.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/ceilometer_agent_notification.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-        step_5:
-          ceilometer_gnocchi_upgrade:
-            start_order: 1
-            image: *ceilometer_agent_notification_image
-            net: host
-            detach: false
-            privileged: false
-            volumes:
-              list_concat:
-                - {get_attr: [ContainersCommon, volumes]}
-                -
-                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
-                  - /var/log/containers/ceilometer:/var/log/ceilometer
-            command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
       upgrade_tasks:
         - name: Stop and disable ceilometer agent notification service
           tags: step2
diff --git a/docker/services/cinder-api.yaml b/docker/services/cinder-api.yaml
new file mode 100644 (file)
index 0000000..6a5d74b
--- /dev/null
@@ -0,0 +1,155 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder API service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderApiImage:
+    description: image
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  DockerCinderConfigImage:
+    description: The container image to use for the cinder config_volume
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CinderBase:
+    type: ../../puppet/services/cinder-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder API role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CinderBase, role_data, step_config]
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_api.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+          permissions:
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_2:
+          cinder_api_init_logs:
+            image: &cinder_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderApiImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_3:
+          cinder_api_db_sync:
+            image: *cinder_api_image
+            net: host
+            privileged: false
+            detach: false
+            user: root
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/log/containers/cinder:/var/log/cinder
+            command:
+              - '/usr/bin/bootstrap_host_exec'
+              - 'cinder_api'
+              - "su cinder -s /bin/bash -c 'cinder-manage db sync'"
+        step_4:
+          cinder_api:
+            image: *cinder_api_image
+            net: host
+            privileged: false
+            restart: always
+            # NOTE(mandre) the kolla image changes the user to 'cinder'; we
+            # need it to be root to run httpd
+            user: root
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/cinder_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/lib/config-data/cinder/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/cinder/var/www/:/var/www/:ro
+                  - /var/log/containers/cinder:/var/log/cinder
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                      - ''
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                      - ''
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/cinder
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable cinder_api service
+          tags: step2
+          service: name=httpd state=stopped enabled=no
diff --git a/docker/services/cinder-backup.yaml b/docker/services/cinder-backup.yaml
new file mode 100644 (file)
index 0000000..2cde6f1
--- /dev/null
@@ -0,0 +1,131 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Backup service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderBackupImage:
+    description: image
+    default: 'centos-binary-cinder-backup:latest'
+    type: string
+  DockerCinderConfigImage:
+    description: The container image to use for the cinder config_volume
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CinderBase:
+    type: ../../puppet/services/cinder-backup.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Backup role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CinderBase, role_data, step_config]
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_backup.json:
+          command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/lib/cinder
+              owner: cinder:cinder
+              recurse: true
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_3:
+          cinder_backup_init_logs:
+            start_order: 0
+            image: &cinder_backup_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderBackupImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_4:
+          cinder_backup:
+            image: *cinder_backup_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/cinder_backup.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/lib/config-data/ceph/etc/ceph/:/etc/ceph/:ro #FIXME: we need to generate a ceph.conf with puppet for this
+                  - /dev/:/dev/
+                  - /run/:/run/
+                  - /sys:/sys
+                  - /lib/modules:/lib/modules:ro
+                  - /etc/iscsi:/etc/iscsi
+                  - /var/lib/cinder:/var/lib/cinder
+                  - /var/log/containers/cinder:/var/log/cinder
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/lib/cinder
+            - /var/log/containers/cinder
+      upgrade_tasks:
+        - name: Stop and disable cinder_backup service
+          tags: step2
+          service: name=openstack-cinder-backup state=stopped enabled=no
diff --git a/docker/services/cinder-scheduler.yaml b/docker/services/cinder-scheduler.yaml
new file mode 100644 (file)
index 0000000..bcf32b2
--- /dev/null
@@ -0,0 +1,119 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Scheduler service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderSchedulerImage:
+    description: image
+    default: 'centos-binary-cinder-scheduler:latest'
+    type: string
+  DockerCinderConfigImage:
+    description: The container image to use for the cinder config_volume
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CinderBase:
+    type: ../../puppet/services/cinder-scheduler.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Scheduler role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CinderBase, role_data, step_config]
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_scheduler.json:
+          command: /usr/bin/cinder-scheduler --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_2:
+          cinder_scheduler_init_logs:
+            image: &cinder_scheduler_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderSchedulerImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_4:
+          cinder_scheduler:
+            image: *cinder_scheduler_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/cinder_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/log/containers/cinder:/var/log/cinder
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/cinder
+      upgrade_tasks:
+        - name: Stop and disable cinder_scheduler service
+          tags: step2
+          service: name=openstack-cinder-scheduler state=stopped enabled=no
diff --git a/docker/services/cinder-volume.yaml b/docker/services/cinder-volume.yaml
new file mode 100644 (file)
index 0000000..5517384
--- /dev/null
@@ -0,0 +1,166 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Volume service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderVolumeImage:
+    description: image
+    default: 'centos-binary-cinder-volume:latest'
+    type: string
+  DockerCinderConfigImage:
+    description: The container image to use for the cinder config_volume
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  # custom parameters for the Cinder volume role
+  CinderEnableIscsiBackend:
+    default: true
+    description: Whether to enable the iSCSI backend for Cinder
+    type: boolean
+  CinderLVMLoopDeviceSize:
+    default: 10280
+    description: The size of the loopback file used by the cinder LVM driver.
+    type: number
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CinderBase:
+    type: ../../puppet/services/cinder-volume.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Volume role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CinderBase, role_data, step_config]
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_volume.json:
+          command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_3:
+          cinder_volume_init_logs:
+            start_order: 0
+            image: &cinder_volume_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderVolumeImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_4:
+          cinder_volume:
+            image: *cinder_volume_image
+            net: host
+            privileged: true
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/cinder_volume.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/lib/config-data/ceph/etc/ceph/:/etc/ceph/:ro #FIXME: we need to generate a ceph.conf with puppet for this
+                  - /dev/:/dev/
+                  - /run/:/run/
+                  - /sys:/sys
+                  - /etc/iscsi:/etc/iscsi
+                  - /var/lib/cinder:/var/lib/cinder
+                  - /var/log/containers/cinder:/var/log/cinder
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/cinder
+            - /var/lib/cinder
+        - name: cinder_enable_iscsi_backend fact
+          set_fact:
+            cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
+        - name: cinder create LVM backing file with dd
+          command:
+            list_join:
+            - ''
+            - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
+              - str_replace:
+                  template: VALUE
+                  params:
+                    VALUE: {get_param: CinderLVMLoopDeviceSize}
+              - 'M'
+          args:
+            creates: /var/lib/cinder/cinder-volumes
+          when: cinder_enable_iscsi_backend
+        - name: cinder create LVM volume group
+          shell: |
+            if ! losetup /dev/loop2; then
+              losetup /dev/loop2 /var/lib/cinder/cinder-volumes
+            fi
+            if ! pvdisplay | grep cinder-volumes; then
+              pvcreate /dev/loop2
+            fi
+            if ! vgdisplay | grep cinder-volumes; then
+              vgcreate cinder-volumes /dev/loop2
+            fi
+          args:
+            executable: /bin/bash
+            creates: /dev/loop2
+          when: cinder_enable_iscsi_backend
+      upgrade_tasks:
+        - name: Stop and disable cinder_volume service
+          tags: step2
+          service: name=openstack-cinder-volume state=stopped enabled=no
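
The LVM/iSCSI host_prep_tasks above are gated on two template parameters; a minimal environment-file sketch for tuning them (the values are examples only):

    parameter_defaults:
      CinderEnableIscsiBackend: true    # set to false to skip creating the loopback volume group
      CinderLVMLoopDeviceSize: 20480    # size in MB of the /var/lib/cinder/cinder-volumes backing file
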
diff --git a/docker/services/collectd.yaml b/docker/services/collectd.yaml
new file mode 100644 (file)
index 0000000..e674115
--- /dev/null
@@ -0,0 +1,115 @@
+heat_template_version: pike
+
+description: >
+  Containerized collectd service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCollectdImage:
+    description: image
+    default: 'centos-binary-collectd:latest'
+    type: string
+  DockerCollectdConfigImage:
+    description: The container image to use for the collectd config_volume
+    default: 'centos-binary-collectd:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CollectdBase:
+    type: ../../puppet/services/metrics/collectd.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the collectd role.
+    value:
+      service_name: {get_attr: [CollectdBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [CollectdBase, role_data, config_settings]
+          - tripleo::profile::base::metrics::collectd::enable_file_logging: true
+            collectd::plugin::logfile::log_file: /var/log/collectd/collectd.log
+      step_config: &step_config
+        get_attr: [CollectdBase, role_data, step_config]
+      service_config_settings: {get_attr: [CollectdBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: collectd
+        puppet_tags: collectd_client_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCollectdConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/collectd.json:
+          command: /usr/sbin/collectd -f
+          permissions:
+            - path: /var/log/collectd
+              owner: collectd:collectd
+              recurse: true
+      docker_config:
+        step_3:
+          collectd:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCollectdImage} ]
+            net: host
+            privileged: true
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/run/docker.sock:/var/run/docker.sock:rw
+                  - /var/lib/kolla/config_files/collectd.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/collectd/etc/collectd.conf:/etc/collectd.conf:ro
+                  - /var/lib/config-data/collectd/etc/collectd.d:/etc/collectd.d:ro
+                  - /var/log/containers/collectd:/var/log/collectd:rw
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/collectd
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable collectd service
+          tags: step2
+          service: name=collectd.service state=stopped enabled=no
diff --git a/docker/services/congress-api.yaml b/docker/services/congress-api.yaml
new file mode 100644 (file)
index 0000000..52395d5
--- /dev/null
@@ -0,0 +1,134 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Congress API service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCongressApiImage:
+    description: image
+    default: 'centos-binary-congress-api:latest'
+    type: string
+  DockerCongressConfigImage:
+    description: The container image to use for the congress config_volume
+    default: 'centos-binary-congress-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CongressApiBase:
+    type: ../../puppet/services/congress.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Congress API role.
+    value:
+      service_name: {get_attr: [CongressApiBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [CongressApiBase, role_data, config_settings]
+      step_config: &step_config
+        get_attr: [CongressApiBase, role_data, step_config]
+      service_config_settings: {get_attr: [CongressApiBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: congress
+        puppet_tags: congress_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCongressConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/congress_api.json:
+          command: /usr/bin/congress-server --config-file=/etc/congress/congress.conf --log-file=/var/log/congress/api.log
+          permissions:
+            - path: /var/log/congress
+              owner: congress:congress
+              recurse: true
+      docker_config:
+        # db sync runs before permissions set by kolla_config
+        step_2:
+          congress_init_logs:
+            image: &congress_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCongressApiImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/congress:/var/log/congress
+            command: ['/bin/bash', '-c', 'chown -R congress:congress /var/log/congress']
+        step_3:
+          congress_db_sync:
+            image: *congress_api_image
+            net: host
+            privileged: false
+            detach: false
+            user: root
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/congress/etc/:/etc/:ro
+                  - /var/log/containers/congress:/var/log/congress
+            command: "/usr/bin/bootstrap_host_exec congress su congress -s /bin/bash -c 'congress-db-manage --config-file /etc/congress/congress.conf upgrade head'"
+        step_4:
+          congress_api:
+            start_order: 15
+            image: *congress_api_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/congress_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/congress/etc/congress/:/etc/congress/:ro
+                  - /var/log/containers/congress:/var/log/congress
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/congress
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable congress_api service
+          tags: step2
+          service: name=openstack-congress-server state=stopped enabled=no
index a9912a1..d104853 100644 (file)
@@ -3,18 +3,64 @@ heat_template_version: pike
 description: >
   Contains a static list of common things necessary for containers
 
+parameters:
+
+  # Required parameters
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+  EnableInternalTLS:
+    type: boolean
+    default: false
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
 outputs:
   volumes:
     description: Common volumes for the containers.
     value:
-      - /etc/hosts:/etc/hosts:ro
-      - /etc/localtime:/etc/localtime:ro
-      # required for bootstrap_host_exec
-      - /etc/puppet:/etc/puppet:ro
-      # OpenSSL trusted CAs
-      - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
-      - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
-      - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
-      - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
-      # Syslog socket
-      - /dev/log:/dev/log
+      list_concat:
+        - - /etc/hosts:/etc/hosts:ro
+          - /etc/localtime:/etc/localtime:ro
+          # required for bootstrap_host_exec
+          - /etc/puppet:/etc/puppet:ro
+          # OpenSSL trusted CAs
+          - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
+          - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
+          - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
+          - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
+          # Syslog socket
+          - /dev/log:/dev/log
+          - /etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro
+        - if:
+          - internal_tls_enabled
+          - - {get_param: InternalTLSCAFile}
+          - null
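Editorial note: the containers-common.yaml hunk above turns the shared volume list into a list_concat so that a TLS-only entry can be appended conditionally; the pattern relies on list_concat skipping the null branch when internal TLS is disabled. Service templates keep consuming it exactly as the congress template does above; a stripped-down sketch with a placeholder service name:

            volumes:
              list_concat:
                - {get_attr: [ContainersCommon, volumes]}      # the common list defined above
                -                                              # service-specific mounts
                  - /var/lib/config-data/example/etc/example/:/etc/example/:ro
                  - /var/log/containers/example:/var/log/example

With EnableInternalTLS set to true the common list gains the InternalTLSCAFile path (/etc/ipa/ca.crt by default); with it false, the if resolves to null and the common list is unchanged.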
diff --git a/docker/services/database/mongodb.yaml b/docker/services/database/mongodb.yaml
index 96a02f9..7b620c5 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-mongodb:latest'
     type: string
+  DockerMongodbConfigImage:
+    description: The container image to use for the mongodb config_volume
+    default: 'centos-binary-mongodb:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -65,10 +69,10 @@ outputs:
         config_volume: mongodb
         puppet_tags: file # set this even though file is the default
         step_config: *step_config
-        config_image: &mongodb_image
+        config_image: &mongodb_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerMongodbConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/mongodb.json:
           command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run
@@ -82,12 +86,16 @@ outputs:
       docker_config:
         step_2:
           mongodb:
-            image: *mongodb_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
             net: host
             privileged: false
             volumes: &mongodb_volumes
               - /var/lib/kolla/config_files/mongodb.json:/var/lib/kolla/config_files/config.json
-              - /var/lib/config-data/mongodb/etc/:/etc/:ro
+              - /var/lib/config-data/mongodb/etc/mongod.conf:/etc/mongod.conf:ro
+              - /var/lib/config-data/mongodb/etc/mongos.conf:/etc/mongos.conf:ro
               - /etc/localtime:/etc/localtime:ro
               - /var/log/containers/mongodb:/var/log/mongodb
               - /var/lib/mongodb:/var/lib/mongodb
@@ -99,7 +107,7 @@ outputs:
           config_volume: 'mongodb_init_tasks'
           puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset'
           step_config: 'include ::tripleo::profile::base::database::mongodb'
-          config_image: *mongodb_image
+          config_image: *mongodb_config_image
           volumes:
             - /var/lib/mongodb:/var/lib/mongodb
             - /var/log/containers/mongodb:/var/log/mongodb
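Editorial note: the mongodb hunk shows the convention applied throughout this change. Each service gains a separate Docker<Service>ConfigImage parameter, puppet_config anchors it (here &mongodb_config_image), and the runtime container resolves its own Docker<Service>Image inline instead of reusing the config anchor. Generically, with hypothetical parameter names:

parameters:
  DockerExampleImage:                 # image the service container actually runs
    default: 'centos-binary-example:latest'
    type: string
  DockerExampleConfigImage:           # image used only to generate the config_volume
    default: 'centos-binary-example:latest'
    type: string

# ...and later, under role_data:
      puppet_config:
        config_image: &example_config_image
          list_join:
            - '/'
            - [ {get_param: DockerNamespace}, {get_param: DockerExampleConfigImage} ]
      docker_config:
        step_2:
          example:
            image:
              list_join:
                - '/'
                - [ {get_param: DockerNamespace}, {get_param: DockerExampleImage} ]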
diff --git a/docker/services/database/mysql-client.yaml b/docker/services/database/mysql-client.yaml
new file mode 100644 (file)
index 0000000..38a31e2
--- /dev/null
@@ -0,0 +1,66 @@
+heat_template_version: pike
+
+description: >
+  Configuration for containerized MySQL clients
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerMysqlClientConfigImage:
+    description: The container image to use for the mysql_client config_volume
+    default: 'centos-binary-mariadb:latest'
+    type: string
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
+
+outputs:
+  role_data:
+    description: Role for setting mysql client parameters
+    value:
+      service_name: mysql_client
+      config_settings:
+        tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
+        tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS}
+        tripleo::profile::base::database::mysql::client::ssl_ca: {get_param: InternalTLSCAFile}
+      # BEGIN DOCKER SETTINGS #
+      step_config: ""
+      puppet_config:
+        config_volume: mysql_client
+        puppet_tags: file # set this even though file is the default
+        step_config: "include ::tripleo::profile::base::database::mysql::client"
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerMysqlClientConfigImage} ]
+      # no need for a docker config, this service only generates configuration files
+      docker_config: {}
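Editorial note: mysql-client.yaml is a config-only service; puppet_config renders the client settings into the mysql_client config_volume, while docker_config stays empty because nothing has to be started afterwards. The minimal shape of such a service, sketched with hypothetical names:

      service_name: example_client
      config_settings:
        tripleo::profile::base::example::client::some_setting: some_value   # hypothetical hiera key
      step_config: ""
      puppet_config:
        config_volume: example_client
        step_config: "include ::tripleo::profile::base::example::client"
        config_image: tripleoupstream/centos-binary-example:latest          # hypothetical
      docker_config: {}    # config generation only, no container is started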
diff --git a/docker/services/database/mysql.yaml b/docker/services/database/mysql.yaml
index 73578e1..725b2b4 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-mariadb:latest'
     type: string
+  DockerMysqlConfigImage:
+    description: The container image to use for the mysql config_volume
+    default: 'centos-binary-mariadb:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -74,10 +78,10 @@ outputs:
         config_volume: mysql
         puppet_tags: file # set this even though file is the default
         step_config: *step_config
-        config_image: &mysql_image
+        config_image: &mysql_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerMysqlConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/mysql.json:
           command: /usr/bin/mysqld_safe
@@ -87,17 +91,19 @@ outputs:
               recurse: true
       docker_config:
         # Kolla_bootstrap runs before permissions set by kolla_config
-        step_2:
+        step_1:
           mysql_init_logs:
-            start_order: 0
-            image: *mysql_image
+            image: &mysql_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
             privileged: false
             user: root
             volumes:
               - /var/log/containers/mysql:/var/log/mariadb
             command: ['/bin/bash', '-c', 'chown -R mysql:mysql /var/log/mariadb']
+        step_2:
           mysql_bootstrap:
-            start_order: 1
             detach: false
             image: *mysql_image
             net: host
@@ -105,7 +111,7 @@ outputs:
             command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
             volumes: &mysql_volumes
               - /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json
-              - /var/lib/config-data/mysql/etc/:/etc/:ro
+              - /var/lib/config-data/mysql/etc/my.cnf.d:/etc/my.cnf.d:ro
               - /etc/localtime:/etc/localtime:ro
               - /etc/hosts:/etc/hosts:ro
               - /var/lib/mysql:/var/lib/mysql
@@ -140,7 +146,7 @@ outputs:
           config_volume: 'mysql_init_tasks'
           puppet_tags: 'mysql_database,mysql_grant,mysql_user'
           step_config: 'include ::tripleo::profile::base::database::mysql'
-          config_image: *mysql_image
+          config_image: *mysql_config_image
           volumes:
             - /var/lib/mysql:/var/lib/mysql/:ro
             - /var/log/containers/mysql:/var/log/mariadb
diff --git a/docker/services/database/redis.yaml b/docker/services/database/redis.yaml
index 73df96c..0a490cd 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-redis:latest'
     type: string
+  DockerRedisConfigImage:
+    description: The container image to use for the redis config_volume
+    default: 'centos-binary-redis:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -64,10 +68,10 @@ outputs:
         # https://github.com/arioch/puppet-redis/commit/1c004143223e660cbd433422ff8194508aab9763
         puppet_tags: 'exec'
         step_config: *step_config
-        config_image: &redis_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerRedisImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerRedisConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/redis.json:
           command: /usr/bin/redis-server /etc/redis.conf
@@ -77,7 +81,20 @@ outputs:
               recurse: true
       docker_config:
         step_1:
+          redis_init_logs:
+            start_order: 0
+            detach: false
+            image: &redis_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerRedisImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/redis:/var/log/redis
+            command: ['/bin/bash', '-c', 'chown -R redis:redis /var/log/redis']
           redis:
+            start_order: 1
             image: *redis_image
             net: host
             privileged: false
@@ -85,16 +102,19 @@ outputs:
             volumes:
               - /run:/run
               - /var/lib/kolla/config_files/redis.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/redis/etc/:/etc/:ro
+              - /var/lib/config-data/redis/etc/redis.conf:/etc/redis.conf:ro
               - /etc/localtime:/etc/localtime:ro
-              - logs:/var/log/kolla
+              - /var/log/containers/redis:/var/log/redis
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
       host_prep_tasks:
-        - name: create /var/run/redis
+        - name: create persistent directories
           file:
-            path: /var/run/redis
+            path: "{{ item }}"
             state: directory
+          with_items:
+            - /var/log/containers/redis
+            - /var/run/redis
       upgrade_tasks:
         - name: Stop and disable redis service
           tags: step2
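Editorial note: the redis hunk adopts the same init-logs pattern used elsewhere in this change: a throwaway container running as root chowns the bind-mounted log directory, the real service starts afterwards via start_order, and host_prep_tasks pre-creates the host directories with a with_items loop. Reduced to its essentials (hypothetical names):

        step_1:
          example_init_logs:
            start_order: 0
            detach: false
            image: tripleoupstream/centos-binary-example:latest   # hypothetical
            user: root
            volumes:
              - /var/log/containers/example:/var/log/example
            command: ['/bin/bash', '-c', 'chown -R example:example /var/log/example']
          example:
            start_order: 1
            image: tripleoupstream/centos-binary-example:latest   # hypothetical
            restart: always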
diff --git a/docker/services/ec2-api.yaml b/docker/services/ec2-api.yaml
new file mode 100644 (file)
index 0000000..0c65a90
--- /dev/null
@@ -0,0 +1,160 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized EC2 API service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerEc2ApiImage:
+    description: image
+    default: 'centos-binary-ec2-api:latest'
+    type: string
+  DockerEc2ApiConfigImage:
+    description: The container image to use for the ec2api config_volume
+    default: 'centos-binary-ec2-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  Ec2ApiPuppetBase:
+      type: ../../puppet/services/ec2-api.yaml
+      properties:
+        EndpointMap: {get_param: EndpointMap}
+        ServiceNetMap: {get_param: ServiceNetMap}
+        DefaultPasswords: {get_param: DefaultPasswords}
+        RoleName: {get_param: RoleName}
+        RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the EC2 API role.
+    value:
+      service_name: {get_attr: [Ec2ApiPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [Ec2ApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: ec2api
+        puppet_tags: ec2api_api_paste_ini,ec2api_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/ec2_api.json:
+          command: /usr/bin/ec2-api
+          permissions:
+            - path: /var/log/ec2api
+              owner: ec2api:ec2api
+              recurse: true
+        /var/lib/kolla/config_files/ec2_api_metadata.json:
+          command: /usr/bin/ec2-api-metadata
+          permissions:
+            - path: /var/log/ec2api # default log dir for metadata service as well
+              owner: ec2api:ec2api
+              recurse: true
+      docker_config:
+        # db sync runs before permissions set by kolla_config
+        step_2:
+          ec2_api_init_logs:
+            image: &ec2_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/ec2_api:/var/log/ec2api
+              # mount ec2_api_metadata to "ec2api-metadata" only here to fix
+              # permissions of both directories in one go
+              - /var/log/containers/ec2_api_metadata:/var/log/ec2api-metadata
+            command: ['/bin/bash', '-c', 'chown -R ec2api:ec2api /var/log/ec2api /var/log/ec2api-metadata']
+        step_3:
+          ec2_api_db_sync:
+            image: *ec2_api_image
+            net: host
+            detach: false
+            privileged: false
+            user: root
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+                  - /var/log/containers/ec2_api:/var/log/ec2api
+            command: "/usr/bin/bootstrap_host_exec ec2_api su ec2api -s /bin/bash -c '/usr/bin/ec2-api-manage db_sync'"
+        step_4:
+          ec2_api:
+            image: *ec2_api_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ec2_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+                  - /var/log/containers/ec2_api:/var/log/ec2api
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+          ec2_api_metadata:
+            image: *ec2_api_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ec2_api_metadata.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+                  - /var/log/containers/ec2_api_metadata:/var/log/ec2api
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent log directories
+          file:
+            path: /var/log/containers/{{ item }}
+            state: directory
+          with_items:
+            - ec2_api
+            - ec2_api_metadata
+      upgrade_tasks:
+        - name: Stop and disable EC2-API services
+          tags: step2
+          service: name={{ item }} state=stopped enabled=no
+          with_items:
+            - openstack-ec2-api
+            - openstack-ec2-api-metadata
diff --git a/docker/services/etcd.yaml b/docker/services/etcd.yaml
index e5a7096..3c7c81b 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-etcd:latest'
     type: string
+  DockerEtcdConfigImage:
+    description: The container image to use for the etcd config_volume
+    default: 'centos-binary-etcd:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -67,10 +71,10 @@ outputs:
       puppet_config:
         config_volume: etcd
         step_config: *step_config
-        config_image: &etcd_image
+        config_image: &etcd_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerEtcdImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerEtcdConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/etcd.json:
           command: /usr/bin/etcd --config-file /etc/etcd/etcd.yml
@@ -81,7 +85,10 @@ outputs:
       docker_config:
         step_2:
           etcd:
-            image: *etcd_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerEtcdImage} ]
             net: host
             privileged: false
             restart: always
@@ -98,9 +105,9 @@ outputs:
           config_volume: 'etcd_init_tasks'
           puppet_tags: 'etcd_key'
           step_config: 'include ::tripleo::profile::base::etcd'
-          config_image: *etcd_image
+          config_image: *etcd_config_image
           volumes:
-            - /var/lib/config-data/etcd/etc/:/etc
+            - /var/lib/config-data/etcd/etc/etcd/:/etc/etcd:ro
             - /var/lib/etcd:/var/lib/etcd:ro
       host_prep_tasks:
         - name: create /var/lib/etcd
diff --git a/docker/services/glance-api.yaml b/docker/services/glance-api.yaml
index df8186d..4fadef9 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-glance-api:latest'
     type: string
+  DockerGlanceApiConfigImage:
+    description: The container image to use for the glance_api config_volume
+    default: 'centos-binary-glance-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -74,29 +78,31 @@ outputs:
         config_volume: glance_api
         puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
         step_config: *step_config
-        config_image: &glance_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/glance-api.json:
+        /var/lib/kolla/config_files/glance_api.json:
           command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
         /var/lib/kolla/config_files/glance_api_tls_proxy.json:
           command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
         # Kolla_bootstrap/db_sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           glance_init_logs:
-            start_order: 0
-            image: *glance_image
+            image: &glance_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
             privileged: false
             user: root
             volumes:
               - /var/log/containers/glance:/var/log/glance
             command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
+        step_3:
           glance_api_db_sync:
-            start_order: 1
-            image: *glance_image
+            image: *glance_api_image
             net: host
             privileged: false
             detach: false
@@ -105,7 +111,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/kolla/config_files/glance_api.json:/var/lib/kolla/config_files/config.json
                   - /var/lib/config-data/glance_api/etc/glance/:/etc/glance/:ro
                   - /var/log/containers/glance:/var/log/glance
             environment:
@@ -116,7 +122,7 @@ outputs:
           map_merge:
             - glance_api:
                 start_order: 2
-                image: *glance_image
+                image: *glance_api_image
                 net: host
                 privileged: false
                 restart: always
@@ -127,7 +133,7 @@ outputs:
                 - internal_tls_enabled
                 - glance_api_tls_proxy:
                     start_order: 2
-                    image: *glance_image
+                    image: *glance_api_image
                     net: host
                     user: root
                     restart: always
@@ -136,7 +142,9 @@ outputs:
                         - {get_attr: [ContainersCommon, volumes]}
                         -
                           - /var/lib/kolla/config_files/glance_api_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
-                          - /var/lib/config-data/glance_api/etc/httpd/:/etc/httpd/:ro
+                          - /var/lib/config-data/glance_api/etc/httpd/conf/:/etc/httpd/conf/:ro
+                          - /var/lib/config-data/glance_api/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                          - /var/lib/config-data/glance_api/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                           - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
                           - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
                     environment:
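Editorial note: several of the Apache-based services (glance above; gnocchi, heat, ironic and keystone below) stop bind-mounting all of /var/lib/config-data/<service>/etc/httpd and instead mount only the generated conf/, conf.d/ and conf.modules.d/ directories, presumably so the container keeps its own copies of the remaining /etc/httpd entries (such as the logs, modules and run symlinks on CentOS). The recurring fragment, with a placeholder service name:

                  - /var/lib/config-data/example/etc/httpd/conf/:/etc/httpd/conf/:ro
                  - /var/lib/config-data/example/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
                  - /var/lib/config-data/example/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro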
diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml
index e59d609..cf31d25 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-gnocchi-api:latest'
     type: string
+  DockerGnocchiConfigImage:
+    description: The container image to use for the gnocchi config_volume
+    default: 'centos-binary-gnocchi-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -73,12 +77,12 @@ outputs:
         config_volume: gnocchi
         puppet_tags: gnocchi_api_paste_ini,gnocchi_config
         step_config: *step_config
-        config_image: &gnocchi_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/gnocchi-api.json:
+        /var/lib/kolla/config_files/gnocchi_api.json:
           command: /usr/sbin/httpd -DFOREGROUND
           permissions:
             - path: /var/log/gnocchi
@@ -86,17 +90,19 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           gnocchi_init_log:
-            start_order: 0
-            image: *gnocchi_image
+            image: &gnocchi_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
             user: root
             volumes:
               - /var/log/containers/gnocchi:/var/log/gnocchi
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
+        step_3:
           gnocchi_db_sync:
-            start_order: 1
-            image: *gnocchi_image
+            image: *gnocchi_api_image
             net: host
             detach: false
             privileged: false
@@ -110,7 +116,7 @@ outputs:
             command: "/usr/bin/bootstrap_host_exec gnocchi_api su gnocchi -s /bin/bash -c '/usr/bin/gnocchi-upgrade --skip-storage'"
         step_4:
           gnocchi_api:
-            image: *gnocchi_image
+            image: *gnocchi_api_image
             net: host
             privileged: false
             restart: always
@@ -118,9 +124,11 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/gnocchi-api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/gnocchi_api.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
-                  - /var/lib/config-data/gnocchi/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/gnocchi/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/gnocchi/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/gnocchi/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                   - /var/lib/config-data/gnocchi/var/www/:/var/www/:ro
                   - /var/log/containers/gnocchi:/var/log/gnocchi
                   -
diff --git a/docker/services/gnocchi-metricd.yaml b/docker/services/gnocchi-metricd.yaml
index 2724805..3a05d57 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-gnocchi-metricd:latest'
     type: string
+  DockerGnocchiConfigImage:
+    description: The container image to use for the gnocchi config_volume
+    default: 'centos-binary-gnocchi-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,12 +67,12 @@ outputs:
         config_volume: gnocchi
         puppet_tags: gnocchi_config
         step_config: *step_config
-        config_image: &gnocchi_metricd_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/gnocchi-metricd.json:
+        /var/lib/kolla/config_files/gnocchi_metricd.json:
           command: /usr/bin/gnocchi-metricd
           permissions:
             - path: /var/log/gnocchi
@@ -77,7 +81,10 @@ outputs:
       docker_config:
         step_4:
           gnocchi_metricd:
-            image: *gnocchi_metricd_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
             net: host
             privileged: false
             restart: always
@@ -85,7 +92,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/gnocchi-metricd.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/gnocchi_metricd.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
                   - /var/log/containers/gnocchi:/var/log/gnocchi
             environment:
diff --git a/docker/services/gnocchi-statsd.yaml b/docker/services/gnocchi-statsd.yaml
index 305971f..c3523b5 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-gnocchi-statsd:latest'
     type: string
+  DockerGnocchiConfigImage:
+    description: The container image to use for the gnocchi config_volume
+    default: 'centos-binary-gnocchi-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,12 +67,12 @@ outputs:
         config_volume: gnocchi
         puppet_tags: gnocchi_config
         step_config: *step_config
-        config_image: &gnocchi_statsd_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/gnocchi-statsd.json:
+        /var/lib/kolla/config_files/gnocchi_statsd.json:
           command: /usr/bin/gnocchi-statsd
           permissions:
             - path: /var/log/gnocchi
@@ -77,7 +81,10 @@ outputs:
       docker_config:
         step_4:
           gnocchi_statsd:
-            image: *gnocchi_statsd_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
             net: host
             privileged: false
             restart: always
@@ -85,7 +92,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/gnocchi-statsd.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/gnocchi_statsd.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
                   - /var/log/containers/gnocchi:/var/log/gnocchi
             environment:
diff --git a/docker/services/haproxy.yaml b/docker/services/haproxy.yaml
new file mode 100644 (file)
index 0000000..242f075
--- /dev/null
@@ -0,0 +1,118 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized HAproxy service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerHAProxyImage:
+    description: image
+    default: 'centos-binary-haproxy:latest'
+    type: string
+  DockerHAProxyConfigImage:
+    description: The container image to use for the haproxy config_volume
+    default: 'centos-binary-haproxy:latest'
+    type: string
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  HAProxyStatsPassword:
+    description: Password for HAProxy stats endpoint
+    hidden: true
+    type: string
+  HAProxyStatsUser:
+    description: User for HAProxy stats endpoint
+    default: admin
+    type: string
+  HAProxySyslogAddress:
+    default: /dev/log
+    description: Syslog address where HAproxy will send its log
+    type: string
+  RedisPassword:
+    description: The password for Redis
+    type: string
+    hidden: true
+  MonitoringSubscriptionHaproxy:
+    default: 'overcloud-haproxy'
+    type: string
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  HAProxyBase:
+    type: ../../puppet/services/haproxy.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the HAproxy role.
+    value:
+      service_name: {get_attr: [HAProxyBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [HAProxyBase, role_data, config_settings]
+          - tripleo::haproxy::haproxy_daemon: false
+      step_config: &step_config
+        get_attr: [HAProxyBase, role_data, step_config]
+      service_config_settings: {get_attr: [HAProxyBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: haproxy
+        puppet_tags: haproxy_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/haproxy.json:
+          command: haproxy -f /etc/haproxy/haproxy.cfg
+      docker_config:
+        step_1:
+          haproxy:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/haproxy.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/haproxy/etc/:/etc/:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      metadata_settings:
+        get_attr: [HAProxyBase, role_data, metadata_settings]
diff --git a/docker/services/heat-api-cfn.yaml b/docker/services/heat-api-cfn.yaml
index 37fa4c8..1905281 100644 (file)
@@ -13,8 +13,8 @@ parameters:
     default: 'centos-binary-heat-api-cfn:latest'
     type: string
   # puppet needs the heat-wsgi-api-cfn binary from centos-binary-heat-api-cfn
-  DockerHeatConfigImage:
-    description: image
+  DockerHeatApiCfnConfigImage:
+    description: The container image to use for the heat_api_cfn config_volume
     default: 'centos-binary-heat-api-cfn:latest'
     type: string
   EndpointMap:
@@ -81,7 +81,7 @@ outputs:
         config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/heat_api_cfn.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -108,7 +108,9 @@ outputs:
                 -
                   - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/heat_api_cfn/etc/heat/:/etc/heat/:ro
-                  - /var/lib/config-data/heat_api_cfn/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/heat_api_cfn/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/heat_api_cfn/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/heat_api_cfn/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                   - /var/lib/config-data/heat_api_cfn/var/www/:/var/www/:ro
                   - /var/log/containers/heat:/var/log/heat
                   -
diff --git a/docker/services/heat-api.yaml b/docker/services/heat-api.yaml
index 5043aed..c0cec93 100644 (file)
@@ -13,8 +13,8 @@ parameters:
     default: 'centos-binary-heat-api:latest'
     type: string
   # puppet needs the heat-wsgi-api binary from centos-binary-heat-api
-  DockerHeatConfigImage:
-    description: image
+  DockerHeatApiConfigImage:
+    description: The container image to use for the heat_api config_volume
     default: 'centos-binary-heat-api:latest'
     type: string
   EndpointMap:
@@ -81,7 +81,7 @@ outputs:
         config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/heat_api.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -108,7 +108,9 @@ outputs:
                 -
                   - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/heat_api/etc/heat/:/etc/heat/:ro
-                  - /var/lib/config-data/heat_api/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/heat_api/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/heat_api/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/heat_api/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                   - /var/lib/config-data/heat_api/var/www/:/var/www/:ro
                   - /var/log/containers/heat:/var/log/heat
                   -
diff --git a/docker/services/heat-engine.yaml b/docker/services/heat-engine.yaml
index 0adad53..676dbb1 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-heat-engine:latest'
     type: string
+  DockerHeatConfigImage:
+    description: The container image to use for the heat config_volume
+    default: 'centos-binary-heat-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -67,10 +71,10 @@ outputs:
         config_volume: heat
         puppet_tags: heat_config,file,concat,file_line
         step_config: *step_config
-        config_image: &heat_engine_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/heat_engine.json:
           command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
@@ -80,16 +84,18 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           heat_init_log:
-            start_order: 0
-            image: *heat_engine_image
+            image: &heat_engine_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
             user: root
             volumes:
               - /var/log/containers/heat:/var/log/heat
             command: ['/bin/bash', '-c', 'chown -R heat:heat /var/log/heat']
+        step_3:
           heat_engine_db_sync:
-            start_order: 1
             image: *heat_engine_image
             net: host
             privileged: false
diff --git a/docker/services/horizon.yaml b/docker/services/horizon.yaml
new file mode 100644 (file)
index 0000000..5797b20
--- /dev/null
@@ -0,0 +1,135 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Horizon service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerHorizonImage:
+    description: image
+    default: 'centos-binary-horizon:latest'
+    type: string
+  DockerHorizonConfigImage:
+    description: The container image to use for the horizon config_volume
+    default: 'centos-binary-horizon:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  HorizonBase:
+    type: ../../puppet/services/horizon.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Horizon API role.
+    value:
+      service_name: {get_attr: [HorizonBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [HorizonBase, role_data, config_settings]
+          - horizon::vhost_extra_params:
+              add_listen: true
+              priority: 10
+              access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
+              options: ['FollowSymLinks','MultiViews']
+          - horizon::secure_cookies: false
+      step_config: {get_attr: [HorizonBase, role_data, step_config]}
+      service_config_settings: {get_attr: [HorizonBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: horizon
+        puppet_tags: horizon_config
+        step_config: {get_attr: [HorizonBase, role_data, step_config]}
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerHorizonConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/horizon.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+          permissions:
+            - path: /var/log/horizon/
+              owner: apache:apache
+              recurse: true
+            # FIXME Apache tries to write a .lock file there
+            - path: /usr/share/openstack-dashboard/openstack_dashboard/local/
+              owner: apache:apache
+              recurse: false
+      docker_config:
+        step_2:
+          horizon_fix_perms:
+            image: &horizon_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerHorizonImage} ]
+            user: root
+            # NOTE Set ownership for /var/log/horizon/horizon.log file here,
+            # otherwise it's created by root when generating django cache.
+            # FIXME Apache needs to read files in /etc/openstack-dashboard
+            # Need to set permissions to match the BM case,
+            # http://paste.openstack.org/show/609819/
+            command: ['/bin/bash', '-c', 'touch /var/log/horizon/horizon.log && chown -R apache:apache /var/log/horizon && chmod -R a+rx /etc/openstack-dashboard']
+            volumes:
+              - /var/log/containers/horizon:/var/log/horizon
+              - /var/lib/config-data/horizon/etc/:/etc/
+        step_3:
+          horizon:
+            image: *horizon_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/horizon.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/horizon/etc/httpd:/etc/httpd:ro
+                  - /var/lib/config-data/horizon/etc/openstack-dashboard:/etc/openstack-dashboard:ro
+                  - /var/log/containers/horizon:/var/log/horizon
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/horizon
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable horizon service (running under httpd)
+          tags: step2
+          service: name=httpd state=stopped enabled=no
+      metadata_settings:
+        get_attr: [HorizonBase, role_data, metadata_settings]
diff --git a/docker/services/ironic-api.yaml b/docker/services/ironic-api.yaml
index c8978aa..183ed5c 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-ironic-api:latest'
     type: string
   DockerIronicConfigImage:
-    description: image
+    description: The container image to use for the ironic config_volume
     default: 'centos-binary-ironic-pxe:latest'
     type: string
   EndpointMap:
@@ -61,6 +61,7 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [IronicApiBase, role_data, config_settings]
+          - apache::default_vhost: false
       step_config: &step_config
         get_attr: [IronicApiBase, role_data, step_config]
       service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
@@ -75,17 +76,16 @@ outputs:
             - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/ironic_api.json:
-          command: /usr/bin/ironic-api
+          command: /usr/sbin/httpd -DFOREGROUND
           permissions:
             - path: /var/log/ironic
               owner: ironic:ironic
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           ironic_init_logs:
-            start_order: 0
-            image: &ironic_image
+            image: &ironic_api_image
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
@@ -94,9 +94,10 @@ outputs:
             volumes:
               - /var/log/containers/ironic:/var/log/ironic
             command: ['/bin/bash', '-c', 'chown -R ironic:ironic /var/log/ironic']
+        step_3:
           ironic_db_sync:
             start_order: 1
-            image: *ironic_image
+            image: *ironic_api_image
             net: host
             privileged: false
             detach: false
@@ -105,22 +106,26 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/config-data/ironic/etc/:/etc/:ro
+                  - /var/lib/config-data/ironic/etc/ironic:/etc/ironic:ro
                   - /var/log/containers/ironic:/var/log/ironic
             command: "/usr/bin/bootstrap_host_exec ironic_api su ironic -s /bin/bash -c 'ironic-dbsync --config-file /etc/ironic/ironic.conf'"
         step_4:
           ironic_api:
             start_order: 10
-            image: *ironic_image
+            image: *ironic_api_image
             net: host
-            privileged: false
+            user: root
             restart: always
             volumes:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
                   - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
-                  - /var/lib/config-data/ironic/etc/:/etc/:ro
+                  - /var/lib/config-data/ironic/etc/ironic:/etc/ironic:ro
+                  - /var/lib/config-data/ironic/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/ironic/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/ironic/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
+                  - /var/lib/config-data/ironic/var/www/:/var/www/:ro
                   - /var/log/containers/ironic:/var/log/ironic
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
diff --git a/docker/services/ironic-conductor.yaml b/docker/services/ironic-conductor.yaml
index 360eb66..f47a3e4 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-ironic-conductor:latest'
     type: string
   DockerIronicConfigImage:
-    description: image
+    description: The container image to use for the ironic config_volume
     default: 'centos-binary-ironic-pxe:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/ironic-pxe.yaml b/docker/services/ironic-pxe.yaml
index bc828e6..f518b9d 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-ironic-pxe:latest'
     type: string
   DockerIronicConfigImage:
-    description: image
+    description: The container image to use for the ironic config_volume
     default: 'centos-binary-ironic-pxe:latest'
     type: string
   EndpointMap:
@@ -113,7 +113,9 @@ outputs:
                 -
                   - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
-                  - /var/lib/config-data/ironic/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/ironic/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/ironic/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/ironic/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                   - /var/lib/config-data/ironic/var/www/:/var/www/:ro
                   - /var/lib/ironic:/var/lib/ironic/
                   - /var/log/containers/ironic:/var/log/ironic
diff --git a/docker/services/iscsid.yaml b/docker/services/iscsid.yaml
new file mode 100644 (file)
index 0000000..86f2d3b
--- /dev/null
@@ -0,0 +1,116 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Iscsid service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerIscsidImage:
+    description: image
+    default: 'centos-binary-iscsid:latest'
+    type: string
+  DockerIscsidConfigImage:
+    description: The container image to use for the iscsid config_volume
+    default: 'centos-binary-iscsid:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Iscsid role.
+    value:
+      service_name: iscsid
+      config_settings: {}
+      step_config: ''
+      service_config_settings: {}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: iscsid
+        #puppet_tags: file
+        step_config: ''
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerIscsidConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/iscsid.json:
+          command: /usr/sbin/iscsid -f
+      docker_config:
+        step_3:
+          iscsid:
+            start_order: 2
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerIscsidImage} ]
+            net: host
+            privileged: true
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro
+                  - /dev/:/dev/
+                  - /run/:/run/
+                  - /sys:/sys
+                  - /lib/modules:/lib/modules:ro
+                  - /etc/iscsi:/etc/iscsi
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create /etc/iscsi
+          file:
+            path: /etc/iscsi
+            state: directory
+        - name: stat /lib/systemd/system/iscsid.socket
+          stat: path=/lib/systemd/system/iscsid.socket
+          register: stat_iscsid_socket
+        - name: Stop and disable iscsid.socket service
+          service: name=iscsid.socket state=stopped enabled=no
+          when: stat_iscsid_socket.stat.exists
+      upgrade_tasks:
+        - name: stat /lib/systemd/system/iscsid.service
+          stat: path=/lib/systemd/system/iscsid.service
+          register: stat_iscsid_service
+        - name: Stop and disable iscsid service
+          tags: step2
+          service: name=iscsid state=stopped enabled=no
+          when: stat_iscsid_service.stat.exists
+        - name: stat /lib/systemd/system/iscsid.socket
+          stat: path=/lib/systemd/system/iscsid.socket
+          register: stat_iscsid_socket
+        - name: Stop and disable iscsid.socket service
+          tags: step2
+          service: name=iscsid.socket state=stopped enabled=no
+          when: stat_iscsid_socket.stat.exists
+      metadata_settings: {}
diff --git a/docker/services/keystone.yaml b/docker/services/keystone.yaml
index 772859e..011ffaa 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-keystone:latest'
     type: string
+  DockerKeystoneConfigImage:
+    description: The container image to use for the keystone config_volume
+    default: 'centos-binary-keystone:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -86,25 +90,27 @@ outputs:
         config_volume: keystone
         puppet_tags: keystone_config
         step_config: *step_config
-        config_image: &keystone_image
+        config_image: &keystone_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/keystone.json:
           command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
         # Kolla_bootstrap/db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           keystone_init_log:
-            start_order: 0
-            image: *keystone_image
+            image: &keystone_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
             user: root
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
             volumes:
               - /var/log/containers/keystone:/var/log/keystone
+        step_3:
           keystone_db_sync:
-            start_order: 1
             image: *keystone_image
             net: host
             privileged: false
@@ -116,7 +122,9 @@ outputs:
                   - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/keystone/var/www/:/var/www/:ro
                   - /var/lib/config-data/keystone/etc/keystone/:/etc/keystone/:ro
-                  - /var/lib/config-data/keystone/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/keystone/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/keystone/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/keystone/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                   - /var/log/containers/keystone:/var/log/keystone
                   -
                     if:
@@ -152,7 +160,7 @@ outputs:
           config_volume: 'keystone_init_tasks'
           puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
           step_config: 'include ::tripleo::profile::base::keystone'
-          config_image: *keystone_image
+          config_image: *keystone_config_image
       host_prep_tasks:
         - name: create persistent logs directory
           file:
diff --git a/docker/services/manila-api.yaml b/docker/services/manila-api.yaml
new file mode 100644 (file)
index 0000000..66dc6c3
--- /dev/null
@@ -0,0 +1,125 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Manila API service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerManilaApiImage:
+    description: image
+    default: 'centos-binary-manila-api:latest'
+    type: string
+  DockerManilaConfigImage:
+    description: The container image to use for the manila config_volume
+    default: 'centos-binary-manila-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  ManilaApiPuppetBase:
+    type: ../../puppet/services/manila-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Manila API role.
+    value:
+      service_name: {get_attr: [ManilaApiPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [ManilaApiPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        {get_attr: [ManilaApiPuppetBase, role_data, step_config]}
+      service_config_settings: {get_attr: [ManilaApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: manila
+        puppet_tags: manila_config,manila_api_paste_ini
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerManilaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/manila_api.json:
+          command: /usr/bin/manila-api --config-file /usr/share/manila/manila-dist.conf --config-file /etc/manila/manila.conf
+          permissions:
+            - path: /var/log/manila
+              owner: manila:manila
+              recurse: true
+      docker_config:
+        step_2:
+          manila_init_logs:
+            image: &manila_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerManilaApiImage} ]
+            user: root
+            volumes:
+              - /var/log/containers/manila:/var/log/manila
+            command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R manila:manila /var/log/manila']
+        step_3:
+          manila_api_db_sync:
+            user: root
+            image: *manila_api_image
+            net: host
+            detach: false
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+                  - /var/log/containers/manila:/var/log/manila
+            command: "/usr/bin/bootstrap_host_exec manila_api su manila -s /bin/bash -c '/usr/bin/manila-manage db sync'"
+        step_4:
+          manila_api:
+            image: *manila_api_image
+            net: host
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/manila_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+                  - /var/log/containers/manila:/var/log/manila
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: Create persistent manila logs directory
+          file:
+            path: /var/log/containers/manila
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable manila_api service
+          tags: step2
+          service: name=openstack-manila-api state=stopped enabled=no
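Each kolla_config entry in these templates is rendered into the file that the container mounts at /var/lib/kolla/config_files/config.json; Kolla's start script reads it, applies the permissions entries and then executes the command. For the manila_api container above, the rendered content would look roughly like this (shown in YAML form for readability, the actual file is JSON; a sketch of the expected shape, not literal generated output):

    command: /usr/bin/manila-api --config-file /usr/share/manila/manila-dist.conf --config-file /etc/manila/manila.conf
    permissions:
      - path: /var/log/manila
        owner: manila:manila
        recurse: true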
diff --git a/docker/services/manila-scheduler.yaml b/docker/services/manila-scheduler.yaml
new file mode 100644 (file)
index 0000000..d4170e4
--- /dev/null
@@ -0,0 +1,105 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Manila Scheduler service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerManilaSchedulerImage:
+    description: image
+    default: 'centos-binary-manila-scheduler:latest'
+    type: string
+  DockerManilaConfigImage:
+    description: The container image to use for the manila config_volume
+    default: 'centos-binary-manila-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  ManilaSchedulerPuppetBase:
+    type: ../../puppet/services/manila-scheduler.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Manila Scheduler role.
+    value:
+      service_name: {get_attr: [ManilaSchedulerPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [ManilaSchedulerPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        {get_attr: [ManilaSchedulerPuppetBase, role_data, step_config]}
+      service_config_settings: {get_attr: [ManilaSchedulerPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: manila
+        puppet_tags: manila_config,manila_scheduler_paste_ini
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerManilaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/manila_scheduler.json:
+          command: /usr/bin/manila-scheduler --config-file /usr/share/manila/manila-dist.conf --config-file /etc/manila/manila.conf
+          permissions:
+            - path: /var/log/manila
+              owner: manila:manila
+              recurse: true
+      docker_config:
+        step_4:
+          manila_scheduler:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerManilaSchedulerImage} ]
+            net: host
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/manila_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+                  - /var/log/containers/manila:/var/log/manila
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: Create persistent manila logs directory
+          file:
+            path: /var/log/containers/manila
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable manila_scheduler service
+          tags: step2
+          service: name=openstack-manila-scheduler state=stopped enabled=no
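Both manila templates render their Puppet-generated configuration into the same config_volume and take the same DockerManilaConfigImage, which defaults to the API image, so a single override changes config generation for the API and scheduler alike. With the defaults above (DockerNamespace: tripleoupstream) the shared block resolves to approximately:

    puppet_config:
      config_volume: manila
      config_image: tripleoupstream/centos-binary-manila-api:latest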
diff --git a/docker/services/memcached.yaml b/docker/services/memcached.yaml
index d85a087..3d41c17 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-memcached:latest'
     type: string
+  DockerMemcachedConfigImage:
+    description: The container image to use for the memcached config_volume
+    default: 'centos-binary-memcached:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,16 +67,20 @@ outputs:
         config_volume: 'memcached'
         puppet_tags: 'file'
         step_config: *step_config
-        config_image: &memcached_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedConfigImage} ]
       kolla_config: {}
       docker_config:
         step_1:
           memcached_init_logs:
             start_order: 0
-            image: *memcached_image
+            detach: false
+            image: &memcached_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
             privileged: false
             user: root
             volumes:
diff --git a/docker/services/mistral-api.yaml b/docker/services/mistral-api.yaml
index 5586d41..f128428 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-mistral-api:latest'
     type: string
   DockerMistralConfigImage:
-    description: image
+    description: The container image to use for the mistral config_volume
     default: 'centos-binary-mistral-api:latest'
     type: string
   EndpointMap:
@@ -82,10 +82,9 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           mistral_init_logs:
-            start_order: 0
-            image: &mistral_image
+            image: &mistral_api_image
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
@@ -94,9 +93,10 @@ outputs:
             volumes:
               - /var/log/containers/mistral:/var/log/mistral
             command: ['/bin/bash', '-c', 'chown -R mistral:mistral /var/log/mistral']
+        step_3:
           mistral_db_sync:
-            start_order: 1
-            image: *mistral_image
+            start_order: 0
+            image: *mistral_api_image
             net: host
             privileged: false
             detach: false
@@ -105,12 +105,12 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/config-data/mistral/etc/:/etc/:ro
+                  - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
                   - /var/log/containers/mistral:/var/log/mistral
             command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head'"
           mistral_db_populate:
-            start_order: 2
-            image: *mistral_image
+            start_order: 1
+            image: *mistral_api_image
             net: host
             privileged: false
             detach: false
@@ -119,7 +119,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/config-data/mistral/etc/:/etc/:ro
+                  - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
                   - /var/log/containers/mistral:/var/log/mistral
             # NOTE: dprince this requires that we install openstack-tripleo-common into
             # the Mistral API image so that we get tripleo* actions
@@ -127,7 +127,7 @@ outputs:
         step_4:
           mistral_api:
             start_order: 15
-            image: *mistral_image
+            image: *mistral_api_image
             net: host
             privileged: false
             restart: always
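The mistral-api hunks move the log-directory initialisation container from step_3 to step_2 so the chown happens before the database containers run, renumber the remaining start_order values, and narrow the read-only mount from the whole rendered /etc to /etc/mistral only. The resulting ordering, pieced together from the hunks above:

    step_2:
      mistral_init_logs        # chown -R mistral:mistral /var/log/mistral
    step_3:
      mistral_db_sync          # start_order: 0
      mistral_db_populate      # start_order: 1
    step_4:
      mistral_api              # start_order: 15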
diff --git a/docker/services/mistral-engine.yaml b/docker/services/mistral-engine.yaml
index d60d847..712f4ba 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-mistral-engine:latest'
     type: string
   DockerMistralConfigImage:
-    description: image
+    description: The container image to use for the mistral config_volume
     default: 'centos-binary-mistral-api:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/mistral-executor.yaml b/docker/services/mistral-executor.yaml
index 76ae052..5a35ba9 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-mistral-executor:latest'
     type: string
   DockerMistralConfigImage:
-    description: image
+    description: The container image to use for the mistral config_volume
     default: 'centos-binary-mistral-api:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/multipathd.yaml b/docker/services/multipathd.yaml
new file mode 100644 (file)
index 0000000..61b0557
--- /dev/null
@@ -0,0 +1,96 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Multipathd service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerMultipathdImage:
+    description: image
+    default: 'centos-binary-multipathd:latest'
+    type: string
+  DockerMultipathdConfigImage:
+    description: The container image to use for the multipathd config_volume
+    default: 'centos-binary-multipathd:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Multipathd API role.
+    value:
+      service_name: multipathd
+      config_settings: {}
+      step_config: ''
+      service_config_settings: {}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: multipathd
+        #puppet_tags: file
+        step_config: ''
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/multipathd.json:
+          command: /usr/sbin/multipathd -d
+      docker_config:
+        step_3:
+          multipathd:
+            start_order: 1
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdImage} ]
+            net: host
+            privileged: true
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro
+                  - /dev/:/dev/
+                  - /run/:/run/
+                  - /sys:/sys
+                  - /lib/modules:/lib/modules:ro
+                  - /etc/iscsi:/etc/iscsi
+                  - /var/lib/cinder:/var/lib/cinder
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+      upgrade_tasks:
+        - name: Stop and disable multipathd service
+          tags: step2
+          service: name=multipathd state=stopped enabled=no
+      metadata_settings: {}
diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml
index 7ce47a1..ad8e87f 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-neutron-server:latest'
     type: string
-  # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   EndpointMap:
@@ -92,9 +91,8 @@ outputs:
           command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           neutron_init_logs:
-            start_order: 0
             image: &neutron_api_image
               list_join:
                 - '/'
@@ -104,8 +102,8 @@ outputs:
             volumes:
               - /var/log/containers/neutron:/var/log/neutron
             command: ['/bin/bash', '-c', 'chown -R neutron:neutron /var/log/neutron']
+        step_3:
           neutron_db_sync:
-            start_order: 1
             image: *neutron_api_image
             net: host
             privileged: false
@@ -150,7 +148,9 @@ outputs:
                         - {get_attr: [ContainersCommon, volumes]}
                         -
                           - /var/lib/kolla/config_files/neutron_server_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
-                          - /var/lib/config-data/neutron/etc/httpd/:/etc/httpd/:ro
+                          - /var/lib/config-data/neutron/etc/httpd/conf/:/etc/httpd/conf/:ro
+                          - /var/lib/config-data/neutron/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                          - /var/lib/config-data/neutron/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                           - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
                           - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
                     environment:
diff --git a/docker/services/neutron-dhcp.yaml b/docker/services/neutron-dhcp.yaml
index d14f525..a2bd07f 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-neutron-dhcp-agent:latest'
     type: string
-  # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/neutron-l3.yaml b/docker/services/neutron-l3.yaml
index 97901bc..f7d0d03 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-neutron-l3-agent:latest'
     type: string
-  # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   ServiceNetMap:
@@ -71,7 +70,7 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/neutron-l3-agent.json:
+        /var/lib/kolla/config_files/neutron_l3_agent.json:
           command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
           permissions:
             - path: /var/log/neutron
@@ -79,7 +78,7 @@ outputs:
               recurse: true
       docker_config:
         step_4:
-          neutronl3agent:
+          neutron_l3_agent:
             image:
               list_join:
                 - '/'
@@ -92,7 +91,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/neutron_l3_agent.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
                   - /lib/modules:/lib/modules:ro
                   - /run:/run
@@ -104,3 +103,8 @@ outputs:
           file:
             path: /var/log/containers/neutron
             state: directory
+      upgrade_tasks:
+        - name: Stop and disable neutron_l3 service
+          tags: step2
+          service: name=neutron-l3-agent state=stopped enabled=no
+
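The neutron-l3 change renames the hyphenated identifiers to underscored ones so the docker_config key, the kolla_config file name and the bind-mounted config.json all agree, and adds the previously missing upgrade task for the host neutron-l3-agent unit; the metadata and Open vSwitch agent templates below get the same rename. Sketch of the now-consistent naming:

    kolla_config:
      /var/lib/kolla/config_files/neutron_l3_agent.json:
        command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
    docker_config:
      step_4:
        neutron_l3_agent:
          volumes:
            - /var/lib/kolla/config_files/neutron_l3_agent.json:/var/lib/kolla/config_files/config.json:ro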
diff --git a/docker/services/neutron-metadata.yaml b/docker/services/neutron-metadata.yaml
index 88b2ca5..493b97b 100644 (file)
@@ -12,9 +12,8 @@ parameters:
     description: image
     default: 'centos-binary-neutron-metadata-agent:latest'
     type: string
-  # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   ServiceNetMap:
@@ -71,7 +70,7 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/neutron-metadata-agent.json:
+        /var/lib/kolla/config_files/neutron_metadata_agent.json:
           command: /usr/bin/neutron-metadata-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-metadata-agent
           permissions:
             - path: /var/log/neutron
@@ -92,7 +91,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/neutron-metadata-agent.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/neutron_metadata_agent.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
                   - /lib/modules:/lib/modules:ro
                   - /run:/run
diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml
index 89bf866..80de2cc 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-neutron-openvswitch-agent:latest'
     type: string
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   ServiceNetMap:
@@ -70,7 +70,7 @@ outputs:
           - '/'
           - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/neutron-openvswitch-agent.json:
+        /var/lib/kolla/config_files/neutron_ovs_agent.json:
           command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
           permissions:
             - path: /var/log/neutron
@@ -78,8 +78,8 @@ outputs:
               recurse: true
       docker_config:
         step_4:
-          neutronovsagent:
-            image: &neutron_ovs_agent_image
+          neutron_ovs_agent:
+            image:
               list_join:
               - '/'
               - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
@@ -91,7 +91,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/neutron_ovs_agent.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
                   - /lib/modules:/lib/modules:ro
                   - /run:/run
diff --git a/docker/services/neutron-plugin-ml2.yaml b/docker/services/neutron-plugin-ml2.yaml
index 1739a5b..7211014 100644 (file)
@@ -20,7 +20,7 @@ parameters:
     default: 'tripleoupstream'
     type: string
   DockerNeutronConfigImage:
-    description: image
+    description: The container image to use for the neutron config_volume
     default: 'centos-binary-neutron-server:latest'
     type: string
   DefaultPasswords:
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
index 2375dad..5d410fb 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-nova-api:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   EndpointMap:
@@ -62,6 +62,9 @@ outputs:
         map_merge:
           - get_attr: [NovaApiBase, role_data, config_settings]
           - apache::default_vhost: false
+            nova_wsgi_enabled: false
+            nova::api::service_name: '%{::nova::params::api_service_name}'
+            nova::wsgi::apache_api::ssl: false
       step_config: &step_config
         list_join:
           - "\n"
@@ -86,9 +89,8 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           nova_init_logs:
-            start_order: 0
             image: &nova_api_image
               list_join:
                 - '/'
@@ -98,8 +100,9 @@ outputs:
             volumes:
               - /var/log/containers/nova:/var/log/nova
             command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
+        step_3:
           nova_api_db_sync:
-            start_order: 1
+            start_order: 0
             image: *nova_api_image
             net: host
             detach: false
@@ -116,7 +119,7 @@ outputs:
           # to be capable of upgrading a baremetal setup. This is to ensure the name
           # of the cell is 'default'
           nova_api_map_cell0:
-            start_order: 2
+            start_order: 1
             image: *nova_api_image
             net: host
             detach: false
@@ -124,7 +127,7 @@ outputs:
             volumes: *nova_api_volumes
             command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 map_cell0'"
           nova_api_create_default_cell:
-            start_order: 3
+            start_order: 2
             image: *nova_api_image
             net: host
             detach: false
@@ -136,7 +139,7 @@ outputs:
             user: root
             command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 create_cell --name=default'"
           nova_db_sync:
-            start_order: 4
+            start_order: 3
             image: *nova_api_image
             net: host
             detach: false
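The nova-api hunks follow the same pattern: the log-initialisation container moves to step_2 and the bootstrap containers in step_3 are renumbered so they still run in strict sequence; each bootstrap command is wrapped in bootstrap_host_exec so it executes on a single host. The resulting step_3 sequence, with start_order values taken from the hunks above:

    step_3:
      nova_api_db_sync:               # start_order: 0
      nova_api_map_cell0:             # start_order: 1, nova-manage cell_v2 map_cell0
      nova_api_create_default_cell:   # start_order: 2, nova-manage cell_v2 create_cell --name=default
      nova_db_sync:                   # start_order: 3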
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
index 4f10a1a..1277a8f 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-nova-compute:latest'
     type: string
+  DockerNovaLibvirtConfigImage:
+    description: The container image to use for the nova_libvirt config_volume
+    default: 'centos-binary-nova-compute:latest'
+    type: string
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -69,12 +73,12 @@ outputs:
         config_volume: nova_libvirt
         puppet_tags: nova_config,nova_paste_api_ini
         step_config: *step_config
-        config_image: &nova_compute_image
+        config_image:
           list_join:
           - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaLibvirtConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/nova-compute.json:
+        /var/lib/kolla/config_files/nova_compute.json:
           command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
           permissions:
             - path: /var/log/nova
@@ -86,17 +90,20 @@ outputs:
       docker_config:
         # FIXME: run discover hosts here
         step_4:
-          novacompute:
-            image: *nova_compute_image
+          nova_compute:
+            image: &nova_compute_image
+              list_join:
+              - '/'
+              - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
             net: host
             privileged: true
-            user: root
+            user: nova
             restart: always
             volumes:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/nova_compute.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/nova_libvirt/etc/nova/:/etc/nova/:ro
                   - /dev:/dev
                   - /etc/iscsi:/etc/iscsi
diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml
index 131355d..266180c 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-nova-conductor:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   EndpointMap:
@@ -82,7 +82,7 @@ outputs:
       docker_config:
         step_4:
           nova_conductor:
-            image: &nova_conductor_image
+            image:
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
diff --git a/docker/services/nova-consoleauth.yaml b/docker/services/nova-consoleauth.yaml
new file mode 100644 (file)
index 0000000..d836797
--- /dev/null
@@ -0,0 +1,108 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Nova Consoleauth service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerNovaConsoleauthImage:
+    description: image
+    default: 'centos-binary-nova-consoleauth:latest'
+    type: string
+  DockerNovaConfigImage:
+    description: The container image to use for the nova config_volume
+    default: 'centos-binary-nova-base:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  NovaConsoleauthPuppetBase:
+    type: ../../puppet/services/nova-consoleauth.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Nova Consoleauth service.
+    value:
+      service_name: {get_attr: [NovaConsoleauthPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [NovaConsoleauthPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [NovaConsoleauthPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [NovaConsoleauthPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: nova
+        puppet_tags: nova_config
+        step_config: *step_config
+        config_image:
+          list_join:
+          - '/'
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/nova_consoleauth.json:
+          command: /usr/bin/nova-consoleauth
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
+      docker_config:
+        step_4:
+          nova_consoleauth:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNovaConsoleauthImage} ]
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova_consoleauth.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+                  - /var/log/containers/nova:/var/log/nova
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/nova
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable nova_consoleauth service
+          tags: step2
+          service: name=openstack-nova-consoleauth state=stopped enabled=no
diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml
index be0dd11..8f98839 100644 (file)
@@ -8,12 +8,12 @@ parameters:
     description: namespace
     default: 'tripleoupstream'
     type: string
-  DockerNovaComputeImage:
+  DockerNovaComputeIronicImage:
     description: image
     default: 'centos-binary-nova-compute-ironic:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   ServiceNetMap:
@@ -81,11 +81,11 @@ outputs:
               recurse: true
       docker_config:
         step_5:
-          novacompute:
+          nova_compute:
             image:
               list_join:
               - '/'
-              - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+              - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeIronicImage} ]
             net: host
             privileged: true
             user: root
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index 9779d67..f1a48cf 100644 (file)
@@ -8,14 +8,14 @@ parameters:
     description: namespace
     default: 'tripleoupstream'
     type: string
-  DockerLibvirtImage:
+  DockerNovaLibvirtImage:
     description: image
     default: 'centos-binary-nova-libvirt:latest'
     type: string
   # we configure libvirt via the nova-compute container due to coupling
   # in the puppet modules
-  DockerNovaConfigImage:
-    description: image
+  DockerNovaLibvirtConfigImage:
+    description: The container image to use for the nova_libvirt config_volume
     default: 'centos-binary-nova-compute:latest'
     type: string
   EnablePackageInstall:
@@ -44,6 +44,26 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+  UseTLSTransportForLiveMigration:
+    type: boolean
+    default: true
+    description: If set to true and if EnableInternalTLS is enabled, it will
+                 set the libvirt URI's transport to tls and configure the
+                 relevant keys for libvirt.
+
+conditions:
+
+  use_tls_for_live_migration:
+    and:
+    - equals:
+      - {get_param: EnableInternalTLS}
+      - true
+    - equals:
+      - {get_param: UseTLSTransportForLiveMigration}
+      - true
 
 resources:
 
@@ -81,11 +101,15 @@ outputs:
         step_config: *step_config
         config_image:
           list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerNovaLibvirtConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/nova-libvirt.json:
-          command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
+        /var/lib/kolla/config_files/nova_libvirt.json:
+          command:
+            if:
+              - use_tls_for_live_migration
+              - /usr/sbin/libvirtd --listen --config /etc/libvirt/libvirtd.conf
+              - /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
           permissions:
             - path: /var/log/nova
               owner: nova:nova
@@ -96,7 +120,7 @@ outputs:
             image:
               list_join:
               - '/'
-              - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
+              - [ {get_param: DockerNamespace}, {get_param: DockerNovaLibvirtImage} ]
             net: host
             pid: host
             privileged: true
@@ -105,7 +129,7 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/nova_libvirt.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/nova_libvirt/etc/libvirt/:/etc/libvirt/:ro
                   - /lib/modules:/lib/modules:ro
                   - /dev:/dev
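The nova-libvirt template gains dedicated DockerNovaLibvirtImage and DockerNovaLibvirtConfigImage parameters and a use_tls_for_live_migration condition: when both EnableInternalTLS and UseTLSTransportForLiveMigration are true, the kolla command starts libvirtd with --listen so the TLS transport can be used for live migration. A hypothetical environment file that enables it:

    parameter_defaults:
      EnableInternalTLS: true
      UseTLSTransportForLiveMigration: true
    # with both true, the rendered kolla_config command becomes
    #   /usr/sbin/libvirtd --listen --config /etc/libvirt/libvirtd.conf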
diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml
index ae4ccf6..251bbaa 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-nova-placement-api:latest'
     type: string
+  DockerNovaPlacementConfigImage:
+    description: The container image to use for the nova_placement config_volume
+    default: 'centos-binary-nova-placement-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -66,10 +70,10 @@ outputs:
         config_volume: nova_placement
         puppet_tags: nova_config
         step_config: *step_config
-        config_image: &nova_placement_image
+        config_image:
           list_join:
           - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/nova_placement.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -82,7 +86,10 @@ outputs:
         step_3:
           nova_placement:
             start_order: 1
-            image: *nova_placement_image
+            image:
+              list_join:
+              - '/'
+              - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
             net: host
             user: root
             restart: always
@@ -92,7 +99,9 @@ outputs:
                 -
                   - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/nova_placement/etc/nova/:/etc/nova/:ro
-                  - /var/lib/config-data/nova_placement/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/nova_placement/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/nova_placement/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/nova_placement/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                   - /var/lib/config-data/nova_placement/var/www/:/var/www/:ro
                   - /var/log/containers/nova:/var/log/nova
             environment:
diff --git a/docker/services/nova-scheduler.yaml b/docker/services/nova-scheduler.yaml
index 6285e98..fbb3abc 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-nova-scheduler:latest'
     type: string
   DockerNovaConfigImage:
-    description: image
+    description: The container image to use for the nova config_volume
     default: 'centos-binary-nova-base:latest'
     type: string
   EndpointMap:
diff --git a/docker/services/nova-vnc-proxy.yaml b/docker/services/nova-vnc-proxy.yaml
new file mode 100644 (file)
index 0000000..c2b9c3b
--- /dev/null
@@ -0,0 +1,108 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Nova Vncproxy service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerNovaVncProxyImage:
+    description: image
+    default: 'centos-binary-nova-novncproxy:latest'
+    type: string
+  DockerNovaConfigImage:
+    description: The container image to use for the nova config_volume
+    default: 'centos-binary-nova-base:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  NovaVncProxyPuppetBase:
+    type: ../../puppet/services/nova-vnc-proxy.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Nova Vncproxy service.
+    value:
+      service_name: {get_attr: [NovaVncProxyPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [NovaVncProxyPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [NovaVncProxyPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [NovaVncProxyPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: nova
+        puppet_tags: nova_config
+        step_config: *step_config
+        config_image:
+          list_join:
+          - '/'
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/nova_vnc_proxy.json:
+          command: /usr/bin/nova-novncproxy --web /usr/share/novnc/
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
+      docker_config:
+        step_4:
+          nova_vnc_proxy:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNovaVncProxyImage} ]
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova_vnc_proxy.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+                  - /var/log/containers/nova:/var/log/nova
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/nova
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable nova_vnc_proxy service
+          tags: step2
+          service: name=openstack-nova-novncproxy state=stopped enabled=no
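nova-consoleauth and nova-vnc-proxy above are thin variations of one pattern: both reuse the shared nova config_volume generated from DockerNovaConfigImage, mount the rendered /etc/nova read-only, and run a single daemon at step_4; they differ only in image, kolla command and the systemd unit disabled during upgrade. Sketch of the common shape, with the per-service parts marked:

    puppet_config:
      config_volume: nova
      config_image: tripleoupstream/centos-binary-nova-base:latest   # default resolution
    docker_config:
      step_4:
        <service_name>:                 # nova_consoleauth or nova_vnc_proxy
          image: <per-service image>    # DockerNovaConsoleauthImage / DockerNovaVncProxyImage
          net: host
          restart: always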
diff --git a/docker/services/octavia-api.yaml b/docker/services/octavia-api.yaml
new file mode 100644 (file)
index 0000000..728162f
--- /dev/null
@@ -0,0 +1,155 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Octavia service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerOctaviaApiImage:
+    description: image
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  DockerOctaviaConfigImage:
+    description: The container image to use for the octavia config_volume
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  OctaviaApiPuppetBase:
+    type: ../../puppet/services/octavia-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Octavia API role.
+    value:
+      service_name: {get_attr: [OctaviaApiPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [OctaviaApiPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [OctaviaApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [OctaviaApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: octavia
+        puppet_tags: octavia_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/octavia_api.json:
+          command: /usr/bin/octavia-api --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/api.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-api
+        /var/lib/kolla/config_files/octavia_api_tls_proxy.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+      docker_config:
+        # Kolla_bootstrap/db_sync runs before permissions set by kolla_config
+        step_2:
+          octavia_api_init_dirs:
+            start_order: 0
+            image: &octavia_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaApiImage} ]
+            user: root
+            volumes:
+              - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+              - /var/log/containers/octavia:/var/log/octavia
+            command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-api; chown -R octavia:octavia /etc/octavia/conf.d/octavia-api; chown -R octavia:octavia /var/log/octavia']
+        step_3:
+          octavia_db_sync:
+            start_order: 0
+            image: *octavia_api_image
+            net: host
+            privileged: false
+            detach: false
+            user: root
+            volumes: &octavia_volumes
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/octavia_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+                  - /var/log/containers/octavia:/var/log/octavia
+            command: "/usr/bin/bootstrap_host_exec octavia_api su octavia -s /bin/bash -c '/usr/bin/octavia-db-manage upgrade head'"
+        step_4:
+          map_merge:
+            - octavia_api:
+                start_order: 2
+                image: *octavia_api_image
+                net: host
+                privileged: false
+                restart: always
+                volumes: *octavia_volumes
+                environment:
+                  - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+            - if:
+                - internal_tls_enabled
+                - octavia_api_tls_proxy:
+                    start_order: 2
+                    image: *octavia_api_image
+                    net: host
+                    user: root
+                    restart: always
+                    volumes:
+                      list_concat:
+                        - {get_attr: [ContainersCommon, volumes]}
+                        -
+                          - /var/lib/kolla/config_files/octavia_api_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+                          - /var/lib/config-data/octavia/etc/httpd/:/etc/httpd/:ro
+                          - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                          - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                    environment:
+                      - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+                - {}
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/octavia
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable octavia_api service
+          tags: step2
+          service: name=openstack-octavia-api state=stopped enabled=no
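The octavia-api template combines the earlier patterns with a conditional sidecar: step_2 prepares directories and ownership, step_3 runs the db sync through bootstrap_host_exec, and step_4 uses map_merge with the internal_tls_enabled condition so the httpd TLS proxy container is added only when EnableInternalTLS is true. Sketch of the rendered step_4:

    # EnableInternalTLS false: only octavia_api is present.
    # EnableInternalTLS true: the map_merge also adds the proxy container.
    step_4:
      octavia_api:
        start_order: 2
      octavia_api_tls_proxy:    # only when internal_tls_enabled
        start_order: 2          # runs /usr/sbin/httpd -DFOREGROUND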
diff --git a/docker/services/octavia-health-manager.yaml b/docker/services/octavia-health-manager.yaml
new file mode 100644 (file)
index 0000000..2228e36
--- /dev/null
@@ -0,0 +1,114 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Octavia health-manager service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerOctaviaHealthManagerImage:
+    description: image
+    default: 'centos-binary-octavia-health-manager:latest'
+    type: string
+  DockerOctaviaConfigImage:
+    description: The container image to use for the octavia config_volume
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  OctaviaHealthManagerPuppetBase:
+    type: ../../puppet/services/octavia-health-manager.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Octavia health-manager role.
+    value:
+      service_name: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [OctaviaHealthManagerPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: octavia
+        puppet_tags: octavia_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/octavia_health_manager.json:
+          command: /usr/bin/octavia-health-manager --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/health-manager.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-health-manager
+      docker_config:
+        step_2:
+          octavia_health_manager_init_dirs:
+            start_order: 0
+            image: &octavia_health_manager_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaHealthManagerImage} ]
+            user: root
+            volumes:
+              - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+            command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-health-manager; chown -R octavia:octavia /etc/octavia/conf.d/octavia-health-manager']
+        step_4:
+          octavia_health_manager:
+            start_order: 2
+            image: *octavia_health_manager_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/octavia_health_manager.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+                  - /var/log/containers/octavia:/var/log/octavia
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/octavia
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable octavia_health_manager service
+          tags: step2
+          service: name=openstack-octavia-health-manager state=stopped enabled=no
diff --git a/docker/services/octavia-housekeeping.yaml b/docker/services/octavia-housekeeping.yaml
new file mode 100644 (file)
index 0000000..c2986c6
--- /dev/null
@@ -0,0 +1,114 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Octavia service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerOctaviaHousekeepingImage:
+    description: image
+    default: 'centos-binary-octavia-housekeeping:latest'
+    type: string
+  DockerOctaviaConfigImage:
+    description: The container image to use for the octavia config_volume
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  OctaviaHousekeepingPuppetBase:
+    type: ../../puppet/services/octavia-housekeeping.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Octavia housekeeping role.
+    value:
+      service_name: {get_attr: [OctaviaHousekeepingPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [OctaviaHousekeepingPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [OctaviaHousekeepingPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [OctaviaHousekeepingPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: octavia
+        puppet_tags: octavia_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/octavia_housekeeping.json:
+          command: /usr/bin/octavia-housekeeping --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/housekeeping.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-housekeeping
+      docker_config:
+        step_2:
+          octavia_housekeeping_init_dirs:
+            start_order: 0
+            image: &octavia_housekeeping_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaHousekeepingImage} ]
+            user: root
+            volumes:
+              - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+            command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-housekeeping; chown -R octavia:octavia /etc/octavia/conf.d/octavia-housekeeping']
+        step_4:
+          octavia_housekeeping:
+            start_order: 2
+            image: *octavia_housekeeping_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/octavia_housekeeping.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+                  - /var/log/containers/octavia:/var/log/octavia
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/octavia
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable octavia_housekeeping service
+          tags: step2
+          service: name=openstack-octavia-housekeeping state=stopped enabled=no
diff --git a/docker/services/octavia-worker.yaml b/docker/services/octavia-worker.yaml
new file mode 100644 (file)
index 0000000..4129512
--- /dev/null
@@ -0,0 +1,114 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Octavia worker service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerOctaviaWorkerImage:
+    description: image
+    default: 'centos-binary-octavia-worker:latest'
+    type: string
+  DockerOctaviaConfigImage:
+    description: The container image to use for the octavia config_volume
+    default: 'centos-binary-octavia-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  OctaviaWorkerPuppetBase:
+    type: ../../puppet/services/octavia-worker.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Octavia worker role.
+    value:
+      service_name: {get_attr: [OctaviaWorkerPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [OctaviaWorkerPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [OctaviaWorkerPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [OctaviaWorkerPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: octavia
+        puppet_tags: octavia_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/octavia_worker.json:
+          command: /usr/bin/octavia-worker --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/worker.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-worker
+      docker_config:
+        step_2:
+          octavia_worker_init_dirs:
+            start_order: 0
+            image: &octavia_worker_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaWorkerImage} ]
+            user: root
+            volumes:
+              - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+            command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-worker; chown -R octavia:octavia /etc/octavia/conf.d/octavia-worker']
+        step_4:
+          octavia_worker:
+            start_order: 2
+            image: *octavia_worker_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/octavia_worker.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+                  - /var/log/containers/octavia:/var/log/octavia
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/octavia
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable octavia_worker service
+          tags: step2
+          service: name=openstack-octavia-worker state=stopped enabled=no
diff --git a/docker/services/pacemaker/cinder-backup.yaml b/docker/services/pacemaker/cinder-backup.yaml
new file mode 100644 (file)
index 0000000..d15c920
--- /dev/null
@@ -0,0 +1,151 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Backup service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderBackupImage:
+    description: image
+    default: 'centos-binary-cinder-backup:latest'
+    type: string
+  DockerCinderConfigImage:
+    description: The container image to use for the cinder config_volume
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  CinderBackupBackend:
+    default: swift
+    description: The short name of the Cinder Backup backend to use.
+    type: string
+    constraints:
+    - allowed_values: ['swift', 'ceph']
+  CinderBackupRbdPoolName:
+    default: backups
+    type: string
+  CephClientUserName:
+    default: openstack
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  CinderBackupBase:
+    type: ../../../puppet/services/cinder-backup.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      CinderBackupBackend: {get_param: CinderBackupBackend}
+      CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
+      CephClientUserName: {get_param: CephClientUserName}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Backup role.
+    value:
+      service_name: {get_attr: [CinderBackupBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [CinderBackupBase, role_data, config_settings]
+          - tripleo::profile::pacemaker::cinder::backup_bundle::cinder_backup_docker_image: &cinder_backup_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderBackupImage} ]
+            cinder::backup::manage_service: false
+            cinder::backup::enabled: false
+      step_config: ""
+      service_config_settings: {get_attr: [CinderBackupBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: {get_attr: [CinderBackupBase, role_data, step_config]}
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_backup.json:
+          command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/lib/cinder
+              owner: cinder:cinder
+              recurse: true
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_3:
+          cinder_backup_init_logs:
+            start_order: 0
+            image: *cinder_backup_image
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_5:
+          cinder_backup_init_bundle:
+            start_order: 1
+            detach: false
+            net: host
+            user: root
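+            # Run puppet inside the container with only the pacemaker-related tags
+            # enabled; str_replace substitutes TAGS and CONFIG into the command
+            # template below to create the cinder-backup bundle resource.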
+            command:
+              - '/bin/bash'
+              - '-c'
+              - str_replace:
+                  template:
+                    list_join:
+                      - '; '
+                      - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 5}' > /etc/puppet/hieradata/docker.json"
+                        - "FACTER_uuid=docker puppet apply --tags file_line,concat,augeas,TAGS --debug -v -e 'CONFIG'"
+                  params:
+                    TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location'
+                    CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::backup_bundle'
+            image: *cinder_backup_image
+            volumes:
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /etc/puppet:/tmp/puppet-etc:ro
+              - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+              - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+              - /dev/shm:/dev/shm:rw
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/lib/cinder
+            - /var/log/containers/cinder
+      upgrade_tasks:
+        - name: Stop and disable cinder_backup service
+          tags: step2
+          service: name=openstack-cinder-backup state=stopped enabled=no
diff --git a/docker/services/pacemaker/cinder-volume.yaml b/docker/services/pacemaker/cinder-volume.yaml
new file mode 100644 (file)
index 0000000..07e5fc2
--- /dev/null
@@ -0,0 +1,169 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Volume service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderVolumeImage:
+    description: image
+    default: 'centos-binary-cinder-volume:latest'
+    type: string
+  DockerCinderConfigImage:
+    description: The container image to use for the cinder config_volume
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  # custom parameters for the Cinder volume role
+  CinderEnableIscsiBackend:
+    default: true
+    description: Whether or not to enable the iSCSI backend for Cinder
+    type: boolean
+  CinderLVMLoopDeviceSize:
+    default: 10280
+    description: The size (in MB) of the loopback file used by the cinder LVM driver.
+    type: number
+
+resources:
+
+  CinderBase:
+    type: ../../../puppet/services/cinder-volume.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Volume role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [CinderBase, role_data, config_settings]
+          - tripleo::profile::pacemaker::cinder::volume_bundle::cinder_volume_docker_image: &cinder_volume_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderVolumeImage} ]
+            cinder::volume::manage_service: false
+            cinder::volume::enabled: false
+            cinder::host: hostgroup
+      step_config: ""
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: {get_attr: [CinderBase, role_data, step_config]}
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_volume.json:
+          command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_3:
+          cinder_volume_init_logs:
+            start_order: 0
+            image: *cinder_volume_image
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_5:
+          cinder_volume_init_bundle:
+            start_order: 0
+            detach: false
+            net: host
+            user: root
+            command:
+              - '/bin/bash'
+              - '-c'
+              - str_replace:
+                  template:
+                    list_join:
+                      - '; '
+                      - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 5}' > /etc/puppet/hieradata/docker.json"
+                        - "FACTER_uuid=docker puppet apply --tags file_line,concat,augeas,TAGS --debug -v -e 'CONFIG'"
+                  params:
+                    TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location'
+                    CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::volume_bundle'
+            image: *cinder_volume_image
+            volumes:
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /etc/puppet:/tmp/puppet-etc:ro
+              - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+              - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+              - /dev/shm:/dev/shm:rw
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/cinder
+            - /var/lib/cinder
+        # FIXME: all of this should be conditional on the CinderEnableIscsiBackend value being set to true
+        - name: cinder create LVM backing file with dd
+          command:
+            list_join:
+            - ''
+            - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
+              - str_replace:
+                  template: VALUE
+                  params:
+                    VALUE: {get_param: CinderLVMLoopDeviceSize}
+              - 'M'
+          args:
+            creates: /var/lib/cinder/cinder-volumes
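+        # Attach the sparse backing file to /dev/loop2 and create the LVM physical
+        # volume and 'cinder-volumes' volume group on it if they do not already exist.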
+        - name: cinder create LVM volume group
+          shell: |
+            if ! losetup /dev/loop2; then
+              losetup /dev/loop2 /var/lib/cinder/cinder-volumes
+            fi
+            if ! pvdisplay | grep cinder-volumes; then
+              pvcreate /dev/loop2
+            fi
+            if ! vgdisplay | grep cinder-volumes; then
+              vgcreate cinder-volumes /dev/loop2
+            fi
+          args:
+            executable: /bin/bash
+            creates: /dev/loop2
+      upgrade_tasks:
+        - name: Stop and disable cinder_volume service
+          tags: step2
+          service: name=openstack-cinder-volume state=stopped enabled=no
diff --git a/docker/services/pacemaker/clustercheck.yaml b/docker/services/pacemaker/clustercheck.yaml
new file mode 100644 (file)
index 0000000..9fd9402
--- /dev/null
@@ -0,0 +1,110 @@
+heat_template_version: pike
+
+description: >
+  MySQL HA clustercheck service deployment using puppet.
+  This service is used by HAProxy in an HA scenario to report whether
+  the local Galera node is synced.
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerClustercheckImage:
+    description: image
+    default: 'centos-binary-mariadb:latest'
+    type: string
+  DockerClustercheckConfigImage:
+    description: The container image to use for the clustercheck config_volume
+    default: 'centos-binary-mariadb:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ../containers-common.yaml
+
+  MysqlPuppetBase:
+    type: ../../../puppet/services/pacemaker/database/mysql.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Containerized service clustercheck using composable services.
+    value:
+      service_name: clustercheck
+      config_settings: {get_attr: [MysqlPuppetBase, role_data, config_settings]}
+      step_config: "include ::tripleo::profile::pacemaker::clustercheck"
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: clustercheck
+        puppet_tags: file # set this even though file is the default
+        step_config: "include ::tripleo::profile::pacemaker::clustercheck"
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/clustercheck.json:
+          command: /usr/sbin/xinetd -dontfork
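+          # kolla_start copies each source file below from the bind-mounted config
+          # volume to its destination with the given owner and permissions.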
+          config_files:
+          - dest: /etc/xinetd.conf
+            source: /var/lib/kolla/config_files/src/etc/xinetd.conf
+            owner: mysql
+            perm: '0644'
+          - dest: /etc/xinetd.d/galera-monitor
+            source: /var/lib/kolla/config_files/src/etc/xinetd.d/galera-monitor
+            owner: mysql
+            perm: '0644'
+          - dest: /etc/sysconfig/clustercheck
+            source: /var/lib/kolla/config_files/src/etc/sysconfig/clustercheck
+            owner: mysql
+            perm: '0600'
+      docker_config:
+        step_2:
+          clustercheck:
+            start_order: 1
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckImage} ]
+            restart: always
+            net: host
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/clustercheck.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/config-data/clustercheck/:/var/lib/kolla/config_files/src:ro
+                  - /var/lib/mysql:/var/lib/mysql
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+      upgrade_tasks:
diff --git a/docker/services/pacemaker/database/mysql.yaml b/docker/services/pacemaker/database/mysql.yaml
new file mode 100644 (file)
index 0000000..fb1400f
--- /dev/null
@@ -0,0 +1,187 @@
+heat_template_version: pike
+
+description: >
+  MySQL service deployment with pacemaker bundle
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerMysqlImage:
+    description: image
+    default: 'centos-binary-mariadb:latest'
+    type: string
+  DockerMysqlConfigImage:
+    description: The container image to use for the mysql config_volume
+    default: 'centos-binary-mariadb:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  MysqlRootPassword:
+    type: string
+    hidden: true
+    default: ''
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ../../containers-common.yaml
+
+  MysqlPuppetBase:
+    type: ../../../../puppet/services/pacemaker/database/mysql.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Containerized service MySQL using composable services.
+    value:
+      service_name: {get_attr: [MysqlPuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - {get_attr: [MysqlPuppetBase, role_data, config_settings]}
+          - tripleo::profile::pacemaker::database::mysql_bundle::mysql_docker_image: &mysql_image
+              list_join:
+                - '/'
+                - - {get_param: DockerNamespace}
+                  - {get_param: DockerMysqlImage}
+      step_config: ""
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: mysql
+        puppet_tags: file # set this even though file is the default
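+        # Stub out the MySQL puppet resource types and the wait-for-settle exec so
+        # that the config-generation run only writes out configuration files.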
+        step_config:
+          list_join:
+            - "\n"
+            - - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }"
+              - "exec {'wait-for-settle': command => '/bin/true' }"
+              - "include ::tripleo::profile::pacemaker::database::mysql_bundle"
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerMysqlConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/mysql.json:
+          command: /usr/sbin/pacemaker_remoted
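+          # An empty /etc/libqb/force-filesystem-sockets makes libqb use filesystem
+          # sockets instead of abstract sockets inside the container.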
+          config_files:
+          - dest: /etc/libqb/force-filesystem-sockets
+            source: /dev/null
+            owner: root
+            perm: '0644'
+          - dest: /etc/my.cnf
+            source: /var/lib/kolla/config_files/src/etc/my.cnf
+            owner: mysql
+            perm: '0644'
+          - dest: /etc/my.cnf.d/galera.cnf
+            source: /var/lib/kolla/config_files/src/etc/my.cnf.d/galera.cnf
+            owner: mysql
+            perm: '0644'
+          - dest: /etc/sysconfig/clustercheck
+            source: /var/lib/kolla/config_files/src/etc/sysconfig/clustercheck
+            owner: root
+            perm: '0600'
+      docker_config:
+        step_1:
+          mysql_data_ownership:
+            start_order: 0
+            detach: false
+            image: *mysql_image
+            net: host
+            user: root
+            # Kolla only performs a non-recursive chown, so chown recursively here
+            command: ['chown', '-R', 'mysql:', '/var/lib/mysql']
+            volumes:
+              - /var/lib/mysql:/var/lib/mysql
+          mysql_bootstrap:
+            start_order: 1
+            detach: false
+            image: *mysql_image
+            net: host
+            # Kolla bootstraps aren't idempotent, so explicitly check whether the bootstrap was already done
+            command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
+            volumes: &mysql_volumes
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro
+                  - /var/lib/mysql:/var/lib/mysql
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+              - KOLLA_BOOTSTRAP=True
+              # NOTE(mandre) skip wsrep cluster status check
+              - KOLLA_KUBERNETES=True
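+              # Build the DB_ROOT_PASSWORD=<value> entry; the yaql expression picks
+              # the first non-empty value of MysqlRootPassword or the generated
+              # default from DefaultPasswords.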
+              -
+                list_join:
+                  - '='
+                  - - 'DB_ROOT_PASSWORD'
+                    -
+                      yaql:
+                        expression: $.data.passwords.where($ != '').first()
+                        data:
+                          passwords:
+                            - {get_param: MysqlRootPassword}
+                            - {get_param: [DefaultPasswords, mysql_root_password]}
+        step_2:
+          mysql_init_bundle:
+            start_order: 1
+            detach: false
+            net: host
+            user: root
+            command:
+              - '/bin/bash'
+              - '-c'
+              - str_replace:
+                  template:
+                    list_join:
+                      - '; '
+                      - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
+                        - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+                  params:
+                    TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation,galera_ready,mysql_database,mysql_grant,mysql_user'
+                    CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::mysql_bundle'
+            image: *mysql_image
+            volumes:
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /etc/puppet:/tmp/puppet-etc:ro
+              - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+              - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+              - /dev/shm:/dev/shm:rw
+              - /var/lib/config-data/mysql/etc/my.cnf:/etc/my.cnf:ro
+              - /var/lib/config-data/mysql/etc/my.cnf.d:/etc/my.cnf.d:ro
+              - /var/lib/mysql:/var/lib/mysql:rw
+      host_prep_tasks:
+        - name: create /var/lib/mysql
+          file:
+            path: /var/lib/mysql
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable mysql service
+          tags: step2
+          service: name=mariadb state=stopped enabled=no
diff --git a/docker/services/pacemaker/database/redis.yaml b/docker/services/pacemaker/database/redis.yaml
new file mode 100644 (file)
index 0000000..2ff15fe
--- /dev/null
@@ -0,0 +1,147 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Redis services
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerRedisImage:
+    description: image
+    default: 'centos-binary-redis:latest'
+    type: string
+  DockerRedisConfigImage:
+    description: The container image to use for the redis config_volume
+    default: 'centos-binary-redis:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  RedisBase:
+    type: ../../../../puppet/services/database/redis.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Redis role.
+    value:
+      service_name: {get_attr: [RedisBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - {get_attr: [RedisBase, role_data, config_settings]}
+          - redis::service_manage: false
+            redis::notify_service: false
+            redis::managed_by_cluster_manager: true
+            tripleo::profile::pacemaker::database::redis_bundle::redis_docker_image: &redis_image
+              list_join:
+                - '/'
+                - - {get_param: DockerNamespace}
+                  - {get_param: DockerRedisImage}
+
+      step_config: ""
+      service_config_settings: {get_attr: [RedisBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: 'redis'
+        # NOTE: we need the exec tag to copy /etc/redis.conf.puppet to
+        # /etc/redis.conf
+        # https://github.com/arioch/puppet-redis/commit/1c004143223e660cbd433422ff8194508aab9763
+        puppet_tags: 'exec'
+        step_config:
+          get_attr: [RedisBase, role_data, step_config]
+        config_image: &redis_config_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerRedisConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/redis.json:
+          command: /usr/sbin/pacemaker_remoted
+          config_files:
+            - dest: /etc/libqb/force-filesystem-sockets
+              source: /dev/null
+              owner: root
+              perm: '0644'
+          permissions:
+            - path: /var/run/redis
+              owner: redis:redis
+              recurse: true
+            - path: /var/lib/redis
+              owner: redis:redis
+              recurse: true
+            - path: /var/log/redis
+              owner: redis:redis
+              recurse: true
+      docker_config:
+        step_2:
+          redis_init_bundle:
+            start_order: 2
+            detach: false
+            net: host
+            user: root
+            config_volume: 'redis_init_bundle'
+            command:
+              - '/bin/bash'
+              - '-c'
+              - str_replace:
+                  template:
+                    list_join:
+                      - '; '
+                      - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
+                        - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+                  params:
+                    TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+                    CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::redis_bundle'
+            image: *redis_config_image
+            volumes:
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /etc/puppet:/tmp/puppet-etc:ro
+              - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+              - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+              - /dev/shm:/dev/shm:rw
+      host_prep_tasks:
+        - name: create /var/run/redis
+          file:
+            path: /var/run/redis
+            state: directory
+        - name: create /var/log/redis
+          file:
+            path: /var/log/redis
+            state: directory
+        - name: create /var/lib/redis
+          file:
+            path: /var/lib/redis
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable redis service
+          tags: step2
+          service: name=redis state=stopped enabled=no
diff --git a/docker/services/pacemaker/haproxy.yaml b/docker/services/pacemaker/haproxy.yaml
new file mode 100644 (file)
index 0000000..704ffab
--- /dev/null
@@ -0,0 +1,126 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized HAProxy service for pacemaker
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerHAProxyImage:
+    description: image
+    default: 'centos-binary-haproxy:latest'
+    type: string
+  DockerHAProxyConfigImage:
+    description: The container image to use for the haproxy config_volume
+    default: 'centos-binary-haproxy:latest'
+    type: string
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  HAProxyBase:
+    type: ../../../puppet/services/pacemaker/haproxy.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the HAProxy role.
+    value:
+      service_name: {get_attr: [HAProxyBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [HAProxyBase, role_data, config_settings]
+          - tripleo::haproxy::haproxy_daemon: false
+            haproxy_docker: true
+            tripleo::profile::pacemaker::haproxy_bundle::haproxy_docker_image: &haproxy_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
+      step_config: ""
+      service_config_settings: {get_attr: [HAProxyBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: haproxy
+        puppet_tags: haproxy_config
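+        # The config-generation run only needs to produce the haproxy configuration,
+        # so the firewall and pacemaker resource types are stubbed out as no-ops below.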
+        step_config:
+          list_join:
+            - "\n"
+            - - "exec {'wait-for-settle': command => '/bin/true' }"
+              - "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
+              - "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
+              - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/haproxy.json:
+          command: haproxy -f /etc/haproxy/haproxy.cfg
+      docker_config:
+        step_2:
+          haproxy_init_bundle:
+            start_order: 3
+            detach: false
+            net: host
+            user: root
+            privileged: true
+            command:
+              - '/bin/bash'
+              - '-c'
+              - str_replace:
+                  template:
+                    list_join:
+                      - '; '
+                      - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
+                        - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+                  params:
+                    TAGS: 'tripleo::firewall::rule,pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+                    CONFIG:
+                      list_join:
+                        - ';'
+                        - - 'include ::tripleo::profile::base::pacemaker'
+                          - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
+            image: *haproxy_image
+            volumes:
+              # puppet saves iptables rules in /etc/sysconfig
+              - /etc/sysconfig:/etc/sysconfig:rw
+              # saving rules requires access to /usr/libexec/iptables/iptables.init, so just bind-mount
+              # the necessary bits and prevent systemd from trying to reload the service in the container
+              - /usr/libexec/iptables:/usr/libexec/iptables:ro
+              - /usr/libexec/initscripts/legacy-actions:/usr/libexec/initscripts/legacy-actions:ro
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /etc/puppet:/tmp/puppet-etc:ro
+              - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+              - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+              - /dev/shm:/dev/shm:rw
+      metadata_settings:
+        get_attr: [HAProxyBase, role_data, metadata_settings]
diff --git a/docker/services/pacemaker/rabbitmq.yaml b/docker/services/pacemaker/rabbitmq.yaml
new file mode 100644 (file)
index 0000000..ab1a612
--- /dev/null
@@ -0,0 +1,166 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized RabbitMQ service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerRabbitmqImage:
+    description: image
+    default: 'centos-binary-rabbitmq:latest'
+    type: string
+  DockerRabbitmqConfigImage:
+    description: The container image to use for the rabbitmq config_volume
+    default: 'centos-binary-rabbitmq:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RabbitCookie:
+    type: string
+    default: ''
+    hidden: true
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  RabbitmqBase:
+    type: ../../../puppet/services/rabbitmq.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the RabbitMQ role.
+    value:
+      service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - {get_attr: [RabbitmqBase, role_data, config_settings]}
+          - rabbitmq::service_manage: false
+            tripleo::profile::pacemaker::rabbitmq_bundle::rabbitmq_docker_image: &rabbitmq_image
+              list_join:
+                - '/'
+                - - {get_param: DockerNamespace}
+                  - {get_param: DockerRabbitmqImage}
+      step_config: &step_config
+        get_attr: [RabbitmqBase, role_data, step_config]
+      service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: rabbitmq
+        puppet_tags: file
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/rabbitmq.json:
+          command: /usr/sbin/pacemaker_remoted
+          config_files:
+          - dest: /etc/libqb/force-filesystem-sockets
+            source: /dev/null
+            owner: root
+            perm: '0644'
+          permissions:
+           - path: /var/lib/rabbitmq
+             owner: rabbitmq:rabbitmq
+             recurse: true
+           - path: /var/log/rabbitmq
+             owner: rabbitmq:rabbitmq
+             recurse: true
+      # When using pacemaker we don't launch the container here; instead, pacemaker
+      # itself launches it.
+      docker_config:
+        step_1:
+          rabbitmq_bootstrap:
+            start_order: 0
+            image: *rabbitmq_image
+            net: host
+            privileged: false
+            volumes:
+              - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+              - /var/lib/config-data/rabbitmq/etc/rabbitmq:/etc/rabbitmq:ro
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /var/lib/rabbitmq:/var/lib/rabbitmq
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+              - KOLLA_BOOTSTRAP=True
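+              # Build the RABBITMQ_CLUSTER_COOKIE=<value> entry; the yaql expression
+              # picks the first non-empty value of RabbitCookie or the generated
+              # default from DefaultPasswords.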
+              -
+                list_join:
+                  - '='
+                  - - 'RABBITMQ_CLUSTER_COOKIE'
+                    -
+                      yaql:
+                        expression: $.data.passwords.where($ != '').first()
+                        data:
+                          passwords:
+                            - {get_param: RabbitCookie}
+                            - {get_param: [DefaultPasswords, rabbit_cookie]}
+        step_2:
+          rabbitmq_init_bundle:
+            start_order: 0
+            detach: false
+            net: host
+            user: root
+            command:
+              - '/bin/bash'
+              - '-c'
+              - str_replace:
+                  template:
+                    list_join:
+                      - '; '
+                      - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
+                        - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+                  params:
+                    TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+                    CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::rabbitmq_bundle'
+            image: *rabbitmq_image
+            volumes:
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /etc/puppet:/tmp/puppet-etc:ro
+              - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+              - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+              - /dev/shm:/dev/shm:rw
+      host_prep_tasks:
+        - name: create /var/lib/rabbitmq
+          file:
+            path: /var/lib/rabbitmq
+            state: directory
+        - name: stop the Erlang port mapper on the host and make sure it cannot bind to the port used by the container
+          shell: |
+            echo 'export ERL_EPMD_ADDRESS=127.0.0.1' > /etc/rabbitmq/rabbitmq-env.conf
+            echo 'export ERL_EPMD_PORT=4370' >> /etc/rabbitmq/rabbitmq-env.conf
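+            # Only kill an epmd that runs in the host's namespaces (same as PID 1),
+            # i.e. one started on the host rather than inside a container.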
+            for pid in $(pgrep epmd); do if [ "$(lsns -o NS -p $pid)" == "$(lsns -o NS -p 1)" ]; then kill $pid; break; fi; done
+      upgrade_tasks:
+        - name: Stop and disable rabbitmq service
+          tags: step2
+          service: name=rabbitmq-server state=stopped enabled=no
index b9e6e93..0f55606 100644 (file)
@@ -14,6 +14,10 @@ parameters:
     description: image
     default: 'centos-binary-panko-api:latest'
     type: string
+  DockerPankoConfigImage:
+    description: The container image to use for the panko config_volume
+    default: 'centos-binary-panko-api:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -75,29 +79,31 @@ outputs:
         config_volume: panko
         puppet_tags: panko_api_paste_ini,panko_config
         step_config: *step_config
-        config_image: &panko_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerPankoConfigImage} ]
       kolla_config:
-        /var/lib/kolla/config_files/panko-api.json:
+        /var/lib/kolla/config_files/panko_api.json:
           command: /usr/sbin/httpd -DFOREGROUND
           permissions:
             - path: /var/log/panko
               owner: panko:panko
               recurse: true
       docker_config:
-        step_3:
-          panko-init-log:
-            start_order: 0
-            image: *panko_image
+        step_2:
+          panko_init_log:
+            image: &panko_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
             user: root
             volumes:
               - /var/log/containers/panko:/var/log/panko
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
+        step_3:
           panko_db_sync:
-            start_order: 1
-            image: *panko_image
+            image: *panko_api_image
             net: host
             detach: false
             privileged: false
@@ -112,7 +118,7 @@ outputs:
         step_4:
           panko_api:
             start_order: 2
-            image: *panko_image
+            image: *panko_api_image
             net: host
             privileged: false
             restart: always
@@ -120,9 +126,11 @@ outputs:
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
                 -
-                  - /var/lib/kolla/config_files/panko-api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/kolla/config_files/panko_api.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/panko/etc/panko/:/etc/panko/:ro
-                  - /var/lib/config-data/panko/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/panko/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/panko/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/panko/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                   - /var/lib/config-data/panko/var/www/:/var/www/:ro
                   - /var/log/containers/panko:/var/log/panko
                   -
index e2f8228..f42f2ed 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-rabbitmq:latest'
     type: string
+  DockerRabbitmqConfigImage:
+    description: The container image to use for the rabbitmq config_volume
+    default: 'centos-binary-rabbitmq:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -73,10 +77,10 @@ outputs:
       puppet_config:
         config_volume: rabbitmq
         step_config: *step_config
-        config_image: &rabbitmq_image
+        config_image: &rabbitmq_config_image
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/rabbitmq.json:
           command: /usr/lib/rabbitmq/bin/rabbitmq-server
@@ -89,7 +93,11 @@ outputs:
         step_1:
           rabbitmq_init_logs:
             start_order: 0
-            image: *rabbitmq_image
+            detach: false
+            image: &rabbitmq_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
             privileged: false
             user: root
             volumes:
@@ -97,6 +105,7 @@ outputs:
             command: ['/bin/bash', '-c', 'chown -R rabbitmq:rabbitmq /var/log/rabbitmq']
           rabbitmq_bootstrap:
             start_order: 1
+            detach: false
             image: *rabbitmq_image
             net: host
             privileged: false
@@ -144,9 +153,9 @@ outputs:
           config_volume: 'rabbit_init_tasks'
           puppet_tags: 'rabbitmq_policy,rabbitmq_user'
           step_config: 'include ::tripleo::profile::base::rabbitmq'
-          config_image: *rabbitmq_image
+          config_image: *rabbitmq_config_image
           volumes:
-            - /var/lib/config-data/rabbitmq/etc/:/etc/
+            - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
             - /var/lib/rabbitmq:/var/lib/rabbitmq:ro
       host_prep_tasks:
         - name: create persistent directories
diff --git a/docker/services/sahara-api.yaml b/docker/services/sahara-api.yaml
new file mode 100644 (file)
index 0000000..55c42ab
--- /dev/null
@@ -0,0 +1,126 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Sahara API service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerSaharaApiImage:
+    description: image
+    default: 'centos-binary-sahara-api:latest'
+    type: string
+  DockerSaharaConfigImage:
+    description: The container image to use for the sahara config_volume
+    default: 'centos-binary-sahara-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  SaharaApiPuppetBase:
+    type: ../../puppet/services/sahara-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Sahara API role.
+    value:
+      service_name: {get_attr: [SaharaApiPuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [SaharaApiPuppetBase, role_data, config_settings]
+          - sahara::sync_db: false
+      step_config: &step_config
+        get_attr: [SaharaApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [SaharaApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: sahara
+        puppet_tags: sahara_api_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/sahara-api.json:
+          command: /usr/bin/sahara-api --config-file /etc/sahara/sahara.conf
+          permissions:
+            - path: /var/lib/sahara
+              owner: sahara:sahara
+              recurse: true
+            - path: /var/log/sahara
+              owner: sahara:sahara
+              recurse: true
+      docker_config:
+        step_3:
+          sahara_db_sync:
+            image: &sahara_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerSaharaApiImage} ]
+            net: host
+            privileged: false
+            detach: false
+            volumes: &sahara_volumes
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/sahara-api.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+                  - /lib/modules:/lib/modules:ro
+                  - /var/lib/sahara:/var/lib/sahara
+                  - /var/log/containers/sahara:/var/log/sahara
+            command: "/usr/bin/bootstrap_host_exec sahara_api su sahara -s /bin/bash -c 'sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head'"
+        step_4:
+          sahara_api:
+            image: *sahara_api_image
+            net: host
+            privileged: false
+            restart: always
+            volumes: *sahara_volumes
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create /var/lib/sahara
+          file:
+            path: /var/lib/sahara
+            state: directory
+        - name: create persistent sahara logs directory
+          file:
+            path: /var/log/containers/sahara
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable sahara_api service
+          tags: step2
+          service: name=openstack-sahara-api state=stopped enabled=no
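As in the other containerized services, the image reference is built by joining DockerNamespace and the per-service image parameter with a '/', so with the defaults above the sahara_api container resolves to tripleoupstream/centos-binary-sahara-api:latest. A minimal sketch of an environment file pointing the service at a local registry instead (the registry address is illustrative):

  parameter_defaults:
    DockerNamespace: 192.168.24.1:8787/tripleoupstream
    DockerNamespaceIsRegistry: true
    DockerSaharaApiImage: centos-binary-sahara-api:latest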
diff --git a/docker/services/sahara-engine.yaml b/docker/services/sahara-engine.yaml
new file mode 100644 (file)
index 0000000..99a51c9
--- /dev/null
@@ -0,0 +1,117 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Sahara service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerSaharaEngineImage:
+    description: image
+    default: 'centos-binary-sahara-engine:latest'
+    type: string
+  DockerSaharaConfigImage:
+    description: The container image to use for the sahara config_volume
+    default: 'centos-binary-sahara-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  SaharaEnginePuppetBase:
+    type: ../../puppet/services/sahara-engine.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Sahara Engine role.
+    value:
+      service_name: {get_attr: [SaharaEnginePuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [SaharaEnginePuppetBase, role_data, config_settings]
+          - sahara::sync_db: false
+      step_config: &step_config
+        get_attr: [SaharaEnginePuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [SaharaEnginePuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: sahara
+        puppet_tags: sahara_engine_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/sahara-engine.json:
+          command: /usr/bin/sahara-engine --config-file /etc/sahara/sahara.conf
+          permissions:
+            - path: /var/lib/sahara
+              owner: sahara:sahara
+              recurse: true
+            - path: /var/log/sahara
+              owner: sahara:sahara
+              recurse: true
+      docker_config:
+        step_4:
+          sahara_engine:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerSaharaEngineImage} ]
+            net: host
+            privileged: false
+            restart: always
+            volumes: &sahara_volumes
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/sahara-engine.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+                  - /var/lib/sahara:/var/lib/sahara
+                  - /var/log/containers/sahara:/var/log/sahara
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create /var/lib/sahara
+          file:
+            path: /var/lib/sahara
+            state: directory
+        - name: create persistent sahara logs directory
+          file:
+            path: /var/log/containers/sahara
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable sahara_engine service
+          tags: step2
+          service: name=openstack-sahara-engine state=stopped enabled=no
diff --git a/docker/services/sensu-client.yaml b/docker/services/sensu-client.yaml
new file mode 100644 (file)
index 0000000..42b0c57
--- /dev/null
@@ -0,0 +1,148 @@
+heat_template_version: pike
+
+description: >
+  Containerized Sensu client service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerSensuClientImage:
+    description: image
+    default: 'centos-binary-sensu-client:latest'
+    type: string
+  DockerSensuConfigImage:
+    description: The container image to use for the sensu config_volume
+    default: 'centos-binary-sensu-client:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  SensuDockerCheckCommand:
+    type: string
+    default: |
+      for i in $(docker ps --format '{{.ID}}'); do
+        if result=$(docker inspect --format='{{.State.Health.Status}}' $i 2>/dev/null); then
+          if [ "$result" != 'healthy' ]; then
+            echo "$(docker inspect --format='{{.Name}}' $i) ($i): $(docker inspect --format='{{json .State}}' $i)" && exit 2;
+          fi
+        fi
+      done
+  SensuDockerCheckInterval:
+    type: number
+    description: The frequency, in seconds, at which the docker health check is executed.
+    default: 10
+  SensuDockerCheckHandlers:
+    default: []
+    description: The Sensu event handler to use for events
+                 created by the docker health check.
+    type: comma_delimited_list
+  SensuDockerCheckOccurrences:
+    type: number
+    description: The number of event occurrences before the sensu-plugin-aware handler takes action.
+    default: 3
+  SensuDockerCheckRefresh:
+    type: number
+    description: The number of seconds sensu-plugin-aware handlers should wait before taking a second action.
+    default: 90
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  SensuClientBase:
+    type: ../../puppet/services/monitoring/sensu-client.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Sensu client role.
+    value:
+      service_name: {get_attr: [SensuClientBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [SensuClientBase, role_data, config_settings]
+          - sensu::checks:
+              check-docker-health:
+                standalone: true
+                command: {get_param: SensuDockerCheckCommand}
+                interval: {get_param: SensuDockerCheckInterval}
+                handlers: {get_param: SensuDockerCheckHandlers}
+                occurrences: {get_param: SensuDockerCheckOccurrences}
+                refresh: {get_param: SensuDockerCheckRefresh}
+      step_config: &step_config
+        get_attr: [SensuClientBase, role_data, step_config]
+      service_config_settings: {get_attr: [SensuClientBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: sensu
+        puppet_tags: sensu_rabbitmq_config,sensu_client_config,sensu_check_config,sensu_check
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSensuConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/sensu-client.json:
+          command: /usr/bin/sensu-client -d /etc/sensu/conf.d/ -l /var/log/sensu/sensu-client.log
+          permissions:
+            - path: /var/log/sensu
+              owner: sensu:sensu
+              recurse: true
+      docker_config:
+        step_3:
+          sensu_client:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerSensuClientImage} ]
+            net: host
+            privileged: true
+            # NOTE(mmagr) the kolla image changes the user to 'sensu'; we need it
+            # to be root and to have rw permission on docker.sock so the
+            # "docker inspect" command runs successfully
+            user: root
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/run/docker.sock:/var/run/docker.sock:rw
+                  - /var/lib/kolla/config_files/sensu-client.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/sensu/etc/sensu/:/etc/sensu/:ro
+                  - /var/log/containers/sensu:/var/log/sensu:rw
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/sensu
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable sensu-client service
+          tags: step2
+          service: name=sensu-client.service state=stopped enabled=no
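The SensuDockerCheckCommand default above iterates over every running container and exits with status 2 (critical) as soon as any container that reports a health status is not 'healthy'; the remaining parameters shape the standalone check built from it. A minimal sketch of tuning the check from an environment file (the handler name is illustrative and has to exist in the Sensu server configuration):

  parameter_defaults:
    SensuDockerCheckInterval: 30
    SensuDockerCheckOccurrences: 2
    SensuDockerCheckRefresh: 120
    SensuDockerCheckHandlers:
      - default        # hypothetical handler name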
diff --git a/docker/services/services.yaml b/docker/services/services.yaml
deleted file mode 100644 (file)
index 2ad3b63..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-heat_template_version: pike
-
-description: >
-  Utility stack to convert an array of services into a set of combined
-  role configs.
-
-parameters:
-  Services:
-    default: []
-    description: |
-        List nested stack service templates.
-    type: comma_delimited_list
-  ServiceNetMap:
-    default: {}
-    description: Mapping of service_name -> network name. Typically set
-                 via parameter_defaults in the resource registry.  This
-                 mapping overrides those in ServiceNetMapDefaults.
-    type: json
-  EndpointMap:
-    default: {}
-    description: Mapping of service endpoint -> protocol. Typically set
-                 via parameter_defaults in the resource registry.
-    type: json
-  DefaultPasswords:
-    default: {}
-    description: Mapping of service -> default password. Used to help
-                 pass top level passwords managed by Heat into services.
-    type: json
-  RoleName:
-    default: ''
-    description: Role name on which the service is applied
-    type: string
-  RoleParameters:
-    default: {}
-    description: Parameters specific to the role
-    type: json
-
-resources:
-
-  PuppetServices:
-    type: ../../puppet/services/services.yaml
-    properties:
-      Services: {get_param: Services}
-      ServiceNetMap: {get_param: ServiceNetMap}
-      EndpointMap: {get_param: EndpointMap}
-      DefaultPasswords: {get_param: DefaultPasswords}
-      RoleName: {get_param: RoleName}
-      RoleParameters: {get_param: RoleParameters}
-
-  ServiceChain:
-    type: OS::Heat::ResourceChain
-    properties:
-      resources: {get_param: Services}
-      concurrent: true
-      resource_properties:
-        ServiceNetMap: {get_param: ServiceNetMap}
-        EndpointMap: {get_param: EndpointMap}
-        DefaultPasswords: {get_param: DefaultPasswords}
-        RoleName: {get_param: RoleName}
-        RoleParameters: {get_param: RoleParameters}
-
-outputs:
-  role_data:
-    description: Combined Role data for this set of services.
-    value:
-      service_names:
-        {get_attr: [PuppetServices, role_data, service_names]}
-      monitoring_subscriptions:
-        {get_attr: [PuppetServices, role_data, monitoring_subscriptions]}
-      logging_sources:
-        {get_attr: [PuppetServices, role_data, logging_sources]}
-      logging_groups:
-        {get_attr: [PuppetServices, role_data, logging_groups]}
-      service_config_settings:
-        {get_attr: [PuppetServices, role_data, service_config_settings]}
-      config_settings:
-        {get_attr: [PuppetServices, role_data, config_settings]}
-      global_config_settings:
-        {get_attr: [PuppetServices, role_data, global_config_settings]}
-      step_config:
-        {get_attr: [ServiceChain, role_data, step_config]}
-      puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
-      kolla_config:
-        map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
-      docker_config:
-        {get_attr: [ServiceChain, role_data, docker_config]}
-      docker_puppet_tasks:
-        {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
-      host_prep_tasks:
-        yaql:
-          # Note we use distinct() here to filter any identical tasks
-          expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
-          data: {get_attr: [ServiceChain, role_data]}
-      upgrade_tasks:
-        yaql:
-          # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
-          expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
-          data: {get_attr: [ServiceChain, role_data]}
-      upgrade_batch_tasks:
-        yaql:
-          # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
-          expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
-          data: {get_attr: [ServiceChain, role_data]}
-      service_metadata_settings:
-        get_attr: [PuppetServices, role_data, service_metadata_settings]
index 04c4ba1..d7a7fe4 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-swift-proxy-server:latest'
     type: string
+  DockerSwiftConfigImage:
+    description: The container image to use for the swift config_volume
+    default: 'centos-binary-swift-proxy-server:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -70,10 +74,10 @@ outputs:
         config_volume: swift
         puppet_tags: swift_proxy_config
         step_config: *step_config
-        config_image: &swift_proxy_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/swift_proxy.json:
           command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
@@ -87,7 +91,10 @@ outputs:
         step_4:
           map_merge:
             - swift_proxy:
-                image: *swift_proxy_image
+                image: &swift_proxy_image
+                  list_join:
+                    - '/'
+                    - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
                 net: host
                 user: swift
                 restart: always
@@ -117,7 +124,9 @@ outputs:
                         - {get_attr: [ContainersCommon, volumes]}
                         -
                           - /var/lib/kolla/config_files/swift_proxy_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
-                          - /var/lib/config-data/swift/etc/httpd/:/etc/httpd/:ro
+                          - /var/lib/config-data/swift/etc/httpd/conf/:/etc/httpd/conf/:ro
+                          - /var/lib/config-data/swift/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                          - /var/lib/config-data/swift/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                           - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
                           - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
                     environment:
index bfd445d..00a772d 100644 (file)
@@ -8,8 +8,8 @@ parameters:
     description: namespace
     default: 'tripleoupstream'
     type: string
-  DockerSwiftProxyImage:
-    description: image
+  DockerSwiftConfigImage:
+    description: The container image to use for the swift config_volume
     default: 'centos-binary-swift-proxy-server:latest'
     type: string
   ServiceNetMap:
@@ -58,6 +58,14 @@ parameters:
     default: true
     description: 'Use a local directory for Swift storage services when building rings'
     type: boolean
+  SwiftRingGetTempurl:
+    default: ''
+    description: A temporary Swift URL to download rings from.
+    type: string
+  SwiftRingPutTempurl:
+    default: ''
+    description: A temporary Swift URL to upload rings to.
+    type: string
 
 resources:
 
@@ -75,18 +83,21 @@ outputs:
     description: Role data for Swift Ringbuilder configuration in containers.
     value:
       service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]}
-      config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+      config_settings:
+        map_merge:
+          - {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+          - tripleo::profile::base::swift::ringbuilder:skip_consistency_check: true
       step_config: &step_config
         get_attr: [SwiftRingbuilderBase, role_data, step_config]
       service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
       puppet_config:
         config_volume: 'swift'
-        puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
+        puppet_tags: exec,fetch_swift_ring_tarball,extract_swift_ring_tarball,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance,create_swift_ring_tarball,upload_swift_ring_tarball
         step_config: *step_config
         config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftConfigImage} ]
       kolla_config: {}
       docker_config: {}
index 017fb12..011e6d4 100644 (file)
@@ -24,6 +24,10 @@ parameters:
     description: image
     default: 'centos-binary-swift-object:latest'
     type: string
+  DockerSwiftConfigImage:
+    description: The container image to use for the swift config_volume
+    default: 'centos-binary-swift-proxy-server:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -46,6 +50,11 @@ parameters:
                  via parameter_defaults in the resource registry.  This
                  mapping overrides those in ServiceNetMapDefaults.
     type: json
+  SwiftRawDisks:
+    default: {}
+    description: 'A hash of additional raw devices to use as Swift backend (e.g. {sdb: {}})'
+    type: json
+
 
 resources:
 
@@ -66,7 +75,11 @@ outputs:
     description: Role data for the swift storage services.
     value:
       service_name: {get_attr: [SwiftStorageBase, role_data, service_name]}
-      config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]}
+      config_settings:
+        map_merge:
+          - {get_attr: [SwiftStorageBase, role_data, config_settings]}
+          # FIXME (cschwede): re-enable this once checks works inside containers
+          - swift::storage::all::mount_check: false
       step_config: &step_config
         get_attr: [SwiftStorageBase, role_data, step_config]
       service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]}
@@ -75,10 +88,10 @@ outputs:
         config_volume: swift
         puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
         step_config: *step_config
-        config_image: &swift_proxy_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/swift_account_auditor.json:
           command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf
@@ -277,7 +290,10 @@ outputs:
                   - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_object_expirer:
-            image: *swift_proxy_image
+            image: &swift_proxy_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
             net: host
             user: swift
             restart: always
@@ -348,6 +364,18 @@ outputs:
           with_items:
             - /var/log/containers/swift
             - /srv/node
+        - name: Format and mount devices defined in SwiftRawDisks
+          mount:
+            name: /srv/node/{{ item }}
+            src: /dev/{{ item }}
+            fstype: xfs
+            opts: noatime
+            state: mounted
+          with_items:
+            - repeat:
+                template: 'DEVICE'
+                for_each:
+                  DEVICE: {get_param: SwiftRawDisks}
       upgrade_tasks:
         - name: Stop and disable swift storage services
           tags: step2
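The new SwiftRawDisks parameter drives the "Format and mount devices" host_prep_task above: each key of the hash is treated as a /dev/<key> block device and mounted as xfs under /srv/node/<key> before the storage containers start. A minimal sketch (the device names are illustrative and must exist on the storage nodes):

  parameter_defaults:
    SwiftRawDisks:
      sdb: {}
      sdc: {}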
diff --git a/docker/services/tacker.yaml b/docker/services/tacker.yaml
new file mode 100644 (file)
index 0000000..84175c5
--- /dev/null
@@ -0,0 +1,133 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Tacker service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerTackerImage:
+    description: image
+    default: 'centos-binary-tacker:latest'
+    type: string
+  DockerTackerConfigImage:
+    description: The container image to use for the tacker config_volume
+    default: 'centos-binary-tacker:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  TackerBase:
+    type: ../../puppet/services/tacker.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Tacker role.
+    value:
+      service_name: {get_attr: [TackerBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [TackerBase, role_data, config_settings]
+      step_config: &step_config
+        get_attr: [TackerBase, role_data, step_config]
+      service_config_settings: {get_attr: [TackerBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: tacker
+        puppet_tags: tacker_config
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerTackerConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/tacker_api.json:
+          command: /usr/bin/tacker-server --config-file=/etc/tacker/tacker.conf --log-file=/var/log/tacker/api.log
+          permissions:
+            - path: /var/log/tacker
+              owner: tacker:tacker
+              recurse: true
+      docker_config:
+        # db sync runs before permissions are set by kolla_config
+        step_2:
+          tacker_init_logs:
+            image: &tacker_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerTackerImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/tacker:/var/log/tacker
+            command: ['/bin/bash', '-c', 'chown -R tacker:tacker /var/log/tacker']
+        step_3:
+          tacker_db_sync:
+            image: *tacker_image
+            net: host
+            privileged: false
+            detach: false
+            user: root
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/tacker/etc/:/etc/:ro
+                  - /var/log/containers/tacker:/var/log/tacker
+            command: "/usr/bin/bootstrap_host_exec tacker su tacker -s /bin/bash -c 'tacker-db-manage --config-file /etc/tacker/tacker.conf upgrade head'"
+        step_4:
+          tacker_api:
+            image: *tacker_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/tacker_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/tacker/etc/tacker/:/etc/tacker/:ro
+                  - /var/log/containers/tacker:/var/log/tacker
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/tacker
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable tacker-server service
+          tags: step2
+          service: name=openstack-tacker-server state=stopped enabled=no
index 594df69..17524e5 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-zaqar:latest'
     type: string
+  DockerZaqarConfigImage:
+    description: The container image to use for the zaqar config_volume
+    default: 'centos-binary-zaqar:latest'
+    type: string
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -63,10 +67,10 @@ outputs:
         config_volume: zaqar
         puppet_tags: zaqar_config
         step_config: *step_config
-        config_image: &zaqar_image
+        config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerZaqarConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/zaqar.json:
           command: /usr/sbin/httpd -DFOREGROUND
@@ -79,7 +83,10 @@ outputs:
       docker_config:
         step_4:
           zaqar:
-            image: *zaqar_image
+            image: &zaqar_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
             net: host
             privileged: false
             restart: always
@@ -93,7 +100,9 @@ outputs:
                   - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
                   - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
                   - /var/lib/config-data/zaqar/var/www/:/var/www/:ro
-                  - /var/lib/config-data/zaqar/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/zaqar/etc/httpd/conf/:/etc/httpd/conf/:ro
+                  - /var/lib/config-data/zaqar/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+                  - /var/lib/config-data/zaqar/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
                   - /var/log/containers/zaqar:/var/log/zaqar
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
index 99e517b..681a2fe 100644 (file)
@@ -19,3 +19,4 @@ parameter_defaults:
   CinderDellScSecondarySanLogin: 'Admin'
   CinderDellScSecondarySanPassword: ''
   CinderDellScSecondaryScApiPort: 3033
+  CinderDellScExcludedDomainIp: ''
index dfd1589..83ecbbe 100644 (file)
@@ -1,3 +1,7 @@
+# *************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/storage/cinder-netapp-config.yaml
+# instead.
+# *************************************************************************************
 # A Heat environment file which can be used to enable a
 # a Cinder NetApp backend, configured via puppet
 resource_registry:
index e37f251..3ca0469 100644 (file)
@@ -35,13 +35,11 @@ resource_registry:
   OS::TripleO::PostDeploySteps: ../docker/post.yaml
   OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
 
-  OS::TripleO::Services: ../docker/services/services.yaml
-
 parameter_defaults:
-  # Defaults to 'tripleoupstream'.  Specify a local docker registry
-  # Example: 192.168.24.1:8787/tripleoupstream
-  DockerNamespace: tripleoupstream
-  DockerNamespaceIsRegistry: false
+  # To specify a local docker registry, enable these
+  # where 192.168.24.1 is the host running docker-distribution
+  #DockerNamespace: 192.168.24.1:8787/tripleoupstream
+  #DockerNamespaceIsRegistry: true
 
   ComputeServices:
     - OS::TripleO::Services::CACerts
@@ -50,3 +48,4 @@ parameter_defaults:
     - OS::TripleO::Services::NovaLibvirt
     - OS::TripleO::Services::ComputeNeutronOvsAgent
     - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Sshd
index cbd5b68..03713e8 100644 (file)
@@ -2,10 +2,11 @@ resource_registry:
   # This can be used when you don't want to run puppet on the host,
   # e.g atomic, but it has been replaced with OS::TripleO::Services::Docker
   # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
-  OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
   # The compute node still needs extra initialization steps
   OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
 
+  OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+
   #NOTE (dprince) add roles to be docker enabled as we support them
   OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
   OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
@@ -18,7 +19,9 @@ resource_registry:
   OS::TripleO::Services::NovaApi: ../docker/services/nova-api.yaml
   OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
   OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
+  OS::TripleO::Services::NovaConsoleauth: ../docker/services/nova-consoleauth.yaml
   OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
+  OS::TripleO::Services::NovaVncProxy: ../docker/services/nova-vnc-proxy.yaml
   OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
   OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
   OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
@@ -27,6 +30,7 @@ resource_registry:
   OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
   OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
   OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
+  OS::TripleO::Services::MySQLClient: ../docker/services/database/mysql-client.yaml
   OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
   OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
   OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
@@ -43,14 +47,21 @@ resource_registry:
   OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
   OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
   OS::TripleO::Services::CeilometerAgentCentral: ../docker/services/ceilometer-agent-central.yaml
+  OS::TripleO::Services::CeilometerAgentIpmi: ../docker/services/ceilometer-agent-ipmi.yaml
   OS::TripleO::Services::CeilometerAgentCompute: ../docker/services/ceilometer-agent-compute.yaml
   OS::TripleO::Services::CeilometerAgentNotification: ../docker/services/ceilometer-agent-notification.yaml
+  OS::TripleO::Services::Horizon: ../docker/services/horizon.yaml
+  OS::TripleO::Services::Iscsid: ../docker/services/iscsid.yaml
+  OS::TripleO::Services::Multipathd: ../docker/services/multipathd.yaml
+  # FIXME: Had to remove these to unblock containers CI. They should be put back when fixed.
+  # OS::TripleO::Services::CinderApi: ../docker/services/cinder-api.yaml
+  # OS::TripleO::Services::CinderScheduler: ../docker/services/cinder-scheduler.yaml
+  # OS::TripleO::Services::CinderBackup: ../docker/services/cinder-backup.yaml
+  # OS::TripleO::Services::CinderVolume: ../docker/services/cinder-volume.yaml
 
   OS::TripleO::PostDeploySteps: ../docker/post.yaml
   OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
 
-  OS::TripleO::Services: ../docker/services/services.yaml
-
 parameter_defaults:
   # To specify a local docker registry, enable these
   # where 192.168.24.1 is the host running docker-distribution
@@ -63,3 +74,4 @@ parameter_defaults:
     - OS::TripleO::Services::ComputeNeutronOvsAgent
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::CeilometerAgentCompute
+    - OS::TripleO::Services::Sshd
index 39ded65..175e1fd 100644 (file)
@@ -1,7 +1,11 @@
+# ********************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/enable-tls.yaml instead.
+# ********************************************************************************
 # Use this environment to pass in certificates for SSL deployments.
 # For these values to take effect, one of the tls-endpoints-*.yaml environments
 # must also be used.
 parameter_defaults:
+  HorizonSecureCookies: True
   SSLCertificate: |
     The contents of your certificate go here
   SSLIntermediateCertificate: ''
diff --git a/environments/host-config-and-reboot.j2.yaml b/environments/host-config-and-reboot.j2.yaml
new file mode 100644 (file)
index 0000000..d5f69ec
--- /dev/null
@@ -0,0 +1,18 @@
+resource_registry:
+{% for role in roles %}
+  OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/host_config_and_reboot.yaml
+{% endfor %}
+
+#parameter_defaults:
+  # Note: There are no global parameters which can be applied to all roles as
+  # these configurations have to be specific to each role.
+
+  # Sample parameters for Compute and ComputeOvsDpdk roles
+  #ComputeParameters:
+    #KernelArgs: ""
+    #TunedProfileName: ""
+    #HostIsolatedCoreList: ""
+  #ComputeOvsDpdkParameters:
+    #KernelArgs: ""
+    #TunedProfileName: ""
+    #HostIsolatedCoreList: ""
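Because the PreNetworkConfig mapping is registered per role, the kernel and tuned settings are passed through each role's <RoleName>Parameters map rather than through global parameters, as the commented samples above indicate. A minimal sketch for a Compute role (all values are illustrative and need to match the actual hardware):

  parameter_defaults:
    ComputeParameters:
      KernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
      TunedProfileName: "cpu-partitioning"
      HostIsolatedCoreList: "2-19"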
diff --git a/environments/host-config-pre-network.j2.yaml b/environments/host-config-pre-network.j2.yaml
deleted file mode 100644 (file)
index c79e28b..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-resource_registry:
-# Create the registry only for roles with the word "Compute" in it. Like ComputeOvsDpdk, ComputeSriov, etc.,
-{%- for role in roles -%}
-{% if "Compute" in role.name %}
-  OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/{{role.name.lower()}}-host_config_and_reboot.yaml
-{%- endif -%}
-{% endfor %}
-
-#parameter_defaults:
-  # Sample parameters for Compute and ComputeOvsDpdk roles
-  #ComputeKernelArgs: ""
-  #ComputeTunedProfileName: ""
-  #ComputeHostCpuList: ""
-  #ComputeOvsDpdkKernelArgs: ""
-  #ComputeOvsDpdkTunedProfileName: ""
-  #ComputeOvsDpdkHostCpuList: ""
index 6fd7101..05a3a39 100644 (file)
@@ -19,6 +19,7 @@ parameter_defaults:
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::ComputeNeutronCorePlugin
     - OS::TripleO::Services::ComputeNeutronOvsAgent
+    - OS::TripleO::Services::NeutronLinuxbridgeAgent
     - OS::TripleO::Services::ComputeCeilometerAgent
     - OS::TripleO::Services::ComputeNeutronL3Agent
     - OS::TripleO::Services::ComputeNeutronMetadataAgent
@@ -35,3 +36,4 @@ parameter_defaults:
     - OS::TripleO::Services::NeutronVppAgent
     - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Iscsid
index b4908c1..95d2de9 100644 (file)
@@ -1,3 +1,7 @@
+# **************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/inject-trust-anchor-hiera.yaml
+# instead.
+# **************************************************************************************
 parameter_defaults:
   CAMap:
     first-ca-name:
index 3ecb0d2..1b0f706 100644 (file)
@@ -1,3 +1,7 @@
+# ********************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml
+# instead.
+# ********************************************************************************
 parameter_defaults:
   SSLRootCertificate: |
     The contents of your root CA certificate go here
diff --git a/environments/network-isolation.j2.yaml b/environments/network-isolation.j2.yaml
new file mode 100644 (file)
index 0000000..6a7318f
--- /dev/null
@@ -0,0 +1,37 @@
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+    {%- set _ = primary_role.pop() -%}
+    {%- set _ = primary_role.append(role) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# Enable the creation of Neutron networks for isolated Overcloud
+# traffic and configure each role to assign ports (related
+# to that role) on these networks.
+# primary role is: {{primary_role_name}}
+resource_registry:
+  # networks as defined in network_data.yaml
+  {%- for network in networks if network.enabled|default(true) %}
+  OS::TripleO::Network::{{network.name}}: ../network/{{network.name_lower|default(network.name.lower())}}.yaml
+  {%- endfor %}
+
+  # Port assignments for the VIPs
+  {%- for network in networks if network.vip %}
+  OS::TripleO::Network::Ports::{{network.name}}VipPort: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+  {%- endfor %}
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+
+  OS::TripleO::{{primary_role_name}}::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+{%- for role in roles %}
+  # Port assignments for the {{role.name}}
+  {%- for network in networks %}
+    {%- if network.name in role.networks|default([]) and network.enabled|default(true) %}
+  OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+    {%- else %}
+  OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/noop.yaml
+    {%- endif %}
+  {%- endfor %}
+{%- endfor %}
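When this template is rendered against the deployment's network_data.yaml and roles_data.yaml, the loops above expand into one port mapping per role and network: networks listed for a role get the real port template, everything else falls back to noop.yaml. A short sketch of the kind of output produced for a Compute role (the exact networks depend on network_data.yaml):

  resource_registry:
    OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
    OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
    OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml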
diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml
deleted file mode 100644 (file)
index a6b4b8a..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# Enable the creation of Neutron networks for isolated Overcloud
-# traffic and configure each role to assign ports (related
-# to that role) on these networks.
-resource_registry:
-  OS::TripleO::Network::External: ../network/external.yaml
-  OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
-  OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
-  OS::TripleO::Network::Storage: ../network/storage.yaml
-  OS::TripleO::Network::Tenant: ../network/tenant.yaml
-  # Management network is optional and disabled by default.
-  # To enable it, include environments/network-management.yaml
-  #OS::TripleO::Network::Management: ../network/management.yaml
-
-  # Port assignments for the VIPs
-  OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
-  OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
-  OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
-  OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
-
-  # Port assignments for the controller role
-  OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
-  OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
-  #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the compute role
-  OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml
-  #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the ceph storage role
-  OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
-  #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the swift storage role
-  OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
-  #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the block storage role
-  OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
-  #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
diff --git a/environments/networking/neutron-midonet.yaml b/environments/networking/neutron-midonet.yaml
new file mode 100644 (file)
index 0000000..ad8da8c
--- /dev/null
@@ -0,0 +1,66 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable the Neutron MidoNet Services
+# description: |
+#   A Heat environment that can be used to deploy MidoNet Services
+parameter_defaults:
+  # Native Transport Port
+  # Type: string
+  CassandraClientPort: 9042
+
+  # The port for the Thrift RPC service, which is used for client connections
+  # Type: string
+  CassandraClientPortThrift: 9160
+
+  # The SSL port for encrypted communication. Unused unless enabled in encryption_options
+  # Type: string
+  CassandraSslStoragePort: 7001
+
+  # The Cassandra port for inter-node communication
+  # Type: string
+  CassandraStoragePort: 7000
+
+  # Name of the tunnel zone used to tunnel packets
+  # Type: string
+  TunnelZoneName: tunnelzone_tripleo
+
+  # Type of the tunnels on the overlay. Choose between `gre` and `vxlan`
+  # Type: string
+  TunnelZoneType: vxlan
+
+  # ******************************************************
+  # Static parameters - these are values that must be
+  # included in the environment but should not be changed.
+  # ******************************************************
+  # Whether enable Cassandra cluster on Controller
+  # Type: boolean
+  EnableCassandraOnController: True
+
+  # Whether to enable the Zookeeper cluster on the Controller
+  # Type: boolean
+  EnableZookeeperOnController: True
+
+  # The core plugin for Neutron. The value should be the entrypoint to be loaded
+  # from neutron.core_plugins namespace.
+  # Type: string
+  NeutronCorePlugin: midonet.neutron.plugin_v1.MidonetPluginV2
+
+  # If True, DHCP provides a metadata route to the VM.
+  # Type: boolean
+  NeutronEnableIsolatedMetadata: True
+
+  # *********************
+  # End static parameters
+  # *********************
+resource_registry:
+  OS::TripleO::AllNodesExtraConfig: ../../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+  OS::TripleO::Controller::Net::SoftwareConfig: ../../net-config-linux-bridge.yaml
+  OS::TripleO::Services::ComputeNeutronCorePlugin: ../../puppet/services/neutron-compute-plugin-midonet.yaml
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet
+  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
diff --git a/environments/neutron-bgpvpn-opendaylight.yaml b/environments/neutron-bgpvpn-opendaylight.yaml
new file mode 100644 (file)
index 0000000..1d2e077
--- /dev/null
@@ -0,0 +1,12 @@
+# A Heat environment file that can be used to deploy Neutron BGPVPN service
+#
+#  This environment file deploys Neutron BGPVPN service and configures
+#  Opendaylight as its service provider.
+#
+# - OpenDaylight: BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default
+resource_registry:
+  OS::TripleO::Services::NeutronBgpVpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
+
+parameter_defaults:
+  NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+  BgpvpnServiceProvider: 'BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default'
diff --git a/environments/neutron-linuxbridge.yaml b/environments/neutron-linuxbridge.yaml
new file mode 100644 (file)
index 0000000..c8045cc
--- /dev/null
@@ -0,0 +1,8 @@
+## A Heat environment that can be used to deploy linuxbridge
+resource_registry:
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronLinuxbridgeAgent: ../puppet/services/neutron-linuxbridge-agent.yaml
+
+parameter_defaults:
+  NeutronMechanismDrivers: ['linuxbridge']
index c120d0b..64cea2a 100644 (file)
@@ -1,3 +1,7 @@
+# ******************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/networking/neutron-midonet
+# instead.
+# ******************************************************************************
 # A Heat environment that can be used to deploy MidoNet Services
 resource_registry:
   OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
diff --git a/environments/neutron-ml2-ovn-ha.yaml b/environments/neutron-ml2-ovn-ha.yaml
new file mode 100644 (file)
index 0000000..c592d57
--- /dev/null
@@ -0,0 +1,24 @@
+# A Heat environment file which can be used to enable OVN
+# extensions, configured via puppet
+resource_registry:
+  OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginML2OVN
+  OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-ovn.yaml
+  OS::TripleO::Services::OVNDBs: ../puppet/services/pacemaker/ovn-dbs.yaml
+# Disabling Neutron services that overlap with OVN
+  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+
+parameter_defaults:
+  NeutronMechanismDrivers: ovn
+  OVNVifType: ovs
+  OVNNeutronSyncMode: log
+  OVNQosDriver: ovn-qos
+  OVNTunnelEncapType: geneve
+  NeutronEnableDHCPAgent: false
+  NeutronTypeDrivers: 'geneve,vxlan,vlan,flat'
+  NeutronNetworkType: 'geneve'
+  NeutronServicePlugins: 'qos,ovn-router'
+  NeutronVniRanges: ['1:65536', ]
diff --git a/environments/neutron-opendaylight-dpdk.yaml b/environments/neutron-opendaylight-dpdk.yaml
new file mode 100644 (file)
index 0000000..9ee4eb7
--- /dev/null
@@ -0,0 +1,37 @@
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR and DPDK
+resource_registry:
+  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+  OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
+  OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
+  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+
+parameter_defaults:
+  NeutronEnableForceMetadata: true
+  NeutronMechanismDrivers: 'opendaylight_v2'
+  NeutronServicePlugins: 'odl-router_v2'
+  NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+  ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+  ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+  ## This can be done using ComputeKernelArgs as shown below.
+  ComputeParameters:
+    #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+    ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+    ## due to CPU contention of DPDK PMD threads.
+    OvsEnableDpdk: True
+    ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on compute overcloud nodes and set the following parameters:
+    #OvsDpdkSocketMemory: ""       # Sets the amount of hugepage memory to assign per NUMA node.
+                                   # It is recommended to use the socket closest to the PCIe slot used for the
+                                   # desired DPDK NIC.  Format should be comma separated per socket string such as:
+                                   # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+    #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+    #OvsPmdCoreList: ""            # List or range of CPU cores for PMD threads to be pinned to.  Note, NIC
+                                   # location to cores on socket, number of hyper-threaded logical cores, and
+                                   # desired number of PMD threads can all play a role in configuring this setting.
+                                   # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+                                   # If using hyperthreading then specify both logical cores that would equal the
+                                   # physical core.  Also, specifying more than one core will trigger multiple PMD
+                                   # threads to be spawned, which may improve dataplane performance.
+    #NovaVcpuPinSet: ""            # Cores to pin Nova instances to.  For maximum performance, select cores
+                                   # on the same NUMA node(s) selected for previous settings.
index 004b8ac..ecfd0fe 100644 (file)
@@ -1,18 +1,31 @@
-## A Heat environment that can be used to deploy DPDK with OVS
+# A Heat environment that can be used to deploy DPDK with OVS
+# Deploying DPDK requires enabling hugepages for the overcloud nodes
 resource_registry:
   OS::TripleO::Services::ComputeNeutronOvsAgent: ../puppet/services/neutron-ovs-dpdk-agent.yaml
 
 parameter_defaults:
-  ## NeutronDpdkCoreList and NeutronDpdkMemoryChannels are REQUIRED settings.
-  ## Attempting to deploy DPDK without appropriate values will cause deployment to fail or lead to unstable deployments.
-  #NeutronDpdkCoreList: ""
-  #NeutronDpdkMemoryChannels: ""
-
   NeutronDatapathType: "netdev"
-  NeutronVhostuserSocketDir: "/var/run/openvswitch"
-
-  #NeutronDpdkSocketMemory: ""
-  #NeutronDpdkDriverType: "vfio-pci"
-  #NovaReservedHostMemory: 4096
-  #NovaVcpuPinSet: ""
-
+  NeutronVhostuserSocketDir: "/var/lib/vhost_sockets"
+  NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+  ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+  ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+  ## This can be done using ComputeKernelArgs as shown below.
+  #ComputeParameters:
+    #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+    ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+    ## due to CPU contention of DPDK PMD threads.
+    ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on compute overcloud nodes and set the following parameters:
+    #OvsDpdkSocketMemory: ""       # Sets the amount of hugepage memory to assign per NUMA node.
+                                   # It is recommended to use the socket closest to the PCIe slot used for the
+                                   # desired DPDK NIC.  Format should be comma separated per socket string such as:
+                                   # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+    #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+    #OvsPmdCoreList: ""            # List or range of CPU cores for PMD threads to be pinned to.  Note, NIC
+                                   # location to cores on socket, number of hyper-threaded logical cores, and
+                                   # desired number of PMD threads can all play a role in configuring this setting.
+                                   # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+                                   # If using hyperthreading then specify both logical cores that would equal the
+                                   # physical core.  Also, specifying more than one core will trigger multiple PMD
+                                   # threads to be spawned, which may improve dataplane performance.
+    #NovaVcpuPinSet: ""            # Cores to pin Nova instances to.  For maximum performance, select cores
+                                   # on the same NUMA node(s) selected for previous settings.
diff --git a/environments/nonha-arch.yaml b/environments/nonha-arch.yaml
new file mode 100644 (file)
index 0000000..7fdcc10
--- /dev/null
@@ -0,0 +1,16 @@
+# An environment which creates an Overcloud without the use of pacemaker
+# (i.e. only with keepalived and systemd for all resources)
+resource_registry:
+  OS::TripleO::Tasks::ControllerPreConfig:  OS::Heat::None
+  OS::TripleO::Tasks::ControllerPostConfig:  OS::Heat::None
+  OS::TripleO::Tasks::ControllerPostPuppetRestart:  OS::Heat::None
+
+  OS::TripleO::Services::CinderVolume: ../puppet/services/cinder-volume.yaml
+  OS::TripleO::Services::RabbitMQ: ../puppet/services/rabbitmq.yaml
+  OS::TripleO::Services::HAproxy: ../puppet/services/haproxy.yaml
+  OS::TripleO::Services::Redis: ../puppet/services/database/redis.yaml
+  OS::TripleO::Services::MySQL: ../puppet/services/database/mysql.yaml
+  OS::TripleO::Services::Keepalived: OS::Heat::None
+  OS::TripleO::Services::Pacemaker: OS::Heat::None
+  OS::TripleO::Services::PacemakerRemote: OS::Heat::None
+
diff --git a/environments/overcloud-baremetal.j2.yaml b/environments/overcloud-baremetal.j2.yaml
new file mode 100644 (file)
index 0000000..668e28d
--- /dev/null
@@ -0,0 +1,19 @@
+resource_registry:
+  OS::TripleO::AllNodes::SoftwareConfig: OS::Heat::None
+  OS::TripleO::PostDeploySteps: OS::Heat::None
+  OS::TripleO::DefaultPasswords: OS::Heat::None
+  OS::TripleO::RandomString: OS::Heat::None
+  OS::TripleO::AllNodesDeployment: OS::Heat::None
+
+parameter_defaults:
+  # Deploy no services
+{% for role in roles %}
+  {{role.name}}Services: []
+{% endfor %}
+
+  # Consistent Hostname format
+  ControllerHostnameFormat: overcloud-controller-%index%
+  ComputeHostnameFormat: overcloud-novacompute-%index%
+  ObjectStorageHostnameFormat: overcloud-objectstorage-%index%
+  CephStorageHostnameFormat: overcloud-cephstorage-%index%
+  BlockStorageHostnameFormat: overcloud-blockstorage-%index%
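When this Jinja2 template is rendered, the loop above emits one empty service list per role; assuming the default role names used elsewhere in this file, the generated parameter_defaults would contain, for example:

    ControllerServices: []
    ComputeServices: []
    ObjectStorageServices: []
    CephStorageServices: []
    BlockStorageServices: []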
diff --git a/environments/overcloud-services.yaml b/environments/overcloud-services.yaml
new file mode 100644 (file)
index 0000000..c409b89
--- /dev/null
@@ -0,0 +1,7 @@
+parameter_defaults:
+  # Consistent Hostname format
+  ControllerDeployedServerHostnameFormat: overcloud-controller-%index%
+  ComputeDeployedServerHostnameFormat: overcloud-novacompute-%index%
+  ObjectStorageDeployedServerHostnameFormat: overcloud-objectstorage-%index%
+  CephStorageDeployedServerHostnameFormat: overcloud-cephstorage-%index%
+  BlockStorageDeployedServerHostnameFormat: overcloud-blockstorage-%index%
diff --git a/environments/predictable-placement/custom-hostnames.yaml b/environments/predictable-placement/custom-hostnames.yaml
new file mode 100644 (file)
index 0000000..0d9d520
--- /dev/null
@@ -0,0 +1,33 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Custom Hostnames
+# description: |
+#   Hostname format for each role
+#   Note %index% is translated into the index of the node, e.g 0/1/2 etc
+#   and %stackname% is replaced with OS::stack_name in the template below.
+#   If you want to use the heat generated names, pass '' (empty string).
+parameter_defaults:
+  # Format for BlockStorage node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+  # Type: string
+  BlockStorageHostnameFormat: '%stackname%-blockstorage-%index%'
+
+  # Format for CephStorage node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+  # Type: string
+  CephStorageHostnameFormat: '%stackname%-cephstorage-%index%'
+
+  # Format for Compute node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+  # Type: string
+  ComputeHostnameFormat: '%stackname%-novacompute-%index%'
+
+  # Format for Controller node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+  # Type: string
+  ControllerHostnameFormat: '%stackname%-controller-%index%'
+
+  # Format for ObjectStorage node hostnames Note %index% is translated into the index of the node, e.g 0/1/2 etc and %stackname% is replaced with the stack name e.g overcloud
+  # Type: string
+  ObjectStorageHostnameFormat: '%stackname%-objectstorage-%index%'
+
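To make the substitution concrete: with a stack name of overcloud (the example used in the comments above), the formats expand per node index, for instance:

    # %stackname% -> overcloud, %index% -> 0, 1, 2, ...
    overcloud-controller-0
    overcloud-novacompute-1
    overcloud-objectstorage-2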
index 8fc4bf2..6a69914 100644 (file)
@@ -20,5 +20,5 @@ parameter_defaults:
   GlanceBackend: rbd
   GnocchiBackend: rbd
   CinderEnableIscsiBackend: false
-  CephPoolDefaultSite: 1
+  CephPoolDefaultSize: 1
 
index 5f8b02a..2f577c2 100644 (file)
@@ -1,3 +1,7 @@
+# ******************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/storage/ceph-external.yaml
+# instead.
+# ******************************************************************************
 # A Heat environment file which can be used to enable the
 # use of an externally managed Ceph cluster.
 resource_registry:
diff --git a/environments/services-docker/collectd.yaml b/environments/services-docker/collectd.yaml
new file mode 100644 (file)
index 0000000..1623a88
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::Collectd: ../../docker/services/collectd.yaml
diff --git a/environments/services-docker/congress.yaml b/environments/services-docker/congress.yaml
new file mode 100644 (file)
index 0000000..5d4c730
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::Congress: ../../docker/services/congress-api.yaml
diff --git a/environments/services-docker/ec2-api.yaml b/environments/services-docker/ec2-api.yaml
new file mode 100644 (file)
index 0000000..24cbb03
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
diff --git a/environments/services-docker/manila.yaml b/environments/services-docker/manila.yaml
new file mode 100644 (file)
index 0000000..795309f
--- /dev/null
@@ -0,0 +1,3 @@
+resource_registry:
+  OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
+  OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
diff --git a/environments/services-docker/octavia.yaml b/environments/services-docker/octavia.yaml
new file mode 100644 (file)
index 0000000..b677a4f
--- /dev/null
@@ -0,0 +1,5 @@
+resource_registry:
+  OS::TripleO::Services::OctaviaApi: ../../docker/services/octavia-api.yaml
+  OS::TripleO::Services::OctaviaHousekeeping: ../../docker/services/octavia-housekeeping.yaml
+  OS::TripleO::Services::OctaviaHealthManager: ../../docker/services/octavia-health-manager.yaml
+  OS::TripleO::Services::OctaviaWorker: ../../docker/services/octavia-worker.yaml
diff --git a/environments/services-docker/sahara.yaml b/environments/services-docker/sahara.yaml
new file mode 100644 (file)
index 0000000..d0bf9fe
--- /dev/null
@@ -0,0 +1,3 @@
+resource_registry:
+  OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+  OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
diff --git a/environments/services-docker/sensu-client.yaml b/environments/services-docker/sensu-client.yaml
new file mode 100644 (file)
index 0000000..c03104d
--- /dev/null
@@ -0,0 +1,3 @@
+
+resource_registry:
+  OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
diff --git a/environments/services-docker/tacker.yaml b/environments/services-docker/tacker.yaml
new file mode 100644 (file)
index 0000000..cba8d6b
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::Tacker: ../../docker/services/tacker.yaml
index 07a61c2..ca55b4d 100644 (file)
@@ -1,3 +1,4 @@
 resource_registry:
   OS::TripleO::Services::UndercloudCeilometerAgentCentral: ../../docker/services/ceilometer-agent-central.yaml
   OS::TripleO::Services::UndercloudCeilometerAgentNotification: ../../docker/services/ceilometer-agent-notification.yaml
+  OS::TripleO::Services::UndercloudCeilometerAgentIpmi: ../../docker/services/ceilometer-agent-ipmi.yaml
index b131738..b81b026 100644 (file)
@@ -1,5 +1,6 @@
 resource_registry:
   OS::TripleO::Services::IronicApi: ../../puppet/services/ironic-api.yaml
   OS::TripleO::Services::IronicConductor: ../../puppet/services/ironic-conductor.yaml
-  OS::TripleO::Services::IronicPxe: ../../puppet/services/ironic-pxe.yaml
   OS::TripleO::Services::NovaIronic: ../../puppet/services/nova-ironic.yaml
+parameter_defaults:
+  NovaSchedulerDiscoverHostsInCellsInterval: 15
diff --git a/environments/ssl/enable-tls.yaml b/environments/ssl/enable-tls.yaml
new file mode 100644 (file)
index 0000000..c8ed2bd
--- /dev/null
@@ -0,0 +1,41 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable SSL on OpenStack Public Endpoints
+# description: |
+#   Use this environment to pass in certificates for SSL deployments.
+#   For these values to take effect, one of the tls-endpoints-*.yaml environments
+#   must also be used.
+parameter_defaults:
+  # The content of the SSL certificate (without Key) in PEM format.
+  # Mandatory. This parameter must be set by the user.
+  # Type: string
+  SSLCertificate: |
+    The contents of your certificate go here
+
+  # The content of an SSL intermediate CA certificate in PEM format.
+  # Type: string
+  SSLIntermediateCertificate: ''
+
+  # The content of the SSL Key in PEM format.
+  # Mandatory. This parameter must be set by the user.
+  # Type: string
+  SSLKey: |
+    The contents of the private key go here
+
+  # ******************************************************
+  # Static parameters - these are values that must be
+  # included in the environment but should not be changed.
+  # ******************************************************
+  # The filepath of the certificate as it will be stored in the controller.
+  # Type: string
+  DeployedSSLCertificatePath: /etc/pki/tls/private/overcloud_endpoint.pem
+
+  # *********************
+  # End static parameters
+  # *********************
+resource_registry:
+  OS::TripleO::NodeTLSData: ../../puppet/extraconfig/tls/tls-cert-inject.yaml
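The SSLCertificate and SSLKey values are taken verbatim, so a copy of this environment file simply carries the PEM blocks as multi-line YAML strings. A minimal sketch (the PEM bodies below are placeholders; for a test deployment a self-signed pair could be generated with openssl and pasted in):

    parameter_defaults:
      SSLCertificate: |
        -----BEGIN CERTIFICATE-----
        MIID...placeholder certificate body...
        -----END CERTIFICATE-----
      SSLKey: |
        -----BEGIN RSA PRIVATE KEY-----
        MIIE...placeholder key body...
        -----END RSA PRIVATE KEY-----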
diff --git a/environments/ssl/inject-trust-anchor-hiera.yaml b/environments/ssl/inject-trust-anchor-hiera.yaml
new file mode 100644 (file)
index 0000000..db3f267
--- /dev/null
@@ -0,0 +1,22 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Inject SSL Trust Anchor on Overcloud Nodes
+# description: |
+#   When using an SSL certificate signed by a CA that is not in the default
+#   list of CAs, this environment allows adding a custom CA certificate to
+#   the overcloud nodes.
+parameter_defaults:
+  # Map containing the CA certs and information needed for deploying them.
+  # Type: json
+  CAMap:
+    first-ca-name:
+      content: |
+        The content of the CA cert goes here
+    second-ca-name:
+      content: |
+        The content of the CA cert goes here
+
diff --git a/environments/ssl/inject-trust-anchor.yaml b/environments/ssl/inject-trust-anchor.yaml
new file mode 100644 (file)
index 0000000..521a419
--- /dev/null
@@ -0,0 +1,20 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Inject SSL Trust Anchor on Overcloud Nodes
+# description: |
+#   When using an SSL certificate signed by a CA that is not in the default
+#   list of CAs, this environment allows adding a custom CA certificate to
+#   the overcloud nodes.
+parameter_defaults:
+  # The content of a CA's SSL certificate file in PEM format. This is evaluated on the client side.
+  # Mandatory. This parameter must be set by the user.
+  # Type: string
+  SSLRootCertificate: |
+    The contents of your certificate go here
+
+resource_registry:
+  OS::TripleO::NodeTLSCAData: ../../puppet/extraconfig/tls/ca-inject.yaml
diff --git a/environments/ssl/tls-endpoints-public-dns.yaml b/environments/ssl/tls-endpoints-public-dns.yaml
new file mode 100644 (file)
index 0000000..216afec
--- /dev/null
@@ -0,0 +1,131 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy Public SSL Endpoints as DNS Names
+# description: |
+#   Use this environment when deploying an SSL-enabled overcloud where the public
+#   endpoint is a DNS name.
+parameter_defaults:
+  # Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.
+  # Type: json
+  EndpointMap:
+    AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+    AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+    AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+    BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+    BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+    BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+    CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+    CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+    CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+    CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+    CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+    CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+    CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+    CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+    CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+    CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+    ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+    ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+    ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+    ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+    ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+    ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+    ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+    ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+    ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+    ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+    ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+    ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+    ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+    ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+    Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+    Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+    Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+    GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+    GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+    GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+    GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+    GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+    GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+    HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+    HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+    HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+    HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+    HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+    HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+    HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+    IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+    IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+    IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+    IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+    IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+    IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+    KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+    KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+    KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+    ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+    ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+    ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+    MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+    MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+    MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+    MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+    NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+    NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+    NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+    NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+    NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+    NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+    NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+    NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+    NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+    NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+    NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+    NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+    OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+    OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+    OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+    PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+    PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+    PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+    SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+    SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+    SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+    SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+    TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+    TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+    TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+    ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+    ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+    ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+    ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+    ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+    ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+
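The CLOUDNAME placeholders in the map above are only useful if the overcloud is given a resolvable name; assuming the standard CloudName parameter is what feeds that substitution, a deployment would pair this file with something like the following (hypothetical FQDN):

    parameter_defaults:
      CloudName: overcloud.example.com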
diff --git a/environments/ssl/tls-endpoints-public-ip.yaml b/environments/ssl/tls-endpoints-public-ip.yaml
new file mode 100644 (file)
index 0000000..d216ab7
--- /dev/null
@@ -0,0 +1,131 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy Public SSL Endpoints as IP Addresses
+# description: |
+#   Use this environment when deploying an SSL-enabled overcloud where the public
+#   endpoint is an IP address.
+parameter_defaults:
+  # Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.
+  # Type: json
+  EndpointMap:
+    AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+    AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+    AodhPublic: {protocol: 'https', port: '13042', host: 'IP_ADDRESS'}
+    BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+    BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+    BarbicanPublic: {protocol: 'https', port: '13311', host: 'IP_ADDRESS'}
+    CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+    CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+    CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
+    CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    CephRgwPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+    CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+    CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+    CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
+    CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+    CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+    CongressPublic: {protocol: 'https', port: '13789', host: 'IP_ADDRESS'}
+    ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+    ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+    ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+    ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+    ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+    ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+    ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+    ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+    ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+    ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+    ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+    ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+    ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+    ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+    Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+    Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+    Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'}
+    GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+    GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+    GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'}
+    GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+    GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+    GnocchiPublic: {protocol: 'https', port: '13041', host: 'IP_ADDRESS'}
+    HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+    HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+    HeatPublic: {protocol: 'https', port: '13004', host: 'IP_ADDRESS'}
+    HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+    HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+    HeatCfnPublic: {protocol: 'https', port: '13005', host: 'IP_ADDRESS'}
+    HorizonPublic: {protocol: 'https', port: '443', host: 'IP_ADDRESS'}
+    IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+    IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+    IronicPublic: {protocol: 'https', port: '13385', host: 'IP_ADDRESS'}
+    IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+    IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+    IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'IP_ADDRESS'}
+    KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+    KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+    KeystonePublic: {protocol: 'https', port: '13000', host: 'IP_ADDRESS'}
+    ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+    ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+    ManilaPublic: {protocol: 'https', port: '13786', host: 'IP_ADDRESS'}
+    MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+    MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+    MistralPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+    MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+    NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+    NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+    NeutronPublic: {protocol: 'https', port: '13696', host: 'IP_ADDRESS'}
+    NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+    NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+    NovaPublic: {protocol: 'https', port: '13774', host: 'IP_ADDRESS'}
+    NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+    NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+    NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'IP_ADDRESS'}
+    NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+    NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+    NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'IP_ADDRESS'}
+    OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+    OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+    OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
+    PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+    PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+    PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+    SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+    SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+    SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
+    SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    SwiftPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+    TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+    TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+    TackerPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+    ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+    ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+    ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'}
+    ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+    ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+    ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'IP_ADDRESS'}
+
diff --git a/environments/ssl/tls-everywhere-endpoints-dns.yaml b/environments/ssl/tls-everywhere-endpoints-dns.yaml
new file mode 100644 (file)
index 0000000..63157dd
--- /dev/null
@@ -0,0 +1,131 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy All SSL Endpoints as DNS Names
+# description: |
+#   Use this environment when deploying an overcloud where all the endpoints are
+#   DNS names and there's TLS in all endpoint types.
+parameter_defaults:
+  # Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.
+  # Type: json
+  EndpointMap:
+    AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+    AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+    AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+    BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+    BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+    BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+    CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+    CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+    CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+    CephRgwAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+    CephRgwInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+    CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+    CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+    CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+    CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+    CongressAdmin: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+    CongressInternal: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+    CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+    ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+    ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+    ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+    host: 'IP_ADDRESS'}
+    ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+    ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+    ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+    ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+    ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+    ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+    ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+    ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+    ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+    ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+    ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+    ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+    ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+    Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+    Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+    Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+    GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+    GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+    GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+    GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+    GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+    GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+    HeatAdmin: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+    HeatInternal: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+    HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+    HeatCfnAdmin: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+    HeatCfnInternal: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+    HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+    HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+    IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+    IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+    IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+    IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+    IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+    IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+    KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
+    KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
+    KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+    ManilaAdmin: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+    ManilaInternal: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+    ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+    MistralAdmin: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+    MistralInternal: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+    MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+    MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'CLOUDNAME'}
+    NeutronAdmin: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+    NeutronInternal: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+    NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+    NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+    NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+    NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+    NovaPlacementAdmin: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+    NovaPlacementInternal: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+    NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+    NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+    NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+    NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+    OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+    OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+    OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+    PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+    PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+    PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+    SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+    SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+    SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+    SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+    SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+    SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+    TackerAdmin: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+    TackerInternal: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+    TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+    ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+    ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+    ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+    ZaqarWebSocketAdmin: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+    ZaqarWebSocketInternal: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+    ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+
diff --git a/environments/storage/cinder-netapp-config.yaml b/environments/storage/cinder-netapp-config.yaml
new file mode 100644 (file)
index 0000000..4cdba09
--- /dev/null
@@ -0,0 +1,119 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable the Cinder NetApp Backend
+# description: |
+#   A Heat environment file which can be used to enable a
+#   Cinder NetApp backend, configured via puppet
+parameter_defaults:
+  # 
+  # Type: string
+  CinderNetappBackendName: tripleo_netapp
+
+  # 
+  # Type: string
+  CinderNetappControllerIps: ''
+
+  # 
+  # Type: string
+  CinderNetappCopyOffloadToolPath: ''
+
+  # 
+  # Type: string
+  CinderNetappEseriesHostType: linux_dm_mp
+
+  # 
+  # Type: string
+  CinderNetappHostType: ''
+
+  # 
+  # Mandatory. This parameter must be set by the user.
+  # Type: string
+  CinderNetappLogin: <None>
+
+  # 
+  # Type: string
+  CinderNetappNfsMountOptions: ''
+
+  # 
+  # Type: string
+  CinderNetappNfsShares: ''
+
+  # 
+  # Type: string
+  CinderNetappNfsSharesConfig: /etc/cinder/shares.conf
+
+  # 
+  # Type: string
+  CinderNetappPartnerBackendName: ''
+
+  # 
+  # Mandatory. This parameter must be set by the user.
+  # Type: string
+  CinderNetappPassword: <None>
+
+  # 
+  # Type: string
+  CinderNetappSaPassword: ''
+
+  # 
+  # Mandatory. This parameter must be set by the user.
+  # Type: string
+  CinderNetappServerHostname: <None>
+
+  # 
+  # Type: string
+  CinderNetappServerPort: 80
+
+  # 
+  # Type: string
+  CinderNetappSizeMultiplier: 1.2
+
+  # 
+  # Type: string
+  CinderNetappStorageFamily: ontap_cluster
+
+  # 
+  # Type: string
+  CinderNetappStoragePools: ''
+
+  # 
+  # Type: string
+  CinderNetappStorageProtocol: nfs
+
+  # 
+  # Type: string
+  CinderNetappTransportType: http
+
+  # 
+  # Type: string
+  CinderNetappVfiler: ''
+
+  # 
+  # Type: string
+  CinderNetappVolumeList: ''
+
+  # 
+  # Type: string
+  CinderNetappVserver: ''
+
+  # 
+  # Type: string
+  CinderNetappWebservicePath: /devmgr/v2
+
+  # ******************************************************
+  # Static parameters - these are values that must be
+  # included in the environment but should not be changed.
+  # ******************************************************
+  # 
+  # Type: boolean
+  CinderEnableNetappBackend: True
+
+  # *********************
+  # End static parameters
+  # *********************
+resource_registry:
+  OS::TripleO::ControllerExtraConfigPre: ../../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
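Of the parameters above, only the three marked mandatory have to be overridden in a copy of this file; a minimal sketch with placeholder credentials:

    parameter_defaults:
      CinderNetappLogin: admin
      CinderNetappPassword: replace-with-real-password
      CinderNetappServerHostname: netapp.example.com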
diff --git a/environments/storage/cinder-nfs.yaml b/environments/storage/cinder-nfs.yaml
new file mode 100644 (file)
index 0000000..2de3e78
--- /dev/null
@@ -0,0 +1,27 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable Cinder NFS Backend
+# description: |
+#   Configure and include this environment to enable the use of an NFS
+#   share as the backend for Cinder.
+parameter_defaults:
+  # Whether to enable or not the Iscsi backend for Cinder
+  # Type: boolean
+  CinderEnableIscsiBackend: False
+
+  # Whether to enable or not the NFS backend for Cinder
+  # Type: boolean
+  CinderEnableNfsBackend: True
+
+  # Mount options for NFS mounts used by Cinder NFS backend. Effective when CinderEnableNfsBackend is true.
+  # Type: string
+  CinderNfsMountOptions: ''
+
+  # NFS servers used by Cinder NFS backend. Effective when CinderEnableNfsBackend is true.
+  # Type: comma_delimited_list
+  CinderNfsServers: 192.168.122.1:/export/cinder
+
diff --git a/environments/storage/enable-ceph.yaml b/environments/storage/enable-ceph.yaml
new file mode 100644 (file)
index 0000000..c629f74
--- /dev/null
@@ -0,0 +1,35 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable Ceph Storage Backend
+# description: |
+#   Include this environment to enable Ceph as the backend for
+#   Cinder, Nova, Gnocchi, and Glance.
+parameter_defaults:
+  # The short name of the Cinder Backup backend to use.
+  # Type: string
+  CinderBackupBackend: rbd
+
+  # Whether to enable or not the Iscsi backend for Cinder
+  # Type: boolean
+  CinderEnableIscsiBackend: False
+
+  # Whether to enable or not the Rbd backend for Cinder
+  # Type: boolean
+  CinderEnableRbdBackend: True
+
+  # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+  # Type: string
+  GlanceBackend: rbd
+
+  # The short name of the Gnocchi backend to use. Should be one of swift, rbd, or file
+  # Type: string
+  GnocchiBackend: rbd
+
+  # Whether to enable or not the Rbd backend for Nova
+  # Type: boolean
+  NovaEnableRbdBackend: True
+
diff --git a/environments/storage/external-ceph.yaml b/environments/storage/external-ceph.yaml
new file mode 100644 (file)
index 0000000..f1c9d51
--- /dev/null
@@ -0,0 +1,78 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy Using an External Ceph Cluster
+# description: |
+#   A Heat environment file which can be used to enable the
+#   use of an externally managed Ceph cluster.
+parameter_defaults:
+  # The Ceph admin client key. Can be created with ceph-authtool --gen-print-key.
+  # Type: string
+  CephAdminKey: ''
+
+  # The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+  # Mandatory. This parameter must be set by the user.
+  # Type: string
+  CephClientKey: <None>
+
+  # 
+  # Type: string
+  CephClientUserName: openstack
+
+  # The Ceph cluster FSID. Must be a UUID.
+  # Mandatory. This parameter must be set by the user.
+  # Type: string
+  CephClusterFSID: <None>
+
+  # List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
+  # Type: string
+  CephExternalMonHost: ''
+
+  # Whether to enable or not the Iscsi backend for Cinder
+  # Type: boolean
+  CinderEnableIscsiBackend: False
+
+  # Whether to enable or not the Rbd backend for Cinder
+  # Type: boolean
+  CinderEnableRbdBackend: True
+
+  # 
+  # Type: string
+  CinderRbdPoolName: volumes
+
+  # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+  # Type: string
+  GlanceBackend: rbd
+
+  # 
+  # Type: string
+  GlanceRbdPoolName: images
+
+  # The short name of the Gnocchi backend to use. Should be one of swift, rbd, or file
+  # Type: string
+  GnocchiBackend: rbd
+
+  # 
+  # Type: string
+  GnocchiRbdPoolName: metrics
+
+  # Whether to enable or not the Rbd backend for Nova
+  # Type: boolean
+  NovaEnableRbdBackend: True
+
+  # 
+  # Type: string
+  NovaRbdPoolName: vms
+
+  # The default features enabled when creating a block device image. Only applies to format 2 images. Set to '1' for Jewel clients using older Ceph servers.
+  # Type: string
+  RbdDefaultFeatures: ''
+
+resource_registry:
+  OS::TripleO::Services::CephClient: OS::Heat::None
+  OS::TripleO::Services::CephExternal: ../../puppet/services/ceph-external.yaml
+  OS::TripleO::Services::CephMon: OS::Heat::None
+  OS::TripleO::Services::CephOSD: OS::Heat::None
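As the comments above note, CephClientKey can be generated with ceph-authtool --gen-print-key, and CephClusterFSID must be a UUID (uuidgen will do). A sketch of a filled-in copy, with all values as placeholders:

    parameter_defaults:
      CephClusterFSID: 4b5c8c0a-ff60-454b-a1b4-9747aa737d19
      CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
      CephExternalMonHost: '172.16.1.7,172.16.1.8'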
diff --git a/environments/storage/glance-nfs.yaml b/environments/storage/glance-nfs.yaml
new file mode 100644 (file)
index 0000000..3c13930
--- /dev/null
@@ -0,0 +1,34 @@
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable Glance NFS Backend
+# description: |
+#   Configure and include this environment to enable the use of an NFS
+#   share as the backend for Glance.
+parameter_defaults:
+  # NFS mount options for image storage (when GlanceNfsEnabled is true)
+  # Type: string
+  GlanceNfsOptions: intr,context=system_u:object_r:glance_var_lib_t:s0
+
+  # NFS share to mount for image storage (when GlanceNfsEnabled is true)
+  # Type: string
+  GlanceNfsShare: ''
+
+  # ******************************************************
+  # Static parameters - these are values that must be
+  # included in the environment but should not be changed.
+  # ******************************************************
+  # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+  # Type: string
+  GlanceBackend: file
+
+  # When using GlanceBackend 'file', mount NFS share for image storage.
+  # Type: boolean
+  GlanceNfsEnabled: True
+
+  # *********************
+  # End static parameters
+  # *********************
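Since GlanceNfsShare defaults to an empty string, a usable copy of this file must point it at a real export; a minimal sketch, reusing the placeholder NFS server from the Cinder NFS example above:

    parameter_defaults:
      GlanceNfsShare: '192.168.122.1:/export/glance'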
index 4443221..83b3249 100644 (file)
@@ -1,3 +1,7 @@
+# *************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/tls-endpoints-public-dns.yaml
+# instead.
+# *************************************************************************************
 # Use this environment when deploying an SSL-enabled overcloud where the public
 # endpoint is a DNS name.
 parameter_defaults:
@@ -100,9 +104,9 @@ parameter_defaults:
     OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
     OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
     OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
-    PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
-    PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
-    PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+    PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+    PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+    PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'}
     SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
     SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
     SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
index 5ac2918..8e50297 100644 (file)
@@ -1,3 +1,7 @@
+# *************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/tls-endpoints-public-ip.yaml
+# instead.
+# *************************************************************************************
 # Use this environment when deploying an SSL-enabled overcloud where the public
 # endpoint is an IP address.
 parameter_defaults:
@@ -100,9 +104,9 @@ parameter_defaults:
     OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
     OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
     OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
-    PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
-    PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
-    PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+    PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+    PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+    PankoPublic: {protocol: 'https', port: '13977', host: 'IP_ADDRESS'}
     SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
     SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
     SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
index 865ed4c..84cabf1 100644 (file)
@@ -72,8 +72,8 @@ parameter_defaults:
     IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
     IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
     IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
-    IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
-    IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+    IronicInspectorAdmin: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
+    IronicInspectorInternal: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
     IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
     KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
     KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
@@ -100,9 +100,9 @@ parameter_defaults:
     OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
     OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
     OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
-    PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
-    PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
-    PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+    PankoAdmin: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
+    PankoInternal: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
+    PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'}
     SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
     SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
     SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
index 7a2716d..559d81d 100644 (file)
@@ -18,3 +18,5 @@ parameter_defaults:
   HeatConvergenceEngine: false
   HeatMaxResourcesPerStack: -1
   HeatMaxJsonBodySize: 2097152
+  IronicInspectorInterface: br-ctlplane
+  IronicInspectorIpRange: '192.168.24.100,192.168.24.200'
index 8bcae1d..3c508d1 100755 (executable)
@@ -45,57 +45,61 @@ if ! grep "$(cat /root/.ssh/id_rsa.pub)" /root/.ssh/authorized_keys; then
     cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
 fi
 
-PHYSICAL_NETWORK=ctlplane
-
-ctlplane_id=$(openstack network list -f csv -c ID -c Name --quote none | tail -n +2 | grep ctlplane | cut -d, -f1)
-subnet_ids=$(openstack subnet list -f csv -c ID --quote none | tail -n +2)
-subnet_id=
+if [ "$(hiera neutron_api_enabled)" = "true" ]; then
+    PHYSICAL_NETWORK=ctlplane
+
+    ctlplane_id=$(openstack network list -f csv -c ID -c Name --quote none | tail -n +2 | grep ctlplane | cut -d, -f1)
+    subnet_ids=$(openstack subnet list -f csv -c ID --quote none | tail -n +2)
+    subnet_id=
+
+    for subnet_id in $subnet_ids; do
+        network_id=$(openstack subnet show -f value -c network_id $subnet_id)
+        if [ "$network_id" = "$ctlplane_id" ]; then
+            break
+        fi
+    done
 
-for subnet_id in $subnet_ids; do
-    network_id=$(openstack subnet show -f value -c network_id $subnet_id)
-    if [ "$network_id" = "$ctlplane_id" ]; then
-        break
-    fi
-done
-
-net_create=1
-if [ -n "$subnet_id" ]; then
-    cidr=$(openstack subnet show $subnet_id -f value -c cidr)
-    if [ "$cidr" = "$undercloud_network_cidr" ]; then
-        net_create=0
-    else
-        echo "New cidr $undercloud_network_cidr does not equal old cidr $cidr"
-        echo "Will attempt to delete and recreate subnet $subnet_id"
+    net_create=1
+    if [ -n "$subnet_id" ]; then
+        cidr=$(openstack subnet show $subnet_id -f value -c cidr)
+        if [ "$cidr" = "$undercloud_network_cidr" ]; then
+            net_create=0
+        else
+            echo "New cidr $undercloud_network_cidr does not equal old cidr $cidr"
+            echo "Will attempt to delete and recreate subnet $subnet_id"
+        fi
     fi
-fi
 
-if [ "$net_create" -eq "1" ]; then
-    # Delete the subnet and network to make sure it doesn't already exist
-    if openstack subnet list | grep start; then
-        openstack subnet delete $(openstack subnet list | grep start | awk '{print $4}')
-    fi
-    if openstack network show ctlplane; then
-        openstack network delete ctlplane
+    if [ "$net_create" -eq "1" ]; then
+        # Delete the subnet and network to make sure it doesn't already exist
+        if openstack subnet list | grep start; then
+            openstack subnet delete $(openstack subnet list | grep start | awk '{print $4}')
+        fi
+        if openstack network show ctlplane; then
+            openstack network delete ctlplane
+        fi
+
+
+        NETWORK_ID=$(openstack network create --provider-network-type=flat --provider-physical-network=ctlplane ctlplane | grep " id " | awk '{print $4}')
+
+        NAMESERVER_ARG=""
+        if [ -n "${undercloud_nameserver:-}" ]; then
+            NAMESERVER_ARG="--dns-nameserver $undercloud_nameserver"
+        fi
+
+        openstack subnet create --network=$NETWORK_ID \
+            --gateway=$undercloud_network_gateway \
+            --subnet-range=$undercloud_network_cidr \
+            --allocation-pool start=$undercloud_dhcp_start,end=$undercloud_dhcp_end \
+            --host-route destination=169.254.169.254/32,gateway=$local_ip \
+            $NAMESERVER_ARG ctlplane
     fi
-
-
-    NETWORK_ID=$(openstack network create --provider-network-type=flat --provider-physical-network=ctlplane ctlplane | grep " id " | awk '{print $4}')
-
-    NAMESERVER_ARG=""
-    if [ -n "${undercloud_nameserver:-}" ]; then
-        NAMESERVER_ARG="--dns-nameserver $undercloud_nameserver"
-    fi
-
-    openstack subnet create --network=$NETWORK_ID \
-        --gateway=$undercloud_network_gateway \
-        --subnet-range=$undercloud_network_cidr \
-        --allocation-pool start=$undercloud_dhcp_start,end=$undercloud_dhcp_end \
-        --host-route destination=169.254.169.254/32,gateway=$local_ip \
-        $NAMESERVER_ARG ctlplane
 fi
 
-# Disable nova quotas
-openstack quota set --cores -1 --instances -1 --ram -1 $(openstack project show admin | awk '$2=="id" {print $4}')
+if [ "$(hiera nova_api_enabled)" = "true" ]; then
+    # Disable nova quotas
+    openstack quota set --cores -1 --instances -1 --ram -1 $(openstack project show admin | awk '$2=="id" {print $4}')
+fi
 
 # MISTRAL WORKFLOW CONFIGURATION
 if [ "$(hiera mistral_api_enabled)" = "true" ]; then
           lineinfile:
             dest: /etc/tuned/cpu-partitioning-variables.conf
             regexp: '^isolated_cores=.*'
-            line: 'isolated_cores={{ _HOST_CPUS_LIST_ }}'
-          when: _HOST_CPUS_LIST_|default("") != ""
+            line: 'isolated_cores={{ _TUNED_CORES_ }}'
+          when: _TUNED_CORES_|default("") != ""
 
-        - name: Tune-d provile activation
+        - name: Tune-d profile activation
           shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }}
       become: true
       when: _TUNED_PROFILE_NAME_|default("") != ""
@@ -52,7 +52,7 @@
         when:
           - item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') != "lo"
           # This condition will list all the interfaces except the one with valid IP (which is Provisioning network at this stage)
-          # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4']['address'] is undefined
-          - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4']['address'] is undefined
+          # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4'] is undefined
+          - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4'] is undefined
         with_items:
           - "{{ ifcfg_files.files }}"
index 658fea7..41d8f4f 100644 (file)
@@ -7,6 +7,9 @@ description: >
 parameters:
   server:
     type: string
+  # Deprecated parameters: these configurations are deprecated in favor of role-specific parameters.
+  # Use: extraconfig/pre_network/host_config_and_reboot.yaml.
+  # Deprecated in Pike and will be removed in Queens.
   {{role}}KernelArgs:
     type: string
     default: ""
@@ -17,6 +20,13 @@ parameters:
     type: string
     default: ""
 
+parameter_groups:
+  - label: deprecated
+    parameters:
+      - {{role}}KernelArgs
+      - {{role}}TunedProfileName
+      - {{role}}HostCpusList
+
 conditions:
   param_exists:
     or:
diff --git a/extraconfig/pre_network/host_config_and_reboot.yaml b/extraconfig/pre_network/host_config_and_reboot.yaml
new file mode 100644 (file)
index 0000000..009a087
--- /dev/null
@@ -0,0 +1,246 @@
+heat_template_version: pike
+
+description: >
+  All configurations which require a reboot should be initiated via PreNetworkConfig. After
+  this configuration is completed, the corresponding node will be rebooted.
+
+parameters:
+  server:
+    type: string
+  RoleParameters:
+    type: json
+    description: Role Specific parameters
+    default: {}
+  ServiceNames:
+    type: comma_delimited_list
+    default: []
+  IsolCpusList:
+    default: "0"
+    description: List of cores to be isolated by tuned
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]+"
+  OvsEnableDpdk:
+    default: false
+    description: Whether or not to enable DPDK in OVS
+    type: boolean
+  OvsDpdkCoreList:
+    description: >
+      List of cores to be used for DPDK lcore threads.  Note, these threads
+      are used by the OVS control path for validation and handling functions.
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ""
+  OvsDpdkMemoryChannels:
+    description: Number of memory channels per socket to be used for DPDK
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9]*"
+    default: ""
+  OvsDpdkSocketMemory:
+    default: ""
+    description: >
+      Sets the amount of hugepage memory to assign per NUMA node. It is
+      recommended to use the socket closest to the PCIe slot used for the
+      desired DPDK NIC.  The format should be in "<socket 0 mem>, <socket 1
+      mem>, <socket n mem>", where the value is specified in MB.  For example:
+      "1024,0".
+    type: string
+  OvsDpdkDriverType:
+    default: "vfio-pci"
+    description: >
+      DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+      this UIO/PMD driver.
+    type: string
+  OvsPmdCoreList:
+    description: >
+      A list or range of CPU cores for PMD threads to be pinned to.  Note,
+      NIC placement relative to the socket, the number of hyper-threaded
+      logical cores, and the desired number of PMD threads can all play a
+      role in configuring this setting.  These cores should be on the same
+      socket where OvsDpdkSocketMemory is assigned.  If using hyperthreading,
+      specify both logical cores that map to the same physical core. Also,
+      specifying more than one core will trigger multiple PMD threads to be
+      spawned, which may improve dataplane performance.
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    type: string
+    default: ""
+  # DEPRECATED: the following options are deprecated and are currently maintained
+  # for backwards compatibility. They will be removed in the Queens cycle.
+  HostCpusList:
+    description: List of cores to be used for host processes
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]+"
+    default: '0'
+  NeutronDpdkCoreList:
+    description: List of cores to be used for DPDK Poll Mode Driver
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ''
+  NeutronDpdkMemoryChannels:
+    description: Number of memory channels to be used for DPDK
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9]*"
+    default: ''
+  NeutronDpdkSocketMemory:
+    default: ''
+    description: Memory allocated for each socket
+    type: string
+  NeutronDpdkDriverType:
+    default: "vfio-pci"
+    description: DPDK Driver type
+    type: string
+
+conditions:
+  is_host_config_required: {not: {equals: [{get_param: [RoleParameters, KernelArgs]}, ""]}}
+  # YAQL is enabled in conditions with https://review.openstack.org/#/c/467506/
+  is_dpdk_config_required:
+    or:
+      - yaql:
+        expression: $.data.service_names.contains('neutron_ovs_dpdk_agent')
+        data:
+          service_names: {get_param: ServiceNames}
+      - {get_param: OvsEnableDpdk}
+      - {get_param: [RoleParameters, OvsEnableDpdk]}
+  is_reboot_config_required:
+    or:
+      - is_host_config_required
+      - is_dpdk_config_required
+  l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+  pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+  mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+  socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+  driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+  isol_cpus_empty: {equals: [{get_param: IsolCpusList}, '0']}
+
+resources:
+  RoleParametersValue:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+        map_replace:
+          - map_replace:
+            - IsolCpusList: IsolCpusList
+              OvsDpdkCoreList: OvsDpdkCoreList
+              OvsDpdkMemoryChannels: OvsDpdkMemoryChannels
+              OvsDpdkSocketMemory: OvsDpdkSocketMemory
+              OvsDpdkDriverType: OvsDpdkDriverType
+              OvsPmdCoreList: OvsPmdCoreList
+            - values: {get_param: [RoleParameters]}
+          - values:
+              IsolCpusList: {if: [isol_cpus_empty, {get_param: HostCpusList}, {get_param: IsolCpusList}]}
+              OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+              OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+              OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+              OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+              OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+  HostParametersConfig:
+    type: OS::Heat::SoftwareConfig
+    condition: is_host_config_required
+    properties:
+      group: ansible
+      inputs:
+        - name: _KERNEL_ARGS_
+        - name: _TUNED_PROFILE_NAME_
+        - name: _TUNED_CORES_
+      outputs:
+        - name: result
+      config:
+        get_file: ansible_host_config.yaml
+
+  HostParametersDeployment:
+    type: OS::Heat::SoftwareDeployment
+    condition: is_host_config_required
+    properties:
+      name: HostParametersDeployment
+      server:  {get_param: server}
+      config: {get_resource: HostParametersConfig}
+      actions: ['CREATE'] # Only do this on CREATE
+      input_values:
+        _KERNEL_ARGS_: {get_param: [RoleParameters, KernelArgs]}
+        _TUNED_PROFILE_NAME_: {get_param: [RoleParameters, TunedProfileName]}
+        _TUNED_CORES_: {get_param: [RoleParameters, IsolCpusList]}
+
+  EnableDpdkConfig:
+    type: OS::Heat::SoftwareConfig
+    condition: is_dpdk_config_required
+    properties:
+      group: script
+      config:
+        str_replace:
+          template: |
+            #!/bin/bash
+            set -x
+            # DO NOT use --detailed-exitcodes
+            puppet apply --logdest console \
+              --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+              -e '
+                class {"vswitch::dpdk":
+                  host_core_list  => "$HOST_CORES",
+                  pmd_core_list   => "$PMD_CORES",
+                  memory_channels => "$MEMORY_CHANNELS",
+                  socket_mem      => "$SOCKET_MEMORY",
+                }
+              '
+          params:
+            $HOST_CORES: {get_attr: [RoleParametersValue, value, OvsDpdkCoreList]}
+            $PMD_CORES: {get_attr: [RoleParametersValue, value, OvsPmdCoreList]}
+            $MEMORY_CHANNELS: {get_attr: [RoleParametersValue, value, OvsDpdkMemoryChannels]}
+            $SOCKET_MEMORY: {get_attr: [RoleParametersValue, value, OvsDpdkSocketMemory]}
+
+  EnableDpdkDeployment:
+    type: OS::Heat::SoftwareDeployment
+    condition: is_dpdk_config_required
+    properties:
+      name: EnableDpdkDeployment
+      server:  {get_param: server}
+      config: {get_resource: EnableDpdkConfig}
+      actions: ['CREATE'] # Only do this on CREATE
+
+  RebootConfig:
+    type: OS::Heat::SoftwareConfig
+    condition: is_reboot_config_required
+    properties:
+      group: script
+      config: |
+        #!/bin/bash
+        # Stop os-collect-config to avoid any race collecting another
+        # deployment before reboot happens
+        systemctl stop os-collect-config.service
+        /sbin/reboot
+
+  RebootDeployment:
+    type: OS::Heat::SoftwareDeployment
+    depends_on: HostParametersDeployment
+    condition: is_reboot_config_required
+    properties:
+      name: RebootDeployment
+      server:  {get_param: server}
+      config: {get_resource: RebootConfig}
+      actions: ['CREATE'] # Only do this on CREATE
+      signal_transport: NO_SIGNAL
+
+outputs:
+  result:
+    condition: is_host_config_required
+    value:
+      get_attr: [HostParametersDeployment, result]
+  stdout:
+    condition: is_host_config_required
+    value:
+      get_attr: [HostParametersDeployment, deploy_stdout]
+  stderr:
+    condition: is_host_config_required
+    value:
+      get_attr: [HostParametersDeployment, deploy_stderr]
+  status_code:
+    condition: is_host_config_required
+    value:
+      get_attr: [HostParametersDeployment, deploy_status_code]
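
As a sketch of how these parameters are normally supplied (the role name and all values below are illustrative assumptions, not part of this change): role-specific settings arrive through a ``<RoleName>Parameters`` map that is passed down as ``RoleParameters``, and the ``map_replace`` logic above falls back to the deprecated ``HostCpusList``/``NeutronDpdk*`` values only when the newer ``IsolCpusList``/``OvsDpdk*`` parameters are left at their defaults::

  parameter_defaults:
    # Hypothetical values for a role named ComputeOvsDpdk.
    ComputeOvsDpdkParameters:
      KernelArgs: "default_hugepagesz=1GB hugepagesz=1G hugepages=32 iommu=pt intel_iommu=on"
      TunedProfileName: "cpu-partitioning"
      IsolCpusList: "2-19"
      OvsEnableDpdk: true
      OvsDpdkCoreList: "0,1"
      OvsPmdCoreList: "2,3"
      OvsDpdkMemoryChannels: "4"
      OvsDpdkSocketMemory: "1024,1024"
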
index f17a073..d1dd5d1 100755 (executable)
@@ -11,7 +11,7 @@ function log_debug {
 }
 
 function is_bootstrap_node {
-  if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+  if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid | tr '[:upper:]' '[:lower:]')" = "$(facter hostname | tr '[:upper:]' '[:lower:]')" ]; then
     log_debug "Node is bootstrap"
     echo "true"
   fi
index 7fc258d..6bf5afb 100644 (file)
@@ -10,8 +10,8 @@ parameters:
 
 resources:
 
-{%- for role in roles -%}
-{% if "controller" in role.tags %}
+{%- for role in roles %}
+  {%- if 'controller' in role.tags %}
   {{role.name}}PostPuppetMaintenanceModeConfig:
     type: OS::Heat::SoftwareConfig
     properties:
@@ -37,6 +37,6 @@ resources:
     properties:
       servers: {get_param: [servers, {{role.name}}]}
       input_values: {get_param: input_values}
-{%- endif -%}
-{% endfor %}
+  {%- endif %}
+{%- endfor %}
 
index cb9cc5b..0c4a792 100755 (executable)
@@ -49,7 +49,7 @@ fi
 # of packages to update (the check for -z "$update_identifier" guarantees that this
 # is run only on overcloud stack update -i)
 if [[ "$pacemaker_status" == "active" && \
-        "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name)" == "$(facter hostname)" ]] ; then \
+        "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name | tr '[:upper:]' '[:lower:]')" == "$(facter hostname | tr '[:upper:]' '[:lower:]')" ]] ; then \
     # OCF scripts don't cope with -eu
     echo "Verifying if we need to fix up any IPv6 VIPs"
     set +eu
index f92f9a1..95b4745 100644 (file)
@@ -4,19 +4,14 @@ description: >
 parameters:
   BondInterfaceOvsOptions:
     default: ''
-    description: 'The ovs_options string for the bond interface. Set things like
-
-      lacp=active and/or bond_mode=balance-slb using this option.
-
-      '
+    description: The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
-      description: 'The balance-tcp bond mode is known to cause packet loss and
-
+      description: The balance-tcp bond mode is known to cause packet loss and
         should not be used in BondInterfaceOvsOptions.
-
-        '
   ControlPlaneIp:
     default: ''
     description: IP address/subnet on the ctlplane network
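
For illustration, the option string is passed through unchanged to whichever bond type the network config creates, so it is set once in ``parameter_defaults``; the values below are examples only, not defaults introduced by this change::

  parameter_defaults:
    # OVS bond (illustrative):
    BondInterfaceOvsOptions: "bond_mode=balance-slb lacp=active"
    # Linux bond alternative (illustrative):
    # BondInterfaceOvsOptions: "mode=4 miimon=100"
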
index 97177c4..9683456 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 5456c2c..3ad6d65 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 607d346..095c497 100644 (file)
@@ -32,8 +32,9 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
   ExternalNetworkVlanID:
     default: 10
index 448d4e2..8fff137 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 8ac5cda..4901f94 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 2579648..33c6fa6 100644 (file)
@@ -34,16 +34,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: bond_mode=active-backup
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index e4b3012..100821b 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: bond_mode=active-backup
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: 'The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.'
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
       description: 'The balance-tcp bond mode is known to cause packet loss and
-
-        should not be used in BondInterfaceOvsOptions.
-
-        '
+        should not be used in BondInterfaceOvsOptions.'
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index 6371ceb..0ede081 100644 (file)
@@ -32,16 +32,14 @@ parameters:
     type: string
   BondInterfaceOvsOptions:
     default: ''
-    description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
-      this option.
+    description: The ovs_options or bonding_options string for the bond
+      interface. Set things like lacp=active and/or bond_mode=balance-slb
+      for OVS bonds or like mode=4 for Linux bonds using this option.
     type: string
     constraints:
     - allowed_pattern: ^((?!balance.tcp).)*$
-      description: 'The balance-tcp bond mode is known to cause packet loss and
-
+      description: The balance-tcp bond mode is known to cause packet loss and
         should not be used in BondInterfaceOvsOptions.
-
-        '
   ExternalNetworkVlanID:
     default: 10
     description: Vlan ID for the external network traffic.
index f5f2b97..ece4008 100644 (file)
@@ -134,7 +134,7 @@ Panko:
         net_param: Public
     Admin:
         net_param: PankoApi
-    port: 8779
+    port: 8977
 
 Cinder:
     Internal:
index 4509bca..42d1fbd 100644 (file)
@@ -117,9 +117,9 @@ parameters:
       OctaviaAdmin: {protocol: http, port: '9876', host: IP_ADDRESS}
       OctaviaInternal: {protocol: http, port: '9876', host: IP_ADDRESS}
       OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
-      PankoAdmin: {protocol: http, port: '8779', host: IP_ADDRESS}
-      PankoInternal: {protocol: http, port: '8779', host: IP_ADDRESS}
-      PankoPublic: {protocol: http, port: '8779', host: IP_ADDRESS}
+      PankoAdmin: {protocol: http, port: '8977', host: IP_ADDRESS}
+      PankoInternal: {protocol: http, port: '8977', host: IP_ADDRESS}
+      PankoPublic: {protocol: http, port: '8977', host: IP_ADDRESS}
       SaharaAdmin: {protocol: http, port: '8386', host: IP_ADDRESS}
       SaharaInternal: {protocol: http, port: '8386', host: IP_ADDRESS}
       SaharaPublic: {protocol: http, port: '8386', host: IP_ADDRESS}
index 386520c..bb54ca6 100644 (file)
@@ -34,7 +34,7 @@ parameters:
 
 resources:
   VipPort:
-    type: OS::Neutron::Port
+    type: OS::TripleO::Network::Ports::ControlPlaneVipPort
     properties:
       network: {get_param: ControlPlaneNetwork}
       name: {get_param: PortName}
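
The new port type is an indirection point: a regular deployment can simply resolve it back to a Neutron port, while deployed-server environments are free to map it to a pre-provisioned or no-op port template. A minimal registry entry, assuming the default Neutron-backed behaviour, might look like::

  resource_registry:
    # Assumed default mapping; deployed-server environments may override it.
    OS::TripleO::Network::Ports::ControlPlaneVipPort: OS::Neutron::Port
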
index c3734af..a9111ed 100644 (file)
@@ -133,6 +133,20 @@ outputs:
                           SERVICE: {get_attr: [EnabledServicesValue, value]}
                   - values: {get_param: ServiceNetMap}
               - values: {get_attr: [NetIpMapValue, value]}
+  ctlplane_service_ips:
+    description: >
+      Map of enabled services to a list of their ctlplane IP addresses
+    value:
+      yaql:
+        expression: dict($.data.map.items().where(len($[1]) > 0))
+        data:
+          map:
+            map_merge:
+              repeat:
+                template:
+                  SERVICE_ctlplane_node_ips: {get_param: ControlPlaneIpList}
+                for_each:
+                  SERVICE: {get_attr: [EnabledServicesValue, value]}
   service_hostnames:
     description: >
       Map of enabled services to a list of hostnames where they're running
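
To make the yaql above concrete: the ``repeat``/``map_merge`` emits one ``<service>_ctlplane_node_ips`` key per enabled service, each pointing at this role's ControlPlaneIpList, and ``where(len($[1]) > 0)`` drops entries when the role has no nodes. The resulting value is a plain map of roughly this shape (service names and addresses are illustrative)::

  ctlplane_service_ips:
    nova_compute_ctlplane_node_ips: [192.168.24.12, 192.168.24.13]
    neutron_ovs_agent_ctlplane_node_ips: [192.168.24.12, 192.168.24.13]
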
index 8fe2d27..864da24 100755 (executable)
@@ -110,7 +110,7 @@ EOF_CAT
 }
 
 if [ -n '$network_config' ]; then
-    if [ -z "${disable_configure_safe_defaults:-''}" ]; then
+    if [ -z "${disable_configure_safe_defaults:-}" ]; then
         trap configure_safe_defaults EXIT
     fi
 
index d3d8cbd..ba8e556 100644 (file)
@@ -42,7 +42,7 @@ parameters:
       CinderApiNetwork: internal_api
       CinderIscsiNetwork: storage
       CongressApiNetwork: internal_api
-      GlanceApiNetwork: storage
+      GlanceApiNetwork: internal_api
       IronicApiNetwork: ctlplane
       IronicNetwork: ctlplane
       IronicInspectorNetwork: ctlplane
index c99fa3f..b1a3529 100644 (file)
@@ -4,10 +4,12 @@ resource_registry:
   OS::TripleO::PostDeploySteps: puppet/post.yaml
   OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
   OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
+  OS::TripleO::AllNodesDeployment: OS::Heat::StructuredDeployments
   OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
   OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
   OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
   OS::TripleO::DefaultPasswords: default_passwords.yaml
+  OS::TripleO::RandomString: OS::Heat::RandomString
 
   # Tasks (for internal TripleO usage)
   OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
@@ -106,7 +108,7 @@ resource_registry:
   OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml
 
   # services
-  OS::TripleO::Services: puppet/services/services.yaml
+  OS::TripleO::Services: services.yaml
   OS::TripleO::Services::Apache: puppet/services/apache.yaml
   OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
   OS::TripleO::Services::CephMds: OS::Heat::None
@@ -154,6 +156,7 @@ resource_registry:
 
   OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml
   OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
+  OS::TripleO::Services::NeutronLinuxbridgeAgent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
   OS::TripleO::Services::Pacemaker: OS::Heat::None
   OS::TripleO::Services::PacemakerRemote: OS::Heat::None
@@ -199,6 +202,7 @@ resource_registry:
   # Undercloud Telemetry services
   OS::TripleO::Services::UndercloudCeilometerAgentCentral: OS::Heat::None
   OS::TripleO::Services::UndercloudCeilometerAgentNotification: OS::Heat::None
+  OS::TripleO::Services::UndercloudCeilometerAgentIpmi: OS::Heat::None
 
   #Gnocchi services
   OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
@@ -235,6 +239,7 @@ resource_registry:
   OS::TripleO::Services::MistralExecutor: OS::Heat::None
   OS::TripleO::Services::IronicApi: OS::Heat::None
   OS::TripleO::Services::IronicConductor: OS::Heat::None
+  OS::TripleO::Services::IronicInspector: OS::Heat::None
   OS::TripleO::Services::NovaIronic: OS::Heat::None
   OS::TripleO::Services::TripleoPackages: puppet/services/tripleo-packages.yaml
   OS::TripleO::Services::TripleoFirewall: puppet/services/tripleo-firewall.yaml
@@ -262,6 +267,7 @@ resource_registry:
   OS::TripleO::Services::NeutronVppAgent: OS::Heat::None
   OS::TripleO::Services::Docker: OS::Heat::None
   OS::TripleO::Services::CertmongerUser: OS::Heat::None
+  OS::TripleO::Services::Iscsid: OS::Heat::None
 
 parameter_defaults:
   EnablePackageInstall: false
index cd9369f..1848e09 100644 (file)
@@ -101,8 +101,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   ServerMetadata:
     default: {}
     description: >
@@ -197,6 +197,12 @@ parameters:
     description: >
       Set to true to append per network Vips to /etc/hosts on each node.
 
+  DeploymentServerBlacklist:
+    default: []
+    type: comma_delimited_list
+    description: >
+      List of server hostnames to blacklist from any triggered deployments.
+
 conditions:
   add_vips_to_etc_hosts: {equals : [{get_param: AddVipsToEtcHosts}, True]}
 
@@ -236,15 +242,15 @@ resources:
                 HOST: {get_param: CloudNameStorageManagement}
 
   HeatAuthEncryptionKey:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
 
   PcsdPassword:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
     properties:
       length: 16
 
   HorizonSecret:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
     properties:
       length: 10
 
@@ -318,17 +324,17 @@ resources:
     properties:
       name: {{role.name}}HostsDeployment
       config: {get_attr: [hostsConfig, config_id]}
-      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+      servers: {get_attr: [{{role.name}}Servers, value]}
 
   {{role.name}}SshKnownHostsDeployment:
     type: OS::Heat::StructuredDeployments
     properties:
       name: {{role.name}}SshKnownHostsDeployment
       config: {get_resource: SshKnownHostsConfig}
-      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+      servers: {get_attr: [{{role.name}}Servers, value]}
 
   {{role.name}}AllNodesDeployment:
-    type: OS::Heat::StructuredDeployments
+    type: OS::TripleO::AllNodesDeployment
     depends_on:
 {% for role_inner in roles %}
       - {{role_inner.name}}HostsDeployment
@@ -336,7 +342,7 @@ resources:
     properties:
       name: {{role.name}}AllNodesDeployment
       config: {get_attr: [allNodesConfig, config_id]}
-      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+      servers: {get_attr: [{{role.name}}Servers, value]}
       input_values:
         # Note we have to use yaql to look up the first hostname/ip in the
         # list because heat path based attributes operate on the attribute
@@ -358,7 +364,7 @@ resources:
     properties:
       name: {{role.name}}AllNodesValidationDeployment
       config: {get_resource: AllNodesValidationConfig}
-      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+      servers: {get_attr: [{{role.name}}Servers, value]}
 
   {{role.name}}IpListMap:
     type: OS::TripleO::Network::Ports::NetIpListMap
@@ -439,8 +445,40 @@ resources:
           ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]}
           MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
           ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
+          DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]}
+          RoleParameters: {get_param: {{role.name}}Parameters}
 {% endfor %}
 
+{% for role in roles %}
+  {{role.name}}Servers:
+    type: OS::Heat::Value
+    depends_on: {{role.name}}
+    properties:
+      type: json
+      value:
+        yaql:
+          expression: let(servers=>switch(isDict($.data.servers) => $.data.servers, true => {})) -> $servers.deleteAll($servers.keys().where($servers[$] = null))
+          data:
+            servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
+
+  # This resource just creates a dict out of the DeploymentServerBlacklist,
+  # which is a list. The dict is used in the role templates to set a condition
+  # on whether to create the deployment resources. We can't use the list
+  # directly because there is no way to ask Heat if a list contains a specific
+  # value.
+  DeploymentServerBlacklistDict:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+        map_merge:
+          repeat:
+            template:
+              hostname: 1
+            for_each:
+              hostname: {get_param: DeploymentServerBlacklist}
+
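
For example (hostname illustrative), ``DeploymentServerBlacklist: [overcloud-novacompute-1]`` yields the value ``{overcloud-novacompute-1: 1}``, which each role template can then index by its own ``Hostname`` to drive its ``server_not_blacklisted`` condition::

  parameter_defaults:
    # Illustrative: skip triggered deployments on a single compute node.
    DeploymentServerBlacklist:
      - overcloud-novacompute-1
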
   hostsConfig:
     type: OS::TripleO::Hosts::SoftwareConfig
     properties:
@@ -537,12 +575,12 @@ resources:
       UpdateIdentifier: {get_param: UpdateIdentifier}
 
   MysqlRootPassword:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
     properties:
       length: 10
 
   RabbitCookie:
-    type: OS::Heat::RandomString
+    type: OS::TripleO::RandomString
     properties:
       length: 20
       salt: {get_param: RabbitCookieSalt}
@@ -663,7 +701,7 @@ resources:
     properties:
       servers:
 {% for role in roles %}
-        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+        {{role.name}}: {get_attr: [{{role.name}}Servers, value]}
 {% endfor %}
       input_values:
         deploy_identifier: {get_param: DeployIdentifier}
@@ -681,7 +719,7 @@ resources:
     properties:
       servers:
 {% for role in roles %}
-        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+        {{role.name}}: {get_attr: [{{role.name}}Servers, value]}
 {% endfor %}
 
   # Post deployment steps for all roles
@@ -695,14 +733,36 @@ resources:
     properties:
       servers:
 {% for role in roles %}
-        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+        {{role.name}}: {get_attr: [{{role.name}}Servers, value]}
 {% endfor %}
+      stack_name: {get_param: 'OS::stack_name'}
       EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+      ctlplane_service_ips:
+        # Note (shardy) this somewhat complex yaql may be replaced
+        # with a map_deep_merge function in ocata.  It merges the
+        # list of maps, but appends to colliding lists when a service
+        # is deployed on more than one role
+        yaql:
+          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+          data:
+            l:
+{% for role in roles %}
+              - {get_attr: [{{role.name}}IpListMap, ctlplane_service_ips]}
+{% endfor %}
       role_data:
 {% for role in roles %}
         {{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
 {% endfor %}
 
+  ServerOsCollectConfigData:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+{% for role in roles %}
+        {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+
 outputs:
   ManagedEndpoints:
     description: Asserts that the keystone endpoints have been provisioned.
@@ -753,3 +813,15 @@ outputs:
 {% for role in roles %}
       {{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
 {% endfor %}
+  ServerOsCollectConfigData:
+    description: The os-collect-config configuration associated with each server resource
+    value:
+{% for role in roles %}
+      {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+  VipMap:
+    description: Mapping of each network to VIP addresses. Also includes the Redis VIP.
+    value:
+      map_merge:
+        - {get_attr: [VipMap, net_ip_map]}
+        - redis: {get_attr: [RedisVirtualIP, ip_address]}
diff --git a/plan-samples/README.rst b/plan-samples/README.rst
new file mode 100644 (file)
index 0000000..44b9d0c
--- /dev/null
@@ -0,0 +1,22 @@
+=================================
+Samples for plan-environment.yaml
+=================================
+
+The ``plan-environment.yaml`` file provides the details of the plan to be
+deployed by TripleO. Along with the details of the heat environments and
+parameters, it is also possible to provide workflow-specific parameters to the
+TripleO mistral workflows. A new section ``workflow_parameters`` has been
+added for this purpose, which gives a clear
+separation between heat environment parameters and workflow-only parameters.
+These customized plan environment files can be provided with the ``-p`` option
+to the ``openstack overcloud deploy`` and ``openstack overcloud plan create``
+commands. The sample format to provide the workflow-specific parameters::
+
+  workflow_parameters:
+    tripleo.derive_params.v1.derive_parameters:
+      # DPDK Parameters
+      number_of_pmd_cpu_threads_per_numa_node: 2
+
+
+All the parameters specified under the workflow name will be passed as
+``user_input`` to the workflow when it is invoked from the tripleoclient.
\ No newline at end of file
diff --git a/plan-samples/plan-environment-derived-params.yaml b/plan-samples/plan-environment-derived-params.yaml
new file mode 100644 (file)
index 0000000..964e57d
--- /dev/null
@@ -0,0 +1,35 @@
+version: 1.0
+
+name: overcloud
+description: >
+  Default Deployment plan
+template: overcloud.yaml
+environments:
+  - path: overcloud-resource-registry-puppet.yaml
+workflow_parameters:
+  tripleo.derive_params.v1.derive_parameters:
+    ######### DPDK Parameters #########
+    # Specifies the minimum number of CPU threads to be allocated for DPDK
+    # PMD threads. The actual allocation will be based on network config: if
+    # a DPDK port is associated with a NUMA node, then this configuration
+    # will be used, otherwise 0.
+    number_of_pmd_cpu_threads_per_numa_node: 4
+    # Amount of memory to be configured as huge pages, in percentage. Out of
+    # the total available memory (excluding the NovaReservedHostMemory), the
+    # specified percentage of the remainder is configured as huge pages.
+    huge_page_allocation_percentage: 90
+    ######### HCI Parameters #########
+    hci_profile: default
+    hci_profile_config:
+      default:
+        average_guest_memory_size_in_mb: 2048
+        average_guest_cpu_utilization_percentage: 50
+      many_small_vms:
+        average_guest_memory_size_in_mb: 1024
+        average_guest_cpu_utilization_percentage: 20
+      few_large_vms:
+        average_guest_memory_size_in_mb: 4096
+        average_guest_cpu_utilization_percentage: 80
+      nfv_default:
+        average_guest_memory_size_in_mb: 8192
+        average_guest_cpu_utilization_percentage: 90
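
Only one profile is applied per derivation run; a variant of this sample could select a different workload mix simply by pointing ``hci_profile`` at another of the entries defined above, for example::

  workflow_parameters:
    tripleo.derive_params.v1.derive_parameters:
      hci_profile: many_small_vms
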
index baafe03..b128445 100644 (file)
@@ -12,10 +12,8 @@ parameters:
     type: string
   cloud_name_ctlplane:
     type: string
-  # FIXME(shardy) this can be comma_delimited_list when
-  # https://bugs.launchpad.net/heat/+bug/1617019 is fixed
   enabled_services:
-    type: string
+    type: comma_delimited_list
   controller_ips:
     type: comma_delimited_list
   logging_groups:
@@ -118,7 +116,10 @@ resources:
            map_merge:
               - tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
               - tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
-              - enabled_services: {get_param: enabled_services}
+              - enabled_services:
+                  yaql:
+                    expression: $.data.distinct()
+                    data: {get_param: enabled_services}
               # This writes out a mapping of service_name_enabled: 'true'
               # For any services not enabled, hiera foo_enabled will
               # return nil, as it's undefined
@@ -129,8 +130,7 @@ resources:
                       # https://bugs.launchpad.net/heat/+bug/1617203
                       SERVICE_enabled: 'true'
                     for_each:
-                      SERVICE:
-                        str_split: [',', {get_param: enabled_services}]
+                      SERVICE: {get_param: enabled_services}
               # Dynamically generate per-service network data
               # This works as follows (outer->inner functions)
               # yaql - filters services where no mapping exists in ServiceNetMap
@@ -150,8 +150,7 @@ resources:
                               template:
                                 SERVICE_network: SERVICE_network
                               for_each:
-                                SERVICE:
-                                  str_split: [',', {get_param: enabled_services}]
+                                SERVICE: {get_param: enabled_services}
                         - values: {get_param: ServiceNetMap}
               # Keystone doesn't provide separate entries for the public
               # and admin endpoints, so we need to add them here manually
@@ -203,8 +202,7 @@ resources:
                                   template:
                                     SERVICE_vip: SERVICE_network
                                   for_each:
-                                    SERVICE:
-                                      str_split: [',', {get_param: enabled_services}]
+                                    SERVICE: {get_param: enabled_services}
                             - values: {get_param: ServiceNetMap}
                         - values: {get_param: NetVipMap}
               - keystone_admin_api_vip:
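
Because ``enabled_services`` is now a concatenated per-role list rather than a single string, the same service name can appear more than once when a service runs on several roles; the ``distinct()`` call above collapses those duplicates before the value is written to hiera. Schematically (service names illustrative)::

  enabled_services:
    yaql:
      expression: $.data.distinct()
      # [nova_compute, ntp, sshd, ntp]  ->  [nova_compute, ntp, sshd]
      data: [nova_compute, ntp, sshd, ntp]
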
index 3fc663f..612a4a0 100644 (file)
@@ -69,8 +69,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   BlockStorageServerMetadata:
     default: {}
     description: >
@@ -132,6 +132,48 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
+
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
 
 resources:
   BlockStorage:
@@ -160,6 +202,12 @@ resources:
           - {get_param: BlockStorageServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: BlockStorageSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -358,6 +406,8 @@ resources:
     type: OS::TripleO::BlockStorage::PreNetworkConfig
     properties:
       server: {get_resource: BlockStorage}
+      RoleParameters: {get_param: RoleParameters}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -366,7 +416,11 @@ resources:
       name: NetworkDeployment
       config: {get_resource: NetworkConfig}
       server: {get_resource: BlockStorage}
-      actions: {get_param: NetworkDeploymentActions}
+      actions:
+        if:
+          - server_not_blacklisted
+          - {get_param: NetworkDeploymentActions}
+          - []
 
   BlockStorageUpgradeInitConfig:
     type: OS::Heat::SoftwareConfig
@@ -389,6 +443,11 @@ resources:
       name: BlockStorageUpgradeInitDeployment
       server: {get_resource: BlockStorage}
       config: {get_resource: BlockStorageUpgradeInitConfig}
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   BlockStorageDeployment:
     type: OS::Heat::StructuredDeployment
@@ -399,6 +458,11 @@ resources:
       config: {get_resource: BlockStorageConfig}
       input_values:
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   # Map heat metadata into hiera datafiles
   BlockStorageConfig:
@@ -409,6 +473,7 @@ resources:
         hierarchy:
           - '"%{::uuid}"'
           - heat_config_%{::deploy_config_name}
+          - config_step
           - volume_extraconfig
           - extraconfig
           - service_names
@@ -437,6 +502,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
@@ -466,6 +532,11 @@ resources:
       input_values:
         update_identifier:
           get_param: UpdateIdentifier
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
@@ -555,6 +626,7 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     description: Heat resource handle for the block storage server
     value:
       {get_resource: BlockStorage}
+    condition: server_not_blacklisted
   external_ip_address:
     description: IP address of the server in the external network
     value: {get_attr: [ExternalPort, ip_address]}
@@ -573,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   management_ip_address:
     description: IP address of the server in the management network
     value: {get_attr: [ManagementPort, ip_address]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [BlockStorage, os_collect_config]}
index 295e64f..e7afcb4 100644 (file)
@@ -75,8 +75,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   CephStorageServerMetadata:
     default: {}
     description: >
@@ -138,6 +138,48 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
+
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
 
 resources:
   CephStorage:
@@ -166,6 +208,12 @@ resources:
           - {get_param: CephStorageServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: CephStorageSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -364,6 +412,8 @@ resources:
     type: OS::TripleO::CephStorage::PreNetworkConfig
     properties:
       server: {get_resource: CephStorage}
+      RoleParameters: {get_param: RoleParameters}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -372,7 +422,11 @@ resources:
       name: NetworkDeployment
       config: {get_resource: NetworkConfig}
       server: {get_resource: CephStorage}
-      actions: {get_param: NetworkDeploymentActions}
+      actions:
+        if:
+          - server_not_blacklisted
+          - {get_param: NetworkDeploymentActions}
+          - []
 
   CephStorageUpgradeInitConfig:
     type: OS::Heat::SoftwareConfig
@@ -395,6 +449,11 @@ resources:
       name: CephStorageUpgradeInitDeployment
       server: {get_resource: CephStorage}
       config: {get_resource: CephStorageUpgradeInitConfig}
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   CephStorageDeployment:
     type: OS::Heat::StructuredDeployment
@@ -405,6 +464,11 @@ resources:
       server: {get_resource: CephStorage}
       input_values:
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   CephStorageConfig:
     type: OS::Heat::StructuredConfig
@@ -414,6 +478,7 @@ resources:
         hierarchy:
           - '"%{::uuid}"'
           - heat_config_%{::deploy_config_name}
+          - config_step
           - ceph_extraconfig
           - extraconfig
           - service_names
@@ -442,6 +507,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
@@ -477,6 +543,11 @@ resources:
       input_values:
         update_identifier:
           get_param: UpdateIdentifier
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
@@ -566,6 +637,7 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     description: Heat resource handle for the ceph storage server
     value:
       {get_resource: CephStorage}
+    condition: server_not_blacklisted
   external_ip_address:
     description: IP address of the server in the external network
     value: {get_attr: [ExternalPort, ip_address]}
@@ -584,3 +656,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   management_ip_address:
     description: IP address of the server in the management network
     value: {get_attr: [ManagementPort, ip_address]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [CephStorage, os_collect_config]}
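The blacklist and Swift-data handling added above lean on Heat's `if` intrinsic: when `server_not_blacklisted` evaluates false, each deployment's `actions` list collapses to an empty list, so Heat skips that deployment on CREATE and UPDATE, and the server handle output is suppressed via the resource-level `condition`. A rough sketch of the map shape the condition expects (hostnames are illustrative; the dict is produced by the parent template rather than set directly, and hosts not listed keep their normal actions)::

  DeploymentServerBlacklistDict:
    overcloud-cephstorage-0: 1    # blacklisted: deployment actions become []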
index 05318f3..5a662e8 100644 (file)
@@ -37,7 +37,7 @@ parameters:
     type: string
   NeutronPublicInterface:
     default: nic1
-    description: A port to add to the NeutronPhysicalBridge.
+    description: Which interface to add to the NeutronPhysicalBridge.
     type: string
   NodeIndex:
     type: number
@@ -90,8 +90,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   NovaComputeServerMetadata:
     default: {}
     description: >
@@ -150,6 +150,48 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
+
+conditions:
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
 
 resources:
 
@@ -180,6 +222,12 @@ resources:
           - {get_param: NovaComputeServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: NovaComputeSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -367,6 +415,8 @@ resources:
     type: OS::TripleO::Compute::PreNetworkConfig
     properties:
       server: {get_resource: NovaCompute}
+      RoleParameters: {get_param: RoleParameters}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkConfig:
     type: OS::TripleO::Compute::Net::SoftwareConfig
@@ -384,9 +434,13 @@ resources:
     depends_on: PreNetworkConfig
     properties:
       name: NetworkDeployment
+      actions:
+        if:
+          - server_not_blacklisted
+          - {get_param: NetworkDeploymentActions}
+          - []
       config: {get_resource: NetworkConfig}
       server: {get_resource: NovaCompute}
-      actions: {get_param: NetworkDeploymentActions}
       input_values:
         bridge_name: {get_param: NeutronPhysicalBridge}
         interface_name: {get_param: NeutronPublicInterface}
@@ -410,6 +464,11 @@ resources:
     depends_on: NetworkDeployment
     properties:
       name: NovaComputeUpgradeInitDeployment
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
       server: {get_resource: NovaCompute}
       config: {get_resource: NovaComputeUpgradeInitConfig}
 
@@ -421,6 +480,7 @@ resources:
         hierarchy:
           - '"%{::uuid}"'
           - heat_config_%{::deploy_config_name}
+          - config_step
           - compute_extraconfig
           - extraconfig
           - service_names
@@ -455,12 +515,18 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   NovaComputeDeployment:
     type: OS::TripleO::SoftwareDeployment
     depends_on: NovaComputeUpgradeInitDeployment
     properties:
       name: NovaComputeDeployment
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
       config: {get_resource: NovaComputeConfig}
       server: {get_resource: NovaCompute}
       input_values:
@@ -496,6 +562,11 @@ resources:
     depends_on: NetworkDeployment
     properties:
       name: UpdateDeployment
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
       config: {get_resource: UpdateConfig}
       server: {get_resource: NovaCompute}
       input_values:
@@ -609,4 +680,8 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   nova_server_resource:
     description: Heat resource handle for the Nova compute server
     value:
-      {get_resource: NovaCompute}
\ No newline at end of file
+      {get_resource: NovaCompute}
+    condition: server_not_blacklisted
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [NovaCompute, os_collect_config]}
index 163ba57..09e5b2b 100644 (file)
@@ -58,9 +58,13 @@ parameters:
     type: string
     constraints:
       - custom_constraint: nova.keypair
+  NeutronPhysicalBridge:
+    default: 'br-ex'
+    description: An OVS bridge to create for accessing external networks.
+    type: string
   NeutronPublicInterface:
     default: nic1
-    description: What interface to bridge onto br-ex for network nodes.
+    description: Which interface to add to the NeutronPhysicalBridge.
     type: string
   ServiceNetMap:
     default: {}
@@ -104,8 +108,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   ControllerServerMetadata:
     default: {}
     description: >
@@ -164,6 +168,35 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
 
 parameter_groups:
 - label: deprecated
@@ -171,6 +204,19 @@ parameter_groups:
   parameters:
   - controllerExtraConfig
 
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
+
 resources:
 
   Controller:
@@ -199,6 +245,12 @@ resources:
           - {get_param: ControllerServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: ControllerSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -386,6 +438,8 @@ resources:
     type: OS::TripleO::Controller::PreNetworkConfig
     properties:
       server: {get_resource: Controller}
+      RoleParameters: {get_param: RoleParameters}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkConfig:
     type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -405,9 +459,13 @@ resources:
       name: NetworkDeployment
       config: {get_resource: NetworkConfig}
       server: {get_resource: Controller}
-      actions: {get_param: NetworkDeploymentActions}
+      actions:
+        if:
+          - server_not_blacklisted
+          - {get_param: NetworkDeploymentActions}
+          - []
       input_values:
-        bridge_name: br-ex
+        bridge_name: {get_param: NeutronPhysicalBridge}
         interface_name: {get_param: NeutronPublicInterface}
 
   # Resource for site-specific injection of root certificate
@@ -444,6 +502,11 @@ resources:
     depends_on: NetworkDeployment
     properties:
       name: ControllerUpgradeInitDeployment
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
       server: {get_resource: Controller}
       config: {get_resource: ControllerUpgradeInitConfig}
 
@@ -452,6 +515,11 @@ resources:
     depends_on: ControllerUpgradeInitDeployment
     properties:
       name: ControllerDeployment
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
       config: {get_resource: ControllerConfig}
       server: {get_resource: Controller}
       input_values:
@@ -468,6 +536,7 @@ resources:
         hierarchy:
           - '"%{::uuid}"'
           - heat_config_%{::deploy_config_name}
+          - config_step
           - controller_extraconfig
           - extraconfig
           - service_configs
@@ -511,6 +580,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Hook for site-specific additional pre-deployment config, e.g extra hieradata
   ControllerExtraConfigPre:
@@ -535,6 +605,11 @@ resources:
     depends_on: NetworkDeployment
     properties:
       name: UpdateDeployment
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
       config: {get_resource: UpdateConfig}
       server: {get_resource: Controller}
       input_values:
@@ -649,9 +724,13 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     description: Heat resource handle for the Nova compute server
     value:
       {get_resource: Controller}
+    condition: server_not_blacklisted
   tls_key_modulus_md5:
     description: MD5 checksum of the TLS Key Modulus
     value: {get_attr: [NodeTLSData, key_modulus_md5]}
   tls_cert_modulus_md5:
     description: MD5 checksum of the TLS Certificate Modulus
     value: {get_attr: [NodeTLSData, cert_modulus_md5]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [Controller, os_collect_config]}
index 4e1ad89..e4d20b4 100644 (file)
@@ -10,16 +10,20 @@ if [ -n "$artifact_urls" ]; then
   for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
     curl --globoff -o $TMP_DATA/file_data "$URL"
     if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
-      yum install -y $TMP_DATA/file_data
+      mv $TMP_DATA/file_data $TMP_DATA/file_data.rpm
+      yum install -y $TMP_DATA/file_data.rpm
+      rm $TMP_DATA/file_data.rpm
     elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
       pushd /
       tar xvzf $TMP_DATA/file_data
       popd
     else
-      echo "ERROR: Unsupported file format."
+      echo "ERROR: Unsupported file format: $URL"
       exit 1
     fi
-    rm $TMP_DATA/file_data
+    if [ -f $TMP_DATA/file_data ]; then
+      rm $TMP_DATA/file_data
+    fi
   done
 else
   echo "No artifact_urls was set. Skipping..."
index 8420f99..574c41b 100644 (file)
@@ -8,11 +8,14 @@ description: 'Upgrade steps for all roles'
 parameters:
   servers:
     type: json
-
+  stack_name:
+    type: string
+    description: Name of the topmost stack
   role_data:
     type: json
     description: Mapping of Role name e.g Controller to the per-role data
-
+  ctlplane_service_ips:
+    type: json
   UpdateIdentifier:
     type: string
     description: >
@@ -44,24 +47,6 @@ resources:
         - ''
         - - "#!/bin/bash\n\n"
           - "set -eu\n\n"
-          - "if hiera -c /etc/puppet/hiera.yaml service_names | grep nova_compute ; then\n\n"
-          - "  crudini --set /etc/nova/nova.conf placement auth_type password\n\n"
-          - "  crudini --set /etc/nova/nova.conf placement username placement\n\n"
-          - "  crudini --set /etc/nova/nova.conf placement project_domain_name Default\n\n"
-          - "  crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n"
-          - "  crudini --set /etc/nova/nova.conf placement project_name service\n\n"
-          - "  crudini --set /etc/nova/nova.conf placement os_interface internal\n\n"
-          - str_replace:
-              template: |
-                crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD'
-                crudini --set /etc/nova/nova.conf placement os_region_name 'REGION_NAME'
-                crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL'
-              params:
-                SERVICE_PASSWORD: { get_param: NovaPassword }
-                REGION_NAME: { get_param: KeystoneRegion }
-                AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
-          - "  systemctl restart openstack-nova-compute\n\n"
-          - "fi\n\n"
           - str_replace:
               template: |
                 ROLE='ROLE_NAME'
@@ -224,7 +209,9 @@ resources:
 {%- endfor %}
     properties:
       servers: {get_param: servers}
+      stack_name: {get_param: stack_name}
       role_data: {get_param: role_data}
+      ctlplane_service_ips: {get_param: ctlplane_service_ips}
 
 outputs:
   # Output the config for each role, just use Step1 as the config should be
index 7ee12b1..4a1670f 100644 (file)
@@ -69,8 +69,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   SwiftStorageServerMetadata:
     default: {}
     description: >
@@ -132,6 +132,49 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+    default: {}
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
+
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
 
 resources:
 
@@ -160,6 +203,12 @@ resources:
           - {get_param: SwiftStorageServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: ObjectStorageSchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -358,6 +407,8 @@ resources:
     type: OS::TripleO::ObjectStorage::PreNetworkConfig
     properties:
       server: {get_resource: SwiftStorage}
+      RoleParameters: {get_param: RoleParameters}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -366,7 +417,12 @@ resources:
       name: NetworkDeployment
       config: {get_resource: NetworkConfig}
       server: {get_resource: SwiftStorage}
-      actions: {get_param: NetworkDeploymentActions}
+      actions:
+        if:
+          - server_not_blacklisted
+          - {get_param: NetworkDeploymentActions}
+          - []
+
 
   SwiftStorageUpgradeInitConfig:
     type: OS::Heat::SoftwareConfig
@@ -389,6 +445,11 @@ resources:
       name: SwiftStorageUpgradeInitDeployment
       server: {get_resource: SwiftStorage}
       config: {get_resource: SwiftStorageUpgradeInitConfig}
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   SwiftStorageHieraConfig:
     type: OS::Heat::StructuredConfig
@@ -398,6 +459,7 @@ resources:
         hierarchy:
           - '"%{::uuid}"'
           - heat_config_%{::deploy_config_name}
+          - config_step
           - object_extraconfig
           - extraconfig
           - service_names
@@ -426,6 +488,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   SwiftStorageHieraDeploy:
     type: OS::Heat::StructuredDeployment
@@ -436,6 +499,11 @@ resources:
       config: {get_resource: SwiftStorageHieraConfig}
       input_values:
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
@@ -464,6 +532,11 @@ resources:
       input_values:
         update_identifier:
           get_param: UpdateIdentifier
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
@@ -553,6 +626,7 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     description: Heat resource handle for the swift storage server
     value:
       {get_resource: SwiftStorage}
+    condition: server_not_blacklisted
   external_ip_address:
     description: IP address of the server in the external network
     value: {get_attr: [ExternalPort, ip_address]}
@@ -571,3 +645,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   management_ip_address:
     description: IP address of the server in the management network
     value: {get_attr: [ManagementPort, ip_address]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [SwiftStorage, os_collect_config]}
index 3a15cec..67e1ecf 100644 (file)
@@ -8,7 +8,9 @@ parameters:
   servers:
     type: json
     description: Mapping of Role name e.g Controller to a list of servers
-
+  stack_name:
+    type: string
+    description: Name of the topmost stack
   role_data:
     type: json
     description: Mapping of Role name e.g Controller to the per-role data
@@ -23,6 +25,7 @@ parameters:
     description: >
       Setting this to a unique value will re-run any deployment tasks which
       perform configuration on a Heat stack-update.
+  ctlplane_service_ips:
+    type: json
 
-resources:
 {% include 'puppet-steps.j2' %}
index 360c633..82c6171 100644 (file)
@@ -1,3 +1,19 @@
+{% set deploy_steps_max = 6 %}
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+  WorkflowTasks_Step{{step}}_Enabled:
+    or:
+    {% for role in roles %}
+      - not:
+          equals:
+            - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+            - ''
+      - False
+    {% endfor %}
+{% endfor %}
+
+resources:
   # Post deployment steps for all roles
   # A single config is re-applied with an incrementing step number
 {% for role in roles %}
       StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
 
   # Step through a series of configuration steps
-{% for step in range(1, 6) %}
+{% for step in range(1, deploy_steps_max) %}
   {{role.name}}Deployment_Step{{step}}:
     type: OS::Heat::StructuredDeploymentGroup
-  {% if step == 1 %}
-    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
-  {% else %}
     depends_on:
+      - WorkflowTasks_Step{{step}}_Execution
+    # TODO(gfidente): the following if/else condition
+    # replicates what is already defined for the
+    # WorkflowTasks_StepX resource and can be removed
+    # if https://bugs.launchpad.net/heat/+bug/1700569
+    # is fixed.
+    {% if step == 1 %}
+    {% for dep in roles %}
+      - {{dep.name}}PreConfig
+      - {{dep.name}}ArtifactsDeploy
+    {% endfor %}
+    {% else %}
     {% for dep in roles %}
       - {{dep.name}}Deployment_Step{{step -1}}
     {% endfor %}
-  {% endif %}
+    {% endif %}
     properties:
       name: {{role.name}}Deployment_Step{{step}}
       servers: {get_param: [servers, {{role.name}}]}
         update_identifier: {get_param: DeployIdentifier}
 {% endfor %}
 
+  # Note, this should be the last step to execute configuration changes.
+  # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+  # after all the previous deployment steps.
+  {{role.name}}ExtraConfigPost:
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step5
+  {% endfor %}
+    type: OS::TripleO::NodeExtraConfigPost
+    properties:
+        servers: {get_param: [servers, {{role.name}}]}
+
+  # The {{role.name}}PostConfig steps are in charge of
+  # quiescing all services, i.e. in the Controller case,
+  # we should run a full service reload.
   {{role.name}}PostConfig:
     type: OS::TripleO::Tasks::{{role.name}}PostConfig
     depends_on:
   {% for dep in roles %}
-      - {{dep.name}}Deployment_Step5
+      - {{dep.name}}ExtraConfigPost
   {% endfor %}
     properties:
       servers: {get_param: servers}
       input_values:
         update_identifier: {get_param: DeployIdentifier}
 
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  {{role.name}}ExtraConfigPost:
+
+{% endfor %}
+
+# BEGIN service_workflow_tasks handling
+{% for step in range(1, deploy_steps_max) %}
+  WorkflowTasks_Step{{step}}:
+    type: OS::Mistral::Workflow
+    condition: WorkflowTasks_Step{{step}}_Enabled
     depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}PostConfig
-  {% endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
+    {% if step == 1 %}
+    {% for dep in roles %}
+      - {{dep.name}}PreConfig
+      - {{dep.name}}ArtifactsDeploy
+    {% endfor %}
+    {% else %}
+    {% for dep in roles %}
+      - {{dep.name}}Deployment_Step{{step -1}}
+    {% endfor %}
+    {% endif %}
     properties:
-        servers: {get_param: [servers, {{role.name}}]}
+      name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+      type: direct
+      tasks:
+        yaql:
+          expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+          data:
+          {% for role in roles %}
+            - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+          {% endfor %}
 
+  WorkflowTasks_Step{{step}}_Execution:
+    type: OS::Mistral::ExternalResource
+    condition: WorkflowTasks_Step{{step}}_Enabled
+    depends_on: WorkflowTasks_Step{{step}}
+    properties:
+      actions:
+        CREATE:
+          workflow: { get_resource: WorkflowTasks_Step{{step}} }
+          params:
+            env:
+              service_ips: { get_param: ctlplane_service_ips }
+        UPDATE:
+          workflow: { get_resource: WorkflowTasks_Step{{step}} }
+          params:
+            env:
+              service_ips: { get_param: ctlplane_service_ips }
+      always_update: true
 {% endfor %}
+# END service_workflow_tasks handling
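The generated WorkflowTasks_Step{{step}} workflow collects its task list with the yaql expression above: roles whose `service_workflow_tasks` entry is empty are filtered out, the current step's list is selected from each remaining map, and the result is flattened into a single list. The ExternalResource then runs that workflow on both CREATE and UPDATE, passing `ctlplane_service_ips` through the workflow environment. A small illustration of the transformation for step2, with a placeholder task::

  # data handed to the yaql expression (one entry per role)
  data:
    - ''                              # role defining no service_workflow_tasks
    - step2:
        - name: example_task          # placeholder task
          action: std.echo output=Hello
  # flattened result used as the tasks of the step2 workflow
  - name: example_task
    action: std.echo output=Hello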
index dbb517f..b45736c 100644 (file)
@@ -28,9 +28,13 @@ parameters:
     constraints:
       - custom_constraint: nova.keypair
 {% endif %}
+  NeutronPhysicalBridge:
+    default: 'br-ex'
+    description: An OVS bridge to create for accessing tenant networks.
+    type: string
   NeutronPublicInterface:
     default: nic1
-    description: What interface to bridge onto br-ex for network nodes.
+    description: Which interface to add to the NeutronPhysicalBridge.
     type: string
   ServiceNetMap:
     default: {}
@@ -85,8 +89,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   {{role}}ServerMetadata:
     default: {}
     description: >
@@ -154,10 +158,52 @@ parameters:
       major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
       environment files.
     default: ''
+  DeploymentServerBlacklistDict:
+    default: {}
+    type: json
+    description: >
+      Map of server hostnames to blacklist from any triggered
+      deployments. If the value is 1, the server will be blacklisted. This
+      parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+  DeploymentSwiftDataMap:
+    type: json
+    description: |
+      Map of servers to Swift container and object for storing deployment data.
+      The keys are the Heat assigned hostnames, and the value is a map of the
+      container/object name in Swift. Example value:
+        overcloud-controller-0:
+          container: overcloud-controller
+          object: 0
+        overcloud-controller-1:
+          container: overcloud-controller
+          object: 1
+        overcloud-controller-2:
+          container: overcloud-controller
+          object: 2
+        overcloud-novacompute-0:
+          container: overcloud-compute
+          object: 0
+    default: {}
+
+conditions:
+  server_not_blacklisted:
+    not:
+      equals:
+        - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+        - 1
+  deployment_swift_data_map_unset:
+    equals:
+      - get_param:
+          - DeploymentSwiftDataMap
+          - {get_param: Hostname}
+      - ""
 
 resources:
   {{role}}:
-    type: OS::TripleO::{{role.name}}Server
+    type: OS::TripleO::{{role}}Server
     metadata:
       os-collect-config:
         command: {get_param: ConfigCommand}
@@ -182,6 +228,12 @@ resources:
           - {get_param: {{role}}ServerMetadata}
           - {get_param: ServiceMetadataSettings}
       scheduler_hints: {get_param: {{role}}SchedulerHints}
+      deployment_swift_data:
+        if:
+          - deployment_swift_data_map_unset
+          - {}
+          - {get_param: [DeploymentSwiftDataMap,
+                         {get_param: Hostname}]}
 
   # Combine the NodeAdminUserData and NodeUserData mime archives
   UserData:
@@ -380,6 +432,8 @@ resources:
     type: OS::TripleO::{{role}}::PreNetworkConfig
     properties:
       server: {get_resource: {{role}}}
+      RoleParameters: {get_param: RoleParameters}
+      ServiceNames: {get_param: ServiceNames}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -390,8 +444,13 @@ resources:
       server: {get_resource: {{role}}}
       actions: {get_param: NetworkDeploymentActions}
       input_values:
-        bridge_name: br-ex
+        bridge_name: {get_param: NeutronPhysicalBridge}
         interface_name: {get_param: NeutronPublicInterface}
+      actions:
+        if:
+          - server_not_blacklisted
+          - {get_param: NetworkDeploymentActions}
+          - []
 
   {{role}}UpgradeInitConfig:
     type: OS::Heat::SoftwareConfig
@@ -414,6 +473,11 @@ resources:
       name: {{role}}UpgradeInitDeployment
       server: {get_resource: {{role}}}
       config: {get_resource: {{role}}UpgradeInitConfig}
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   {{role}}Deployment:
     type: OS::Heat::StructuredDeployment
@@ -424,6 +488,11 @@ resources:
       server: {get_resource: {{role}}}
       input_values:
         enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   {{role}}Config:
     type: OS::Heat::StructuredConfig
@@ -433,6 +502,7 @@ resources:
         hierarchy:
           - '"%{::uuid}"'
           - heat_config_%{::deploy_config_name}
+          - config_step
           - {{role.lower()}}_extraconfig
           - extraconfig
           - service_names
@@ -463,6 +533,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
@@ -499,6 +570,11 @@ resources:
       input_values:
         update_identifier:
           get_param: UpdateIdentifier
+      actions:
+        if:
+          - server_not_blacklisted
+          - ['CREATE', 'UPDATE']
+          - []
 
   SshHostPubKey:
     type: OS::TripleO::Ssh::HostPubKey
@@ -588,6 +664,7 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
     description: Heat resource handle for {{role}} server
     value:
       {get_resource: {{role}}}
+    condition: server_not_blacklisted
   external_ip_address:
     description: IP address of the server in the external network
     value: {get_attr: [ExternalPort, ip_address]}
@@ -606,3 +683,6 @@ CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
   management_ip_address:
     description: IP address of the server in the management network
     value: {get_attr: [ManagementPort, ip_address]}
+  os_collect_config:
+    description: The os-collect-config configuration associated with this server resource
+    value: {get_attr: [{{role}}, os_collect_config]}
index 7a18ef0..d55414b 100644 (file)
@@ -95,6 +95,30 @@ are re-asserted when applying latter ones.
 
    5) Service activation (Pacemaker)
 
+It is also possible to use Mistral actions or workflows together with
+a deployment step; these are executed before the main configuration run.
+To describe actions or workflows from within a service use:
+
+  * service_workflow_tasks: One or more workflow task properties
+
+which expects a map where the key is the step and the value is a list of
+dictionaries, each describing a workflow task, for example::
+
+  service_workflow_tasks:
+    step2:
+      - name: echo
+        action: std.echo output=Hello
+    step3:
+      - name: external
+        workflow: my-pre-existing-workflow-name
+        input:
+          workflow_param1: value
+          workflow_param2: value
+
+The Heat guide for the `OS::Mistral::Workflow task property
+<https://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Mistral::Workflow-prop-tasks>`_
+has more details about the expected dictionary.
+
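A minimal sketch (not an existing service) of how a composable service template could surface such tasks through its role_data output; the names here are placeholders::

  outputs:
    role_data:
      value:
        service_name: example_service
        service_workflow_tasks:
          step2:
            - name: echo
              action: std.echo output=Hello
        step_config: |
          include ::tripleo::profile::base::example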
 Batch Upgrade Steps
 -------------------
 
index 331fe9a..0563d08 100644 (file)
@@ -56,11 +56,18 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  AodhDebug:
+    default: ''
+    description: Set to True to enable debugging Aodh services.
+    type: string
   KeystoneRegion:
     type: string
     default: 'regionOne'
     description: Keystone region for endpoint
 
+conditions:
+  service_debug_unset: {equals : [{get_param: AodhDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Aodh role.
@@ -78,7 +85,11 @@ outputs:
             query:
               read_default_file: /etc/my.cnf.d/tripleo.cnf
               read_default_group: tripleo
-        aodh::debug: {get_param: Debug}
+        aodh::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: AodhDebug }
         aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         aodh::rabbit_userid: {get_param: RabbitUserName}
         aodh::rabbit_password: {get_param: RabbitPassword}
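The AodhDebug override follows a pattern repeated across this change for Barbican, Ceilometer, Cinder, Congress, Glance, Gnocchi, Heat, Horizon and Ironic: when the per-service parameter is left at its empty default, the global Debug value wins; setting it enables verbose logging for that service alone. A sketch of an environment file exercising the override::

  parameter_defaults:
    Debug: false       # keep the rest of the cloud quiet
    AodhDebug: true    # verbose logging for the Aodh services only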
index 53fba63..5133124 100644 (file)
@@ -38,6 +38,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  BarbicanDebug:
+    default: ''
+    description: Set to True to enable debugging Barbican service.
+    type: string
   KeystoneRegion:
     type: string
     default: 'regionOne'
@@ -81,6 +85,9 @@ resources:
       RoleName: {get_param: RoleName}
       RoleParameters: {get_param: RoleParameters}
 
+conditions:
+  service_debug_unset: {equals : [{get_param: BarbicanDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Barbican API role.
@@ -97,7 +104,11 @@ outputs:
             barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]}
             barbican::api::db_auto_create: false
             barbican::api::enabled_certificate_plugins: ['simple_certificate']
-            barbican::api::logging::debug: {get_param: Debug}
+            barbican::api::logging::debug:
+              if:
+              - service_debug_unset
+              - {get_param: Debug }
+              - {get_param: BarbicanDebug }
             barbican::api::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
             barbican::api::rabbit_userid: {get_param: RabbitUserName}
             barbican::api::rabbit_password: {get_param: RabbitPassword}
index b3e2c3a..1d86369 100644 (file)
@@ -64,6 +64,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  CeilometerDebug:
+    default: ''
+    description: Set to True to enable debugging Ceilometer services.
+    type: string
   KeystoneRegion:
     type: string
     default: 'regionOne'
@@ -100,13 +104,20 @@ parameters:
     type: string
     hidden: true
 
+conditions:
+  service_debug_unset: {equals : [{get_param: CeilometerDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Ceilometer role.
     value:
       service_name: ceilometer_base
       config_settings:
-        ceilometer::debug: {get_param: Debug}
+        ceilometer::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: CeilometerDebug }
         ceilometer::keystone::authtoken::project_name: 'service'
         ceilometer::keystone::authtoken::user_domain_name: 'Default'
         ceilometer::keystone::authtoken::project_domain_name: 'Default'
index 6ad451a..0508c55 100644 (file)
@@ -26,11 +26,28 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+  DefaultCRLURL:
+    default: 'http://ipa-ca/ipa/crl/MasterCRL.bin'
+    description: URI where to get the CRL to be configured in the nodes.
+    type: string
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 outputs:
   role_data:
     description: Role data for the certmonger-user service
     value:
       service_name: certmonger_user
+      config_settings:
+        tripleo::certmonger::ca::crl::crl_source:
+          if:
+            - internal_tls_enabled
+            - {get_param: DefaultCRLURL}
+            - null
       step_config: |
         include ::tripleo::profile::base::certmonger_user
index a201134..c0bffb1 100644 (file)
@@ -61,6 +61,9 @@ parameters:
   CinderDellScSecondaryScApiPort:
     type: number
     default: 3033
+  CinderDellScExcludedDomainIp:
+    type: string
+    default: ''
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -105,5 +108,6 @@ outputs:
                 cinder::backend::dellsc_iscsi::secondary_san_login: {get_param: CinderDellScSecondarySanLogin}
                 cinder::backend::dellsc_iscsi::secondary_san_password: {get_param: CinderDellScSecondarySanPassword}
                 cinder::backend::dellsc_iscsi::secondary_sc_api_port: {get_param: CinderDellScSecondaryScApiPort}
+                cinder::backend::dellsc_iscsi::excluded_domain_ip: {get_param: CinderDellScExcludedDomainIp}
       step_config: |
         include ::tripleo::profile::base::cinder::volume
index bddc8e1..fbde4c0 100644 (file)
@@ -93,6 +93,12 @@ parameters:
   CinderNetappWebservicePath:
     type: string
     default: '/devmgr/v2'
+  CinderNetappNasSecureFileOperations:
+    type: string
+    default: 'false'
+  CinderNetappNasSecureFilePermissions:
+    type: string
+    default: 'false'
   # DEPRECATED options for compatibility with older versions
   CinderNetappEseriesHostType:
     type: string
@@ -133,5 +139,7 @@ outputs:
         cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
         cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
         cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+        cinder::backend::netapp::nas_secure_file_operations: {get_param: CinderNetappNasSecureFileOperations}
+        cinder::backend::netapp::nas_secure_file_permissions: {get_param: CinderNetappNasSecureFilePermissions}
       step_config: |
         include ::tripleo::profile::base::cinder::volume
index 2ba5aa5..f7dfe5e 100644 (file)
@@ -12,6 +12,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  CinderDebug:
+    default: ''
+    description: Set to True to enable debugging on Cinder services.
+    type: string
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -93,6 +97,9 @@ parameters:
         Cron to move deleted instances to another table - Log destination
     default: '/var/log/cinder/cinder-rowsflush.log'
 
+conditions:
+  service_debug_unset: {equals : [{get_param: CinderDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Cinder base service.
@@ -109,7 +116,11 @@ outputs:
             query:
               read_default_file: /etc/my.cnf.d/tripleo.cnf
               read_default_group: tripleo
-        cinder::debug: {get_param: Debug}
+        cinder::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: CinderDebug }
         cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         cinder::rabbit_userid: {get_param: RabbitUserName}
         cinder::rabbit_password: {get_param: RabbitPassword}
index fe95222..1f8c345 100644 (file)
@@ -40,6 +40,20 @@ parameters:
       NFS servers used by Cinder NFS backend. Effective when
       CinderEnableNfsBackend is true.
     type: comma_delimited_list
+  CinderNasSecureFileOperations:
+    default: false
+    description: >
+      Controls whether security enhanced NFS file operations are enabled.
+      Valid values are 'auto', 'true' or 'false'. Effective when
+      CinderEnableNfsBackend is true.
+    type: string
+  CinderNasSecureFilePermissions:
+    default: false
+    description: >
+      Controls whether security enhanced NFS file permissions are enabled.
+      Valid values are 'auto', 'true' or 'false'. Effective when
+      CinderEnableNfsBackend is true.
+    type: string
   CinderRbdPoolName:
     default: volumes
     type: string
@@ -105,6 +119,8 @@ outputs:
             tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
             tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
             tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
+            tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_operations: {get_param: CinderNasSecureFileOperations}
+            tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_permissions: {get_param: CinderNasSecureFilePermissions}
             tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
             tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
             tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
index 8fbcd99..5bca94d 100644 (file)
@@ -33,6 +33,10 @@ parameters:
   Debug:
     type: string
     default: ''
+  CongressDebug:
+    default: ''
+    description: Set to True to enable debugging Congress services.
+    type: string
   KeystoneRegion:
     type: string
     default: 'regionOne'
@@ -62,6 +66,9 @@ parameters:
     default: {}
     type: json
 
+conditions:
+  service_debug_unset: {equals : [{get_param: CongressDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Congress role.
@@ -79,7 +86,11 @@ outputs:
             query:
               read_default_file: /etc/my.cnf.d/tripleo.cnf
               read_default_group: tripleo
-        congress::debug: {get_param: Debug}
+        congress::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: CongressDebug }
         congress::rpc_backend: rabbit
         congress::rabbit_userid: {get_param: RabbitUserName}
         congress::rabbit_password: {get_param: RabbitPassword}
index 2bde903..882ba29 100644 (file)
@@ -118,6 +118,16 @@ outputs:
                   template: "%{hiera('cloud_name_NETWORK')}"
                   params:
                     NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+              dnsnames:
+                - str_replace:
+                    template: "%{hiera('cloud_name_NETWORK')}"
+                    params:
+                      NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+                - str_replace:
+                    template:
+                      "%{hiera('fqdn_$NETWORK')}"
+                    params:
+                      $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
               principal:
                 str_replace:
                   template: "mysql/%{hiera('cloud_name_NETWORK')}"
@@ -132,6 +142,9 @@ outputs:
             - service: mysql
               network: {get_param: [ServiceNetMap, MysqlNetwork]}
               type: vip
+            - service: mysql
+              network: {get_param: [ServiceNetMap, MysqlNetwork]}
+              type: node
           - null
       upgrade_tasks:
         - name: Check for galera root password
index df406a8..9567a73 100644 (file)
@@ -52,3 +52,23 @@ outputs:
                   - 26379
       step_config: |
         include ::tripleo::profile::base::database::redis
+      upgrade_tasks:
+        - name: Check if redis is deployed
+          command: systemctl is-enabled redis
+          tags: common
+          ignore_errors: True
+          register: redis_enabled
+        - name: "PreUpgrade step0,validation: Check if redis is running"
+          shell: >
+            /usr/bin/systemctl show 'redis' --property ActiveState |
+            grep '\bactive\b'
+          when: redis_enabled.rc == 0
+          tags: step0,validation
+        - name: Stop redis service
+          tags: step1
+          when: redis_enabled.rc == 0
+          service: name=redis state=stopped
+        - name: Install redis package if it was disabled
+          tags: step3
+          yum: name=redis state=latest
+          when: redis_enabled.rc != 0
index 9b7b47e..7be394b 100644 (file)
@@ -27,24 +27,12 @@ parameters:
                  via parameter_defaults in the resource registry.
     type: json
 
-resources:
-  CeilometerServiceBase:
-    type: ../ceilometer-base.yaml
-    properties:
-      ServiceNetMap: {get_param: ServiceNetMap}
-      DefaultPasswords: {get_param: DefaultPasswords}
-      EndpointMap: {get_param: EndpointMap}
-      RoleName: {get_param: RoleName}
-      RoleParameters: {get_param: RoleParameters}
-
 outputs:
   role_data:
     description: Role data for the disabling Ceilometer Expirer role.
     value:
       service_name: ceilometer_expirer_disabled
-      config_settings:
-        map_merge:
-          - get_attr: [CeilometerServiceBase, role_data, config_settings]
-          - ceilometer::expirer::enable_cron: false
-      step_config: |
-        include ::tripleo::profile::base::ceilometer::expirer
+      upgrade_tasks:
+        - name: Remove ceilometer expirer cron tab on upgrade
+          tags: step1
+          shell: '/usr/bin/crontab -u ceilometer -r'
index 2815174..7812c8e 100644 (file)
@@ -30,6 +30,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  GlanceDebug:
+    default: ''
+    description: Set to True to enable debugging Glance service.
+    type: string
   GlancePassword:
     description: The password for the glance service and db account, used by the glance services.
     type: string
@@ -59,10 +63,6 @@ parameters:
   CephClientUserName:
     default: openstack
     type: string
-  Debug:
-    default: ''
-    description: Set to True to enable debugging on all services.
-    type: string
   GlanceNotifierStrategy:
     description: Strategy to use for Glance notification queue
     type: string
@@ -128,6 +128,7 @@ parameters:
 conditions:
   use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
   glance_workers_unset: {equals : [{get_param: GlanceWorkers}, '']}
+  service_debug_unset: {equals : [{get_param: GlanceDebug}, '']}
 
 resources:
 
@@ -170,7 +171,11 @@ outputs:
             glance::api::enable_v2_api: true
             glance::api::authtoken::password: {get_param: GlancePassword}
             glance::api::enable_proxy_headers_parsing: true
-            glance::api::debug: {get_param: Debug}
+            glance::api::debug:
+              if:
+              - service_debug_unset
+              - {get_param: Debug }
+              - {get_param: GlanceDebug }
             glance::policy::policies: {get_param: GlanceApiPolicies}
             tripleo.glance_api.firewall_rules:
               '112 glance_api':
index d62c349..b4af7e8 100644 (file)
@@ -31,9 +31,13 @@ parameters:
     description: The short name of the Gnocchi indexer backend to use.
     type: string
   MetricProcessingDelay:
-    default: 60
+    default: 30
     description: Delay between processing metrics.
     type: number
+  NumberOfStorageSacks:
+    default: 128
+    description: Number of storage sacks to create.
+    type: number
   GnocchiPassword:
     description: The password for the gnocchi service and db account.
     type: string
@@ -52,6 +56,13 @@ parameters:
     type: string
     default: ''
     description: Set to True to enable debugging on all services.
+  GnocchiDebug:
+    default: ''
+    description: Set to True to enable debugging Gnocchi services.
+    type: string
+
+conditions:
+  service_debug_unset: {equals : [{get_param: GnocchiDebug}, '']}
 
 outputs:
   aux_parameters:
@@ -65,7 +76,11 @@ outputs:
       config_settings:
         #Gnocchi engine
         gnocchi_redis_password: {get_param: RedisPassword}
-        gnocchi::debug: {get_param: Debug}
+        gnocchi::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: GnocchiDebug }
         gnocchi::db::database_connection:
           make_url:
             scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
@@ -76,7 +91,11 @@ outputs:
             query:
               read_default_file: /etc/my.cnf.d/tripleo.cnf
               read_default_group: tripleo
-        gnocchi::db::sync::extra_opts: '--skip-storage'
+        gnocchi::db::sync::extra_opts:
+          str_replace:
+            template: " --sacks-number NUM_SACKS"
+            params:
+              NUM_SACKS: {get_param: NumberOfStorageSacks}
         gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
         gnocchi::storage::swift::swift_user: 'service:gnocchi'
         gnocchi::storage::swift::swift_auth_version: 3
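The new NumberOfStorageSacks parameter is passed to the gnocchi database sync as `--sacks-number`, replacing the previous hard-coded `--skip-storage` option, while MetricProcessingDelay now defaults to 30 seconds. The values in this sketch are illustrative and should be sized to the expected metric volume::

  parameter_defaults:
    NumberOfStorageSacks: 256
    MetricProcessingDelay: 30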
index a71491c..5bdc3b8 100644 (file)
@@ -38,6 +38,10 @@ parameters:
     default: /dev/log
     description: Syslog address where HAproxy will send its log
     type: string
+  HAProxyStatsEnabled:
+    default: true
+    description: Whether or not to enable the HAProxy stats interface.
+    type: boolean
   RedisPassword:
     description: The password for Redis
     type: string
@@ -50,6 +54,11 @@ parameters:
     type: string
     description: Specifies the default CA cert to use if TLS is used for
                  services in the internal network.
+  InternalTLSCRLPEMFile:
+    default: '/etc/pki/CA/crl/overcloud-crl.pem'
+    type: string
+    description: Specifies the default CRL PEM file to use for revocation if
+                 TLS is used for services in the internal network.
 
 resources:
 
@@ -89,6 +98,8 @@ outputs:
             tripleo::haproxy::haproxy_stats_password: {get_param: HAProxyStatsPassword}
             tripleo::haproxy::redis_password: {get_param: RedisPassword}
             tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
+            tripleo::haproxy::crl_file: {get_param: InternalTLSCRLPEMFile}
+            tripleo::haproxy::haproxy_stats: {get_param: HAProxyStatsEnabled}
             tripleo::profile::base::haproxy::certificates_specs:
               map_merge:
                 - get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
index dfd823d..d89fe46 100644 (file)
@@ -8,6 +8,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  HeatDebug:
+    default: ''
+    description: Set to True to enable debugging Heat services.
+    type: string
   RabbitPassword:
     description: The password for RabbitMQ
     type: string
@@ -112,6 +116,9 @@ parameters:
     description: Maximum raw byte size of the Heat API JSON request body.
     type: number
 
+conditions:
+  service_debug_unset: {equals : [{get_param: HeatDebug}, '']}
+
 outputs:
   role_data:
     description: Shared role data for the Heat services.
@@ -122,7 +129,11 @@ outputs:
         heat::rabbit_password: {get_param: RabbitPassword}
         heat::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         heat::rabbit_port: {get_param: RabbitClientPort}
-        heat::debug: {get_param: Debug}
+        heat::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: HeatDebug }
         heat::enable_proxy_headers_parsing: true
         heat::rpc_response_timeout: 600
         # We need this because the default heat policy.json no longer works on TripleO
index 562afe1..1f97b8b 100644 (file)
@@ -14,6 +14,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  HorizonDebug:
+    default: false
+    description: Set to True to enable debugging Horizon service.
+    type: string
   DefaultPasswords:
     default: {}
     type: json
@@ -51,7 +55,7 @@ parameters:
   HorizonSecureCookies:
     description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon
     type: boolean
-    default: true
+    default: false
   MemcachedIPv6:
     default: false
     description: Enable IPv6 features in Memcached.
@@ -62,7 +66,7 @@ parameters:
 
 conditions:
 
-  debug_empty: {equals : [{get_param: Debug}, '']}
+  debug_unset: {equals : [{get_param: Debug}, '']}
 
 outputs:
   role_data:
@@ -85,7 +89,6 @@ outputs:
           horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
           horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
           horizon::vhost_extra_params:
-            add_listen: false
             priority: 10
             access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
             options: ['FollowSymLinks','MultiViews']
@@ -104,9 +107,9 @@ outputs:
           memcached_ipv6: {get_param: MemcachedIPv6}
         -
           if:
-          - debug_empty
-          - {}
-          - horizon::django_debug: {get_param: Debug}
+          - debug_unset
+          - horizon::django_debug: { get_param: HorizonDebug }
+          - horizon::django_debug: { get_param: Debug }
       step_config: |
         include ::tripleo::profile::base::horizon
       # Ansible tasks to handle upgrade
index 945033a..0e8eacf 100644 (file)
@@ -43,8 +43,21 @@ parameters:
       e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
     default: {}
     type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
 
 resources:
+  ApacheServiceBase:
+    type: ./apache.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
+
   IronicBase:
     type: ./ironic-base.yaml
     properties:
@@ -63,6 +76,7 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [IronicBase, role_data, config_settings]
+          - get_attr: [ApacheServiceBase, role_data, config_settings]
           - ironic::api::authtoken::password: {get_param: IronicPassword}
             ironic::api::authtoken::project_name: 'service'
             ironic::api::authtoken::user_domain_name: 'Default'
@@ -80,7 +94,17 @@ outputs:
             ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
             # This is used to build links in responses
             ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+            ironic::api::service_name: 'httpd'
             ironic::policy::policies: {get_param: IronicApiPolicies}
+            ironic::wsgi::apache::bind_host: {get_param: [ServiceNetMap, IronicApiNetwork]}
+            ironic::wsgi::apache::port: {get_param: [EndpointMap, IronicInternal, port]}
+            ironic::wsgi::apache::servername:
+              str_replace:
+                template:
+                  "%{hiera('fqdn_$NETWORK')}"
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, IronicApiNetwork]}
+            ironic::wsgi::apache::ssl: {get_param: EnableInternalTLS}
             tripleo.ironic_api.firewall_rules:
               '133 ironic api':
                 dport:
@@ -106,6 +130,9 @@ outputs:
             - '%'
             - "%{hiera('mysql_bind_host')}"
       upgrade_tasks:
-        - name: Stop ironic_api service
+        - name: Stop ironic_api service (before httpd support)
+          tags: step1
+          service: name=openstack-ironic-api state=stopped enabled=no
+        - name: Stop ironic_api service (running under httpd)
           tags: step1
-          service: name=openstack-ironic-api state=stopped
+          service: name=httpd state=stopped
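
With ironic-api now served by httpd, TLS on the internal network follows the shared Apache service; a hypothetical override (EnableInternalTLS is assumed to be set globally via parameter_defaults) would be:

  parameter_defaults:
    EnableInternalTLS: true   # consumed by ApacheServiceBase and ironic::wsgi::apache::ssl above
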
index da48516..41d6ced 100644 (file)
@@ -30,6 +30,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  IronicDebug:
+    default: ''
+    description: Set to True to enable debugging the Ironic services.
+    type: string
   IronicPassword:
     description: The password for the Ironic service and db account, used by the Ironic services
     type: string
@@ -53,6 +57,9 @@ parameters:
         an SSL connection to the RabbitMQ host.
     type: string
 
+conditions:
+  service_debug_unset: {equals : [{get_param: IronicDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Ironic role.
@@ -69,7 +76,11 @@ outputs:
             query:
               read_default_file: /etc/my.cnf.d/tripleo.cnf
               read_default_group: tripleo
-        ironic::debug: {get_param: Debug}
+        ironic::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: IronicDebug }
         ironic::rabbit_userid: {get_param: RabbitUserName}
         ironic::rabbit_password: {get_param: RabbitPassword}
         ironic::rabbit_port: {get_param: RabbitClientPort}
index b167671..0e8c8e1 100644 (file)
@@ -164,6 +164,12 @@ outputs:
             ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
             ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
             # Credentials to access other services
+            ironic::cinder::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::cinder::username: 'ironic'
+            ironic::cinder::password: {get_param: IronicPassword}
+            ironic::cinder::project_name: 'service'
+            ironic::cinder::user_domain_name: 'Default'
+            ironic::cinder::project_domain_name: 'Default'
             ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             ironic::glance::username: 'ironic'
             ironic::glance::password: {get_param: IronicPassword}
diff --git a/puppet/services/ironic-inspector.yaml b/puppet/services/ironic-inspector.yaml
new file mode 100644 (file)
index 0000000..e8537a2
--- /dev/null
@@ -0,0 +1,151 @@
+heat_template_version: ocata
+
+description: >
+  OpenStack Ironic Inspector configured with Puppet (EXPERIMENTAL)
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MonitoringSubscriptionIronicInspector:
+    default: 'overcloud-ironic-inspector'
+    type: string
+  KeystoneRegion:
+    type: string
+    default: 'regionOne'
+    description: Keystone region for endpoint
+  Debug:
+    default: ''
+    description: Set to True to enable debugging on all services.
+    type: string
+  IronicInspectorInterface:
+    default: br-ex
+    description: |
+      Network interface on which inspection dnsmasq will listen. Should allow
+      access to untagged traffic from nodes booted for inspection. The default
+      value only makes sense if you don't modify any networking configuration.
+    type: string
+  IronicInspectorIPXEEnabled:
+    default: true
+    description: Whether to use iPXE for inspection.
+    type: boolean
+  IronicInspectorIpRange:
+    description: |
+        Temporary IP range that will be given to nodes during the inspection
+        process. This should not overlap with any range that Neutron's DHCP
+        gives away, but it has to be routable back to the ironic-inspector API.
+        This option has no meaningful default and is therefore required.
+    type: string
+  IronicInspectorUseSwift:
+    default: true
+    description: Whether to use Swift for storing introspection data.
+    type: boolean
+  IronicIPXEPort:
+    default: 8088
+    description: Port to use for serving images when iPXE is used.
+    type: string
+  IronicPassword:
+    description: The password for the Ironic service and db account, used by the Ironic services
+    type: string
+    hidden: true
+
+conditions:
+  enable_ipxe: {equals : [{get_param: IronicInspectorIPXEEnabled}, true]}
+  use_swift: {equals : [{get_param: IronicInspectorUseSwift}, true]}
+
+outputs:
+  role_data:
+    description: Role data for the Ironic Inspector role.
+    value:
+      service_name: ironic_inspector
+      monitoring_subscription: {get_param: MonitoringSubscriptionIronicInspector}
+      config_settings:
+        map_merge:
+          - ironic::inspector::listen_address: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+            ironic::inspector::dnsmasq_local_ip: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+            ironic::inspector::dnsmasq_ip_range: {get_param: IronicInspectorIpRange}
+            ironic::inspector::dnsmasq_interface: {get_param: IronicInspectorInterface}
+            ironic::inspector::debug: {get_param: Debug}
+            ironic::inspector::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+            ironic::inspector::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::inspector::authtoken::username: 'ironic'
+            ironic::inspector::authtoken::password: {get_param: IronicPassword}
+            ironic::inspector::authtoken::project_name: 'service'
+            ironic::inspector::authtoken::user_domain_name: 'Default'
+            ironic::inspector::authtoken::project_domain_name: 'Default'
+            tripleo.ironic_inspector.firewall_rules:
+              '137 ironic-inspector':
+                dport:
+                  - 5050
+            ironic::inspector::ironic_username: 'ironic'
+            ironic::inspector::ironic_password: {get_param: IronicPassword}
+            ironic::inspector::ironic_tenant_name: 'service'
+            ironic::inspector::ironic_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::inspector::ironic_max_retries: 6
+            ironic::inspector::ironic_retry_interval: 10
+            ironic::inspector::ironic_user_domain_name: 'Default'
+            ironic::inspector::ironic_project_domain_name: 'Default'
+            ironic::inspector::http_port: {get_param: IronicIPXEPort}
+            ironic::inspector::db::database_connection:
+              list_join:
+                - ''
+                - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+                  - '://ironic-inspector:'
+                  - {get_param: IronicPassword}
+                  - '@'
+                  - {get_param: [EndpointMap, MysqlInternal, host]}
+                  - '/ironic-inspector'
+                  - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          -
+            if:
+            - enable_ipxe
+            - ironic::inspector::pxe_transfer_protocol: 'http'
+            - {}
+          -
+            if:
+            - use_swift
+            - ironic::inspector::store_data: 'swift'
+              ironic::inspector::swift_username: 'ironic'
+              ironic::inspector::swift_password: {get_param: IronicPassword}
+              ironic::inspector::swift_tenant_name: 'service'
+              ironic::inspector::swift_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+              ironic::inspector::swift_user_domain_name: 'Default'
+              ironic::inspector::swift_project_domain_name: 'Default'
+            - {}
+      step_config: |
+        include ::tripleo::profile::base::ironic_inspector
+      service_config_settings:
+        keystone:
+          ironic::keystone::auth_inspector::tenant: 'service'
+          ironic::keystone::auth_inspector::public_url: {get_param: [EndpointMap, IronicInspectorPublic, uri]}
+          ironic::keystone::auth_inspector::internal_url: {get_param: [EndpointMap, IronicInspectorInternal, uri]}
+          ironic::keystone::auth_inspector::admin_url: {get_param: [EndpointMap, IronicInspectorAdmin, uri]}
+          ironic::keystone::auth_inspector::password: {get_param: IronicPassword}
+          ironic::keystone::auth_inspector::region: {get_param: KeystoneRegion}
+        mysql:
+          ironic::inspector::db::mysql::password: {get_param: IronicPassword}
+          ironic::inspector::db::mysql::user: ironic-inspector
+          ironic::inspector::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          ironic::inspector::db::mysql::dbname: ironic-inspector
+          ironic::inspector::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index 7262e47..60d194b 100644 (file)
@@ -63,6 +63,10 @@ parameters:
   Debug:
     type: string
     default: ''
+  KeystoneDebug:
+    default: ''
+    description: Set to True to enable debugging the Keystone service.
+    type: string
   AdminEmail:
     default: 'admin@example.com'
     description: The email for the keystone admin account.
@@ -109,10 +113,27 @@ parameters:
     description: The second Keystone credential key. Must be a valid key.
   KeystoneFernetKey0:
     type: string
-    description: The first Keystone fernet key. Must be a valid key.
+    default: ''
+    description: (DEPRECATED) The first Keystone fernet key. Must be a valid key.
   KeystoneFernetKey1:
     type: string
-    description: The second Keystone fernet key. Must be a valid key.
+    default: ''
+    description: (DEPRECATED) The second Keystone fernet key. Must be a valid key.
+  KeystoneFernetKeys:
+    type: json
+    description: Mapping containing keystone's fernet keys and their paths.
+  KeystoneFernetMaxActiveKeys:
+    type: number
+    description: The maximum active keys in the keystone fernet key repository.
+    default: 5
+  ManageKeystoneFernetKeys:
+    type: boolean
+    default: true
+    description: Whether TripleO should manage the keystone fernet keys or not.
+                 If set to true, the fernet keys will get the values from the
+                 saved keys repository in mistral (the KeystoneFernetKeys
+                 variable). If set to false, only the stack creation
+                 initializes the keys, but subsequent updates won't touch them.
   KeystoneLoggingSource:
     type: json
     default:
@@ -183,6 +204,17 @@ parameters:
     default: {}
     hidden: true
 
+parameter_groups:
+- label: deprecated
+  description: |
+   The following parameters are deprecated and will be removed. They should not
+   be relied on for new deployments. If you have concerns regarding deprecated
+   parameters, please contact the TripleO development team on IRC or the
+   OpenStack mailing list.
+  parameters:
+  - KeystoneFernetKey0
+  - KeystoneFernetKey1
+
 resources:
 
   ApacheServiceBase:
@@ -198,6 +230,7 @@ resources:
 conditions:
   keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
   keystone_ldap_domain_enabled: {equals: [{get_param: KeystoneLDAPDomainEnable}, True]}
+  service_debug_unset: {equals : [{get_param: KeystoneDebug}, '']}
 
 outputs:
   role_data:
@@ -229,6 +262,7 @@ outputs:
             keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
             keystone::token_provider: {get_param: KeystoneTokenProvider}
             keystone::enable_fernet_setup: {if: [keystone_fernet_tokens, true, false]}
+            keystone::fernet_max_active_keys: {get_param: KeystoneFernetMaxActiveKeys}
             keystone::enable_proxy_headers_parsing: true
             keystone::enable_credential_setup: true
             keystone::credential_keys:
@@ -236,13 +270,13 @@ outputs:
                 content: {get_param: KeystoneCredential0}
               '/etc/keystone/credential-keys/1':
                 content: {get_param: KeystoneCredential1}
-            keystone::fernet_keys:
-              '/etc/keystone/fernet-keys/0':
-                content: {get_param: KeystoneFernetKey0}
-              '/etc/keystone/fernet-keys/1':
-                content: {get_param: KeystoneFernetKey1}
-            keystone::fernet_replace_keys: false
-            keystone::debug: {get_param: Debug}
+            keystone::fernet_keys: {get_param: KeystoneFernetKeys}
+            keystone::fernet_replace_keys: {get_param: ManageKeystoneFernetKeys}
+            keystone::debug:
+              if:
+              - service_debug_unset
+              - {get_param: Debug }
+              - {get_param: KeystoneDebug }
             keystone::rabbit_userid: {get_param: RabbitUserName}
             keystone::rabbit_password: {get_param: RabbitPassword}
             keystone::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
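
KeystoneFernetKeys replaces the two fixed key parameters; judging from the keystone::fernet_keys structure it feeds above, the mapping would look roughly like this (placeholder key material):

  parameter_defaults:
    KeystoneFernetMaxActiveKeys: 5
    ManageKeystoneFernetKeys: true
    KeystoneFernetKeys:
      /etc/keystone/fernet-keys/0:
        content: <fernet key material>
      /etc/keystone/fernet-keys/1:
        content: <fernet key material>
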
index a299fff..d0ee212 100644 (file)
@@ -30,6 +30,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  ManilaDebug:
+    default: ''
+    description: Set to True to enable debugging the Manila services.
+    type: string
   RabbitPassword:
     description: The password for RabbitMQ
     type: string
@@ -53,6 +57,9 @@ parameters:
     type: string
     hidden: true
 
+conditions:
+  service_debug_unset: {equals : [{get_param: ManilaDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Manila Base service.
@@ -63,7 +70,11 @@ outputs:
         manila::rabbit_password: {get_param: RabbitPassword}
         manila::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         manila::rabbit_port: {get_param: RabbitClientPort}
-        manila::debug: {get_param: Debug}
+        manila::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: ManilaDebug }
         manila::db::database_db_max_retries: -1
         manila::db::database_max_retries: -1
         manila::sql_connection:
index 2e70865..8b3655d 100644 (file)
@@ -31,6 +31,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  MistralDebug:
+    default: ''
+    description: Set to True to enable debugging the Mistral services.
+    type: string
   RabbitPassword:
     description: The password for RabbitMQ
     type: string
@@ -58,6 +62,9 @@ parameters:
     default: 'regionOne'
     description: Keystone region for endpoint
 
+conditions:
+  service_debug_unset: {equals : [{get_param: MistralDebug}, '']}
+
 outputs:
   role_data:
     description: Shared role data for the Mistral services.
@@ -78,7 +85,11 @@ outputs:
         mistral::rabbit_password: {get_param: RabbitPassword}
         mistral::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         mistral::rabbit_port: {get_param: RabbitClientPort}
-        mistral::debug: {get_param: Debug}
+        mistral::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: MistralDebug }
         mistral::keystone_password: {get_param: MistralPassword}
         mistral::keystone_tenant: 'service'
         mistral::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
index 57581b5..3c7518b 100644 (file)
@@ -50,6 +50,10 @@ parameters:
     type: string
     default: ''
     description: Set to True to enable debugging on all services.
+  NeutronDebug:
+    default: ''
+    description: Set to True to enable debugging the Neutron services.
+    type: string
   EnableConfigPurge:
     type: boolean
     default: false
@@ -90,6 +94,7 @@ parameters:
 
 conditions:
   dhcp_agents_zero: {equals : [{get_param: NeutronDhcpAgentsPerNetwork}, 0]}
+  service_debug_unset: {equals : [{get_param: NeutronDebug}, '']}
 
 outputs:
   role_data:
@@ -104,7 +109,11 @@ outputs:
             neutron::rabbit_port: {get_param: RabbitClientPort}
             neutron::core_plugin: {get_param: NeutronCorePlugin}
             neutron::service_plugins: {get_param: NeutronServicePlugins}
-            neutron::debug: {get_param: Debug}
+            neutron::debug:
+              if:
+              - service_debug_unset
+              - {get_param: Debug }
+              - {get_param: NeutronDebug }
             neutron::purge_config: {get_param: EnableConfigPurge}
             neutron::allow_overlapping_ips: true
             neutron::dns_domain: {get_param: NeutronDnsDomain}
diff --git a/puppet/services/neutron-linuxbridge-agent.yaml b/puppet/services/neutron-linuxbridge-agent.yaml
new file mode 100644 (file)
index 0000000..f432405
--- /dev/null
@@ -0,0 +1,83 @@
+heat_template_version: ocata
+
+description: >
+  OpenStack Neutron Linuxbridge agent configured with Puppet.
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  PhysicalInterfaceMapping:
+    description: List of <physical_network>:<physical_interface> tuples
+                 mapping physical network names to the agent's node-specific
+                 physical network interfaces. Defaults to an empty list.
+    type: comma_delimited_list
+    default: ''
+  NeutronLinuxbridgeFirewallDriver:
+    default: ''
+    description: Configure the classname of the firewall driver to use for
+                 implementing security groups. Possible values depend on
+                 system configuration. The default value of an empty string
+                 will result in a default supported configuration.
+    type: string
+  NeutronEnableL2Pop:
+    type: string
+    description: Enable/disable the L2 population feature in the Neutron agents.
+    default: 'False'
+  NeutronTunnelTypes:
+    default: 'vxlan'
+    description: The tunnel types for the Neutron tenant network.
+    type: comma_delimited_list
+
+conditions:
+  no_firewall_driver: {equals : [{get_param: NeutronLinuxbridgeFirewallDriver}, '']}
+
+resources:
+
+  NeutronBase:
+    type: ./neutron-base.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron Linuxbridge agent service.
+    value:
+      service_name: neutron_linuxbridge_agent
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronBase, role_data, config_settings]
+          - neutron::agents::ml2::linuxbridge::physical_interface_mappings: {get_param: PhysicalInterfaceMapping}
+            neutron::agents::ml2::linuxbridge::l2_population: {get_param: NeutronEnableL2Pop}
+            neutron::agents::ml2::linuxbridge::tunnel_types: {get_param: NeutronTunnelTypes}
+            neutron::agents::ml2::linuxbridge::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+            neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.BridgeInterfaceDriver'
+            neutron::agents::dhcp::dhcp_driver: 'neutron.agent.linux.dhcp.Dnsmasq'
+          -
+            if:
+            - no_firewall_driver
+            - {}
+            - neutron::agents::ml2::linuxbridge::firewall_driver: {get_param: NeutronLinuxbridgeFirewallDriver}
+      step_config: |
+        include ::tripleo::profile::base::neutron::linuxbridge
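
A hypothetical deployer snippet for the new Linuxbridge agent service (interface and driver names are illustrative only):

  parameter_defaults:
    PhysicalInterfaceMapping: datacentre:eth1      # <physical_network>:<physical_interface>
    NeutronLinuxbridgeFirewallDriver: iptables     # leave '' to keep the default driver
    NeutronEnableL2Pop: 'True'
    NeutronTunnelTypes: vxlan
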
index 76d5c26..4493721 100644 (file)
@@ -92,8 +92,12 @@ resources:
       RoleName: {get_param: RoleName}
       RoleParameters: {get_param: RoleParameters}
 
-  OpenVswitchUpgrade:
-    type: ./openvswitch-upgrade.yaml
+  Ovs:
+    type: ./openvswitch.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
 
 outputs:
   role_data:
@@ -138,7 +142,7 @@ outputs:
           expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
           data:
             ovs_upgrade:
-              get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+              get_attr: [Ovs, role_data, upgrade_tasks]
             neutron_ovs_upgrade:
               - name: Check if neutron_ovs_agent is deployed
                 command: systemctl is-enabled neutron-openvswitch-agent
index fec9e2a..da7a4d6 100644 (file)
@@ -26,32 +26,6 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  HostCpusList:
-    default: "'0'"
-    description: List of cores to be used for host process
-    type: string
-    constraints:
-      - allowed_pattern: "'[0-9,-]+'"
-  NeutronDpdkCoreList:
-    default: "''"
-    description: List of cores to be used for DPDK Poll Mode Driver
-    type: string
-    constraints:
-      - allowed_pattern: "'[0-9,-]*'"
-  NeutronDpdkMemoryChannels:
-    default: ""
-    description: Number of memory channels to be used for DPDK
-    type: string
-    constraints:
-      - allowed_pattern: "[0-9]*"
-  NeutronDpdkSocketMemory:
-    default: ""
-    description: Memory allocated for each socket
-    type: string
-  NeutronDpdkDriverType:
-    default: "vfio-pci"
-    description: DPDK Driver type
-    type: string
   # The parameters below have to be set in the neutron agent only for compute nodes.
   # As of now there is no other use case for these parameters except DPDK.
   # They should be moved to a compute-only OVS agent if any other use cases arise.
@@ -75,9 +49,6 @@ resources:
       RoleName: {get_param: RoleName}
       RoleParameters: {get_param: RoleParameters}
 
-  OpenVswitchUpgrade:
-    type: ./openvswitch-upgrade.yaml
-
   # Merging role-specific parameters (RoleParameters) with the default parameters.
   # RoleParameters will have the precedence over the default parameters.
   RoleParametersValue:
@@ -89,20 +60,19 @@ resources:
           - map_replace:
             - neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
               neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
-              vswitch::dpdk::driver_type: NeutronDpdkDriverType
-              vswitch::dpdk::host_core_list: HostCpusList
-              vswitch::dpdk::pmd_core_list: NeutronDpdkCoreList
-              vswitch::dpdk::memory_channels: NeutronDpdkMemoryChannels
-              vswitch::dpdk::socket_mem: NeutronDpdkSocketMemory
             - values: {get_param: [RoleParameters]}
           - values:
               NeutronDatapathType: {get_param: NeutronDatapathType}
               NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
-              NeutronDpdkDriverType: {get_param: NeutronDpdkDriverType}
-              HostCpusList: {get_param: HostCpusList}
-              NeutronDpdkCoreList: {get_param: NeutronDpdkCoreList}
-              NeutronDpdkMemoryChannels: {get_param: NeutronDpdkMemoryChannels}
-              NeutronDpdkSocketMemory: {get_param: NeutronDpdkSocketMemory}
+
+  Ovs:
+    type: ./openvswitch.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -116,7 +86,8 @@ outputs:
             - keys:
                 tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
           - neutron::agents::ml2::ovs::enable_dpdk: true
+          - get_attr: [Ovs, role_data, config_settings]
           - get_attr: [RoleParametersValue, value]
       step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
       upgrade_tasks:
-        get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+        get_attr: [Ovs, role_data, upgrade_tasks]
index c124d1e..090640e 100644 (file)
@@ -65,6 +65,24 @@ resources:
       RoleName: {get_param: RoleName}
       RoleParameters: {get_param: RoleParameters}
 
+  # Merging role-specific parameters (RoleParameters) with the default parameters.
+  # RoleParameters will have the precedence over the default parameters.
+  RoleParametersValue:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+        map_replace:
+          - map_replace:
+            - neutron::agents::ml2::sriov::physical_device_mappings: NeutronPhysicalDevMappings
+              neutron::agents::ml2::sriov::exclude_devices: NeutronExcludeDevices
+              tripleo::host::sriov::number_of_vfs: NeutronSriovNumVFs
+            - values: {get_param: [RoleParameters]}
+          - values:
+              NeutronPhysicalDevMappings: {get_param: NeutronPhysicalDevMappings}
+              NeutronExcludeDevices: {get_param: NeutronExcludeDevices}
+              NeutronSriovNumVFs: {get_param: NeutronSriovNumVFs}
+
 outputs:
   role_data:
     description: Role data for the Neutron SR-IOV nic agent service.
@@ -73,8 +91,6 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
-          - neutron::agents::ml2::sriov::physical_device_mappings: {get_param: NeutronPhysicalDevMappings}
-            neutron::agents::ml2::sriov::exclude_devices: {get_param: NeutronExcludeDevices}
-            tripleo::host::sriov::number_of_vfs: {get_param: NeutronSriovNumVFs}
+          - get_attr: [RoleParametersValue, value]
       step_config: |
         include ::tripleo::profile::base::neutron::sriov
index 835edf0..7e741d8 100644 (file)
@@ -81,17 +81,15 @@ conditions:
   nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
 
 resources:
-  # Temporarily disable Nova API deployed in WSGI
-  # https://bugs.launchpad.net/nova/+bug/1661360
-  # ApacheServiceBase:
-  #   type: ./apache.yaml
-  #   properties:
-  #     ServiceNetMap: {get_param: ServiceNetMap}
-  #     DefaultPasswords: {get_param: DefaultPasswords}
-  #     EndpointMap: {get_param: EndpointMap}
-  #     RoleName: {get_param: RoleName}
-  #     RoleParameters: {get_param: RoleParameters}
-  #     EnableInternalTLS: {get_param: EnableInternalTLS}
+  ApacheServiceBase:
+    type: ./apache.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
 
   NovaBase:
     type: ./nova-base.yaml
@@ -114,9 +112,7 @@ outputs:
       config_settings:
         map_merge:
         - get_attr: [NovaBase, role_data, config_settings]
-        # Temporarily disable Nova API deployed in WSGI
-        # https://bugs.launchpad.net/nova/+bug/1661360
-        # - get_attr: [ApacheServiceBase, role_data, config_settings]
+        - get_attr: [ApacheServiceBase, role_data, config_settings]
         - nova::cron::archive_deleted_rows::hour: '*/12'
           nova::cron::archive_deleted_rows::destination: '/dev/null'
           tripleo.nova_api.firewall_rules:
@@ -143,23 +139,21 @@ outputs:
                 "%{hiera('fqdn_$NETWORK')}"
               params:
                 $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
-          # Temporarily disable Nova API deployed in WSGI
-          # https://bugs.launchpad.net/nova/+bug/1661360
-          nova_wsgi_enabled: false
-          # nova::api::service_name: 'httpd'
-          # nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+          nova_wsgi_enabled: true
+          nova::api::service_name: 'httpd'
+          nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
           # NOTE: bind IP is found in Heat replacing the network name with the local node IP
           # for the given network; replacement examples (eg. for internal_api):
           # internal_api -> IP
           # internal_api_uri -> [IP]
           # internal_api_subnet - > IP/CIDR
-          nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
-          nova::wsgi::apache_api::servername:
-            str_replace:
-              template:
-                "%{hiera('fqdn_$NETWORK')}"
-              params:
-                $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+          nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+          nova::wsgi::apache_api::servername:
+            str_replace:
+              template:
+                "%{hiera('fqdn_$NETWORK')}"
+              params:
+                $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
           nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
           nova::api::instance_name_template: {get_param: InstanceNameTemplate}
           nova_enable_db_purge: {get_param: NovaEnableDBPurge}
@@ -169,9 +163,7 @@ outputs:
           - nova_workers_zero
           - {}
           - nova::api::osapi_compute_workers: {get_param: NovaWorkers}
-          # Temporarily disable Nova API deployed in WSGI
-          # https://bugs.launchpad.net/nova/+bug/1661360
-          # nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
+            nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
       step_config: |
         include tripleo::profile::base::nova::api
       service_config_settings:
@@ -199,87 +191,91 @@ outputs:
           nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
           nova::keystone::auth::password: {get_param: NovaPassword}
           nova::keystone::auth::region: {get_param: KeystoneRegion}
-      # Temporarily disable Nova API deployed in WSGI
-      # https://bugs.launchpad.net/nova/+bug/1661360
-      # metadata_settings:
-      #   get_attr: [ApacheServiceBase, role_data, metadata_settings]
+      metadata_settings:
+        get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: get bootstrap nodeid
-          tags: common
-          command: hiera bootstrap_nodeid
-          register: bootstrap_node
-        - name: set is_bootstrap_node fact
-          tags: common
-          set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
-        - name: Extra migration for nova tripleo/+bug/1656791
-          tags: step0,pre-upgrade
-          when: is_bootstrap_node
-          command: nova-manage db online_data_migrations
-        - name: Stop and disable nova_api service (pre-upgrade not under httpd)
-          tags: step2
-          service: name=openstack-nova-api state=stopped enabled=no
-        - name: Create puppet manifest to set transport_url in nova.conf
-          tags: step5
-          when: is_bootstrap_node
-          copy:
-            dest: /root/nova-api_upgrade_manifest.pp
-            mode: 0600
-            content: >
-              $transport_url = os_transport_url({
-                'transport' => hiera('messaging_service_name', 'rabbit'),
-                'hosts'     => any2array(hiera('rabbitmq_node_names', undef)),
-                'port'      => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
-                'username'  => hiera('nova::rabbit_userid', 'guest'),
-                'password'  => hiera('nova::rabbit_password'),
-                'ssl'       => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
-              })
-              oslo::messaging::default { 'nova_config':
-                transport_url => $transport_url
-              }
-        - name: Run puppet apply to set tranport_url in nova.conf
-          tags: step5
-          when: is_bootstrap_node
-          command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
-          register: puppet_apply_nova_api_upgrade
-          failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
-          changed_when: puppet_apply_nova_api_upgrade.rc == 2
-        - name: Setup cell_v2 (map cell0)
-          tags: step5
-          when: is_bootstrap_node
-          shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
-        - name: Setup cell_v2 (create default cell)
-          tags: step5
-          when: is_bootstrap_node
-          # (owalsh) puppet-nova expects the cell name 'default'
-          # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
-          shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
-          register: nova_api_create_cell
-          failed_when: nova_api_create_cell.rc not in [0,2]
-          changed_when: nova_api_create_cell.rc == 0
-        - name: Setup cell_v2 (sync nova/cell DB)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage db sync
-          async: {get_param: NovaDbSyncTimeout}
-          poll: 10
-        - name: Setup cell_v2 (get cell uuid)
-          tags: step5
-          when: is_bootstrap_node
-          shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
-          register: nova_api_cell_uuid
-        - name: Setup cell_v2 (migrate hosts)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
-        - name: Setup cell_v2 (migrate instances)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
-        - name: Sync nova_api DB
-          tags: step5
-          command: nova-manage api_db sync
-          when: is_bootstrap_node
-        - name: Online data migration for nova
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage db online_data_migrations
+        yaql:
+          expression: $.data.apache_upgrade + $.data.nova_api_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            nova_api_upgrade:
+              - name: get bootstrap nodeid
+                tags: common
+                command: hiera bootstrap_nodeid
+                register: bootstrap_node
+              - name: set is_bootstrap_node fact
+                tags: common
+                set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+              - name: Extra migration for nova tripleo/+bug/1656791
+                tags: step0,pre-upgrade
+                when: is_bootstrap_node
+                command: nova-manage db online_data_migrations
+              - name: Stop and disable nova_api service (pre-upgrade not under httpd)
+                tags: step2
+                service: name=openstack-nova-api state=stopped enabled=no
+              - name: Create puppet manifest to set transport_url in nova.conf
+                tags: step5
+                when: is_bootstrap_node
+                copy:
+                  dest: /root/nova-api_upgrade_manifest.pp
+                  mode: 0600
+                  content: >
+                    $transport_url = os_transport_url({
+                      'transport' => hiera('messaging_service_name', 'rabbit'),
+                      'hosts'     => any2array(hiera('rabbitmq_node_names', undef)),
+                      'port'      => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
+                      'username'  => hiera('nova::rabbit_userid', 'guest'),
+                      'password'  => hiera('nova::rabbit_password'),
+                      'ssl'       => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
+                    })
+                    oslo::messaging::default { 'nova_config':
+                      transport_url => $transport_url
+                    }
+              - name: Run puppet apply to set transport_url in nova.conf
+                tags: step5
+                when: is_bootstrap_node
+                command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+                register: puppet_apply_nova_api_upgrade
+                failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
+                changed_when: puppet_apply_nova_api_upgrade.rc == 2
+              - name: Setup cell_v2 (map cell0)
+                tags: step5
+                when: is_bootstrap_node
+                shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
+              - name: Setup cell_v2 (create default cell)
+                tags: step5
+                when: is_bootstrap_node
+                # (owalsh) puppet-nova expects the cell name 'default'
+                # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
+                shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
+                register: nova_api_create_cell
+                failed_when: nova_api_create_cell.rc not in [0,2]
+                changed_when: nova_api_create_cell.rc == 0
+              - name: Setup cell_v2 (sync nova/cell DB)
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage db sync
+                async: {get_param: NovaDbSyncTimeout}
+                poll: 10
+              - name: Setup cell_v2 (get cell uuid)
+                tags: step5
+                when: is_bootstrap_node
+                shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
+                register: nova_api_cell_uuid
+              - name: Setup cell_v2 (migrate hosts)
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
+              - name: Setup cell_v2 (migrate instances)
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
+              - name: Sync nova_api DB
+                tags: step5
+                command: nova-manage api_db sync
+                when: is_bootstrap_node
+              - name: Online data migration for nova
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage db online_data_migrations
index ea21af8..ea58493 100644 (file)
@@ -68,6 +68,10 @@ parameters:
     type: string
     default: ''
     description: Set to True to enable debugging on all services.
+  NovaDebug:
+    default: ''
+    description: Set to True to enable debugging the Nova services.
+    type: string
   EnableConfigPurge:
     type: boolean
     default: false
@@ -136,6 +140,7 @@ parameters:
 conditions:
 
   compute_upgrade_level_empty: {equals : [{get_param: UpgradeLevelNovaCompute}, '']}
+  service_debug_unset: {equals : [{get_param: NovaDebug}, '']}
 
 outputs:
   role_data:
@@ -193,7 +198,11 @@ outputs:
               query:
                 read_default_file: /etc/my.cnf.d/tripleo.cnf
                 read_default_group: tripleo
-          nova::debug: {get_param: Debug}
+          nova::debug:
+            if:
+            - service_debug_unset
+            - {get_param: Debug }
+            - {get_param: NovaDebug }
           nova::purge_config: {get_param: EnableConfigPurge}
           nova::network::neutron::neutron_project_name: 'service'
           nova::network::neutron::neutron_username: 'neutron'
index e39e997..68a71e4 100644 (file)
@@ -105,6 +105,22 @@ resources:
       RoleName: {get_param: RoleName}
       RoleParameters: {get_param: RoleParameters}
 
+  # Merging role-specific parameters (RoleParameters) with the default parameters.
+  # RoleParameters will have the precedence over the default parameters.
+  RoleParametersValue:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+        map_replace:
+          - map_replace:
+            - nova::compute::vcpu_pin_set: NovaVcpuPinSet
+              nova::compute::reserved_host_memory: NovaReservedHostMemory
+            - values: {get_param: [RoleParameters]}
+          - values:
+              NovaVcpuPinSet: {get_param: NovaVcpuPinSet}
+              NovaReservedHostMemory: {get_param: NovaReservedHostMemory}
+
 outputs:
   role_data:
     description: Role data for the Nova Compute service.
@@ -117,14 +133,18 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [NovaBase, role_data, config_settings]
+          - get_attr: [RoleParametersValue, value]
           - nova::compute::libvirt::manage_libvirt_services: false
             nova::compute::pci_passthrough:
               str_replace:
                 template: "JSON_PARAM"
                 params:
-                  JSON_PARAM: {get_param: NovaPCIPassthrough}
-            nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
-            nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
+                  map_replace:
+                    - map_replace:
+                      - JSON_PARAM: NovaPCIPassthrough
+                      - values: {get_param: [RoleParameters]}
+                    - values:
+                        NovaPCIPassthrough: {get_param: NovaPCIPassthrough}
             # we manage migration in nova common puppet profile
             nova::compute::libvirt::migration_support: false
             tripleo::profile::base::nova::manage_migration: true
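
The map_replace pattern above lets a role-specific value take precedence over the global parameter; assuming the usual <RoleName>Parameters convention is what populates RoleParameters, a sketch (role name and values are hypothetical):

  parameter_defaults:
    NovaReservedHostMemory: 4096        # global default
    ComputeOvsDpdkParameters:           # hypothetical role; overrides the globals for that role only
      NovaVcpuPinSet: "4-23"
      NovaReservedHostMemory: 8192
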
index 5da6d43..72a1fce 100644 (file)
@@ -45,6 +45,14 @@ parameters:
     default:
       tag: openstack.nova.scheduler
       path: /var/log/nova/nova-scheduler.log
+  NovaSchedulerDiscoverHostsInCellsInterval:
+    type: number
+    default: -1
+    description: >
+      This value controls how often (in seconds) the scheduler should
+      attempt to discover new hosts that have been added to cells.
+      The default value of -1 disables the periodic task completely.
+      It is recommended to set this parameter for deployments using Ironic.
 
 resources:
   NovaBase:
@@ -71,6 +79,7 @@ outputs:
           - nova::ram_allocation_ratio: '1.0'
             nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters}
             nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
+            nova::scheduler::discover_hosts_in_cells_interval: {get_param: NovaSchedulerDiscoverHostsInCellsInterval}
       step_config: |
         include tripleo::profile::base::nova::scheduler
       upgrade_tasks:
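
A hypothetical override for Ironic-based deployments, where the periodic host discovery is worth enabling:

  parameter_defaults:
    NovaSchedulerDiscoverHostsInCellsInterval: 120   # seconds; the default of -1 keeps the task disabled
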
index 19dc5b4..0809b3e 100644 (file)
@@ -30,6 +30,10 @@ parameters:
     type: string
     default: ''
     description: Set to True to enable debugging on all services.
+  OctaviaDebug:
+    default: ''
+    description: Set to True to enable debugging the Octavia services.
+    type: string
   EnableConfigPurge:
     type: boolean
     default: false
@@ -55,13 +59,20 @@ parameters:
     description: Set rabbit subscriber port, change this if using SSL
     type: number
 
+conditions:
+  service_debug_unset: {equals : [{get_param: OctaviaDebug}, '']}
+
 outputs:
   role_data:
     description: Base role data for Octavia services
     value:
        service_name: octavia_base
        config_settings:
-         octavia::debug: {get_param: Debug}
+         octavia::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: OctaviaDebug }
          octavia::purge_config: {get_param: EnableConfigPurge}
          octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
          octavia::rabbit_userid: {get_param: RabbitUserName}
index 0d859be..1a8754a 100644 (file)
@@ -57,8 +57,14 @@ parameters:
     type: json
 
 resources:
-  OpenVswitchUpgrade:
-    type: ./openvswitch-upgrade.yaml
+  Ovs:
+    type: ./openvswitch.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -66,19 +72,21 @@ outputs:
     value:
       service_name: opendaylight_ovs
       config_settings:
-        opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
-        opendaylight::username: {get_param: OpenDaylightUsername}
-        opendaylight::password: {get_param: OpenDaylightPassword}
-        opendaylight_check_url: {get_param: OpenDaylightCheckURL}
-        opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
-        neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
-        neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
-        tripleo.opendaylight_ovs.firewall_rules:
-          '118 neutron vxlan networks':
-             proto: 'udp'
-             dport: 4789
-          '136 neutron gre networks':
-             proto: 'gre'
+        map_merge:
+          - opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+            opendaylight::username: {get_param: OpenDaylightUsername}
+            opendaylight::password: {get_param: OpenDaylightPassword}
+            opendaylight_check_url: {get_param: OpenDaylightCheckURL}
+            opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
+            neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+            neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
+            tripleo.opendaylight_ovs.firewall_rules:
+              '118 neutron vxlan networks':
+               proto: 'udp'
+               dport: 4789
+              '136 neutron gre networks':
+               proto: 'gre'
+          - get_attr: [Ovs, role_data, config_settings]
       step_config: |
         include tripleo::profile::base::neutron::plugins::ovs::opendaylight
       upgrade_tasks:
@@ -86,7 +94,7 @@ outputs:
           expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
           data:
             ovs_upgrade:
-              get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+              get_attr: [Ovs, role_data, upgrade_tasks]
             opendaylight_upgrade:
               - name: Check if openvswitch is deployed
                 command: systemctl is-enabled openvswitch
diff --git a/puppet/services/openvswitch-upgrade.yaml b/puppet/services/openvswitch-upgrade.yaml
deleted file mode 100644 (file)
index f6e7846..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-heat_template_version: pike
-
-description: >
-  Openvswitch package special handling for upgrade.
-
-outputs:
-  role_data:
-    description: Upgrade task for special handling of Openvswitch (OVS) upgrade.
-    value:
-      service_name: openvswitch_upgrade
-      upgrade_tasks:
-        - name: Check openvswitch version.
-          tags: step2
-          register: ovs_version
-          ignore_errors: true
-          shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
-        - name: Check openvswitch packaging.
-          tags: step2
-          shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
-          register: ovs_packaging_issue
-          ignore_errors: true
-        - block:
-            - name: "Ensure empty directory: emptying."
-              file:
-                state: absent
-                path: /root/OVS_UPGRADE
-            - name: "Ensure empty directory: creating."
-              file:
-                state: directory
-                path: /root/OVS_UPGRADE
-                owner: root
-                group: root
-                mode: 0750
-            - name: Download OVS packages.
-              command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
-            - name: Get rpm list for manual upgrade of OVS.
-              shell: ls -1 /root/OVS_UPGRADE/*.rpm
-              register: ovs_list_of_rpms
-            - name: Manual upgrade of OVS
-              shell: |
-                rpm -U --test {{item}} 2>&1 | grep "already installed" || \
-                rpm -U --replacepkgs --notriggerun --nopostun {{item}};
-              args:
-                chdir: /root/OVS_UPGRADE
-              with_items:
-                - "{{ovs_list_of_rpms.stdout_lines}}"
-          tags: step2
-          when: "'2.5.0-14' in '{{ovs_version.stdout}}'
-                or
-                ovs_packaging_issue|succeeded"
diff --git a/puppet/services/openvswitch.yaml b/puppet/services/openvswitch.yaml
new file mode 100644 (file)
index 0000000..36aa5db
--- /dev/null
@@ -0,0 +1,178 @@
+heat_template_version: pike
+
+description: >
+  Open vSwitch Configuration
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  OvsDpdkCoreList:
+    description: >
+      List of cores to be used for DPDK lcore threads.  Note, these threads
+      are used by the OVS control path for validation and handling functions.
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ""
+  OvsDpdkMemoryChannels:
+    description: Number of memory channels per socket to be used for DPDK
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9]*"
+    default: ""
+  OvsDpdkSocketMemory:
+    default: ""
+    description: >
+      Sets the amount of hugepage memory to assign per NUMA node. It is
+      recommended to use the socket closest to the PCIe slot used for the
+      desired DPDK NIC.  The format should be in "<socket 0 mem>, <socket 1
+      mem>, <socket n mem>", where the value is specified in MB.  For example:
+      "1024,0".
+    type: string
+  OvsDpdkDriverType:
+    default: "vfio-pci"
+    description: >
+      DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+      this UIO/PMD driver.
+    type: string
+  OvsPmdCoreList:
+    description: >
+      A list or range of CPU cores for PMD threads to be pinned to.  Note, NIC
+      location to cores on socket, number of hyper-threaded logical cores, and
+      desired number of PMD threads can all play a role in configuring this
+      setting.  These cores should be on the same socket where
+      OvsDpdkSocketMemory is assigned.  If using hyperthreading then specify
+      both logical cores that would equal the physical core. Also, specifying
+      more than one core will trigger multiple PMD threads to be spawned which
+      may improve dataplane performance.
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    type: string
+    default: ""
+  # DEPRECATED: the following options are deprecated and are currently maintained
+  # for backwards compatibility. They will be removed in the Queens cycle.
+  HostCpusList:
+    description: List of cores to be used for host process
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ''
+  NeutronDpdkCoreList:
+    description: List of cores to be used for DPDK Poll Mode Driver
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9,-]*"
+    default: ''
+  NeutronDpdkMemoryChannels:
+    description: Number of memory channels to be used for DPDK
+    type: string
+    constraints:
+      - allowed_pattern: "[0-9]*"
+    default: ''
+  NeutronDpdkSocketMemory:
+    default: ''
+    description: Memory allocated for each socket
+    type: string
+  NeutronDpdkDriverType:
+    default: "vfio-pci"
+    description: DPDK Driver type
+    type: string
+
+parameter_groups:
+- label: deprecated
+  description: Do not use deprecated params; they will be removed.
+  parameters:
+    - HostCpusList
+    - NeutronDpdkCoreList
+    - NeutronDpdkMemoryChannels
+    - NeutronDpdkSocketMemory
+    - NeutronDpdkDriverType
+
+conditions:
+  l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+  pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+  mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+  socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+  driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+
+outputs:
+  role_data:
+    description: Role data for the Open vSwitch service.
+    value:
+      service_name: openvswitch
+      config_settings:
+        map_replace:
+          - map_replace:
+            - vswitch::dpdk::driver_type: OvsDpdkDriverType
+              vswitch::dpdk::host_core_list: OvsDpdkCoreList
+              vswitch::dpdk::pmd_core_list: OvsPmdCoreList
+              vswitch::dpdk::memory_channels: OvsDpdkMemoryChannels
+              vswitch::dpdk::socket_mem: OvsDpdkSocketMemory
+            - values: {get_param: [RoleParameters]}
+          - values:
+              OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+              OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+              OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+              OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+              OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+      upgrade_tasks:
+        - name: Check openvswitch version.
+          tags: step2
+          register: ovs_version
+          ignore_errors: true
+          shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+        - name: Check openvswitch packaging.
+          tags: step2
+          shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+          register: ovs_packaging_issue
+          ignore_errors: true
+        - block:
+            - name: "Ensure empty directory: emptying."
+              file:
+                state: absent
+                path: /root/OVS_UPGRADE
+            - name: "Ensure empty directory: creating."
+              file:
+                state: directory
+                path: /root/OVS_UPGRADE
+                owner: root
+                group: root
+                mode: 0750
+            - name: Download OVS packages.
+              command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+            - name: Get rpm list for manual upgrade of OVS.
+              shell: ls -1 /root/OVS_UPGRADE/*.rpm
+              register: ovs_list_of_rpms
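+            # Reinstall OVS without running the package's postun/triggerun
+            # scriptlets (which would restart the service mid-upgrade);
+            # packages already at the target version are skipped.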
+            - name: Manual upgrade of OVS
+              shell: |
+                rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+                rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+              args:
+                chdir: /root/OVS_UPGRADE
+              with_items:
+                - "{{ovs_list_of_rpms.stdout_lines}}"
+          tags: step2
+          when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+                or
+                ovs_packaging_issue|succeeded"
index 20c38d8..df234c7 100644 (file)
@@ -44,6 +44,7 @@ outputs:
           ovn::northbound::port: {get_param: OVNNorthboundServerPort}
           ovn::southbound::port: {get_param: OVNSouthboundServerPort}
           ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+          tripleo::haproxy::ovn_dbs_manage_lb: true
           tripleo.ovn_dbs.firewall_rules:
             '121 OVN DB server ports':
               proto: 'tcp'
index d8e942d..0a7659e 100644 (file)
@@ -27,6 +27,11 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
 
 resources:
 
@@ -61,6 +66,8 @@ outputs:
             # internal_api_subnet - > IP/CIDR
             tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr:
               get_param: [ServiceNetMap, MysqlNetwork]
+            tripleo::profile::pacemaker::database::mysql::ca_file:
+              get_param: InternalTLSCAFile
       step_config: |
         include ::tripleo::profile::pacemaker::database::mysql
       metadata_settings:
diff --git a/puppet/services/pacemaker/ovn-dbs.yaml b/puppet/services/pacemaker/ovn-dbs.yaml
new file mode 100644 (file)
index 0000000..1cbb476
--- /dev/null
@@ -0,0 +1,61 @@
+heat_template_version: ocata
+
+description: >
+  OVN databases configured with puppet in HA mode
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  OVNNorthboundServerPort:
+    description: Port of the OVN Northbound DB server
+    type: number
+    default: 6641
+  OVNSouthboundServerPort:
+    description: Port of the OVN Southbound DB server
+    type: number
+    default: 6642
+
+resources:
+
+  OVNDBsBase:
+    type: ../ovn-dbs.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the OVN northd service
+    value:
+      service_name: ovn_dbs
+      config_settings:
+        map_merge:
+          - get_attr: [OVNDBsBase, role_data, config_settings]
+          - tripleo::haproxy::ovn_dbs_manage_lb: false
+            tripleo::profile::pacemaker::ovn_northd::nb_db_port: {get_param: OVNNorthboundServerPort}
+            tripleo::profile::pacemaker::ovn_northd::sb_db_port: {get_param: OVNSouthboundServerPort}
+      step_config: |
+        include ::tripleo::profile::pacemaker::ovn_northd
index 74aaf59..c49b084 100644 (file)
@@ -34,6 +34,42 @@ parameters:
   MonitoringSubscriptionPacemakerRemote:
     default: 'overcloud-pacemaker_remote'
     type: string
+  EnableFencing:
+    default: false
+    description: Whether to enable fencing in Pacemaker or not.
+    type: boolean
+  FencingConfig:
+    default: {}
+    description: |
+      Pacemaker fencing configuration. The JSON should have
+      the following structure:
+        {
+          "devices": [
+            {
+              "agent": "AGENT_NAME",
+              "host_mac": "HOST_MAC_ADDRESS",
+              "params": {"PARAM_NAME": "PARAM_VALUE"}
+            }
+          ]
+        }
+      For instance:
+        {
+          "devices": [
+            {
+              "agent": "fence_xvm",
+              "host_mac": "52:54:00:aa:bb:cc",
+              "params": {
+                "multicast_address": "225.0.0.12",
+                "port": "baremetal_0",
+                "manage_fw": true,
+                "manage_key_file": true,
+                "key_file": "/etc/fence_xvm.key",
+                "key_file_password": "abcdef"
+              }
+            }
+          ]
+        }
+    type: json
   PacemakerRemoteLoggingSource:
     type: json
     default:
@@ -60,6 +96,8 @@ outputs:
             proto: 'tcp'
             dport:
               - 3121
+        tripleo::fencing::config: {get_param: FencingConfig}
+        enable_fencing: {get_param: EnableFencing}
         tripleo::profile::base::pacemaker_remote::remote_authkey: {get_param: PacemakerRemoteAuthkey}
       step_config: |
         include ::tripleo::profile::base::pacemaker_remote
index a41e34f..0289b7a 100644 (file)
@@ -84,8 +84,8 @@ outputs:
             tripleo.panko_api.firewall_rules:
               '140 panko-api':
                 dport:
-                  - 8779
-                  - 13779
+                  - 8977
+                  - 13977
             panko::api::host:
               str_replace:
                 template:
index 84817bc..a94d4ea 100644 (file)
@@ -34,11 +34,18 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  PankoDebug:
+    default: ''
+    description: Set to True to enable debugging for Panko services.
+    type: string
   KeystoneRegion:
     type: string
     default: 'regionOne'
     description: Keystone region for endpoint
 
+conditions:
+  service_debug_unset: {equals : [{get_param: PankoDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Panko role.
@@ -55,7 +62,11 @@ outputs:
             query:
               read_default_file: /etc/my.cnf.d/tripleo.cnf
               read_default_group: tripleo
-        panko::debug: {get_param: Debug}
+        panko::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: PankoDebug }
         panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         panko::keystone::authtoken::project_name: 'service'
         panko::keystone::authtoken::user_domain_name: 'Default'
index 1ee6d17..c294e74 100644 (file)
@@ -52,11 +52,18 @@ parameters:
     type: string
     default: ''
     description: Set to True to enable debugging on all services.
+  SaharaDebug:
+    default: ''
+    description: Set to True to enable debugging for Sahara services.
+    type: string
   SaharaPlugins:
     default: ["ambari","cdh","mapr","vanilla","spark","storm"]
     description: Sahara enabled plugin list
     type: comma_delimited_list
 
+conditions:
+  service_debug_unset: {equals : [{get_param: SaharaDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Sahara base service.
@@ -77,7 +84,11 @@ outputs:
         sahara::rabbit_user: {get_param: RabbitUserName}
         sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         sahara::rabbit_port: {get_param: RabbitClientPort}
-        sahara::debug: {get_param: Debug}
+        sahara::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: SaharaDebug }
         # Remove admin_password when https://review.openstack.org/442619 is merged.
         sahara::admin_password: {get_param: SaharaPassword}
         sahara::use_neutron: true
index 9a304ed..c707efb 100644 (file)
@@ -59,10 +59,10 @@ parameters:
     type: string
   SwiftCeilometerPipelineEnabled:
     description: Set to False to disable the swift proxy ceilometer pipeline.
-    default: True
+    default: false
     type: boolean
   SwiftCeilometerIgnoreProjects:
-    default: ['services']
+    default: ['service']
     description: Comma-separated list of project names to ignore.
     type: comma_delimited_list
   RabbitClientPort:
@@ -81,7 +81,7 @@ parameters:
 
 conditions:
 
-  ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
+  ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, true]}
   use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
 
 resources:
@@ -118,14 +118,20 @@ outputs:
             swift::proxy::authtoken::project_name: 'service'
             swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
             swift::proxy::workers: {get_param: SwiftWorkers}
-            swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
-            swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
-            swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
-            swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
-            swift::proxy::ceilometer::password: {get_param: SwiftPassword}
-            swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
-            swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
-            swift::proxy::ceilometer::nonblocking_notify: true
+          -
+            if:
+            - ceilometer_pipeline_enabled
+            -
+              swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
+              swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+              swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+              swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+              swift::proxy::ceilometer::password: {get_param: SwiftPassword}
+              swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
+              swift::proxy::ceilometer::nonblocking_notify: true
+              swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+            - {}
+          - swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
             tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
             tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL}
             tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
@@ -168,7 +174,6 @@ outputs:
                     - ''
                   - 'proxy-logging'
                   - 'proxy-server'
-            swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
             swift::proxy::account_autocreate: true
             # NOTE: bind IP is found in Heat replacing the network name with the
             # local node IP for the given network; replacement examples
index e121feb..5ced8c3 100644 (file)
@@ -33,6 +33,10 @@ parameters:
   Debug:
     type: string
     default: ''
+  TackerDebug:
+    default: ''
+    description: Set to True to enable debugging for the Tacker service.
+    type: string
   KeystoneRegion:
     type: string
     default: 'regionOne'
@@ -62,6 +66,9 @@ parameters:
     default: {}
     type: json
 
+conditions:
+  service_debug_unset: {equals : [{get_param: TackerDebug}, '']}
+
 outputs:
   role_data:
     description: Role data for the Tacker role.
@@ -80,7 +87,11 @@ outputs:
               read_default_file: /etc/my.cnf.d/tripleo.cnf
               read_default_group: tripleo
 
-        tacker::debug: {get_param: Debug}
+        tacker::debug:
+          if:
+          - service_debug_unset
+          - {get_param: Debug }
+          - {get_param: TackerDebug }
         tacker::rpc_backend: rabbit
         tacker::rabbit_userid: {get_param: RabbitUserName}
         tacker::rabbit_password: {get_param: RabbitPassword}
index 6bc296a..416d86d 100644 (file)
@@ -30,6 +30,10 @@ parameters:
     default: ''
     description: Set to True to enable debugging on all services.
     type: string
+  ZaqarDebug:
+    default: ''
+    description: Set to True to enable debugging for the Zaqar service.
+    type: string
   ZaqarPassword:
     description: The password for Zaqar
     type: string
@@ -54,6 +58,7 @@ parameters:
 
 conditions:
   zaqar_workers_zero: {equals : [{get_param: ZaqarWorkers}, 0]}
+  service_debug_unset: {equals : [{get_param: ZaqarDebug}, '']}
 
 resources:
 
@@ -78,7 +83,11 @@ outputs:
             zaqar::keystone::authtoken::project_name: 'service'
             zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
-            zaqar::debug: {get_param: Debug}
+            zaqar::debug:
+              if:
+              - service_debug_unset
+              - {get_param: Debug }
+              - {get_param: ZaqarDebug }
             zaqar::server::service_name: 'httpd'
             zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]}
             zaqar::wsgi::apache::ssl: false
diff --git a/releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml b/releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml
new file mode 100644 (file)
index 0000000..64a4d7e
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - This introduces the ManageKeystoneFernetKeys parameter, which tells
+    heat/puppet whether it should replace the existing fernet keys on a stack
+    deployment. This is useful if the deployer wants to perform key rotations
+    out of band.
diff --git a/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml b/releasenotes/notes/Make-exposing-haproxy-stats-interface-configurable-2b634793c4f13950.yaml
new file mode 100644 (file)
index 0000000..193154d
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - The HAProxy stats interface can now be enabled/disabled with the
+    HAProxyStatsEnabled flag. Note that it's still enabled by default.
diff --git a/releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml b/releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml
new file mode 100644 (file)
index 0000000..1e2673f
--- /dev/null
@@ -0,0 +1,10 @@
+---
+features:
+  - The KeystoneFernetKeys parameter was introduced, which can take any
+    number of keys as long as the value is in the right format. It is
+    generated by the same mechanism as the rest of the passwords, so its
+    value is also available via mistral's "password" environment variable.
+    This also allows rotations to be made via mistral and via stack updates.
+deprecations:
+  - The individual keystone fernet key parameters (KeystoneFernetKey0 and
+    KeystoneFernetKey1) were deprecated in favor of KeystoneFernetKeys.
diff --git a/releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml b/releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml
new file mode 100644 (file)
index 0000000..73b9f9c
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - Add parameters to control the Cinder NAS security settings associated
+    with the NFS and NetApp Cinder back ends. The settings are disabled
+    by default.
diff --git a/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml b/releasenotes/notes/add-deploymentswiftdatamap-parameter-351ee63800016e4d.yaml
new file mode 100644 (file)
index 0000000..67a55cd
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - Added the new DeploymentSwiftDataMap parameter, which is used to set the
+    deployment_swift_data property on the Server resources. The parameter is a
+    map where the keys are the Heat-assigned hostnames and the value is a map
+    of the container/object name in Swift.
diff --git a/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml b/releasenotes/notes/add-server-os-collect-config-data-eeea2f57b3a82654.yaml
new file mode 100644 (file)
index 0000000..cd352ac
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - Adds a new output, ServerOsCollectConfigData, which is the
+    os-collect-config configuration associated with each server resource.
+    This can be used to [pre]configure the os-collect-config agents on
+    deployed servers.
diff --git a/releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml b/releasenotes/notes/baremetal-cell-hosts-cd5cf5aa8a33643c.yaml
new file mode 100644 (file)
index 0000000..98ba86d
--- /dev/null
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    When ``environments/services/ironic.yaml`` is used, enable the periodic
+    task in nova-scheduler to automatically discover new nodes. Otherwise a
+    user has to run a nova management command on the controllers each time.
diff --git a/releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml b/releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml
new file mode 100644 (file)
index 0000000..5117642
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Add an example role ``roles/IronicConductor.yaml`` for a node with only
+    ironic-conductor and its (i)PXE service.
diff --git a/releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml b/releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml
new file mode 100644 (file)
index 0000000..353d16d
--- /dev/null
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Changed the Panko API port to 8977 instead of 8779, since 8779 is
+    reserved for Trove; the change avoids the conflict.
diff --git a/releasenotes/notes/debug_per_service-54a260917c4a7e3a.yaml b/releasenotes/notes/debug_per_service-54a260917c4a7e3a.yaml
new file mode 100644 (file)
index 0000000..da9af4a
--- /dev/null
@@ -0,0 +1,9 @@
+---
+features:
+  - |
+    Allow debug to be configured per service.
+    The feature is backward compatible with the existing Debug parameter.
+    A new parameter is added per service, e.g. GlanceDebug. When set to
+    False, it disables debug for that service, even if Debug is set to True.
+    If Debug is set to False but GlanceDebug is set to True, Glance debug
+    will be enabled.
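+
+    For example (illustrative only), an environment file could contain::
+
+      parameter_defaults:
+        Debug: false
+        GlanceDebug: true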
diff --git a/releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml b/releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml
new file mode 100644 (file)
index 0000000..d8fcbfe
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - Added a custom plan-environment file for providing workflow-specific
+    inputs for the derived parameters workflow.
diff --git a/releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml b/releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml
new file mode 100644 (file)
index 0000000..28dac8b
--- /dev/null
@@ -0,0 +1,5 @@
+---
+fixes:
+  - Disable ceilometer in the swift proxy middleware pipeline out of the box.
+    The middleware generates a lot of events with the gnocchi and swift
+    backends and causes heavy load. It can easily be enabled if needed.
diff --git a/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml b/releasenotes/notes/enable-dpdk-on-boot-f5b098b10152b436.yaml
new file mode 100644 (file)
index 0000000..4cb9b80
--- /dev/null
@@ -0,0 +1,8 @@
+---
+features:
+  - DPDK is enabled in OVS before the NetworkDeployment to ensure DPDK
+    is ready to handle new port additions.
+upgrade:
+  - A new parameter, ServiceNames, is added to the PreNetworkConfig resource.
+    All templates associated with PreNetworkConfig should add this new
+    parameter during the upgrade.
diff --git a/releasenotes/notes/example-roles-d27c748090f6a154.yaml b/releasenotes/notes/example-roles-d27c748090f6a154.yaml
new file mode 100644 (file)
index 0000000..e27674d
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    A set of example roles has been created in the roles folder in
+    tripleo-heat-templates.  Management of services for roles should occur
+    in these role files rather than in roles_data.yaml.
diff --git a/releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml b/releasenotes/notes/fix-glance-api-network-4f9d7c20475a5994.yaml
new file mode 100644 (file)
index 0000000..18474cf
--- /dev/null
@@ -0,0 +1,3 @@
+---
+fixes:
+  - Incorrect network used for Glance API service.
diff --git a/releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml b/releasenotes/notes/fix-rpm-deploy-artifact-urls-03d5694073ad159d.yaml
new file mode 100644 (file)
index 0000000..25016e8
--- /dev/null
@@ -0,0 +1,4 @@
+---
+fixes:
+  - |
+    Fix support for RPMs to be installed via DeployArtifactURLs. LP#1697102
diff --git a/releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml b/releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml
new file mode 100644 (file)
index 0000000..0721334
--- /dev/null
@@ -0,0 +1,21 @@
+---
+features:
+  - |
+    There is now a tool in tripleo-heat-templates, similar to the
+    oslo-config-generator, that can be used to programmatically generate
+    sample environment files based directly on the contents of the templates
+    themselves.  This ensures consistency in the sample environments, as well
+    as making it easier to update environments to reflect changes to the
+    templates.
+upgrade:
+  - |
+    Some sample environment files will be moving as part of the work to
+    generate them programmatically.  The old versions will be left in place for
+    one cycle to allow a smooth upgrade process.  When upgrading, if any of the
+    environment files in use for the deployment have been deprecated, they
+    should be replaced with the newly generated versions.
+deprecations:
+  - |
+    Where a generated sample environment replaces an existing one, the existing
+    environment is deprecated.  This will be noted in a comment at the top of
+    the file.
diff --git a/releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml b/releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml
new file mode 100644 (file)
index 0000000..1fbdd1f
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Add basic support for **ironic-inspector** in the overcloud. It is highly
+    experimental and is not yet recommended for production use.
diff --git a/releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml b/releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml
new file mode 100644 (file)
index 0000000..4c10753
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - KeystoneFernetMaxActiveKeys was introduced as a parameter to the keystone
+    profile. It sets the max_active_keys value of the keystone.conf file and
+    will subsequently be used by mistral to purge the keys in a mistral task.
diff --git a/releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml b/releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml
new file mode 100644 (file)
index 0000000..e5adb6a
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add support for configuring the number of sacks in gnocchi.
diff --git a/releasenotes/notes/ovn-ha-c0139ac519680872.yaml b/releasenotes/notes/ovn-ha-c0139ac519680872.yaml
new file mode 100644 (file)
index 0000000..d36f836
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Support HA for OVN db servers and ovn-northd using Pacemaker.
diff --git a/releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml b/releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml
new file mode 100644 (file)
index 0000000..95e9260
--- /dev/null
@@ -0,0 +1,11 @@
+---
+features:
+  - PreNetworkConfig is modified to support role-specific parameters.
+upgrade:
+  - PreNetworkConfig takes a new parameter, RoleParameters. All the templates
+    associated with PreNetworkConfig should add this new parameter during
+    upgrade.
+deprecations:
+  - Parameters {{role}}KernelArgs, {{role}}TunedProfileName and
+    {{role}}HostCpusList are deprecated. Instead, role-specific
+    parameter support has been added under the same names.
diff --git a/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml b/releasenotes/notes/refactor-dpdk-dd37ccf14f711bb1.yaml
new file mode 100644 (file)
index 0000000..1e44d92
--- /dev/null
@@ -0,0 +1,23 @@
+---
+features:
+  - Adds a common openvswitch service template to be
+    inherited by other services.
+  - Adds an environment file to be used for deploying
+    OpenDaylight + OVS DPDK.
+  - Adds first-boot and OVS configuration scripts.
+deprecations:
+  - The ``HostCpusList`` parameter is deprecated in
+    favor of ``OvsDpdkCoreList`` and will be removed
+    in a future release.
+  - The ``NeutronDpdkCoreList`` parameter is deprecated in
+    favor of ``OvsPmdCoreList`` and will be removed
+    in a future release.
+  - The ``NeutronDpdkMemoryChannels`` parameter is deprecated in
+    favor of ``OvsDpdkMemoryChannels`` and will be removed
+    in a future release.
+  - The ``NeutronDpdkSocketMemory`` parameter is deprecated in
+    favor of ``OvsDpdkSocketMemory`` and will be removed
+    in a future release.
+  - The ``NeutronDpdkDriverType`` parameter is deprecated in
+    favor of ``OvsDpdkDriverType`` and will be removed
+    in a future release.
diff --git a/releasenotes/notes/remove-ceilometer-cron-85362e197ba245a0.yaml b/releasenotes/notes/remove-ceilometer-cron-85362e197ba245a0.yaml
new file mode 100644 (file)
index 0000000..7854fa5
--- /dev/null
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - Ceilometer expirer is deprecated in Pike. During the upgrade, the crontab
+    that is configured for the ceilometer user will be removed to ensure the
+    expirer script is not running.
diff --git a/releasenotes/notes/server-blacklist-support-370c1a1f15a28a41.yaml b/releasenotes/notes/server-blacklist-support-370c1a1f15a28a41.yaml
new file mode 100644 (file)
index 0000000..7ab253b
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - Added the ability to blacklist servers by name from being
+    associated with any Heat triggered SoftwareDeployment
+    resources. The servers are specified in the new
+    DeploymentServerBlacklist parameter.
diff --git a/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml b/releasenotes/notes/service_workflow_tasks-4da5830821b7154b.yaml
new file mode 100644 (file)
index 0000000..cf99ec5
--- /dev/null
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    It is now possible to trigger Mistral workflows or workflow actions
+    before a deployment step is applied. This can be defined within the
+    scope of a service template and is described as a task property
+    for the Heat OS::Mistral::Workflow resource; for more details, see
+    the puppet/services/README.rst file.
\ No newline at end of file
diff --git a/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml b/releasenotes/notes/split-stack-environments-1f817e24b5d90959.yaml
new file mode 100644 (file)
index 0000000..1bc9937
--- /dev/null
@@ -0,0 +1,7 @@
+---
+features:
+  - Add two new example environments to facilitate deploying split-stack:
+    environments/overcloud-baremetal.j2.yaml and
+    environments/overcloud-services.yaml. The environments are used to deploy
+    two separate Heat stacks, one for just the baremetal+network configuration
+    and one for the service configuration.
diff --git a/releasenotes/notes/update-metric-delay-default-963d073026e2cc15.yaml b/releasenotes/notes/update-metric-delay-default-963d073026e2cc15.yaml
new file mode 100644 (file)
index 0000000..d74e3a1
--- /dev/null
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Update the default metric processing delay to 30. This will help reduce
+    the metric backlog and avoid overloading the storage backend.
diff --git a/releasenotes/notes/vhost_default_dir-cac327a0ac05df90.yaml b/releasenotes/notes/vhost_default_dir-cac327a0ac05df90.yaml
new file mode 100644 (file)
index 0000000..b9ddaec
--- /dev/null
@@ -0,0 +1,6 @@
+---
+issues:
+  - Modify ``NeutronVhostuserSocketDir`` to a separate directory in the DPDK
+    environment file. A different set of permissions is required for creating
+    vhost sockets when the vhost type is dpdkvhostuserclient (which is the
+    default from Ocata).
diff --git a/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml b/releasenotes/notes/vipmap-output-4a9ce99930960346.yaml
new file mode 100644 (file)
index 0000000..1f49bac
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - Add VipMap output to the top level stack output. VipMap is a mapping from
+    each network to the VIP address on that network. Also includes the Redis
+    VIP.
index ec158ce..72b89b1 100644 (file)
@@ -52,7 +52,7 @@ copyright = u'2017, TripleO Developers'
 # built documents.
 #
 # The full version, including alpha/beta/rc tags.
-release = '7.0.0.0b1'
+release = '7.0.0.0b2'
 # The short X.Y version.
 version = '7.0.0'
 
diff --git a/roles/BlockStorage.yaml b/roles/BlockStorage.yaml
new file mode 100644 (file)
index 0000000..b011740
--- /dev/null
@@ -0,0 +1,29 @@
+###############################################################################
+# Role: BlockStorage                                                          #
+###############################################################################
+- name: BlockStorage
+  description: |
+    Cinder Block Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
+  ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::BlockStorageCinderVolume
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Iscsid
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
diff --git a/roles/CephStorage.yaml b/roles/CephStorage.yaml
new file mode 100644 (file)
index 0000000..647c4d5
--- /dev/null
@@ -0,0 +1,27 @@
+###############################################################################
+# Role: CephStorage                                                           #
+###############################################################################
+- name: CephStorage
+  description: |
+    Ceph OSD Storage node role
+  networks:
+    - Storage
+    - StorageMgmt
+  ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CephOSD
+    - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
diff --git a/roles/Compute.yaml b/roles/Compute.yaml
new file mode 100644 (file)
index 0000000..75a6f60
--- /dev/null
@@ -0,0 +1,45 @@
+###############################################################################
+# Role: Compute                                                               #
+###############################################################################
+- name: Compute
+  description: |
+    Basic Compute Node role
+  CountDefault: 1
+  networks:
+    - InternalApi
+    - Tenant
+    - Storage
+  HostnameFormatDefault: '%stackname%-novacompute-%index%'
+  disable_upgrade_deployment: True
+  ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CephClient
+    - OS::TripleO::Services::CephExternal
+    - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::ComputeCeilometerAgent
+    - OS::TripleO::Services::ComputeNeutronCorePlugin
+    - OS::TripleO::Services::ComputeNeutronL3Agent
+    - OS::TripleO::Services::ComputeNeutronMetadataAgent
+    - OS::TripleO::Services::ComputeNeutronOvsAgent
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Iscsid
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronLinuxbridgeAgent
+    - OS::TripleO::Services::NeutronSriovAgent
+    - OS::TripleO::Services::NeutronVppAgent
+    - OS::TripleO::Services::NovaCompute
+    - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::OpenDaylightOvs
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::Vpp
diff --git a/roles/Controller.yaml b/roles/Controller.yaml
new file mode 100644 (file)
index 0000000..b0a1313
--- /dev/null
@@ -0,0 +1,127 @@
+###############################################################################
+# Role: Controller                                                            #
+###############################################################################
+- name: Controller
+  description: |
+    Controller role that has all the controller services loaded and handles
+    Database, Messaging and Network functions.
+  CountDefault: 1
+  tags:
+    - primary
+    - controller
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
+  HostnameFormatDefault: '%stackname%-controller-%index%'
+  ServicesDefault:
+    - OS::TripleO::Services::AodhApi
+    - OS::TripleO::Services::AodhEvaluator
+    - OS::TripleO::Services::AodhListener
+    - OS::TripleO::Services::AodhNotifier
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::BarbicanApi
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CeilometerAgentCentral
+    - OS::TripleO::Services::CeilometerAgentNotification
+    # FIXME: This service was disabled in Pike and this entry should be removed
+    # in Queens.
+    - OS::TripleO::Services::CeilometerExpirer
+    - OS::TripleO::Services::CephExternal
+    - OS::TripleO::Services::CephMds
+    - OS::TripleO::Services::CephMon
+    - OS::TripleO::Services::CephRbdMirror
+    - OS::TripleO::Services::CephRgw
+    - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::CinderApi
+    - OS::TripleO::Services::CinderBackendDellPs
+    - OS::TripleO::Services::CinderBackendDellSc
+    - OS::TripleO::Services::CinderBackendNetApp
+    - OS::TripleO::Services::CinderBackendScaleIO
+    - OS::TripleO::Services::CinderBackup
+    - OS::TripleO::Services::CinderHPELeftHandISCSI
+    - OS::TripleO::Services::CinderScheduler
+    - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Congress
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Ec2Api
+    - OS::TripleO::Services::Etcd
+    - OS::TripleO::Services::ExternalSwiftProxy
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::GnocchiApi
+    - OS::TripleO::Services::GnocchiMetricd
+    - OS::TripleO::Services::GnocchiStatsd
+    - OS::TripleO::Services::HAproxy
+    - OS::TripleO::Services::HeatApi
+    - OS::TripleO::Services::HeatApiCfn
+    - OS::TripleO::Services::HeatApiCloudwatch
+    - OS::TripleO::Services::HeatEngine
+    - OS::TripleO::Services::Horizon
+    - OS::TripleO::Services::IronicApi
+    - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::Iscsid
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::ManilaApi
+    - OS::TripleO::Services::ManilaBackendCephFs
+    - OS::TripleO::Services::ManilaBackendGeneric
+    - OS::TripleO::Services::ManilaBackendNetapp
+    - OS::TripleO::Services::ManilaScheduler
+    - OS::TripleO::Services::ManilaShare
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::MongoDb
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronApi
+    - OS::TripleO::Services::NeutronBgpVpnApi
+    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronL2gwAgent
+    - OS::TripleO::Services::NeutronL2gwApi
+    - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronLinuxbridgeAgent
+    - OS::TripleO::Services::NeutronMetadataAgent
+    - OS::TripleO::Services::NeutronML2FujitsuCfab
+    - OS::TripleO::Services::NeutronML2FujitsuFossw
+    - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::NeutronVppAgent
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaConsoleauth
+    - OS::TripleO::Services::NovaIronic
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::NovaVncProxy
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::OctaviaApi
+    - OS::TripleO::Services::OctaviaHealthManager
+    - OS::TripleO::Services::OctaviaHousekeeping
+    - OS::TripleO::Services::OctaviaWorker
+    - OS::TripleO::Services::OpenDaylightApi
+    - OS::TripleO::Services::OpenDaylightOvs
+    - OS::TripleO::Services::OVNDBs
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::PankoApi
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::Redis
+    - OS::TripleO::Services::SaharaApi
+    - OS::TripleO::Services::SaharaEngine
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::SwiftStorage
+    - OS::TripleO::Services::Tacker
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::Vpp
+    - OS::TripleO::Services::Zaqar
diff --git a/roles/ControllerOpenstack.yaml b/roles/ControllerOpenstack.yaml
new file mode 100644 (file)
index 0000000..6cf2120
--- /dev/null
@@ -0,0 +1,104 @@
+###############################################################################
+# Role: ControllerOpenstack                                                   #
+###############################################################################
+- name: ControllerOpenstack
+  description: |
+    Controller role that does not contain the database, messaging and networking
+    components. Use in combination with the Database, Messaging and Networker
+    roles.
+  tags:
+    - primary
+    - controller
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
+  HostnameFormatDefault: '%stackname%-controller-%index%'
+  ServicesDefault:
+    - OS::TripleO::Services::AodhApi
+    - OS::TripleO::Services::AodhEvaluator
+    - OS::TripleO::Services::AodhListener
+    - OS::TripleO::Services::AodhNotifier
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::BarbicanApi
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CeilometerAgentCentral
+    - OS::TripleO::Services::CeilometerAgentNotification
+    - OS::TripleO::Services::CeilometerApi
+    - OS::TripleO::Services::CeilometerExpirer
+    - OS::TripleO::Services::CephExternal
+    - OS::TripleO::Services::CephMds
+    - OS::TripleO::Services::CephMon
+    - OS::TripleO::Services::CephRbdMirror
+    - OS::TripleO::Services::CephRgw
+    - OS::TripleO::Services::CinderApi
+    - OS::TripleO::Services::CinderBackup
+    - OS::TripleO::Services::CinderHPELeftHandISCSI
+    - OS::TripleO::Services::CinderScheduler
+    - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Congress
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Ec2Api
+    - OS::TripleO::Services::Etcd
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::GnocchiApi
+    - OS::TripleO::Services::GnocchiMetricd
+    - OS::TripleO::Services::GnocchiStatsd
+    - OS::TripleO::Services::HAproxy
+    - OS::TripleO::Services::HeatApi
+    - OS::TripleO::Services::HeatApiCfn
+    - OS::TripleO::Services::HeatApiCloudwatch
+    - OS::TripleO::Services::HeatEngine
+    - OS::TripleO::Services::Horizon
+    - OS::TripleO::Services::IronicApi
+    - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::ManilaApi
+    - OS::TripleO::Services::ManilaBackendCephFs
+    - OS::TripleO::Services::ManilaBackendGeneric
+    - OS::TripleO::Services::ManilaBackendNetapp
+    - OS::TripleO::Services::ManilaScheduler
+    - OS::TripleO::Services::ManilaShare
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::MongoDb
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaConsoleauth
+    - OS::TripleO::Services::NovaIronic
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::NovaVncProxy
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::OctaviaApi
+    - OS::TripleO::Services::OctaviaHealthManager
+    - OS::TripleO::Services::OctaviaHousekeeping
+    - OS::TripleO::Services::OctaviaWorker
+    - OS::TripleO::Services::OpenDaylightApi
+    - OS::TripleO::Services::OpenDaylightOvs
+    - OS::TripleO::Services::OVNDBs
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::PankoApi
+    - OS::TripleO::Services::Redis
+    - OS::TripleO::Services::SaharaApi
+    - OS::TripleO::Services::SaharaEngine
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::SwiftStorage
+    - OS::TripleO::Services::Tacker
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::Vpp
+    - OS::TripleO::Services::Zaqar
+
diff --git a/roles/Database.yaml b/roles/Database.yaml
new file mode 100644 (file)
index 0000000..75b26a8
--- /dev/null
@@ -0,0 +1,25 @@
+###############################################################################
+# Role: Database                                                              #
+###############################################################################
+- name: Database
+  description: |
+    Standalone database role with the database being managed via Pacemaker
+  networks:
+    - InternalApi
+  HostnameFormatDefault: '%stackname%-database-%index%'
+  ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
+
diff --git a/roles/IronicConductor.yaml b/roles/IronicConductor.yaml
new file mode 100644 (file)
index 0000000..8a29b33
--- /dev/null
@@ -0,0 +1,21 @@
+###############################################################################
+# Role: IronicConductor                                                       #
+###############################################################################
+- name: IronicConductor
+  description: |
+    Ironic Conductor node role
+  HostnameFormatDefault: '%stackname%-ironic-%index%'
+  ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
diff --git a/roles/Messaging.yaml b/roles/Messaging.yaml
new file mode 100644 (file)
index 0000000..5b06063
--- /dev/null
@@ -0,0 +1,24 @@
+###############################################################################
+# Role: Messaging                                                             #
+###############################################################################
+- name: Messaging
+  description: |
+    Standalone messaging role with RabbitMQ being managed via Pacemaker
+  networks:
+    - InternalApi
+  HostnameFormatDefault: '%stackname%-messaging-%index%'
+  ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
+
diff --git a/roles/Networker.yaml b/roles/Networker.yaml
new file mode 100644 (file)
index 0000000..a28eaa6
--- /dev/null
@@ -0,0 +1,38 @@
+###############################################################################
+# Role: Networker                                                             #
+###############################################################################
+- name: Networker
+  description: |
+    Standalone networking role to run Neutron services on their own. Includes
+    Pacemaker integration via PacemakerRemote.
+  networks:
+    - InternalApi
+  HostnameFormatDefault: '%stackname%-networker-%index%'
+  ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronApi
+    - OS::TripleO::Services::NeutronBgpvpnApi
+    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronL2gwAgent
+    - OS::TripleO::Services::NeutronL2gwApi
+    - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronMetadataAgent
+    - OS::TripleO::Services::NeutronML2FujitsuCfab
+    - OS::TripleO::Services::NeutronML2FujitsuFossw
+    - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::NeutronVppAgent
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::OpenDaylightOvs
+    - OS::TripleO::Services::PacemakerRemote
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
+
diff --git a/roles/ObjectStorage.yaml b/roles/ObjectStorage.yaml
new file mode 100644 (file)
index 0000000..27dc123
--- /dev/null
@@ -0,0 +1,30 @@
+###############################################################################
+# Role: ObjectStorage                                                         #
+###############################################################################
+- name: ObjectStorage
+  description: |
+    Swift Object Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
+  disable_upgrade_deployment: True
+  ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::SwiftStorage
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
diff --git a/roles/README.rst b/roles/README.rst
new file mode 100644 (file)
index 0000000..cd1fcb4
--- /dev/null
@@ -0,0 +1,210 @@
+Roles
+=====
+
+The yaml files in this directory can be combined into a single roles_data.yaml
+and be used with TripleO to create custom deployments.
+
+Use tripleoclient to build your own custom roles_data.yaml for your
+environment.
+
+roles_data.yaml
+---------------
+
+The roles_data.yaml specifies which roles (groups of nodes) will be deployed.
+Note this file is used as an input to the various \*.j2.yaml jinja2 templates,
+so that they are converted into \*.yaml during the plan creation. This occurs
+via a mistral action/workflow. The format of this file is a yaml list.
+
+Role YAML files
+===============
+
+Each role yaml file should contain only a single role. The filename should
+match the role name. The name of the role is mandatory and must be unique.
+
+The role files in this folder should contain at least a role name and the
+default list of services for the role.
+
+Role Options
+------------
+
+* CountDefault: (number) optional, default number of nodes, defaults to 0.
+  Sets the default for the {{role.name}}Count parameter in overcloud.yaml.
+
+* HostnameFormatDefault: (string) optional default format string for the
+  hostname, defaults to '%stackname%-{{role.name.lower()}}-%index%'.
+  Sets the default for the {{role.name}}HostnameFormat parameter in
+  overcloud.yaml.
+
+* disable_constraints: (boolean) optional, whether to disable Nova and Glance
+  constraints for each role specified in the templates.
+
+* disable_upgrade_deployment: (boolean) optional, whether to run the
+  ansible upgrade steps for all services that are deployed on the role. If set
+  to True, the operator will drive the upgrade for this role's nodes.
+
+* upgrade_batch_size: (number) batch size for upgrades where tasks are
+  specified by services to run in batches rather than on all nodes at once.
+  This defaults to 1, but larger batches may be specified here.
+
+* ServicesDefault: (list) optional default list of services to be deployed
+  on the role, defaults to an empty list. Sets the default for the
+  {{role.name}}Services parameter in overcloud.yaml
+
+* tags: (list) list of tags used by other parts of the deployment process to
+  find the role for a specific type of functionality. Currently a role
+  with both 'primary' and 'controller' is used as the primary role for the
+  deployment process. If no roles have both 'primary' and 'controller', the
+  first role in this file is used as the primary role.
+
+* description: (string) a few sentences describing the role and information
+  pertaining to the usage of the role.
+
+* networks: (list) optional list of networks which the role will have
+  access to when network isolation is enabled. The names should match
+  those defined in network_data.yaml.
+
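+A minimal role definition using these options could look like the following
+(illustrative sketch only; a real role would normally carry a much longer
+ServicesDefault list):
+
+.. code-block::
+
+  - name: Example
+    description: |
+      Minimal example role
+    CountDefault: 1
+    HostnameFormatDefault: '%stackname%-example-%index%'
+    networks:
+      - InternalApi
+    ServicesDefault:
+      - OS::TripleO::Services::Ntp
+      - OS::TripleO::Services::Timezone
+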
+Working with Roles
+==================
+The tripleoclient provides a series of commands that can be used to view
+roles and generate a roles_data.yaml file for deployment.
+
+Listing Available Roles
+-----------------------
+The ``openstack overcloud role list`` command can be used to view the list
+of roles provided by tripleo-heat-templates.
+
+Usage
+^^^^^
+.. code-block::
+
+  usage: openstack overcloud role list [-h] [--roles-path <roles directory>]
+
+  List availables roles
+
+  optional arguments:
+    -h, --help            show this help message and exit
+    --roles-path <roles directory>
+                          Filesystem path containing the role yaml files. By
+                          default this is /usr/share/openstack-tripleo-heat-
+                          templates/roles
+
+Example
+^^^^^^^
+.. code-block::
+
+  [user@host ~]$ openstack overcloud role list
+  BlockStorage
+  CephStorage
+  Compute
+  Controller
+  ControllerOpenstack
+  Database
+  Messaging
+  Networker
+  ObjectStorage
+  Telemetry
+  Undercloud
+
+Viewing Role Details
+--------------------
+The ``openstack overcloud role show`` command can be used as a quick way to
+view some of the information about a role.
+
+Usage
+^^^^^
+.. code-block::
+
+  usage: openstack overcloud role show [-h] [--roles-path <roles directory>]
+                                       <role>
+
+  Show information about a given role
+
+  positional arguments:
+    <role>                Role to display more information about.
+
+  optional arguments:
+    -h, --help            show this help message and exit
+    --roles-path <roles directory>
+                          Filesystem path containing the role yaml files. By
+                          default this is /usr/share/openstack-tripleo-heat-
+                          templates/roles
+
+Example
+^^^^^^^
+.. code-block::
+
+  [user@host ~]$ openstack overcloud role show Compute
+  ###############################################################################
+  # Role Data for 'Compute'
+  ###############################################################################
+  HostnameFormatDefault: '%stackname%-novacompute-%index%'
+  ServicesDefault:
+   * OS::TripleO::Services::AuditD
+   * OS::TripleO::Services::CACerts
+   * OS::TripleO::Services::CephClient
+   * OS::TripleO::Services::CephExternal
+   * OS::TripleO::Services::CertmongerUser
+   * OS::TripleO::Services::Collectd
+   * OS::TripleO::Services::ComputeCeilometerAgent
+   * OS::TripleO::Services::ComputeNeutronCorePlugin
+   * OS::TripleO::Services::ComputeNeutronL3Agent
+   * OS::TripleO::Services::ComputeNeutronMetadataAgent
+   * OS::TripleO::Services::ComputeNeutronOvsAgent
+   * OS::TripleO::Services::Docker
+   * OS::TripleO::Services::FluentdClient
+   * OS::TripleO::Services::Kernel
+   * OS::TripleO::Services::MySQLClient
+   * OS::TripleO::Services::NeutronSriovAgent
+   * OS::TripleO::Services::NeutronVppAgent
+   * OS::TripleO::Services::NovaCompute
+   * OS::TripleO::Services::NovaLibvirt
+   * OS::TripleO::Services::Ntp
+   * OS::TripleO::Services::OpenDaylightOvs
+   * OS::TripleO::Services::Securetty
+   * OS::TripleO::Services::SensuClient
+   * OS::TripleO::Services::Snmp
+   * OS::TripleO::Services::Sshd
+   * OS::TripleO::Services::Timezone
+   * OS::TripleO::Services::TripleoFirewall
+   * OS::TripleO::Services::TripleoPackages
+   * OS::TripleO::Services::Vpp
+  name: 'Compute'
+
+Generate roles_data.yaml
+------------------------
+The ``openstack overcloud roles generate`` command can be used to generate
+a roles_data.yaml file for deployments.
+
+Usage
+^^^^^
+.. code-block::
+
+  usage: openstack overcloud roles generate [-h]
+                                            [--roles-path <roles directory>]
+                                            [-o <output file>]
+                                            <role> [<role> ...]
+
+  Generate roles_data.yaml file
+
+  positional arguments:
+    <role>                List of roles to use to generate the roles_data.yaml
+                          file for the deployment. NOTE: Ordering is important
+                          if no role has the "primary" and "controller" tags. If
+                          no role is tagged then the first role listed will be
+                          considered the primary role. This usually is the
+                          controller role.
+
+  optional arguments:
+    -h, --help            show this help message and exit
+    --roles-path <roles directory>
+                          Filesystem path containing the role yaml files. By
+                          default this is /usr/share/openstack-tripleo-heat-
+                          templates/roles
+    -o <output file>, --output-file <output file>
+                          File to capture all output to. For example,
+                          roles_data.yaml
+
+Example
+^^^^^^^
+.. code-block::
+
+  [user@host ~]$ openstack overcloud roles generate -o roles_data.yaml Controller Compute BlockStorage ObjectStorage CephStorage
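+
+The generated roles_data.yaml can then be passed to the deployment command
+via its roles file option, for example (additional deployment options
+omitted):
+
+.. code-block::
+
+  [user@host ~]$ openstack overcloud deploy --templates -r roles_data.yaml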
diff --git a/roles/Telemetry.yaml b/roles/Telemetry.yaml
new file mode 100644 (file)
index 0000000..d23ab6e
--- /dev/null
@@ -0,0 +1,32 @@
+###############################################################################
+# Role: Telemetry                                                             #
+###############################################################################
+- name: Telemetry
+  description: |
+    Telemetry role that has all the telemetry services.
+  networks:
+    - InternalApi
+  HostnameFormatDefault: '%stackname%-telemetry-%index%'
+  ServicesDefault:
+    - OS::TripleO::Services::AodhApi
+    - OS::TripleO::Services::AodhEvaluator
+    - OS::TripleO::Services::AodhListener
+    - OS::TripleO::Services::AodhNotifier
+    - OS::TripleO::Services::CeilometerAgentCentral
+    - OS::TripleO::Services::CeilometerAgentNotification
+    - OS::TripleO::Services::CeilometerApi
+    - OS::TripleO::Services::CeilometerExpirer
+    - OS::TripleO::Services::GnocchiApi
+    - OS::TripleO::Services::GnocchiMetricd
+    - OS::TripleO::Services::GnocchiStatsd
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::MongoDb
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::PankoApi
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::Redis
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
+
diff --git a/roles/Undercloud.yaml b/roles/Undercloud.yaml
new file mode 100644 (file)
index 0000000..bcdedc7
--- /dev/null
@@ -0,0 +1,56 @@
+###############################################################################
+# Role: Undercloud                                                            #
+###############################################################################
+- name: Undercloud
+  description: |
+    EXPERIMENTAL. A role to deploy the undercloud via heat using the 'openstack
+    undercloud deploy' command.
+  CountDefault: 1
+  disable_constraints: True
+  tags:
+    - primary
+    - controller
+  ServicesDefault:
+    - OS::TripleO::Services::Apache
+    - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::HeatApi
+    - OS::TripleO::Services::HeatApiCfn
+    - OS::TripleO::Services::HeatEngine
+    - OS::TripleO::Services::IronicApi
+    - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::IronicPxe
+    - OS::TripleO::Services::Iscsid
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::MistralApi
+    - OS::TripleO::Services::MistralEngine
+    - OS::TripleO::Services::MistralExecutor
+    - OS::TripleO::Services::MongoDb
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::NeutronApi
+    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::NeutronServer
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaIronic
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::SwiftStorage
+    - OS::TripleO::Services::UndercloudAodhApi
+    - OS::TripleO::Services::UndercloudAodhEvaluator
+    - OS::TripleO::Services::UndercloudAodhListener
+    - OS::TripleO::Services::UndercloudAodhNotifier
+    - OS::TripleO::Services::UndercloudCeilometerAgentCentral
+    - OS::TripleO::Services::UndercloudCeilometerAgentNotification
+    - OS::TripleO::Services::UndercloudGnocchiApi
+    - OS::TripleO::Services::UndercloudGnocchiMetricd
+    - OS::TripleO::Services::UndercloudGnocchiStatsd
+    - OS::TripleO::Services::UndercloudPankoApi
+    - OS::TripleO::Services::Zaqar
index 68d0b9e..f96e562 100644 (file)
-# Specifies which roles (groups of nodes) will be deployed
-# Note this is used as an input to the various *.j2.yaml
-# jinja2 templates, so that they are converted into *.yaml
-# during the plan creation (via a mistral action/workflow).
-#
-# The format is a list, with the following format:
-#
-# * name: (string) mandatory, name of the role, must be unique
-#
-# CountDefault: (number) optional, default number of nodes, defaults to 0
-# sets the default for the {{role.name}}Count parameter in overcloud.yaml
-#
-# HostnameFormatDefault: (string) optional default format string for hostname
-# defaults to '%stackname%-{{role.name.lower()}}-%index%'
-# sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml
-#
-# disable_constraints: (boolean) optional, whether to disable Nova and Glance
-# constraints for each role specified in the templates.
-#
-# disable_upgrade_deployment: (boolean) optional, whether to run the
-# ansible upgrade steps for all services that are deployed on the role. If set
-# to True, the operator will drive the upgrade for this role's nodes.
-#
-# upgrade_batch_size: (number): batch size for upgrades where tasks are
-# specified by services to run in batches vs all nodes at once.
-# This defaults to 1, but larger batches may be specified here.
-#
-# ServicesDefault: (list) optional default list of services to be deployed
-# on the role, defaults to an empty list. Sets the default for the
-# {{role.name}}Services parameter in overcloud.yaml
-#
-# tags: (list) list of tags used by other parts of the deployment process to
-# find the role for a specific type of functionality. Currently a role
-# with both 'primary' and 'controller' is used as the primary role for the
-# deployment process. If no roles have have 'primary' and 'controller', the
-# first role in this file is used as the primary role.
-#
+###############################################################################
+# File generated by tripleoclient
+###############################################################################
+###############################################################################
+# Role: Controller                                                            #
+###############################################################################
 - name: Controller
+  description: |
+    Controller role that has all the controller services loaded and handles
+    Database, Messaging and Network functions.
   CountDefault: 1
   tags:
     - primary
     - controller
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
+  HostnameFormatDefault: '%stackname%-controller-%index%'
   ServicesDefault:
+    - OS::TripleO::Services::AodhApi
+    - OS::TripleO::Services::AodhEvaluator
+    - OS::TripleO::Services::AodhListener
+    - OS::TripleO::Services::AodhNotifier
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::BarbicanApi
     - OS::TripleO::Services::CACerts
-    - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::CeilometerAgentCentral
+    - OS::TripleO::Services::CeilometerAgentNotification
+    # FIXME: This service was disabled in Pike and this entry should be removed
+    # in Queens.
+    - OS::TripleO::Services::CeilometerExpirer
+    - OS::TripleO::Services::CephExternal
     - OS::TripleO::Services::CephMds
     - OS::TripleO::Services::CephMon
-    - OS::TripleO::Services::CephExternal
     - OS::TripleO::Services::CephRbdMirror
     - OS::TripleO::Services::CephRgw
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CinderApi
-    - OS::TripleO::Services::CinderBackup
-    - OS::TripleO::Services::CinderScheduler
-    - OS::TripleO::Services::CinderVolume
     - OS::TripleO::Services::CinderBackendDellPs
     - OS::TripleO::Services::CinderBackendDellSc
     - OS::TripleO::Services::CinderBackendNetApp
     - OS::TripleO::Services::CinderBackendScaleIO
+    - OS::TripleO::Services::CinderBackup
+    - OS::TripleO::Services::CinderHPELeftHandISCSI
+    - OS::TripleO::Services::CinderScheduler
+    - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Congress
-    - OS::TripleO::Services::Kernel
-    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Ec2Api
+    - OS::TripleO::Services::Etcd
+    - OS::TripleO::Services::ExternalSwiftProxy
+    - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::GnocchiApi
+    - OS::TripleO::Services::GnocchiMetricd
+    - OS::TripleO::Services::GnocchiStatsd
+    - OS::TripleO::Services::HAproxy
     - OS::TripleO::Services::HeatApi
     - OS::TripleO::Services::HeatApiCfn
     - OS::TripleO::Services::HeatApiCloudwatch
     - OS::TripleO::Services::HeatEngine
+    - OS::TripleO::Services::Horizon
+    - OS::TripleO::Services::IronicApi
+    - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::Iscsid
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::ManilaApi
+    - OS::TripleO::Services::ManilaBackendCephFs
+    - OS::TripleO::Services::ManilaBackendGeneric
+    - OS::TripleO::Services::ManilaBackendNetapp
+    - OS::TripleO::Services::ManilaScheduler
+    - OS::TripleO::Services::ManilaShare
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::MongoDb
     - OS::TripleO::Services::MySQL
     - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronApi
     - OS::TripleO::Services::NeutronBgpVpnApi
+    - OS::TripleO::Services::NeutronCorePlugin
     - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronL2gwAgent
     - OS::TripleO::Services::NeutronL2gwApi
     - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronLinuxbridgeAgent
     - OS::TripleO::Services::NeutronMetadataAgent
-    - OS::TripleO::Services::NeutronApi
-    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::NeutronML2FujitsuCfab
+    - OS::TripleO::Services::NeutronML2FujitsuFossw
     - OS::TripleO::Services::NeutronOvsAgent
-    - OS::TripleO::Services::NeutronL2gwAgent
-    - OS::TripleO::Services::RabbitMQ
-    - OS::TripleO::Services::HAproxy
-    - OS::TripleO::Services::Keepalived
-    - OS::TripleO::Services::Memcached
-    - OS::TripleO::Services::Pacemaker
-    - OS::TripleO::Services::Redis
-    - OS::TripleO::Services::NovaConductor
-    - OS::TripleO::Services::MongoDb
+    - OS::TripleO::Services::NeutronVppAgent
     - OS::TripleO::Services::NovaApi
-    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaConsoleauth
+    - OS::TripleO::Services::NovaIronic
     - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaPlacement
     - OS::TripleO::Services::NovaScheduler
-    - OS::TripleO::Services::NovaConsoleauth
     - OS::TripleO::Services::NovaVncProxy
-    - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::Ntp
-    - OS::TripleO::Services::SwiftProxy
-    - OS::TripleO::Services::ExternalSwiftProxy
-    - OS::TripleO::Services::SwiftStorage
-    - OS::TripleO::Services::SwiftRingBuilder
-    - OS::TripleO::Services::Snmp
-    - OS::TripleO::Services::Sshd
-    - OS::TripleO::Services::Securetty
-    - OS::TripleO::Services::Timezone
-    # FIXME: This service was disabled in Pike and this entry should be removed
-    # in Queens.
-    - OS::TripleO::Services::CeilometerExpirer
-    - OS::TripleO::Services::CeilometerAgentCentral
-    - OS::TripleO::Services::CeilometerAgentNotification
-    - OS::TripleO::Services::Horizon
-    - OS::TripleO::Services::GnocchiApi
-    - OS::TripleO::Services::GnocchiMetricd
-    - OS::TripleO::Services::GnocchiStatsd
-    - OS::TripleO::Services::ManilaApi
-    - OS::TripleO::Services::ManilaScheduler
-    - OS::TripleO::Services::ManilaBackendGeneric
-    - OS::TripleO::Services::ManilaBackendNetapp
-    - OS::TripleO::Services::ManilaBackendCephFs
-    - OS::TripleO::Services::ManilaShare
-    - OS::TripleO::Services::AodhApi
-    - OS::TripleO::Services::AodhEvaluator
-    - OS::TripleO::Services::AodhNotifier
-    - OS::TripleO::Services::AodhListener
-    - OS::TripleO::Services::SaharaApi
-    - OS::TripleO::Services::SaharaEngine
-    - OS::TripleO::Services::IronicApi
-    - OS::TripleO::Services::IronicConductor
-    - OS::TripleO::Services::NovaIronic
-    - OS::TripleO::Services::TripleoPackages
-    - OS::TripleO::Services::TripleoFirewall
-    - OS::TripleO::Services::OpenDaylightApi
-    - OS::TripleO::Services::OpenDaylightOvs
-    - OS::TripleO::Services::SensuClient
-    - OS::TripleO::Services::FluentdClient
-    - OS::TripleO::Services::Collectd
-    - OS::TripleO::Services::BarbicanApi
-    - OS::TripleO::Services::PankoApi
-    - OS::TripleO::Services::Tacker
-    - OS::TripleO::Services::Zaqar
-    - OS::TripleO::Services::OVNDBs
-    - OS::TripleO::Services::NeutronML2FujitsuCfab
-    - OS::TripleO::Services::NeutronML2FujitsuFossw
-    - OS::TripleO::Services::CinderHPELeftHandISCSI
-    - OS::TripleO::Services::Etcd
-    - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::OctaviaApi
     - OS::TripleO::Services::OctaviaHealthManager
     - OS::TripleO::Services::OctaviaHousekeeping
     - OS::TripleO::Services::OctaviaWorker
+    - OS::TripleO::Services::OpenDaylightApi
+    - OS::TripleO::Services::OpenDaylightOvs
+    - OS::TripleO::Services::OVNDBs
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::PankoApi
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::Redis
+    - OS::TripleO::Services::SaharaApi
+    - OS::TripleO::Services::SaharaEngine
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::SwiftStorage
+    - OS::TripleO::Services::Tacker
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::Vpp
-    - OS::TripleO::Services::NeutronVppAgent
-    - OS::TripleO::Services::Docker
-
+    - OS::TripleO::Services::Zaqar
+###############################################################################
+# Role: Compute                                                               #
+###############################################################################
 - name: Compute
+  description: |
+    Basic Compute Node role
   CountDefault: 1
+  networks:
+    - InternalApi
+    - Tenant
+    - Storage
   HostnameFormatDefault: '%stackname%-novacompute-%index%'
   disable_upgrade_deployment: True
   ServicesDefault:
+    - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::CACerts
-    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephClient
     - OS::TripleO::Services::CephExternal
-    - OS::TripleO::Services::Timezone
-    - OS::TripleO::Services::Ntp
-    - OS::TripleO::Services::Snmp
-    - OS::TripleO::Services::Sshd
-    - OS::TripleO::Services::Securetty
-    - OS::TripleO::Services::NovaCompute
-    - OS::TripleO::Services::NovaLibvirt
-    - OS::TripleO::Services::Kernel
-    - OS::TripleO::Services::ComputeNeutronCorePlugin
-    - OS::TripleO::Services::ComputeNeutronOvsAgent
+    - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::ComputeCeilometerAgent
+    - OS::TripleO::Services::ComputeNeutronCorePlugin
     - OS::TripleO::Services::ComputeNeutronL3Agent
     - OS::TripleO::Services::ComputeNeutronMetadataAgent
-    - OS::TripleO::Services::TripleoPackages
-    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::ComputeNeutronOvsAgent
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Iscsid
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronLinuxbridgeAgent
     - OS::TripleO::Services::NeutronSriovAgent
+    - OS::TripleO::Services::NeutronVppAgent
+    - OS::TripleO::Services::NovaCompute
+    - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::OpenDaylightOvs
+    - OS::TripleO::Services::Securetty
     - OS::TripleO::Services::SensuClient
-    - OS::TripleO::Services::FluentdClient
-    - OS::TripleO::Services::AuditD
-    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::Vpp
-    - OS::TripleO::Services::NeutronVppAgent
-    - OS::TripleO::Services::MySQLClient
-    - OS::TripleO::Services::Docker
-
+###############################################################################
+# Role: BlockStorage                                                          #
+###############################################################################
 - name: BlockStorage
+  description: |
+    Cinder Block Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::BlockStorageCinderVolume
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CertmongerUser
-    - OS::TripleO::Services::BlockStorageCinderVolume
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Iscsid
     - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::Ntp
-    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SensuClient
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
-    - OS::TripleO::Services::Securetty
-    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::TripleoFirewall
-    - OS::TripleO::Services::SensuClient
-    - OS::TripleO::Services::FluentdClient
-    - OS::TripleO::Services::AuditD
-    - OS::TripleO::Services::Collectd
-    - OS::TripleO::Services::MySQLClient
-    - OS::TripleO::Services::Docker
-
+    - OS::TripleO::Services::TripleoPackages
+###############################################################################
+# Role: ObjectStorage                                                         #
+###############################################################################
 - name: ObjectStorage
+  description: |
+    Swift Object Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   disable_upgrade_deployment: True
   ServicesDefault:
+    - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::Ntp
-    - OS::TripleO::Services::SwiftStorage
-    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SensuClient
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
-    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::SwiftStorage
     - OS::TripleO::Services::Timezone
-    - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
-    - OS::TripleO::Services::SensuClient
-    - OS::TripleO::Services::FluentdClient
-    - OS::TripleO::Services::AuditD
-    - OS::TripleO::Services::Collectd
-    - OS::TripleO::Services::MySQLClient
-    - OS::TripleO::Services::Docker
-
+    - OS::TripleO::Services::TripleoPackages
+###############################################################################
+# Role: CephStorage                                                           #
+###############################################################################
 - name: CephStorage
+  description: |
+    Ceph OSD Storage node role
+  networks:
+    - Storage
+    - StorageMgmt
   ServicesDefault:
+    - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::CACerts
-    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephOSD
+    - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::Securetty
+    - OS::TripleO::Services::SensuClient
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
-    - OS::TripleO::Services::Securetty
     - OS::TripleO::Services::Timezone
-    - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
-    - OS::TripleO::Services::SensuClient
-    - OS::TripleO::Services::FluentdClient
-    - OS::TripleO::Services::AuditD
-    - OS::TripleO::Services::Collectd
-    - OS::TripleO::Services::MySQLClient
-    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::TripleoPackages
index d57c8fc..783df91 100644 (file)
@@ -1,49 +1,61 @@
+###############################################################################
+# File generated by tripleoclient
+###############################################################################
+###############################################################################
+# Role: Undercloud                                                            #
+###############################################################################
 - name: Undercloud
+  description: |
+    EXPERIMENTAL. A role to deploy the undercloud via heat using the 'openstack
+    undercloud deploy' command.
   CountDefault: 1
   disable_constraints: True
   tags:
     - primary
     - controller
   ServicesDefault:
-    - OS::TripleO::Services::Ntp
-    - OS::TripleO::Services::MySQL
-    - OS::TripleO::Services::MongoDb
-    - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::Apache
-    - OS::TripleO::Services::RabbitMQ
     - OS::TripleO::Services::GlanceApi
-    - OS::TripleO::Services::SwiftProxy
-    - OS::TripleO::Services::SwiftStorage
-    - OS::TripleO::Services::SwiftRingBuilder
-    - OS::TripleO::Services::Memcached
     - OS::TripleO::Services::HeatApi
     - OS::TripleO::Services::HeatApiCfn
     - OS::TripleO::Services::HeatEngine
-    - OS::TripleO::Services::NovaApi
-    - OS::TripleO::Services::NovaPlacement
-    - OS::TripleO::Services::NovaMetadata
-    - OS::TripleO::Services::NovaScheduler
-    - OS::TripleO::Services::NovaConductor
-    - OS::TripleO::Services::MistralEngine
-    - OS::TripleO::Services::MistralApi
-    - OS::TripleO::Services::MistralExecutor
     - OS::TripleO::Services::IronicApi
     - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::IronicInspector
     - OS::TripleO::Services::IronicPxe
-    - OS::TripleO::Services::NovaIronic
-    - OS::TripleO::Services::Zaqar
-    - OS::TripleO::Services::NeutronServer
+    - OS::TripleO::Services::Iscsid
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::MistralApi
+    - OS::TripleO::Services::MistralEngine
+    - OS::TripleO::Services::MistralExecutor
+    - OS::TripleO::Services::MongoDb
+    - OS::TripleO::Services::MySQL
     - OS::TripleO::Services::NeutronApi
     - OS::TripleO::Services::NeutronCorePlugin
-    - OS::TripleO::Services::NeutronOvsAgent
     - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::NeutronServer
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaIronic
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::SwiftStorage
     - OS::TripleO::Services::UndercloudAodhApi
     - OS::TripleO::Services::UndercloudAodhEvaluator
-    - OS::TripleO::Services::UndercloudAodhNotifier
     - OS::TripleO::Services::UndercloudAodhListener
+    - OS::TripleO::Services::UndercloudAodhNotifier
+    - OS::TripleO::Services::UndercloudCeilometerAgentCentral
+    - OS::TripleO::Services::UndercloudCeilometerAgentIpmi
+    - OS::TripleO::Services::UndercloudCeilometerAgentNotification
     - OS::TripleO::Services::UndercloudGnocchiApi
     - OS::TripleO::Services::UndercloudGnocchiMetricd
     - OS::TripleO::Services::UndercloudGnocchiStatsd
     - OS::TripleO::Services::UndercloudPankoApi
-    - OS::TripleO::Services::UndercloudCeilometerAgentCentral
-    - OS::TripleO::Services::UndercloudCeilometerAgentNotification
+    - OS::TripleO::Services::Zaqar
diff --git a/sample-env-generator/README.rst b/sample-env-generator/README.rst
new file mode 100644 (file)
index 0000000..32e94f9
--- /dev/null
@@ -0,0 +1,160 @@
+Sample Environment Generator
+----------------------------
+
+This is a tool to automate the generation of our sample environment
+files.  It takes a yaml file as input, and based on the environments
+defined in that file generates a number of sample environment files
+from the parameters in the Heat templates.
+
+Usage
+=====
+
+The simplest case is when an existing sample environment needs to be
+updated to reflect changes in the templates.  Use the tox ``genconfig``
+target to do this::
+
+    tox -e genconfig
+
+.. note:: The tool should be run from the root directory of the
+          ``tripleo-heat-templates`` project.
+
+If a new sample environment is needed, it should be added to the
+appropriate file in the ``sample-env-generator/`` directory.  The existing
+entries in the files can be used as examples, and a more detailed
+explanation of the different available keys is below:
+
+Top-level:
+
+- **environments**: This is the top-level key in the file.  All other keys
+  below should appear in a list of dictionaries that define environments.
+
+Environment-specific:
+
+- **name**: the output file will be this name + .yaml, in the
+  ``environments`` directory.
+- **title**: a human-readable title for the environment.
+- **description**: A description of the environment.  Will be included
+  as a comment at the top of the sample file.
+- **files**: The Heat templates containing the parameter definitions
+  for the environment.  Should be specified as a path relative to the
+  root of the ``tripleo-heat-templates`` project.  For example:
+  ``puppet/extraconfig/tls/tls-cert-inject.yaml:``.  Each filename is a key
+  whose value is a YAML dictionary containing a ``parameters`` entry.
+- **parameters**: There should be one ``parameters`` entry per file in the
+  ``files`` section (see the example configuration below).
+  This can be either a list of parameters related to
+  the environment, which is necessary for templates like
+  overcloud.yaml, or the string 'all', which indicates that all
+  parameters from the file should be included.
+- **static**: Can be used to specify that certain parameters must
+  not be changed.  Examples would be the EnableSomething params
+  in the templates.  When writing a sample config for Something,
+  ``EnableSomething: True`` would be a static param, since it
+  would be nonsense to include the environment with it set to any other
+  value.
+- **sample_values**: Sometimes it is useful to include a sample value
+  for a parameter that is not the parameter's actual default.
+  An example of this is the SSLCertificate param in the enable-tls
+  environment file.
+- **resource_registry**: Many environments also need to pass
+  resource_registry entries when they are used.  This can be used
+  to specify that in the configuration file.
+- **children**: For environments that share a lot of common values but may
+  need minor variations for different use cases, sample environment entries
+  can be nested.  ``children`` takes a list of environments with the same
+  structure as the top-level ``environments`` key.  The main difference is
+  that all keys are optional, and any that are omitted will be inherited from
+  the parent environment definition (a brief sketch follows this list).
+
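+For instance, a minimal sketch of a nested entry (all names and paths below
+are hypothetical) could look like::
+
+    environments:
+      -
+        name: demo-base
+        title: Demo Base Environment
+        description: |
+          Base demo environment.
+        files:
+          demo.yaml:
+            parameters: all
+        children:
+          # Keys omitted in a child (title, description, files, ...) are
+          # inherited from the parent entry above.
+          - name: demo-variant
+            sample_values:
+              DemoParam: alternate-value
+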
+Some behavioral notes:
+
+- Parameters without default values will be marked as mandatory to indicate
+  that the user must set a value for them.
+- It is no longer recommended to set parameters using the ``parameters``
+  section.  Instead, all parameters should be set as ``parameter_defaults``
+  which will work regardless of whether the parameter is top-level or nested.
+  Therefore, the tool will always set parameters in the ``parameter_defaults``
+  section.
+- Parameters whose name begins with the _ character are treated as private.
+  This indicates that the parameter value will be passed in from another
+  template and does not need to be exposed directly to the user.
+
+If adding a new environment, don't forget to add the new file to the
+git repository so it will be included with the review.
+
+Example
+=======
+
+Given a Heat template named ``example.yaml`` that looks like::
+
+    parameters:
+      EnableExample:
+        default: False
+        description: Enable the example feature
+        type: boolean
+      ParamOne:
+        default: one
+        description: First example param
+        type: string
+      ParamTwo:
+        description: Second example param
+        type: number
+      _PrivateParam:
+        default: does not matter
+        description: Will not show up
+        type: string
+
+And an environment generator entry that looks like::
+
+    environments:
+      -
+        name: example
+        title: Example Environment
+        description: |
+          An example environment demonstrating how to use the sample
+          environment generator.  This text will be included at the top
+          of the generated file as a comment.
+        files:
+          example.yaml:
+            parameters: all
+        sample_values:
+          EnableExample: True
+        static:
+          - EnableExample
+        resource_registry:
+          OS::TripleO::ExampleData: ../extraconfig/example.yaml
+
+The generated environment file would look like::
+
+    # *******************************************************************
+    # This file was created automatically by the sample environment
+    # generator. Developers should use `tox -e genconfig` to update it.
+    # Users are recommended to make changes to a copy of the file instead
+    # of the original, if any customizations are needed.
+    # *******************************************************************
+    # title: Example Environment
+    # description: |
+    #   An example environment demonstrating how to use the sample
+    #   environment generator.  This text will be included at the top
+    #   of the generated file as a comment.
+    parameter_defaults:
+      # First example param
+      # Type: string
+      ParamOne: one
+
+      # Second example param
+      # Mandatory. This parameter must be set by the user.
+      # Type: number
+      ParamTwo: <None>
+
+      # ******************************************************
+      # Static parameters - these are values that must be
+      # included in the environment but should not be changed.
+      # ******************************************************
+      # Enable the example feature
+      # Type: boolean
+      EnableExample: True
+
+      # *********************
+      # End static parameters
+      # *********************
+    resource_registry:
+      OS::TripleO::ExampleData: ../extraconfig/example.yaml
diff --git a/sample-env-generator/networking.yaml b/sample-env-generator/networking.yaml
new file mode 100644 (file)
index 0000000..ea7042b
--- /dev/null
@@ -0,0 +1,32 @@
+environments:
+  -
+    name: networking/neutron-midonet
+    title: Enable the Neutron MidoNet Services
+    description: A Heat environment that can be used to deploy MidoNet Services
+    files:
+      puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml:
+        parameters: all
+      puppet/services/neutron-base.yaml:
+        parameters:
+          - NeutronCorePlugin
+      puppet/services/neutron-dhcp.yaml:
+        parameters:
+          - NeutronEnableIsolatedMetadata
+    sample_values:
+      NeutronCorePlugin: 'midonet.neutron.plugin_v1.MidonetPluginV2'
+      NeutronEnableIsolatedMetadata: true
+      EnableZookeeperOnController: true
+      EnableCassandraOnController: true
+    static:
+      - NeutronCorePlugin
+      - NeutronEnableIsolatedMetadata
+      - EnableZookeeperOnController
+      - EnableCassandraOnController
+    resource_registry:
+      OS::TripleO::AllNodesExtraConfig: ../../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+      OS::TripleO::Controller::Net::SoftwareConfig: ../../net-config-linux-bridge.yaml
+      OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+      OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+      OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+      OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet
+      OS::TripleO::Services::ComputeNeutronCorePlugin: ../../puppet/services/neutron-compute-plugin-midonet.yaml
diff --git a/sample-env-generator/predictable-placement.yaml b/sample-env-generator/predictable-placement.yaml
new file mode 100644 (file)
index 0000000..ffda7ac
--- /dev/null
@@ -0,0 +1,17 @@
+environments:
+  -
+    name: predictable-placement/custom-hostnames
+    title: Custom Hostnames
+    files:
+      overcloud.yaml:
+        parameters:
+          - ControllerHostnameFormat
+          - ComputeHostnameFormat
+          - BlockStorageHostnameFormat
+          - ObjectStorageHostnameFormat
+          - CephStorageHostnameFormat
+    description: |
+      Hostname format for each role
+      Note %index% is translated into the index of the node, e.g. 0/1/2 etc
+      and %stackname% is replaced with OS::stack_name in the template below.
+      If you want to use the heat generated names, pass '' (empty string).
diff --git a/sample-env-generator/ssl.yaml b/sample-env-generator/ssl.yaml
new file mode 100644 (file)
index 0000000..6963e84
--- /dev/null
@@ -0,0 +1,459 @@
+environments:
+  -
+    name: ssl/enable-tls
+    title: Enable SSL on OpenStack Public Endpoints
+    description: |
+      Use this environment to pass in certificates for SSL deployments.
+      For these values to take effect, one of the tls-endpoints-*.yaml environments
+      must also be used.
+    files:
+      puppet/extraconfig/tls/tls-cert-inject.yaml:
+        parameters: all
+    static:
+      # This should probably be private, but for testing static params I'm
+      # setting it as such for now.
+      - DeployedSSLCertificatePath
+    sample_values:
+      SSLCertificate: |-
+        |
+            The contents of your certificate go here
+      SSLKey: |-
+        |
+            The contents of the private key go here
+    resource_registry:
+      OS::TripleO::NodeTLSData: ../../puppet/extraconfig/tls/tls-cert-inject.yaml
+  - name: ssl/inject-trust-anchor
+    title: Inject SSL Trust Anchor on Overcloud Nodes
+    description: |
+      When using an SSL certificate signed by a CA that is not in the default
+      list of CAs, this environment allows adding a custom CA certificate to
+      the overcloud nodes.
+    files:
+      puppet/extraconfig/tls/ca-inject.yaml:
+        parameters:
+          - SSLRootCertificate
+    sample_values:
+      SSLRootCertificate: |-
+        |
+            The contents of your certificate go here
+    resource_registry:
+      OS::TripleO::NodeTLSCAData: ../../puppet/extraconfig/tls/ca-inject.yaml
+    children:
+      - name: ssl/inject-trust-anchor-hiera
+        files:
+          puppet/services/ca-certs.yaml:
+            parameters:
+              - CAMap
+        # Need to clear this so we don't inherit the parent registry
+        resource_registry: {}
+        sample_values:
+          CAMap:  |-2
+
+                first-ca-name:
+                  content: |
+                    The content of the CA cert goes here
+                second-ca-name:
+                  content: |
+                    The content of the CA cert goes here
+  -
+    name: ssl/tls-endpoints-public-ip
+    title: Deploy Public SSL Endpoints as IP Addresses
+    description: |
+      Use this environment when deploying an SSL-enabled overcloud where the public
+      endpoint is an IP address.
+    files:
+      network/endpoints/endpoint_map.yaml:
+        parameters:
+          - EndpointMap
+    sample_values:
+      # NOTE(bnemec): This is a bit odd, but it's the only way I've found that
+      # works.  The |-2 tells YAML to strip two spaces off the indentation of
+      # the value, which because it's indented six spaces gets us to the four
+      # that we actually want.  Note that zero is not a valid value here, so
+      # two seemed like the most sane option.
+      EndpointMap: |-2
+
+            AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+            AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+            AodhPublic: {protocol: 'https', port: '13042', host: 'IP_ADDRESS'}
+            BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+            BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+            BarbicanPublic: {protocol: 'https', port: '13311', host: 'IP_ADDRESS'}
+            CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+            CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+            CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
+            CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            CephRgwPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+            CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+            CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+            CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
+            CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+            CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+            CongressPublic: {protocol: 'https', port: '13789', host: 'IP_ADDRESS'}
+            ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+            ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+            ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+            ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+            ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+            ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+            ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+            ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+            ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+            ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+            ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+            ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+            ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+            ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+            Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+            Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+            Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'}
+            GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+            GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+            GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'}
+            GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+            GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+            GnocchiPublic: {protocol: 'https', port: '13041', host: 'IP_ADDRESS'}
+            HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+            HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+            HeatPublic: {protocol: 'https', port: '13004', host: 'IP_ADDRESS'}
+            HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+            HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+            HeatCfnPublic: {protocol: 'https', port: '13005', host: 'IP_ADDRESS'}
+            HorizonPublic: {protocol: 'https', port: '443', host: 'IP_ADDRESS'}
+            IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+            IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+            IronicPublic: {protocol: 'https', port: '13385', host: 'IP_ADDRESS'}
+            IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+            IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+            IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'IP_ADDRESS'}
+            KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+            KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+            KeystonePublic: {protocol: 'https', port: '13000', host: 'IP_ADDRESS'}
+            ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+            ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+            ManilaPublic: {protocol: 'https', port: '13786', host: 'IP_ADDRESS'}
+            MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+            MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+            MistralPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+            MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+            NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+            NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+            NeutronPublic: {protocol: 'https', port: '13696', host: 'IP_ADDRESS'}
+            NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+            NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+            NovaPublic: {protocol: 'https', port: '13774', host: 'IP_ADDRESS'}
+            NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+            NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+            NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'IP_ADDRESS'}
+            NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+            NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+            NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'IP_ADDRESS'}
+            OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+            OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+            OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
+            PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+            PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+            PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+            SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+            SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+            SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
+            SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            SwiftPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+            TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+            TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+            TackerPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+            ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+            ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+            ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'}
+            ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+            ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+            ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'IP_ADDRESS'}
+  -
+    name: ssl/tls-endpoints-public-dns
+    title: Deploy Public SSL Endpoints as DNS Names
+    description: |
+      Use this environment when deploying an SSL-enabled overcloud where the public
+      endpoint is a DNS name.
+    files:
+      network/endpoints/endpoint_map.yaml:
+        parameters:
+          - EndpointMap
+    sample_values:
+      # NOTE(bnemec): This is a bit odd, but it's the only way I've found that
+      # works.  The |-2 tells YAML to strip two spaces off the indentation of
+      # the value, which because it's indented six spaces gets us to the four
+      # that we actually want.  Note that zero is not a valid value here, so
+      # two seemed like the most sane option.
+      EndpointMap: |-2
+
+            AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+            AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+            AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+            BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+            BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+            BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+            CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+            CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+            CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+            CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+            CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+            CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+            CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+            CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+            CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+            CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+            ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+            ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+            ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+            host: 'IP_ADDRESS'}
+            ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+            ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+            ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+            ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+            ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+            ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+            ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+            ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+            ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+            ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+            ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+            ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+            Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+            Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+            Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+            GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+            GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+            GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+            GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+            GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+            GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+            HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+            HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+            HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+            HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+            HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+            HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+            HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+            IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+            IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+            IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+            IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+            IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+            IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+            KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+            KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+            KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+            ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+            ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+            ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+            MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+            MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+            MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+            MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+            NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+            NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+            NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+            NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+            NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+            NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+            NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+            NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+            NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+            NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+            NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+            NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+            OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+            OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+            OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+            PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+            PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+            PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+            SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+            SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+            SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+            SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+            TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+            TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+            TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+            ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+            ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+            ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+            ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+            ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+            ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+  -
+    name: ssl/tls-everywhere-endpoints-dns
+    title: Deploy All SSL Endpoints as DNS Names
+    description: |
+      Use this environment when deploying an overcloud where all the endpoints are
+      DNS names and there's TLS in all endpoint types.
+    files:
+      network/endpoints/endpoint_map.yaml:
+        parameters:
+          - EndpointMap
+    sample_values:
+      # NOTE(bnemec): This is a bit odd, but it's the only way I've found that
+      # works.  The |-2 tells YAML to strip two spaces off the indentation of
+      # the value, which because it's indented six spaces gets us to the four
+      # that we actually want.  Note that zero is not a valid value here, so
+      # two seemed like the most sane option.
+      EndpointMap: |-2
+
+            AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+            AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+            AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+            BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+            BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+            BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+            CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+            CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+            CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+            CephRgwAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+            CephRgwInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+            CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+            CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+            CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+            CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+            CongressAdmin: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+            CongressInternal: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+            CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+            ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+            ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+            ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089', host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089', host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089', host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086', host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086', host: 'IP_ADDRESS'}
+            ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086', host: 'IP_ADDRESS'}
+            ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+            ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+            ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+            ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+            ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+            ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+            ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+            ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+            ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+            ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+            ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+            ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+            ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+            Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+            Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+            Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+            GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+            GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+            GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+            GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+            GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+            GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+            HeatAdmin: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+            HeatInternal: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+            HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+            HeatCfnAdmin: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+            HeatCfnInternal: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+            HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+            HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+            IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+            IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+            IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+            IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+            IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+            IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+            KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
+            KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
+            KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+            ManilaAdmin: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+            ManilaInternal: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+            ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+            MistralAdmin: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+            MistralInternal: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+            MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+            MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'CLOUDNAME'}
+            NeutronAdmin: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+            NeutronInternal: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+            NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+            NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+            NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+            NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+            NovaPlacementAdmin: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+            NovaPlacementInternal: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+            NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+            NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+            NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+            NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+            OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+            OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+            OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+            PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+            PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+            PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+            SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+            SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+            SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+            SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+            SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+            SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+            TackerAdmin: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+            TackerInternal: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+            TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+            ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+            ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+            ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+            ZaqarWebSocketAdmin: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+            ZaqarWebSocketInternal: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+            ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
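[Editor's note] The NOTE(bnemec) comment above leans on YAML's block-scalar indentation indicator. A minimal sketch of that behaviour, checked with PyYAML; the snippet mirrors the sample above (six-space key, twelve-space value lines), and only the surrounding harness is illustrative:

    import yaml

    snippet = (
        "sample_values:\n"
        "      EndpointMap: |-2\n"
        "\n"
        "            AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}\n"
    )
    value = yaml.safe_load(snippet)['sample_values']['EndpointMap']
    # The indicator "2" is added to the key's six-space indentation, so the
    # content baseline is column eight and the twelve-space value lines keep
    # four leading spaces; the "-" chomps the trailing newline.
    print(repr(value))
    # -> "\n    AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}"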
diff --git a/sample-env-generator/storage.yaml b/sample-env-generator/storage.yaml
new file mode 100644 (file)
index 0000000..aa0385c
--- /dev/null
@@ -0,0 +1,133 @@
+environments:
+  -
+    name: storage/enable-ceph
+    title: Enable Ceph Storage Backend
+    files:
+      puppet/services/cinder-volume.yaml:
+        parameters:
+          - CinderEnableIscsiBackend
+          - CinderEnableRbdBackend
+      puppet/services/cinder-backup.yaml:
+        parameters:
+          - CinderBackupBackend
+      puppet/services/nova-compute.yaml:
+        parameters:
+          - NovaEnableRbdBackend
+      puppet/services/glance-api.yaml:
+        parameters:
+          - GlanceBackend
+      puppet/services/gnocchi-api.yaml:
+        parameters:
+          - GnocchiBackend
+    sample_values:
+      CinderEnableIscsiBackend: False
+      CinderEnableRbdBackend: True
+      CinderBackupBackend: rbd
+      NovaEnableRbdBackend: True
+      GlanceBackend: rbd
+      GnocchiBackend: rbd
+    description: |
+      Include this environment to enable Ceph as the backend for
+      Cinder, Nova, Gnocchi, and Glance.
+  -
+    name: storage/cinder-nfs
+    title: Enable Cinder NFS Backend
+    files:
+      puppet/services/cinder-volume.yaml:
+        parameters:
+          - CinderNfsMountOptions
+          - CinderNfsServers
+          - CinderEnableNfsBackend
+          - CinderEnableIscsiBackend
+    sample_values:
+      CinderEnableNfsBackend: True
+      CinderEnableIscsiBackend: False
+      CinderNfsServers: '192.168.122.1:/export/cinder'
+    description: |
+      Configure and include this environment to enable the use of an NFS
+      share as the backend for Cinder.
+  -
+    name: storage/glance-nfs
+    title: Enable Glance NFS Backend
+    files:
+      puppet/services/glance-api.yaml:
+        parameters:
+          - GlanceBackend
+          - GlanceNfsEnabled
+          - GlanceNfsShare
+          - GlanceNfsOptions
+    sample_values:
+      GlanceBackend: file
+      GlanceNfsEnabled: True
+    static:
+      - GlanceBackend
+      - GlanceNfsEnabled
+    description: |
+      Configure and include this environment to enable the use of an NFS
+      share as the backend for Glance.
+  -
+    name: storage/external-ceph
+    title: Deploy Using an External Ceph Cluster
+    files:
+      puppet/services/nova-compute.yaml:
+        parameters:
+          - NovaRbdPoolName
+          - NovaEnableRbdBackend
+          - CephClientUserName
+      puppet/services/cinder-volume.yaml:
+        parameters:
+          - CinderRbdPoolName
+          - CinderEnableIscsiBackend
+          - CinderEnableRbdBackend
+      puppet/services/glance-api.yaml:
+        parameters:
+          - GlanceRbdPoolName
+          - GlanceBackend
+      puppet/services/gnocchi-api.yaml:
+        parameters:
+          - GnocchiBackend
+      puppet/services/gnocchi-base.yaml:
+        parameters:
+          - GnocchiRbdPoolName
+      puppet/services/ceph-external.yaml:
+        parameters:
+          - CephClusterFSID
+          - CephClientKey
+          - CephExternalMonHost
+          - RbdDefaultFeatures
+      puppet/services/ceph-base.yaml:
+        parameters:
+          - CephAdminKey
+    sample_values:
+      CinderEnableIscsiBackend: False
+      CinderEnableRbdBackend: True
+      NovaEnableRbdBackend: True
+      GlanceBackend: rbd
+      GnocchiBackend: rbd
+      NovaRbdPoolName: vms
+      CinderRbdPoolName: volumes
+      GlanceRbdPoolName: images
+      GnocchiRbdPoolName: metrics
+      CephClientUserName: openstack
+      CephAdminKey: ''
+    description: |
+      A Heat environment file which can be used to enable the
+      use of an externally managed Ceph cluster.
+    resource_registry:
+      OS::TripleO::Services::CephExternal: ../../puppet/services/ceph-external.yaml
+      OS::TripleO::Services::CephMon: OS::Heat::None
+      OS::TripleO::Services::CephClient: OS::Heat::None
+      OS::TripleO::Services::CephOSD: OS::Heat::None
+  -
+    name: storage/cinder-netapp-config
+    title: Enable the Cinder NetApp Backend
+    description: |
+      A Heat environment file which can be used to enable a
+      Cinder NetApp backend, configured via puppet.
+    files:
+      puppet/services/cinder-backend-netapp.yaml:
+        parameters: all
+    static:
+      - CinderEnableNetappBackend
+    resource_registry:
+      OS::TripleO::ControllerExtraConfigPre: ../../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
similarity index 84%
rename from puppet/services/services.yaml
rename to services.yaml
index 0e7b6d2..4d3ca8d 100644 (file)
@@ -1,3 +1,4 @@
+# FIXME: move into common once the specfile adds it
 heat_template_version: pike
 
 description: >
@@ -115,6 +116,10 @@ outputs:
         yaql:
           expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
           data: {role_data: {get_attr: [ServiceChain, role_data]}}
+      service_workflow_tasks:
+        yaql:
+          expression: $.data.role_data.where($ != null).select($.get('service_workflow_tasks')).where($ != null).reduce($1.mergeWith($2), {})
+          data: {role_data: {get_attr: [ServiceChain, role_data]}}
       step_config: {get_attr: [ServiceChain, role_data, step_config]}
       upgrade_tasks:
         yaql:
@@ -127,3 +132,17 @@ outputs:
           expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
           data: {get_attr: [ServiceChain, role_data]}
       service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
+
+      # Keys to support docker/services
+      puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
+      kolla_config:
+        map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
+      docker_config:
+        {get_attr: [ServiceChain, role_data, docker_config]}
+      docker_puppet_tasks:
+        {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
+      host_prep_tasks:
+        yaql:
+          # Note we use distinct() here to filter any identical tasks
+          expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
+          data: {get_attr: [ServiceChain, role_data]}
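[Editor's note] A rough Python sketch of what the host_prep_tasks yaql expression above computes, assuming role_data is the list of per-service role_data mappings returned by ServiceChain (the helper name is ours, not part of the change):

    def collect_host_prep_tasks(role_data):
        tasks = []
        for svc in role_data:
            if svc is None:
                continue                      # where($ != null)
            svc_tasks = svc.get('host_prep_tasks')
            if svc_tasks is None:
                continue                      # second where($ != null)
            for task in svc_tasks:            # flatten()
                if task not in tasks:         # distinct() drops identical tasks
                    tasks.append(task)
        return tasks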
index c30101f..8113635 100644 (file)
@@ -6,4 +6,12 @@ Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
 six>=1.9.0 # MIT
 sphinx!=1.6.1,>=1.5.1 # BSD
 oslosphinx>=4.7.0 # Apache-2.0
-reno>=1.8.0 # Apache-2.0
+reno!=2.3.1,>=1.8.0 # Apache-2.0
+coverage!=4.4,>=4.0 # Apache-2.0
+fixtures>=3.0.0 # Apache-2.0/BSD
+python-subunit>=0.0.18 # Apache-2.0/BSD
+testrepository>=0.0.18 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+testtools>=1.4.0 # MIT
+mock>=2.0 # BSD
+oslotest>=1.10.0 # Apache-2.0
index 92d76d2..233ec18 100755 (executable)
@@ -20,7 +20,14 @@ import yaml
 required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
                    'RoleName', 'RoleParameters']
 
+# NOTE(bnemec): The duplication in this list is intentional.  During the
+# transition to generated environments we have two copies of these files,
+# so they need to be listed twice.  Once the deprecated versions are removed,
+# the duplicate entries can be dropped as well.
 envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
+                                'tls-endpoints-public-ip.yaml',
+                                'tls-everywhere-endpoints-dns.yaml',
+                                'tls-endpoints-public-dns.yaml',
                                 'tls-endpoints-public-ip.yaml',
                                 'tls-everywhere-endpoints-dns.yaml']
 ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
@@ -32,6 +39,24 @@ OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
 REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
                                           'config_image']
 OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags' ]
+# Mapping of parameter names to a list of the fields we should _not_ enforce
+# consistency across files on.  This should only contain parameters whose
+# definition we cannot change for backwards compatibility reasons.  New
+# parameters to the templates should not be added to this list.
+PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
+                                   'ManagementAllocationPools': ['default'],
+                                   'ExternalNetCidr': ['default'],
+                                   'ExternalAllocationPools': ['default'],
+                                   'StorageNetCidr': ['default'],
+                                   'StorageAllocationPools': ['default'],
+                                   'StorageMgmtNetCidr': ['default'],
+                                   'StorageMgmtAllocationPools': ['default'],
+                                   }
+
+PREFERRED_CAMEL_CASE = {
+    'ec2api': 'Ec2Api',
+    'haproxy': 'HAProxy',
+}
 
 
 def exit_usage():
@@ -39,6 +64,11 @@ def exit_usage():
     sys.exit(1)
 
 
+def to_camel_case(string):
+    return PREFERRED_CAMEL_CASE.get(string, ''.join(s.capitalize() or '_' for
+                                                    s in string.split('_')))
+
+
 def get_base_endpoint_map(filename):
     try:
         tpl = yaml.load(open(filename).read())
@@ -163,6 +193,13 @@ def validate_docker_service(filename, tpl):
                         % (key, filename))
                   return 1
 
+            config_volume = puppet_config.get('config_volume')
+            expected_config_image_parameter = "Docker%sConfigImage" % to_camel_case(config_volume)
+            if config_volume and not expected_config_image_parameter in tpl.get('parameters', []):
+                print('ERROR: Missing %s heat parameter for %s config_volume.'
+                      % (expected_config_image_parameter, config_volume))
+                return 1
+
     if 'parameters' in tpl:
         for param in required_params:
             if param not in tpl['parameters']:
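[Editor's note] The new config_volume check above derives the expected Heat parameter name via to_camel_case() defined earlier in this file. A small illustration of the naming rule (the config_volume value "heat_engine" is hypothetical):

    assert to_camel_case('heat_engine') == 'HeatEngine'
    assert to_camel_case('ec2api') == 'Ec2Api'      # via PREFERRED_CAMEL_CASE
    assert to_camel_case('haproxy') == 'HAProxy'    # via PREFERRED_CAMEL_CASE
    # so a docker service template with config_volume "heat_engine" must
    # declare a "DockerHeatEngineConfigImage" parameter:
    assert ("Docker%sConfigImage" % to_camel_case('heat_engine')
            == 'DockerHeatEngineConfigImage')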
@@ -204,7 +241,30 @@ def validate_service(filename, tpl):
     return 0
 
 
-def validate(filename):
+def validate(filename, param_map):
+    """Validate a Heat template
+
+    :param filename: The path to the file to validate
+    :param param_map: A dict which will be populated with the details of the
+                      parameters in the template.  The dict will have the
+                      following structure:
+
+                          {'ParameterName': [
+                               {'filename': ./file1.yaml,
+                                'data': {'description': '',
+                                         'type': string,
+                                         'default': '',
+                                         ...}
+                                },
+                               {'filename': ./file2.yaml,
+                                'data': {'description': '',
+                                         'type': string,
+                                         'default': '',
+                                         ...}
+                                },
+                                ...
+                           ]}
+    """
     print('Validating %s' % filename)
     retval = 0
     try:
@@ -219,12 +279,10 @@ def validate(filename):
 
         # qdr aliases rabbitmq service to provide alternative messaging backend
         if (filename.startswith('./puppet/services/') and
-                filename not in ['./puppet/services/services.yaml',
-                                 './puppet/services/qdr.yaml']):
+                filename not in ['./puppet/services/qdr.yaml']):
             retval = validate_service(filename, tpl)
 
-        if (filename.startswith('./docker/services/') and
-                filename != './docker/services/services.yaml'):
+        if filename.startswith('./docker/services/'):
             retval = validate_docker_service(filename, tpl)
 
         if filename.endswith('hyperconverged-ceph.yaml'):
@@ -235,7 +293,9 @@ def validate(filename):
         return 1
     # yaml is OK, now walk the parameters and output a warning for unused ones
     if 'heat_template_version' in tpl:
-        for p in tpl.get('parameters', {}):
+        for p, data in tpl.get('parameters', {}).items():
+            definition = {'data': data, 'filename': filename}
+            param_map.setdefault(p, []).append(definition)
             if p in required_params:
                 continue
             str_p = '\'%s\'' % p
@@ -255,6 +315,7 @@ exit_val = 0
 failed_files = []
 base_endpoint_map = None
 env_endpoint_maps = list()
+param_map = {}
 
 for base_path in path_args:
     if os.path.isdir(base_path):
@@ -262,7 +323,7 @@ for base_path in path_args:
             for f in files:
                 if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
                     file_path = os.path.join(subdir, f)
-                    failed = validate(file_path)
+                    failed = validate(file_path, param_map)
                     if failed:
                         failed_files.append(file_path)
                     exit_val |= failed
@@ -273,7 +334,7 @@ for base_path in path_args:
                         if env_endpoint_map:
                             env_endpoint_maps.append(env_endpoint_map)
     elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
-        failed = validate(base_path)
+        failed = validate(base_path, param_map)
         if failed:
             failed_files.append(base_path)
         exit_val |= failed
@@ -294,9 +355,9 @@ if base_endpoint_map and \
         else:
             print("%s matches base endpoint map" % env_endpoint_map['file'])
 else:
-    print("ERROR: Can't validate endpoint maps since a file is missing. "
-          "If you meant to delete one of these files you should update this "
-          "tool as well.")
+    print("ERROR: Did not find expected number of environments containing the "
+          "EndpointMap parameter.  If you meant to add or remove one of these "
+          "environments then you also need to update this tool.")
     if not base_endpoint_map:
         failed_files.append(ENDPOINT_MAP_FILE)
     if len(env_endpoint_maps) != len(envs_containing_endpoint_map):
@@ -305,6 +366,34 @@ else:
         failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
     exit_val |= 1
 
+# Validate that duplicate parameters defined in multiple files all have the
+# same definition.
+mismatch_count = 0
+for p, defs in param_map.items():
+    # Nothing to validate if the parameter is only defined once
+    if len(defs) == 1:
+        continue
+    check_data = [d['data'] for d in defs]
+    # Override excluded fields so they don't affect the result
+    exclusions = PARAMETER_DEFINITION_EXCLUSIONS.get(p, [])
+    ex_dict = {}
+    for field in exclusions:
+        ex_dict[field] = 'IGNORED'
+    for d in check_data:
+        d.update(ex_dict)
+    # If any item in the list differs from the first, the check fails
+    if check_data.count(check_data[0]) != len(check_data):
+        mismatch_count += 1
+        # TODO(bnemec): Make this a hard failure once all the templates have
+        #               been fixed.
+        #exit_val |= 1
+        #failed_files.extend([d['filename'] for d in defs])
+        print('Mismatched parameter definitions found for "%s"' % p)
+        print('Definitions found:')
+        for d in defs:
+            print('  %s:\n    %s' % (d['filename'], d['data']))
+print('Mismatched parameter definitions: %d' % mismatch_count)
+
 if failed_files:
     print('Validation failed on:')
     for f in failed_files:
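[Editor's note] A short sketch of how the duplicate-definition check interacts with PARAMETER_DEFINITION_EXCLUSIONS; the parameter is one from the exclusion list, the data values are made up:

    # Two files define ManagementNetCidr with different defaults.
    defs = [{'filename': './a.yaml', 'data': {'type': 'string', 'default': '10.0.1.0/24'}},
            {'filename': './b.yaml', 'data': {'type': 'string', 'default': '10.0.2.0/24'}}]
    check_data = [d['data'] for d in defs]
    # 'default' is excluded for ManagementNetCidr, so it is overridden in every
    # copy before comparison and the definitions are treated as matching.
    for d in check_data:
        d.update({'default': 'IGNORED'})
    assert check_data.count(check_data[0]) == len(check_data)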
diff --git a/tox.ini b/tox.ini
index b92e545..c87bf7b 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -1,12 +1,14 @@
 [tox]
 minversion = 1.6
 skipsdist = True
+envlist = py35,py27,pep8
 
 [testenv]
 usedevelop = True
 install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
+commands = python setup.py testr --slowest --testr-args='{posargs}'
 
 [testenv:venv]
 commands = {posargs}
@@ -22,3 +24,11 @@ commands = python ./tools/process-templates.py
 
 [testenv:releasenotes]
 commands = bash -c tools/releasenotes_tox.sh
+
+[testenv:cover]
+commands = python setup.py test --coverage --coverage-package-name=tripleo_heat_templates --testr-args='{posargs}'
+
+[testenv:genconfig]
+commands =
+           python ./tools/process-templates.py
+           python ./tripleo_heat_templates/environment_generator.py sample-env-generator/
diff --git a/tripleo_heat_templates/__init__.py b/tripleo_heat_templates/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tripleo_heat_templates/environment_generator.py b/tripleo_heat_templates/environment_generator.py
new file mode 100755 (executable)
index 0000000..876dd85
--- /dev/null
@@ -0,0 +1,212 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import os
+import sys
+import yaml
+
+
+_PARAM_FORMAT = u"""  # %(description)s
+  %(mandatory)s# Type: %(type)s
+  %(name)s:%(default)s
+"""
+_STATIC_MESSAGE_START = (
+    '  # ******************************************************\n'
+    '  # Static parameters - these are values that must be\n'
+    '  # included in the environment but should not be changed.\n'
+    '  # ******************************************************\n'
+    )
+_STATIC_MESSAGE_END = ('  # *********************\n'
+                       '  # End static parameters\n'
+                       '  # *********************\n'
+                       )
+_FILE_HEADER = (
+    '# *******************************************************************\n'
+    '# This file was created automatically by the sample environment\n'
+    '# generator. Developers should use `tox -e genconfig` to update it.\n'
+    '# Users are recommended to make changes to a copy of the file instead\n'
+    '# of the original, if any customizations are needed.\n'
+    '# *******************************************************************\n'
+    )
+# Certain parameter names can't be changed, but shouldn't be shown because
+# they are never intended for direct user input.
+_PRIVATE_OVERRIDES = ['server', 'servers', 'NodeIndex', 'DefaultPasswords']
+# Hidden params are not included by default when the 'all' option is used,
+# but can be explicitly included by referencing them in sample_defaults or
+# static.  This allows us to generate sample environments using them when
+# necessary, but they won't be improperly included by accident.
+_HIDDEN_PARAMS = ['EndpointMap', 'RoleName', 'RoleParameters',
+                  'ServiceNetMap',
+                  ]
+
+
+def _create_output_dir(target_file):
+    try:
+        os.makedirs(os.path.dirname(target_file))
+    except OSError as e:
+        if e.errno == errno.EEXIST:
+            pass
+        else:
+            raise
+
+
+def _generate_environment(input_env, parent_env=None):
+    if parent_env is None:
+        parent_env = {}
+    env = dict(parent_env)
+    env.pop('children', None)
+    env.update(input_env)
+    parameter_defaults = {}
+    param_names = []
+    sample_values = env.get('sample_values', {})
+    static_names = env.get('static', [])
+    for template_file, template_data in env['files'].items():
+        with open(template_file) as f:
+            f_data = yaml.safe_load(f)
+            f_params = f_data['parameters']
+            parameter_defaults.update(f_params)
+            if template_data['parameters'] == 'all':
+                new_names = [k for k, v in f_params.items()]
+                for hidden in _HIDDEN_PARAMS:
+                    if (hidden not in (static_names +
+                                        list(sample_values.keys())) and
+                            hidden in new_names):
+                        new_names.remove(hidden)
+            else:
+                new_names = template_data['parameters']
+            missing_params = [name for name in new_names
+                              if name not in f_params]
+            if missing_params:
+                raise RuntimeError('Did not find specified parameter names %s '
+                                   'in file %s for environment %s' %
+                                   (missing_params, template_file,
+                                    env['name']))
+            param_names += new_names
+
+    static_defaults = {k: v for k, v in parameter_defaults.items()
+                       if k in param_names and
+                       k in static_names
+                       }
+    parameter_defaults = {k: v for k, v in parameter_defaults.items()
+                          if k in param_names and
+                          k not in _PRIVATE_OVERRIDES and
+                          not k.startswith('_') and
+                          k not in static_names
+                          }
+
+    for k, v in sample_values.items():
+        if k in parameter_defaults:
+            parameter_defaults[k]['sample'] = v
+        if k in static_defaults:
+            static_defaults[k]['sample'] = v
+
+    def write_sample_entry(f, name, value):
+        default = value.get('default')
+        mandatory = ''
+        if default is None:
+            mandatory = ('# Mandatory. This parameter must be set by the '
+                         'user.\n  ')
+            default = '<None>'
+        if value.get('sample') is not None:
+            default = value['sample']
+        # We ultimately cast this to str for output anyway
+        default = str(default)
+        if default == '':
+            default = "''"
+        # If the default value is something like %index%, yaml won't
+        # parse the output correctly unless we wrap it in quotes.
+        # However, not all default values can be wrapped so we need to
+        # do it conditionally.
+        if default.startswith('%'):
+            default = "'%s'" % default
+        if not default.startswith('\n'):
+            default = ' ' + default
+
+        values = {'name': name,
+                  'type': value['type'],
+                  'description':
+                      value.get('description', '').rstrip().replace('\n',
+                                                                    '\n  # '),
+                  'default': default,
+                  'mandatory': mandatory,
+                  }
+        f.write(_PARAM_FORMAT % values + '\n')
+
+    target_file = os.path.join('environments', env['name'] + '.yaml')
+    _create_output_dir(target_file)
+    with open(target_file, 'w') as env_file:
+        env_file.write(_FILE_HEADER)
+        # TODO(bnemec): Once Heat allows the title and description to live in
+        # the environment itself, uncomment these entries and make them
+        # top-level keys in the YAML.
+        env_title = env.get('title', '')
+        env_file.write(u'# title: %s\n' % env_title)
+        env_desc = env.get('description', '')
+        env_file.write(u'# description: |\n')
+        for line in env_desc.splitlines():
+            env_file.write(u'#   %s\n' % line)
+
+        if parameter_defaults:
+            env_file.write(u'parameter_defaults:\n')
+        for name, value in sorted(parameter_defaults.items()):
+            write_sample_entry(env_file, name, value)
+        if static_defaults:
+            env_file.write(_STATIC_MESSAGE_START)
+        for name, value in sorted(static_defaults.items()):
+            write_sample_entry(env_file, name, value)
+        if static_defaults:
+            env_file.write(_STATIC_MESSAGE_END)
+
+        if env.get('resource_registry'):
+            env_file.write(u'resource_registry:\n')
+        for res, value in sorted(env.get('resource_registry', {}).items()):
+            env_file.write(u'  %s: %s\n' % (res, value))
+        print('Wrote sample environment "%s"' % target_file)
+
+    for e in env.get('children', []):
+        _generate_environment(e, env)
+
+
+def generate_environments(config_path):
+    if os.path.isdir(config_path):
+        config_files = os.listdir(config_path)
+        config_files = [os.path.join(config_path, i) for i in config_files
+                        if os.path.splitext(i)[1] == '.yaml']
+    else:
+        config_files = [config_path]
+    for config_file in config_files:
+        print('Reading environment definitions from %s' % config_file)
+        with open(config_file) as f:
+            config = yaml.safe_load(f)
+        for env in config['environments']:
+            _generate_environment(env)
+
+
+def usage(exit_code=1):
+    print('Usage: %s [<filename.yaml> | <directory>]' % sys.argv[0])
+    sys.exit(exit_code)
+
+
+def main():
+    try:
+        config_path = sys.argv[1]
+    except IndexError:
+        usage()
+    generate_environments(config_path)
+
+
+if __name__ == '__main__':
+    main()
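[Editor's note] A hedged usage sketch of the new module; the path matches the sample-env-generator/storage.yaml file added above, and the exact set of output files depends on the definitions it contains:

    from tripleo_heat_templates import environment_generator

    # Point the generator at a single definition file or at the whole
    # sample-env-generator/ directory (the latter is what the new
    # "tox -e genconfig" target does).
    environment_generator.generate_environments('sample-env-generator/storage.yaml')
    # Writes e.g. environments/storage/enable-ceph.yaml,
    # environments/storage/cinder-nfs.yaml, ...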
diff --git a/tripleo_heat_templates/tests/__init__.py b/tripleo_heat_templates/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tripleo_heat_templates/tests/test_environment_generator.py b/tripleo_heat_templates/tests/test_environment_generator.py
new file mode 100644 (file)
index 0000000..94d13c7
--- /dev/null
@@ -0,0 +1,498 @@
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+import tempfile
+
+import mock
+from oslotest import base
+import six
+import testscenarios
+
+from tripleo_heat_templates import environment_generator
+
+load_tests = testscenarios.load_tests_apply_scenarios
+
+basic_template = '''
+parameters:
+  FooParam:
+    default: foo
+    description: Foo description
+    type: string
+  BarParam:
+    default: 42
+    description: Bar description
+    type: number
+  EndpointMap:
+    default: {}
+    description: Parameter that should not be included by default
+    type: json
+resources:
+  # None
+'''
+basic_private_template = '''
+parameters:
+  FooParam:
+    default: foo
+    description: Foo description
+    type: string
+  _BarParam:
+    default: 42
+    description: Bar description
+    type: number
+resources:
+  # None
+'''
+mandatory_template = '''
+parameters:
+  FooParam:
+    description: Mandatory param
+    type: string
+resources:
+  # None
+'''
+index_template = '''
+parameters:
+  FooParam:
+    description: Param with %index% as its default
+    type: string
+    default: '%index%'
+resources:
+  # None
+'''
+multiline_template = '''
+parameters:
+  FooParam:
+    description: |
+      Parameter with
+      multi-line description
+    type: string
+    default: ''
+resources:
+  # None
+'''
+
+
+class GeneratorTestCase(base.BaseTestCase):
+    content_scenarios = [
+        ('basic',
+         {'template': basic_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Bar description
+  # Type: number
+  BarParam: 42
+
+  # Foo description
+  # Type: string
+  FooParam: foo
+
+''',
+          }),
+        ('basic-one-param',
+         {'template': basic_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters:
+          - FooParam
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Foo description
+  # Type: string
+  FooParam: foo
+
+''',
+          }),
+        ('basic-static-param',
+         {'template': basic_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+    static:
+      - BarParam
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Foo description
+  # Type: string
+  FooParam: foo
+
+  # ******************************************************
+  # Static parameters - these are values that must be
+  # included in the environment but should not be changed.
+  # ******************************************************
+  # Bar description
+  # Type: number
+  BarParam: 42
+
+  # *********************
+  # End static parameters
+  # *********************
+''',
+          }),
+        ('basic-static-param-sample',
+         {'template': basic_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+    static:
+      - BarParam
+    sample_values:
+      BarParam: 1
+      FooParam: ''
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Foo description
+  # Type: string
+  FooParam: ''
+
+  # ******************************************************
+  # Static parameters - these are values that must be
+  # included in the environment but should not be changed.
+  # ******************************************************
+  # Bar description
+  # Type: number
+  BarParam: 1
+
+  # *********************
+  # End static parameters
+  # *********************
+''',
+          }),
+        ('basic-private',
+         {'template': basic_private_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Foo description
+  # Type: string
+  FooParam: foo
+
+''',
+          }),
+        ('mandatory',
+         {'template': mandatory_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Mandatory param
+  # Mandatory. This parameter must be set by the user.
+  # Type: string
+  FooParam: <None>
+
+''',
+          }),
+        ('basic-sample',
+         {'template': basic_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+    sample_values:
+      FooParam: baz
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Bar description
+  # Type: number
+  BarParam: 42
+
+  # Foo description
+  # Type: string
+  FooParam: baz
+
+''',
+          }),
+        ('basic-resource-registry',
+         {'template': basic_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+    resource_registry:
+      OS::TripleO::FakeResource: fake-filename.yaml
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Bar description
+  # Type: number
+  BarParam: 42
+
+  # Foo description
+  # Type: string
+  FooParam: foo
+
+resource_registry:
+  OS::TripleO::FakeResource: fake-filename.yaml
+''',
+          }),
+        ('basic-hidden',
+         {'template': basic_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+    sample_values:
+      EndpointMap: |-2
+
+            foo: bar
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Bar description
+  # Type: number
+  BarParam: 42
+
+  # Parameter that should not be included by default
+  # Type: json
+  EndpointMap:
+    foo: bar
+
+  # Foo description
+  # Type: string
+  FooParam: foo
+
+''',
+          }),
+        ('missing-param',
+         {'template': basic_template,
+          'exception': RuntimeError,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters:
+          - SomethingNonexistent
+''',
+          'expected_output': None,
+          }),
+        ('percent-index',
+         {'template': index_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Param with %index% as its default
+  # Type: string
+  FooParam: '%index%'
+
+''',
+          }),
+        ('nested',
+         {'template': multiline_template,
+          'exception': None,
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+    children:
+      - name: nested
+        title: Nested Environment
+        description: Nested description
+        sample_values:
+          FooParam: bar
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Parameter with
+  # multi-line description
+  # Type: string
+  FooParam: ''
+
+''',
+          'nested_output': '''# title: Nested Environment
+# description: |
+#   Nested description
+parameter_defaults:
+  # Parameter with
+  # multi-line description
+  # Type: string
+  FooParam: bar
+
+''',
+          }),
+        ('multi-line-desc',
+         {'template': multiline_template,
+          'exception': None,
+          'nested_output': '',
+          'input_file': '''environments:
+  -
+    name: basic
+    title: Basic Environment
+    description: Basic description
+    files:
+      foo.yaml:
+        parameters: all
+''',
+          'expected_output': '''# title: Basic Environment
+# description: |
+#   Basic description
+parameter_defaults:
+  # Parameter with
+  # multi-line description
+  # Type: string
+  FooParam: ''
+
+''',
+          }),
+        ]
+
+    @classmethod
+    def generate_scenarios(cls):
+        cls.scenarios = testscenarios.multiply_scenarios(
+            cls.content_scenarios)
+
+    def test_generator(self):
+        fake_input = io.StringIO(six.text_type(self.input_file))
+        fake_template = io.StringIO(six.text_type(self.template))
+        _, fake_output_path = tempfile.mkstemp()
+        fake_output = open(fake_output_path, 'w')
+        with mock.patch('tripleo_heat_templates.environment_generator.open',
+                        create=True) as mock_open:
+            mock_se = [fake_input, fake_template, fake_output]
+            if self.nested_output:
+                _, fake_nested_output_path = tempfile.mkstemp()
+                fake_nested_output = open(fake_nested_output_path, 'w')
+                fake_template2 = io.StringIO(six.text_type(self.template))
+                mock_se = [fake_input, fake_template, fake_output,
+                           fake_template2, fake_nested_output]
+            mock_open.side_effect = mock_se
+            if not self.exception:
+                environment_generator.generate_environments('ignored.yaml')
+            else:
+                self.assertRaises(self.exception,
+                                  environment_generator.generate_environments,
+                                  'ignored.yaml')
+                return
+        expected = environment_generator._FILE_HEADER + self.expected_output
+        with open(fake_output_path) as f:
+            self.assertEqual(expected, f.read())
+        if self.nested_output:
+            with open(fake_nested_output_path) as f:
+                expected = (environment_generator._FILE_HEADER +
+                            self.nested_output)
+                self.assertEqual(expected, f.read())
+
+GeneratorTestCase.generate_scenarios()