Merge "Add missing release note for cadf environment"
author Jenkins <jenkins@review.openstack.org>
Sat, 20 May 2017 00:08:24 +0000 (00:08 +0000)
committer Gerrit Code Review <review@openstack.org>
Sat, 20 May 2017 00:08:24 +0000 (00:08 +0000)
584 files changed:
README.rst
all-nodes-validation.yaml
bindep.txt [new file with mode: 0644]
bootstrap-config.yaml
capabilities-map.yaml
ci/common/net-config-multinode-os-net-config.yaml
ci/common/net-config-multinode.yaml
ci/environments/multinode-3nodes.yaml
ci/environments/multinode-container-upgrade.yaml [new file with mode: 0644]
ci/environments/multinode-core.yaml
ci/environments/multinode.yaml
ci/environments/multinode_major_upgrade.yaml
ci/environments/scenario001-multinode.yaml
ci/environments/scenario002-multinode.yaml
ci/environments/scenario003-multinode.yaml
ci/environments/scenario004-multinode.yaml
ci/pingtests/scenario001-multinode.yaml
ci/pingtests/scenario002-multinode.yaml
ci/pingtests/scenario003-multinode.yaml
ci/pingtests/scenario004-multinode.yaml
ci/pingtests/tenantvm_floatingip.yaml
default_passwords.yaml
deployed-server/README.rst
deployed-server/ctlplane-port.yaml
deployed-server/deployed-neutron-port.yaml
deployed-server/deployed-server-bootstrap-centos.sh
deployed-server/deployed-server-bootstrap-centos.yaml
deployed-server/deployed-server-bootstrap-rhel.sh
deployed-server/deployed-server-bootstrap-rhel.yaml
deployed-server/deployed-server-roles-data.yaml
deployed-server/deployed-server.yaml
deployed-server/scripts/get-occ-config.sh
docker/docker-puppet.py
docker/docker-steps.j2 [new file with mode: 0644]
docker/firstboot/setup_docker_host.sh
docker/firstboot/setup_docker_host.yaml
docker/post-upgrade.j2.yaml [new file with mode: 0644]
docker/post.j2.yaml
docker/services/README.rst
docker/services/aodh-api.yaml [new file with mode: 0644]
docker/services/aodh-evaluator.yaml [new file with mode: 0644]
docker/services/aodh-listener.yaml [new file with mode: 0644]
docker/services/aodh-notifier.yaml [new file with mode: 0644]
docker/services/ceilometer-agent-central.yaml [new file with mode: 0644]
docker/services/ceilometer-agent-compute.yaml [new file with mode: 0644]
docker/services/ceilometer-agent-notification.yaml [new file with mode: 0644]
docker/services/containers-common.yaml [new file with mode: 0644]
docker/services/database/mongodb.yaml
docker/services/database/mysql.yaml
docker/services/database/redis.yaml [new file with mode: 0644]
docker/services/etcd.yaml [new file with mode: 0644]
docker/services/glance-api.yaml
docker/services/gnocchi-api.yaml [new file with mode: 0644]
docker/services/gnocchi-metricd.yaml [new file with mode: 0644]
docker/services/gnocchi-statsd.yaml [new file with mode: 0644]
docker/services/heat-api-cfn.yaml
docker/services/heat-api.yaml
docker/services/heat-engine.yaml
docker/services/ironic-api.yaml
docker/services/ironic-conductor.yaml
docker/services/ironic-pxe.yaml
docker/services/keystone.yaml
docker/services/memcached.yaml
docker/services/mistral-api.yaml
docker/services/mistral-engine.yaml
docker/services/mistral-executor.yaml
docker/services/neutron-api.yaml
docker/services/neutron-dhcp.yaml
docker/services/neutron-l3.yaml
docker/services/neutron-metadata.yaml [new file with mode: 0644]
docker/services/neutron-ovs-agent.yaml
docker/services/neutron-plugin-ml2.yaml
docker/services/nova-api.yaml
docker/services/nova-compute.yaml
docker/services/nova-conductor.yaml
docker/services/nova-ironic.yaml
docker/services/nova-libvirt.yaml
docker/services/nova-metadata.yaml
docker/services/nova-placement.yaml
docker/services/nova-scheduler.yaml
docker/services/panko-api.yaml [new file with mode: 0644]
docker/services/rabbitmq.yaml
docker/services/services.yaml
docker/services/swift-proxy.yaml
docker/services/swift-ringbuilder.yaml
docker/services/swift-storage.yaml
docker/services/zaqar.yaml
environments/cadf.yaml [new file with mode: 0644]
environments/cinder-netapp-config.yaml
environments/cinder-pure-config.yaml [new file with mode: 0644]
environments/collectd-environment.yaml
environments/contrail/contrail-net.yaml
environments/contrail/contrail-nic-config-compute.yaml
environments/contrail/roles_data_contrail.yaml
environments/deployed-server-environment.j2.yaml [new file with mode: 0644]
environments/deployed-server-environment.yaml [deleted file]
environments/deployed-server-pacemaker-environment.yaml
environments/docker-services-tls-everywhere.yaml [new file with mode: 0644]
environments/docker.yaml
environments/enable-internal-tls.yaml
environments/external-loadbalancer-vip-v6.yaml
environments/external-loadbalancer-vip.yaml
environments/fixed-ip-vips-v6.yaml [new file with mode: 0644]
environments/fixed-ip-vips.yaml [new file with mode: 0644]
environments/hyperconverged-ceph.yaml
environments/logging-environment.yaml
environments/major-upgrade-all-in-one.yaml [deleted file]
environments/major-upgrade-aodh-migration.yaml [deleted file]
environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml [deleted file]
environments/major-upgrade-composable-steps-docker.yaml [new file with mode: 0644]
environments/major-upgrade-composable-steps.yaml
environments/major-upgrade-converge-docker.yaml [new file with mode: 0644]
environments/major-upgrade-converge.yaml
environments/major-upgrade-pacemaker-converge.yaml [deleted file]
environments/major-upgrade-pacemaker-init.yaml [deleted file]
environments/major-upgrade-pacemaker.yaml [deleted file]
environments/major-upgrade-remove-sahara.yaml [deleted file]
environments/manila-cephfsnative-config.yaml
environments/network-environment.yaml
environments/neutron-bgpvpn.yaml [new file with mode: 0644]
environments/neutron-l2gw.yaml [new file with mode: 0644]
environments/neutron-ml2-bigswitch.yaml
environments/neutron-ml2-cisco-n1kv.yaml
environments/neutron-ml2-cisco-nexus-ucsm.yaml
environments/neutron-ml2-vpp.yaml [new file with mode: 0644]
environments/neutron-nsx.yaml [new file with mode: 0644]
environments/neutron-nuage-config.yaml
environments/neutron-opendaylight.yaml
environments/nova-api-policy.yaml [new file with mode: 0644]
environments/puppet-pacemaker.yaml
environments/securetty.yaml [new file with mode: 0644]
environments/services-docker/etcd.yaml [new file with mode: 0644]
environments/services-docker/ironic.yaml [new file with mode: 0644]
environments/services-docker/mistral.yaml [new file with mode: 0644]
environments/services-docker/undercloud-aodh.yaml [new file with mode: 0644]
environments/services-docker/undercloud-ceilometer.yaml [new file with mode: 0644]
environments/services-docker/undercloud-gnocchi.yaml [new file with mode: 0644]
environments/services-docker/undercloud-panko.yaml [new file with mode: 0644]
environments/services-docker/zaqar.yaml [new file with mode: 0644]
environments/services/ceilometer-api.yaml [new file with mode: 0644]
environments/services/ceilometer-collector.yaml [new file with mode: 0644]
environments/services/ceilometer-expirer.yaml [new file with mode: 0644]
environments/services/disable-ceilometer-api.yaml [deleted file]
environments/services/keystone_domain_specific_ldap_backend.yaml [new file with mode: 0644]
environments/services/panko.yaml [deleted file]
environments/services/qdr.yaml [new file with mode: 0644]
environments/sshd-banner.yaml
environments/swift-external.yaml [new file with mode: 0644]
environments/undercloud.yaml
environments/updates/update-from-192_0_2-subnet.yaml [new file with mode: 0644]
extraconfig/all_nodes/mac_hostname.j2.yaml
extraconfig/all_nodes/random_string.j2.yaml
extraconfig/all_nodes/swap-partition.j2.yaml
extraconfig/all_nodes/swap.j2.yaml
extraconfig/nova_metadata/krb-service-principals.yaml
extraconfig/post_deploy/default.yaml
extraconfig/post_deploy/example.yaml
extraconfig/post_deploy/example_run_on_update.yaml
extraconfig/post_deploy/undercloud_post.yaml
extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
extraconfig/pre_deploy/rhel-registration/scripts/rhel-registration
extraconfig/pre_network/config_then_reboot.yaml
extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
extraconfig/tasks/aodh_data_migration.sh [deleted file]
extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml [deleted file]
extraconfig/tasks/major_upgrade_check.sh [deleted file]
extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh [deleted file]
extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh [deleted file]
extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh [deleted file]
extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh [deleted file]
extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh [deleted file]
extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh [deleted file]
extraconfig/tasks/major_upgrade_pacemaker.yaml [deleted file]
extraconfig/tasks/major_upgrade_pacemaker_migrations.sh [deleted file]
extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml [deleted file]
extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp [deleted file]
extraconfig/tasks/pacemaker_common_functions.sh
extraconfig/tasks/post_puppet_pacemaker.j2.yaml [moved from extraconfig/tasks/post_puppet_pacemaker.yaml with 53% similarity]
extraconfig/tasks/post_puppet_pacemaker_restart.yaml
extraconfig/tasks/pre_puppet_pacemaker.yaml
extraconfig/tasks/run_puppet.sh
extraconfig/tasks/ssh/host_public_key.yaml [new file with mode: 0644]
extraconfig/tasks/ssh/known_hosts_config.yaml [new file with mode: 0644]
extraconfig/tasks/swift-ring-deploy.yaml [deleted file]
extraconfig/tasks/swift-ring-update.yaml [deleted file]
extraconfig/tasks/tripleo_upgrade_node.sh
extraconfig/tasks/yum_update.sh
extraconfig/tasks/yum_update.yaml
extraconfig/tasks/yum_update_noop.yaml
firstboot/install_vrouter_kmod.yaml
firstboot/os-net-config-mappings.yaml
firstboot/userdata_default.yaml
firstboot/userdata_dev_rsync.yaml
firstboot/userdata_example.yaml
firstboot/userdata_heat_admin.yaml
firstboot/userdata_root_password.yaml
hosts-config.yaml
net-config-bond.yaml
net-config-bridge.yaml
net-config-linux-bridge.yaml
net-config-noop.yaml
net-config-static-bridge-with-external-dhcp.yaml
net-config-static-bridge.yaml
net-config-static.yaml
net-config-undercloud.yaml
network/config/bond-with-vlans/ceph-storage.yaml
network/config/bond-with-vlans/cinder-storage.yaml
network/config/bond-with-vlans/compute-dpdk.yaml
network/config/bond-with-vlans/compute.yaml
network/config/bond-with-vlans/controller-no-external.yaml
network/config/bond-with-vlans/controller-v6.yaml
network/config/bond-with-vlans/controller.yaml
network/config/bond-with-vlans/swift-storage.yaml
network/config/multiple-nics/ceph-storage.yaml
network/config/multiple-nics/cinder-storage.yaml
network/config/multiple-nics/compute-dvr.yaml
network/config/multiple-nics/compute.yaml
network/config/multiple-nics/controller-v6.yaml
network/config/multiple-nics/controller.yaml
network/config/multiple-nics/swift-storage.yaml
network/config/single-nic-linux-bridge-vlans/ceph-storage.yaml
network/config/single-nic-linux-bridge-vlans/cinder-storage.yaml
network/config/single-nic-linux-bridge-vlans/compute.yaml
network/config/single-nic-linux-bridge-vlans/controller-v6.yaml
network/config/single-nic-linux-bridge-vlans/controller.yaml
network/config/single-nic-linux-bridge-vlans/swift-storage.yaml
network/config/single-nic-vlans/ceph-storage.yaml
network/config/single-nic-vlans/cinder-storage.yaml
network/config/single-nic-vlans/compute.yaml
network/config/single-nic-vlans/controller-no-external.yaml
network/config/single-nic-vlans/controller-v6.yaml
network/config/single-nic-vlans/controller.yaml
network/config/single-nic-vlans/swift-storage.yaml
network/endpoints/build_endpoint_map.py
network/endpoints/endpoint_data.yaml
network/endpoints/endpoint_map.yaml
network/external.yaml
network/external_v6.yaml
network/internal_api.yaml
network/internal_api_v6.yaml
network/management.yaml
network/management_v6.yaml
network/networks.j2.yaml [new file with mode: 0644]
network/networks.yaml [deleted file]
network/ports/ctlplane_vip.yaml
network/ports/external.yaml
network/ports/external_from_pool.yaml
network/ports/external_from_pool_v6.yaml
network/ports/external_v6.yaml
network/ports/from_service.yaml
network/ports/from_service_v6.yaml
network/ports/internal_api.yaml
network/ports/internal_api_from_pool.yaml
network/ports/internal_api_from_pool_v6.yaml
network/ports/internal_api_v6.yaml
network/ports/management.yaml
network/ports/management_from_pool.yaml
network/ports/management_from_pool_v6.yaml
network/ports/management_v6.yaml
network/ports/net_ip_list_map.yaml
network/ports/net_ip_map.yaml
network/ports/net_vip_map_external.yaml
network/ports/net_vip_map_external_v6.yaml
network/ports/noop.yaml
network/ports/storage.yaml
network/ports/storage_from_pool.yaml
network/ports/storage_from_pool_v6.yaml
network/ports/storage_mgmt.yaml
network/ports/storage_mgmt_from_pool.yaml
network/ports/storage_mgmt_from_pool_v6.yaml
network/ports/storage_mgmt_v6.yaml
network/ports/storage_v6.yaml
network/ports/tenant.yaml
network/ports/tenant_from_pool.yaml
network/ports/tenant_from_pool_v6.yaml
network/ports/tenant_v6.yaml
network/ports/vip.yaml
network/ports/vip_v6.yaml
network/service_net_map.j2.yaml
network/storage.yaml
network/storage_mgmt.yaml
network/storage_mgmt_v6.yaml
network/storage_v6.yaml
network/tenant.yaml
network/tenant_v6.yaml
network_data.yaml [new file with mode: 0644]
overcloud-resource-registry-puppet.j2.yaml
overcloud.j2.yaml
plan-environment.yaml
puppet/all-nodes-config.yaml
puppet/blockstorage-role.yaml
puppet/cephstorage-role.yaml
puppet/compute-role.yaml
puppet/config.role.j2.yaml
puppet/controller-role.yaml
puppet/deploy-artifacts.yaml
puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
puppet/extraconfig/pre_deploy/compute/nova-nuage.yaml
puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml [deleted file]
puppet/extraconfig/pre_deploy/controller/multiple.yaml
puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
puppet/extraconfig/pre_deploy/default.yaml
puppet/extraconfig/pre_deploy/per_node.yaml
puppet/extraconfig/tls/ca-inject.yaml
puppet/extraconfig/tls/freeipa-enroll.yaml
puppet/extraconfig/tls/tls-cert-inject.yaml
puppet/major_upgrade_steps.j2.yaml
puppet/objectstorage-role.yaml
puppet/post-upgrade.j2.yaml
puppet/post.j2.yaml
puppet/puppet-steps.j2
puppet/role.role.j2.yaml
puppet/services/README.rst
puppet/services/aodh-api.yaml
puppet/services/aodh-base.yaml
puppet/services/aodh-evaluator.yaml
puppet/services/aodh-listener.yaml
puppet/services/aodh-notifier.yaml
puppet/services/apache-internal-tls-certmonger.yaml [deleted file]
puppet/services/apache.yaml
puppet/services/auditd.yaml
puppet/services/barbican-api.yaml
puppet/services/ca-certs.yaml
puppet/services/ceilometer-agent-central.yaml
puppet/services/ceilometer-agent-compute.yaml
puppet/services/ceilometer-agent-ipmi.yaml [new file with mode: 0644]
puppet/services/ceilometer-agent-notification.yaml
puppet/services/ceilometer-api.yaml
puppet/services/ceilometer-base.yaml
puppet/services/ceilometer-collector.yaml
puppet/services/ceilometer-expirer.yaml
puppet/services/ceph-base.yaml
puppet/services/ceph-client.yaml
puppet/services/ceph-external.yaml
puppet/services/ceph-mds.yaml
puppet/services/ceph-mon.yaml
puppet/services/ceph-osd.yaml
puppet/services/ceph-rgw.yaml
puppet/services/certmonger-user.yaml [new file with mode: 0644]
puppet/services/cinder-api.yaml
puppet/services/cinder-backend-dellps.yaml
puppet/services/cinder-backend-dellsc.yaml
puppet/services/cinder-backend-netapp.yaml [new file with mode: 0644]
puppet/services/cinder-backend-pure.yaml [new file with mode: 0644]
puppet/services/cinder-backend-scaleio.yaml
puppet/services/cinder-backup.yaml
puppet/services/cinder-base.yaml
puppet/services/cinder-hpelefthand-iscsi.yaml
puppet/services/cinder-scheduler.yaml
puppet/services/cinder-volume.yaml
puppet/services/congress.yaml
puppet/services/database/mongodb-base.yaml
puppet/services/database/mongodb.yaml
puppet/services/database/mysql-client.yaml
puppet/services/database/mysql-internal-tls-certmonger.yaml [deleted file]
puppet/services/database/mysql.yaml
puppet/services/database/redis-base.yaml
puppet/services/database/redis.yaml
puppet/services/disabled/ceilometer-collector.yaml [new file with mode: 0644]
puppet/services/disabled/ceilometer-expirer.yaml [new file with mode: 0644]
puppet/services/disabled/glance-registry.yaml
puppet/services/docker.yaml [new file with mode: 0644]
puppet/services/ec2-api.yaml
puppet/services/etcd.yaml
puppet/services/external-swift-proxy.yaml [new file with mode: 0644]
puppet/services/glance-api.yaml
puppet/services/glance-base.yaml [deleted file]
puppet/services/gnocchi-api.yaml
puppet/services/gnocchi-base.yaml
puppet/services/gnocchi-metricd.yaml
puppet/services/gnocchi-statsd.yaml
puppet/services/haproxy-internal-tls-certmonger.yaml
puppet/services/haproxy-public-tls-certmonger.yaml
puppet/services/haproxy.yaml
puppet/services/heat-api-cfn.yaml
puppet/services/heat-api-cloudwatch.yaml
puppet/services/heat-api.yaml
puppet/services/heat-base.yaml
puppet/services/heat-engine.yaml
puppet/services/horizon.yaml
puppet/services/ironic-api.yaml
puppet/services/ironic-base.yaml
puppet/services/ironic-conductor.yaml
puppet/services/keepalived.yaml
puppet/services/kernel.yaml
puppet/services/keystone.yaml
puppet/services/logging/fluentd-base.yaml
puppet/services/logging/fluentd-client.yaml
puppet/services/logging/fluentd-config.yaml
puppet/services/manila-api.yaml
puppet/services/manila-backend-cephfs.yaml
puppet/services/manila-backend-generic.yaml
puppet/services/manila-backend-netapp.yaml
puppet/services/manila-base.yaml
puppet/services/manila-scheduler.yaml
puppet/services/manila-share.yaml
puppet/services/memcached.yaml
puppet/services/metrics/collectd.yaml
puppet/services/mistral-api.yaml
puppet/services/mistral-base.yaml
puppet/services/mistral-engine.yaml
puppet/services/mistral-executor.yaml
puppet/services/monitoring/sensu-base.yaml
puppet/services/monitoring/sensu-client.yaml
puppet/services/network/contrail-analytics-database.yaml
puppet/services/network/contrail-analytics.yaml
puppet/services/network/contrail-base.yaml
puppet/services/network/contrail-config.yaml
puppet/services/network/contrail-control.yaml
puppet/services/network/contrail-database.yaml
puppet/services/network/contrail-heat.yaml
puppet/services/network/contrail-neutron-plugin.yaml
puppet/services/network/contrail-provision.yaml
puppet/services/network/contrail-tsn.yaml
puppet/services/network/contrail-vrouter.yaml
puppet/services/network/contrail-webui.yaml
puppet/services/neutron-api.yaml
puppet/services/neutron-base.yaml
puppet/services/neutron-bgpvpn-api.yaml [new file with mode: 0644]
puppet/services/neutron-bigswitch-agent.yaml [new file with mode: 0644]
puppet/services/neutron-compute-plugin-midonet.yaml
puppet/services/neutron-compute-plugin-nuage.yaml
puppet/services/neutron-compute-plugin-ovn.yaml
puppet/services/neutron-compute-plugin-plumgrid.yaml
puppet/services/neutron-dhcp.yaml
puppet/services/neutron-l2gw-agent.yaml [new file with mode: 0644]
puppet/services/neutron-l2gw-api.yaml [new file with mode: 0644]
puppet/services/neutron-l3-compute-dvr.yaml
puppet/services/neutron-l3.yaml
puppet/services/neutron-metadata.yaml
puppet/services/neutron-midonet.yaml
puppet/services/neutron-ovs-agent.yaml
puppet/services/neutron-ovs-dpdk-agent.yaml
puppet/services/neutron-plugin-ml2-fujitsu-cfab.yaml
puppet/services/neutron-plugin-ml2-fujitsu-fossw.yaml
puppet/services/neutron-plugin-ml2-odl.yaml [new file with mode: 0644]
puppet/services/neutron-plugin-ml2-ovn.yaml
puppet/services/neutron-plugin-ml2.yaml
puppet/services/neutron-plugin-nsx.yaml [new file with mode: 0644]
puppet/services/neutron-plugin-nuage.yaml
puppet/services/neutron-plugin-plumgrid.yaml
puppet/services/neutron-sriov-agent.yaml
puppet/services/neutron-vpp-agent.yaml [new file with mode: 0644]
puppet/services/nova-api.yaml
puppet/services/nova-base.yaml
puppet/services/nova-compute.yaml
puppet/services/nova-conductor.yaml
puppet/services/nova-consoleauth.yaml
puppet/services/nova-ironic.yaml
puppet/services/nova-libvirt.yaml
puppet/services/nova-metadata.yaml
puppet/services/nova-placement.yaml
puppet/services/nova-scheduler.yaml
puppet/services/nova-vnc-proxy.yaml
puppet/services/octavia-api.yaml
puppet/services/octavia-base.yaml
puppet/services/octavia-health-manager.yaml
puppet/services/octavia-housekeeping.yaml
puppet/services/octavia-worker.yaml
puppet/services/opendaylight-api.yaml
puppet/services/opendaylight-ovs.yaml
puppet/services/openvswitch-upgrade.yaml [new file with mode: 0644]
puppet/services/ovn-dbs.yaml
puppet/services/pacemaker.yaml
puppet/services/pacemaker/ceph-rbdmirror.yaml
puppet/services/pacemaker/cinder-backup.yaml
puppet/services/pacemaker/cinder-volume.yaml
puppet/services/pacemaker/database/mysql.yaml
puppet/services/pacemaker/database/redis.yaml
puppet/services/pacemaker/haproxy.yaml
puppet/services/pacemaker/manila-share.yaml
puppet/services/pacemaker/rabbitmq.yaml
puppet/services/pacemaker_remote.yaml
puppet/services/panko-api.yaml
puppet/services/panko-base.yaml
puppet/services/qdr.yaml [new file with mode: 0644]
puppet/services/rabbitmq.yaml
puppet/services/releasenotes/notes/mod_ssl-e7fd4db71189242e.yaml [new file with mode: 0644]
puppet/services/sahara-api.yaml
puppet/services/sahara-base.yaml
puppet/services/sahara-engine.yaml
puppet/services/securetty.yaml [new file with mode: 0644]
puppet/services/services.yaml
puppet/services/snmp.yaml
puppet/services/sshd.yaml
puppet/services/swift-base.yaml
puppet/services/swift-proxy.yaml
puppet/services/swift-ringbuilder.yaml
puppet/services/swift-storage.yaml
puppet/services/tacker.yaml
puppet/services/time/ntp.yaml
puppet/services/time/timezone.yaml
puppet/services/tripleo-firewall.yaml
puppet/services/tripleo-packages.yaml
puppet/services/vpp.yaml
puppet/services/zaqar.yaml
puppet/upgrade_config.yaml
releasenotes/notes/Add-Internal-TLS-CA-File-parameter-c24ee13daaa11dfc.yaml [new file with mode: 0644]
releasenotes/notes/Enable-TLS-for-libvirt-0aab48cd8339da0f.yaml [new file with mode: 0644]
releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml [new file with mode: 0644]
releasenotes/notes/add-all-hosts-to-hostsentry-20a8ee8a1a210ce2.yaml [new file with mode: 0644]
releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml [new file with mode: 0644]
releasenotes/notes/add-ceilometer-agent-ipmi-2c86726d0373d354.yaml [new file with mode: 0644]
releasenotes/notes/add-ipv6-diable-options-9aaee219bb87ac6a.yaml [new file with mode: 0644]
releasenotes/notes/add-l2gw-agent-1a2f14a6ceefe362.yaml [new file with mode: 0644]
releasenotes/notes/add-l2gw-api-support-2206d3d14f409088.yaml [new file with mode: 0644]
releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml [new file with mode: 0644]
releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml [new file with mode: 0644]
releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml [new file with mode: 0644]
releasenotes/notes/add-qdr-99a27dffef42c13e.yaml [new file with mode: 0644]
releasenotes/notes/add-support-for-pure-cinder-1a595f1940d5a06f.yaml [new file with mode: 0644]
releasenotes/notes/add_db_sync_timeout-c9b2f401cca0b37d.yaml [new file with mode: 0644]
releasenotes/notes/api-policy-4ca739519537f6f4.yaml [new file with mode: 0644]
releasenotes/notes/big-switch-agent-4c743a2112251234.yaml [new file with mode: 0644]
releasenotes/notes/change-rabbitmq-ha-mode-policy-default-6c6cd7f02181f0e0.yaml [new file with mode: 0644]
releasenotes/notes/configurable-snmpd-options-3954c5858e2c7656.yaml [new file with mode: 0644]
releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml [new file with mode: 0644]
releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml [new file with mode: 0644]
releasenotes/notes/deprecate-ceilometer-expirer-83b193a07631d89d.yaml [new file with mode: 0644]
releasenotes/notes/deprecate-collector-a16e5d58ae00806d.yaml [new file with mode: 0644]
releasenotes/notes/deprecate-panko-b2bdce647d2b9a6d.yaml [new file with mode: 0644]
releasenotes/notes/disable-ceilo-api-dfe5d0947563bbe0.yaml [new file with mode: 0644]
releasenotes/notes/disable-core-dump-for-setuid-programs-e83a2a5da908b9c3.yaml [new file with mode: 0644]
releasenotes/notes/disable-kernel-parameter-for-icmp-redirects-f325f91d71b58b5f.yaml [new file with mode: 0644]
releasenotes/notes/disable-manila-cephfs-snapshots-by-default-d5320a05d9b501cf.yaml [new file with mode: 0644]
releasenotes/notes/disable_default_apache_vhost-f41d11fe07605f7f.yaml [new file with mode: 0644]
releasenotes/notes/docker-service-all-roles-5c22a018caeafcf0.yaml [new file with mode: 0644]
releasenotes/notes/enable-logging-suspicious-packets-d5545586f917d2ca.yaml [new file with mode: 0644]
releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml [new file with mode: 0644]
releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml [new file with mode: 0644]
releasenotes/notes/expose-metric-processing-delay-0c098d7ec0af0728.yaml [new file with mode: 0644]
releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml [new file with mode: 0644]
releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml [new file with mode: 0644]
releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml [new file with mode: 0644]
releasenotes/notes/get-occ-config-local-connector-5bbec3f591a9f311.yaml [new file with mode: 0644]
releasenotes/notes/glance-keystonev3-d35182ba9a3778eb.yaml [new file with mode: 0644]
releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml [new file with mode: 0644]
releasenotes/notes/ha-by-default-55326e699ee8602c.yaml [deleted file]
releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml [new file with mode: 0644]
releasenotes/notes/ironic-boot-option-3f3036aa5e82ec7e.yaml [new file with mode: 0644]
releasenotes/notes/ironic-hardware-types-fe5140549d3bb792.yaml [new file with mode: 0644]
releasenotes/notes/ironic-neutron-integration-76c4f9e0d10785e4.yaml [new file with mode: 0644]
releasenotes/notes/leave-satellite-repo-enabled-8b60528bd5450c7b.yaml [new file with mode: 0644]
releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml [new file with mode: 0644]
releasenotes/notes/match-enable_dvr-with-NeutronEnableDVR-fe8aac6c4ce52bce.yaml [new file with mode: 0644]
releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml [new file with mode: 0644]
releasenotes/notes/nsx-support-1254839718d8df8c.yaml [new file with mode: 0644]
releasenotes/notes/octavia-1687026-c01313aab53f55a4.yaml [new file with mode: 0644]
releasenotes/notes/ovn-fcd4b0168e6745a8.yaml [new file with mode: 0644]
releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml [new file with mode: 0644]
releasenotes/notes/pluggable-server-type-per-role-314f38f8e5d4c84e.yaml [new file with mode: 0644]
releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml [new file with mode: 0644]
releasenotes/notes/restrict-access-to-kernel-message-buffer-809160674b92a073.yaml [new file with mode: 0644]
releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml [new file with mode: 0644]
releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml [new file with mode: 0644]
releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml [new file with mode: 0644]
releasenotes/notes/service-role-name-0b8609d314564885.yaml [new file with mode: 0644]
releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml [new file with mode: 0644]
releasenotes/notes/snmp_listen-2364188f73d43b14.yaml [new file with mode: 0644]
releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml [new file with mode: 0644]
releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml [new file with mode: 0644]
releasenotes/notes/sshd-service-extensions-0c4d0879942a2052.yaml [new file with mode: 0644]
releasenotes/notes/stack-name-input-73f4d4d052f1377e.yaml [new file with mode: 0644]
releasenotes/notes/swap-prepuppet-and-postpuppet-to-preconfig-and-postconfig-debd5f28bc578d51.yaml [new file with mode: 0644]
releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml [new file with mode: 0644]
releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml [new file with mode: 0644]
releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml [new file with mode: 0644]
releasenotes/notes/upgrade-stack-action-94598796a9d3511f.yaml [new file with mode: 0644]
releasenotes/notes/vpp-ml2-8e115f7763510531.yaml [new file with mode: 0644]
releasenotes/notes/zaqar-httpd-e7d91bf396da28d0.yaml [new file with mode: 0644]
releasenotes/source/conf.py
requirements.txt
roles_data.yaml
roles_data_undercloud.yaml
scripts/hosts-config.sh
setup.py
tools/process-templates.py
tools/yaml-nic-config-2-script.py
tools/yaml-validate.py
tox.ini
validation-scripts/all-nodes.sh

diff --git a/README.rst b/README.rst
index 68fdd0e..4eed715 100644 (file)
@@ -66,7 +66,7 @@ and should be executed according to the following table:
 +================+=============+=============+=============+=============+=================+
 | keystone       |      X      |      X      |      X      |      X      |        X        |
 +----------------+-------------+-------------+-------------+-------------+-----------------+
-| glance         |    file     |    swift    |    file     |    file     |      swift      |
+| glance         |    rbd      |    swift    |    file     | swift + rbd |      swift      |
 +----------------+-------------+-------------+-------------+-------------+-----------------+
 | cinder         |     rbd     |    iscsi    |             |             |      iscsi      |
 +----------------+-------------+-------------+-------------+-------------+-----------------+
@@ -76,6 +76,8 @@ and should be executed according to the following table:
 +----------------+-------------+-------------+-------------+-------------+-----------------+
 | neutron        |     ovs     |     ovs     |     ovs     |     ovs     |        X        |
 +----------------+-------------+-------------+-------------+-------------+-----------------+
+| neutron-bgpvpn |             |             |             |      X      |                 |
++----------------+-------------+-------------+-------------+-------------+-----------------+
 | rabbitmq       |      X      |      X      |      X      |      X      |        X        |
 +----------------+-------------+-------------+-------------+-------------+-----------------+
 | mongodb        |      X      |      X      |             |             |                 |
@@ -128,3 +130,9 @@ and should be executed according to the following table:
 +----------------+-------------+-------------+-------------+-------------+-----------------+
 | manila         |             |             |             |      X      |                 |
 +----------------+-------------+-------------+-------------+-------------+-----------------+
+| collectd       |      X      |             |             |             |                 |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| fluentd        |      X      |             |             |             |                 |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| sensu-client   |      X      |             |             |             |                 |
++----------------+-------------+-------------+-------------+-------------+-----------------+
diff --git a/all-nodes-validation.yaml b/all-nodes-validation.yaml
index 65d01d0..52cd6ac 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Software Config to drive validations that occur on all nodes.
@@ -10,6 +10,14 @@ parameters:
     default: ''
     description: A string containing a space separated list of IP addresses used to ping test each available network interface.
     type: string
+  ValidateFqdn:
+    default: false
+    description: Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts.
+    type: boolean
+  ValidateNtp:
+    default: true
+    description: Validation to ensure at least one time source is accessible.
+    type: boolean
 
 resources:
   AllNodesValidationsImpl:
@@ -19,6 +27,10 @@ resources:
       inputs:
         - name: ping_test_ips
           default: {get_param: PingTestIps}
+        - name: validate_fqdn
+          default: {get_param: ValidateFqdn}
+        - name: validate_ntp
+          default: {get_param: ValidateNtp}
       config: {get_file: ./validation-scripts/all-nodes.sh}
 
 outputs:
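
The ValidateFqdn and ValidateNtp parameters added above are plain booleans passed through to validation-scripts/all-nodes.sh as deployment inputs. A minimal sketch of a user environment file that opts into both checks (the file itself is hypothetical, not part of this change):

    # extra-validations.yaml (hypothetical user environment)
    parameter_defaults:
      ValidateFqdn: true
      ValidateNtp: true
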
diff --git a/bindep.txt b/bindep.txt
new file mode 100644 (file)
index 0000000..4f9b425
--- /dev/null
@@ -0,0 +1,2 @@
+# This is a cross-platform list tracking distribution packages needed by tests;
+# see http://docs.openstack.org/infra/bindep/ for additional information.
diff --git a/bootstrap-config.yaml b/bootstrap-config.yaml
index a3fdee9..8e8a2a7 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Bootstrap Config'
 
 parameters:
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index 26ed7f2..1fe7790 100644 (file)
@@ -2,12 +2,6 @@
 # repository for deployment using puppet. It groups configuration by topic,
 # describes possible combinations of environments and resource capabilities.
 
-# root_template: identifies repository's root template
-# root_environment: identifies root_environment, this one is special in terms of
-#   order in which the environments are merged before deploying. This one serves as
-#   a base and it's parameters/resource_registry gets overridden by other environments
-#   if used.
-
 # topics:
 # High Level grouping by purpose of environments
 # Attributes:
@@ -38,8 +32,6 @@
 # only when that given environment is used. (resource_type of that environment can
 # be implemented using multiple templates).
 
-root_template: overcloud.yaml
-root_environment: overcloud-resource-registry-puppet.yaml
 topics:
   - title: Base Resources Configuration
     description:
@@ -308,6 +300,11 @@ topics:
         description: >
           Enable various Neutron plugins and backends
         environments:
+          - file: environments/neutron-bgpvpn.yaml
+            title: Neutron BGPVPN Service Plugin
+            description: Enables Neutron BGPVPN Service Plugin
+            requires:
+              - overcloud-resource-registry-puppet.yaml
           - file: environments/neutron-ml2-bigswitch.yaml
             title: BigSwitch Extensions
             description: >
@@ -365,6 +362,16 @@ topics:
             description: Enable FOS in the overcloud
             requires:
               - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-nsx.yaml
+            title: Deploy NSX Services
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-l2gw.yaml
+            title: Neutron L2 gateway Service Plugin
+            description: Enables Neutron L2 gateway Service Plugin and Agent
+            requires:
+              - overcloud-resource-registry-puppet.yaml
 
   - title: Nova Extensions
     description:
@@ -397,6 +404,11 @@ topics:
         description: >
           Enable various Cinder backends
         environments:
+          - file: environments/cinder-pure-config.yaml
+            title: Cinder Pure Storage FlashArray backend
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
           - file: environments/cinder-netapp-config.yaml
             title: Cinder NetApp backend
             description:
@@ -542,7 +554,7 @@ topics:
         description: Enable monitoring agents
         environments:
           - file: environments/monitoring-environment.yaml
-            title: enable monitoring agents
+            title: Enable monitoring agents
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
@@ -554,6 +566,14 @@ topics:
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
+      - title: Performance monitoring
+        description: Enable performance monitoring agents
+        environments:
+          - file: environments/collectd-environment.yaml
+            title: Enable performance monitoring agents
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
 
   - title: Security Options
     description: Security Hardening Options
@@ -582,3 +602,13 @@ topics:
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
+      - title: Keystone CADF auditing
+        description: Enable CADF notifications in Keystone for auditing
+        environments:
+          - file: environments/cadf.yaml
+            title: Keystone CADF auditing
+      - title: SecureTTY Values
+        description: Set values within /etc/securetty
+        environments:
+          - file: environments/securetty.yaml
+            title: SecureTTY Values
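
The Keystone CADF entry above points at the new environments/cadf.yaml, the file this merge adds a release note for; its contents are not shown in this diff. As a sketch, assuming the usual one-parameter TripleO environment pattern and the notification-format parameter exposed by puppet/services/keystone.yaml, it would simply be:

    # assumed contents of environments/cadf.yaml (not shown in this change)
    parameter_defaults:
      KeystoneNotificationFormat: cadf
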
diff --git a/ci/common/net-config-multinode-os-net-config.yaml b/ci/common/net-config-multinode-os-net-config.yaml
index 8c50b64..6f4542b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Software Config to drive os-net-config for a simple bridge configured
diff --git a/ci/common/net-config-multinode.yaml b/ci/common/net-config-multinode.yaml
index dc31235..f7e250e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Software Config to drive os-net-config for a simple bridge configured
diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml
index d6e2376..ef51a77 100644 (file)
@@ -24,7 +24,7 @@
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CinderApi
     - OS::TripleO::Services::CinderScheduler
-    - OS::TripleO::Services::Core
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
@@ -56,6 +56,7 @@
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
     - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Sshd
 
 - name: Controller
   CountDefault: 1
@@ -63,7 +64,7 @@
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CinderBackup
     - OS::TripleO::Services::CinderVolume
-    - OS::TripleO::Services::Core
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::MySQL
     - OS::TripleO::Services::MySQLClient
@@ -77,3 +78,4 @@
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::Sshd
diff --git a/ci/environments/multinode-container-upgrade.yaml b/ci/environments/multinode-container-upgrade.yaml
new file mode 100644 (file)
index 0000000..8997041
--- /dev/null
@@ -0,0 +1,70 @@
+# NOTE: This is an environment specific for containers upgrade
+# CI. Mainly we deploy non-pacemakerized overcloud, as at the time
+# being containerization of services managed by pacemaker is not
+# complete, so we deploy and upgrade the non-HA services for now.
+
+resource_registry:
+  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+
+  # NOTE: This is needed because of upgrades from Ocata to Pike. We
+  # deploy the initial environment with Ocata templates, and
+  # overcloud-resource-registry.yaml there doesn't have this Docker
+  # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
+  # remove this.
+  OS::TripleO::Services::Docker: OS::Heat::None
+
+parameter_defaults:
+  ControllerServices:
+    - OS::TripleO::Services::CephMon
+    - OS::TripleO::Services::CephOSD
+    - OS::TripleO::Services::CinderApi
+    - OS::TripleO::Services::CinderScheduler
+    - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::HeatApi
+    - OS::TripleO::Services::HeatApiCfn
+    - OS::TripleO::Services::HeatApiCloudwatch
+    - OS::TripleO::Services::HeatEngine
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronMetadataAgent
+    - OS::TripleO::Services::NeutronServer
+    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::HAproxy
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::SwiftStorage
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoPackages
+    - OS::TripleO::Services::NovaCompute
+    - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::Sshd
+  ControllerExtraConfig:
+    nova::compute::libvirt::services::libvirt_virt_type: qemu
+    nova::compute::libvirt::libvirt_virt_type: qemu
+    # Required for Centos 7.3 and Qemu 2.6.0
+    nova::compute::libvirt::libvirt_cpu_mode: 'none'
+    #NOTE(gfidente): not great but we need this to deploy on ext4
+    #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+    ceph::profile::params::osd_max_object_name_len: 256
+    ceph::profile::params::osd_max_object_namespace_len: 64
+  SwiftCeilometerPipelineEnabled: False
+  Debug: True
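
As the NOTE at the top of this new CI environment explains, OS::TripleO::Services::Docker is mapped to OS::Heat::None only so the file can also be layered onto Ocata templates during the Ocata-to-Pike upgrade job. A sketch of the mapping a Pike-only deployment would use instead, pointing at the docker host service template added elsewhere in this change:

    # sketch: re-enable the docker host service when deploying Pike templates only
    resource_registry:
      OS::TripleO::Services::Docker: ../../puppet/services/docker.yaml
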
diff --git a/ci/environments/multinode-core.yaml b/ci/environments/multinode-core.yaml
index 0c07a1b..b5316f1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Core Service
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index c946ec8..20e37e3 100644 (file)
@@ -7,8 +7,8 @@ resource_registry:
   OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
   OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
   OS::TripleO::Services::Keepalived: OS::Heat::None
-  OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
-  OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
   OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
 
 parameter_defaults:
@@ -18,6 +18,7 @@ parameter_defaults:
     - OS::TripleO::Services::CinderApi
     - OS::TripleO::Services::CinderScheduler
     - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
@@ -51,6 +52,7 @@ parameter_defaults:
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::Sshd
   ControllerExtraConfig:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
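
The ControllerPrePuppet/ControllerPostPuppet to ControllerPreConfig/ControllerPostConfig renames repeated across these CI environments follow the interface change recorded in releasenotes/notes/swap-prepuppet-and-postpuppet-to-preconfig-and-postconfig-debd5f28bc578d51.yaml. Any out-of-tree environment that still carries the old keys would need the same one-line updates, for example:

    resource_registry:
      # old (Ocata) names:
      #   OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
      #   OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
      # new names used throughout this change:
      OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
      OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
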
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
index 2251cc0..609e06f 100644 (file)
@@ -7,13 +7,14 @@ resource_registry:
   OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
   OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
   OS::TripleO::Services::Keepalived: OS::Heat::None
-  OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
-  OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
   OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
 
 parameter_defaults:
   ControllerServices:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
@@ -55,6 +56,7 @@ parameter_defaults:
     - OS::TripleO::Services::NovaLibvirt
     - OS::TripleO::Services::Pacemaker
     - OS::TripleO::Services::Horizon
+    - OS::TripleO::Services::Sshd
   ControllerExtraConfig:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
index a6f3571..437d7c3 100644 (file)
@@ -16,12 +16,15 @@ resource_registry:
   OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
   OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
   OS::TripleO::Services::Keepalived: OS::Heat::None
-  OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
-  OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
   OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+  OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
+  OS::TripleO::Services::SensuClient: ../../puppet/services/monitoring/sensu-client.yaml
 
 parameter_defaults:
   ControllerServices:
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
@@ -50,6 +53,7 @@ parameter_defaults:
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Securetty
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
@@ -59,10 +63,8 @@ parameter_defaults:
     - OS::TripleO::Services::AodhEvaluator
     - OS::TripleO::Services::AodhNotifier
     - OS::TripleO::Services::AodhListener
-    - OS::TripleO::Services::CeilometerApi
-    - OS::TripleO::Services::CeilometerCollector
-    - OS::TripleO::Services::CeilometerExpirer
     - OS::TripleO::Services::CeilometerAgentCentral
+    - OS::TripleO::Services::CeilometerAgentIpmi
     - OS::TripleO::Services::CeilometerAgentNotification
     - OS::TripleO::Services::GnocchiApi
     - OS::TripleO::Services::GnocchiMetricd
@@ -80,6 +82,9 @@ parameter_defaults:
     - OS::TripleO::Services::Congress
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::SensuClient
+
   ControllerExtraConfig:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
@@ -113,3 +118,17 @@ parameter_defaults:
     ******************************************************************
   CollectdExtraPlugins:
     - rrdtool
+  LoggingServers:
+    - host: 127.0.0.1
+      port: 24224
+  MonitoringRabbitHost: 127.0.0.1
+  MonitoringRabbitPort: 5676
+  MonitoringRabbitPassword: sensu
+  TtyValues:
+    - console
+    - tty1
+    - tty2
+    - tty3
+    - tty4
+    - tty5
+    - tty6
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
index cbcfa9b..e3ecf74 100644 (file)
@@ -11,12 +11,13 @@ resource_registry:
   OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
   OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
   OS::TripleO::Services::Keepalived: OS::Heat::None
-  OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
-  OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
   OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
 
 parameter_defaults:
   ControllerServices:
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
@@ -60,6 +61,7 @@ parameter_defaults:
     - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::Sshd
   ControllerExtraConfig:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml
index 6e926f7..d1c8bc1 100644 (file)
@@ -11,12 +11,13 @@ resource_registry:
   OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
   OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
   OS::TripleO::Services::Keepalived: OS::Heat::None
-  OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
-  OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
   OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
 
 parameter_defaults:
   ControllerServices:
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
@@ -54,6 +55,7 @@ parameter_defaults:
     - OS::TripleO::Services::MistralExecutor
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::Sshd
   ControllerExtraConfig:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index dc05ab4..24fb2bf 100644 (file)
@@ -12,9 +12,10 @@ resource_registry:
   OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml
   OS::TripleO::Services::ManilaShare: ../../puppet/services/pacemaker/manila-share.yaml
   OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+  OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
   # These enable Pacemaker
-  OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
-  OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
   OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
   OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
   OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
@@ -30,6 +31,7 @@ parameter_defaults:
     - OS::TripleO::Services::CephMon
     - OS::TripleO::Services::CephOSD
     - OS::TripleO::Services::CephRgw
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
@@ -39,6 +41,7 @@ parameter_defaults:
     - OS::TripleO::Services::HeatEngine
     - OS::TripleO::Services::MySQL
     - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronBgpVpnApi
     - OS::TripleO::Services::NeutronDhcpAgent
     - OS::TripleO::Services::NeutronL3Agent
     - OS::TripleO::Services::NeutronMetadataAgent
@@ -66,6 +69,7 @@ parameter_defaults:
     - OS::TripleO::Services::NovaLibvirt
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::Sshd
   ControllerExtraConfig:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
@@ -83,3 +87,5 @@ parameter_defaults:
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
   SwiftCeilometerPipelineEnabled: false
+  NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+  BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
diff --git a/ci/pingtests/scenario001-multinode.yaml b/ci/pingtests/scenario001-multinode.yaml
index 2651c0d..141a389 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   HOT template to created resources deployed by scenario001.
@@ -128,21 +128,19 @@ resources:
       ram: 512
       vcpus: 1
 
-# Disabling this resource now
-# https://bugs.launchpad.net/tripleo/+bug/1646506
-#  gnocchi_res_alarm:
-#    type: OS::Aodh::GnocchiResourcesAlarm
-#    properties:
-#      description: Do stuff with gnocchi
-#      metric: cpu_util
-#      aggregation_method: mean
-#      granularity: 60
-#      evaluation_periods: 1
-#      threshold: 50
-#      alarm_actions: []
-#      resource_type: instance
-#      resource_id: { get_resource: server1 }
-#      comparison_operator: gt
+  gnocchi_res_alarm:
+    type: OS::Aodh::GnocchiResourcesAlarm
+    properties:
+      description: Do stuff with gnocchi
+      metric: cpu_util
+      aggregation_method: mean
+      granularity: 60
+      evaluation_periods: 1
+      threshold: 50
+      alarm_actions: []
+      resource_type: instance
+      resource_id: { get_resource: server1 }
+      comparison_operator: gt
 
   asg:
     type: OS::Heat::AutoScalingGroup
diff --git a/ci/pingtests/scenario002-multinode.yaml b/ci/pingtests/scenario002-multinode.yaml
index da1ae60..72aac4c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   HOT template to created resources deployed by scenario002.
diff --git a/ci/pingtests/scenario003-multinode.yaml b/ci/pingtests/scenario003-multinode.yaml
index c3ceada..7685cfa 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   HOT template to created resources deployed by scenario003.
diff --git a/ci/pingtests/scenario004-multinode.yaml b/ci/pingtests/scenario004-multinode.yaml
index ebdfea1..8ba60e2 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   HOT template to created resources deployed by scenario004.
@@ -123,10 +123,13 @@ resources:
     properties:
       name: default
       driver_handles_share_servers: false
+      snapshot_support: false
 
   manila_share:
     type: OS::Manila::Share
     properties:
+      name: pingtest
+      share_type: { get_resource: manila_share_type }
       share_protocol: CEPHFS
       size: 1
 
diff --git a/ci/pingtests/tenantvm_floatingip.yaml b/ci/pingtests/tenantvm_floatingip.yaml
index b910d6c..28b0911 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   This template resides in tripleo-ci for Mitaka CI jobs only.
diff --git a/default_passwords.yaml b/default_passwords.yaml
index c85881e..04828d9 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Passwords we manage at the top level
 
diff --git a/deployed-server/README.rst b/deployed-server/README.rst
index e4d8299..8638818 100644 (file)
@@ -67,11 +67,11 @@ example:
 parameter_defaults:
   ControlPlaneDefaultRoute: 192.168.122.130
   ControlPlaneSubnetCidr: "24"
-  EC2MetadataIp: "192.0.2.1"
+  EC2MetadataIp: "192.168.24.1"
 
 In this example, 192.168.122.130 is the external management IP of an
 undercloud, thus it is the default route for the configured local_ip value of
-192.0.2.1.
+192.168.24.1.
 
 
 os-collect-config
diff --git a/deployed-server/ctlplane-port.yaml b/deployed-server/ctlplane-port.yaml
index 7b5cdf1..8cd22ca 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 parameters:
   network:
diff --git a/deployed-server/deployed-neutron-port.yaml b/deployed-server/deployed-neutron-port.yaml
index bddf8bc..861ee91 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: "
   A fake OS::Neutron::Port stack which outputs fixed_ips and subnets based on
diff --git a/deployed-server/deployed-server-bootstrap-centos.sh b/deployed-server/deployed-server-bootstrap-centos.sh
index 7266ca5..6f2bb12 100644 (file)
@@ -8,9 +8,13 @@ yum install -y \
     openstack-puppet-modules \
     os-net-config \
     openvswitch \
-    python-heat-agent*
+    python-heat-agent* \
+    openstack-selinux
 
 ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
 
 setenforce 0
 sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
+
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/iptables
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/ip6tables
index c1740d7..5b26823 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: 'Deployed Server Bootstrap Config'
 
index 36ff007..9e9e9b3 100644 (file)
@@ -8,6 +8,10 @@ yum install -y \
     openstack-puppet-modules \
     os-net-config \
     openvswitch \
-    python-heat-agent*
+    python-heat-agent* \
+    openstack-selinux
 
 ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
+
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/iptables
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/ip6tables
index 2d2f515..a901851 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: 'Deployed Server Bootstrap Config'
 
index 04da556..084c2f8 100644 (file)
@@ -26,6 +26,7 @@
   disable_constraints: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephMon
     - OS::TripleO::Services::CephExternal
     - OS::TripleO::Services::CephRgw
   disable_constraints: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephClient
     - OS::TripleO::Services::CephExternal
     - OS::TripleO::Services::Timezone
   disable_constraints: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::BlockStorageCinderVolume
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
   disable_constraints: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::SwiftStorage
   disable_constraints: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephOSD
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
index 1e8afb2..0847bfb 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 parameters:
   image:
     type: string
@@ -81,6 +81,7 @@ resources:
   InstanceIdDeployment:
     type: OS::Heat::StructuredDeployment
     properties:
+      name: InstanceIdDeployment
       config: {get_resource: InstanceIdConfig}
       server: {get_resource: deployed-server}
     depends_on: UpgradeInitDeployment
@@ -103,6 +104,7 @@ resources:
   HostsEntryDeployment:
     type: OS::Heat::SoftwareDeployment
     properties:
+      name: HostsEntryDeployment
       config: {get_resource: HostsEntryConfig}
       server: {get_resource: deployed-server}
 
index 6c196f9..d79121b 100755 (executable)
@@ -12,6 +12,7 @@ CEPHSTORAGE_HOSTS=${CEPHSTORAGE_HOSTS:-""}
 SUBNODES_SSH_KEY=${SUBNODES_SSH_KEY:-"~/.ssh/id_rsa"}
 SSH_OPTIONS="-tt -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=Verbose -o PasswordAuthentication=no -o ConnectionAttempts=32"
 OVERCLOUD_ROLES=${OVERCLOUD_ROLES:-"Controller Compute BlockStorage ObjectStorage CephStorage"}
+STACK_NAME=${STACK_NAME:-"overcloud"}
 
 # Set the _hosts vars for the default roles based on the old var names that
 # were all caps for backwards compatibility.
@@ -53,17 +54,17 @@ function check_stack {
 
 
 for role in $OVERCLOUD_ROLES; do
-    while ! check_stack overcloud; do
+    while ! check_stack $STACK_NAME; do
         sleep $SLEEP_TIME
     done
 
-    rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
+    rg_stack=$(openstack stack resource show $STACK_NAME $role -c physical_resource_id -f value)
     while ! check_stack $rg_stack; do
         sleep $SLEEP_TIME
-        rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
+        rg_stack=$(openstack stack resource show $STACK_NAME $role -c physical_resource_id -f value)
     done
 
-    stacks=$(openstack stack resource list $rg_stack -c physical_resource_id -f value)
+    stacks=$(openstack stack resource list $rg_stack -c resource_name -c physical_resource_id -f json | jq -r "sort_by(.resource_name) | .[] | .physical_resource_id")
 
     i=0
 
@@ -89,16 +90,16 @@ for role in $OVERCLOUD_ROLES; do
         done
 
         echo "======================"
-        echo "$role$i os-collect-config.conf configuration:"
+        echo "$role$i deployed-server.json configuration:"
 
-        config="
-[DEFAULT]
-collectors=request
-command=os-refresh-config
-polling_interval=30
-
-[request]
-metadata_url=$deployed_server_metadata_url"
+        config="{
+  \"os-collect-config\": {
+    \"collectors\": [\"request\", \"local\"],
+    \"request\": {
+      \"metadata_url\": \"$deployed_server_metadata_url\"
+    }
+  }
+}"
 
         echo "$config"
         echo "======================"
@@ -108,12 +109,11 @@ metadata_url=$deployed_server_metadata_url"
         host=
         eval host=\${${role}_hosts_a[i]}
         if [ -n "$host" ]; then
-            # Delete the os-collect-config.conf template so our file won't get
-            # overwritten
-            ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo /bin/rm -f /usr/libexec/os-apply-config/templates/etc/os-collect-config.conf
-            ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host "echo \"$config\" > os-collect-config.conf"
-            ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo cp os-collect-config.conf /etc/os-collect-config.conf
-            ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl restart os-collect-config
+            ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host "echo '$config' > deployed-server.json"
+            ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo mkdir -p -m 0700 /var/lib/os-collect-config/local-data/ || true
+            ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo cp deployed-server.json /var/lib/os-collect-config/local-data/deployed-server.json
+            ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl start os-collect-config
+            ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl enable os-collect-config
         fi
 
         let i+=1
index 157bf63..49dd00c 100755 (executable)
 # inside of a container.
 
 import json
+import logging
 import os
 import subprocess
 import sys
 import tempfile
 import multiprocessing
 
+log = logging.getLogger()
+log.setLevel(logging.DEBUG)
+ch = logging.StreamHandler(sys.stdout)
+ch.setLevel(logging.DEBUG)
+formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
+ch.setFormatter(formatter)
+log.addHandler(ch)
 
 # this is to match what we do in deployed-server
 def short_hostname():
@@ -36,39 +44,47 @@ def short_hostname():
 
 
 def pull_image(name):
-    print('Pulling image: %s' % name)
+    log.info('Pulling image: %s' % name)
     subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
     cmd_stdout, cmd_stderr = subproc.communicate()
-    print(cmd_stdout)
-    print(cmd_stderr)
+    if cmd_stdout:
+        log.debug(cmd_stdout)
+    if cmd_stderr:
+        log.debug(cmd_stderr)
 
 
 def rm_container(name):
     if os.environ.get('SHOW_DIFF', None):
-        print('Diffing container: %s' % name)
+        log.info('Diffing container: %s' % name)
         subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
         cmd_stdout, cmd_stderr = subproc.communicate()
-        print(cmd_stdout)
-        print(cmd_stderr)
+        if cmd_stdout:
+            log.debug(cmd_stdout)
+        if cmd_stderr:
+            log.debug(cmd_stderr)
 
-    print('Removing container: %s' % name)
+    log.info('Removing container: %s' % name)
     subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
     cmd_stdout, cmd_stderr = subproc.communicate()
-    print(cmd_stdout)
-    print(cmd_stderr)
+    if cmd_stdout:
+        log.debug(cmd_stdout)
+    if cmd_stderr and \
+           cmd_stderr != 'Error response from daemon: ' \
+           'No such container: {}\n'.format(name):
+        log.debug(cmd_stderr)
 
 process_count = int(os.environ.get('PROCESS_COUNT',
                                    multiprocessing.cpu_count()))
 
+log.info('Running docker-puppet')
 config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
-print('docker-puppet')
-print('CONFIG: %s' % config_file)
+log.debug('CONFIG: %s' % config_file)
 with open(config_file) as f:
     json_data = json.load(f)
 
@@ -105,16 +121,15 @@ for service in (json_data or []):
     if not manifest or not config_image:
         continue
 
-    print('---------')
-    print('config_volume %s' % config_volume)
-    print('puppet_tags %s' % puppet_tags)
-    print('manifest %s' % manifest)
-    print('config_image %s' % config_image)
-    print('volumes %s' % volumes)
+    log.debug('config_volume %s' % config_volume)
+    log.debug('puppet_tags %s' % puppet_tags)
+    log.debug('manifest %s' % manifest)
+    log.debug('config_image %s' % config_image)
+    log.debug('volumes %s' % volumes)
     # We key off of config volume for all configs.
     if config_volume in configs:
         # Append puppet tags and manifest.
-        print("Existing service, appending puppet tags and manifest\n")
+        log.info("Existing service, appending puppet tags and manifest")
         if puppet_tags:
             configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
                                                    puppet_tags)
@@ -122,24 +137,22 @@ for service in (json_data or []):
             configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
                                                     manifest)
         if configs[config_volume][3] != config_image:
-            print("WARNING: Config containers do not match even though"
-                  " shared volumes are the same!\n")
+            log.warn("Config containers do not match even though"
+                     " shared volumes are the same!")
     else:
-        print("Adding new service\n")
+        log.info("Adding new service")
         configs[config_volume] = service
 
-print('Service compilation completed.\n')
+log.info('Service compilation completed.')
 
 def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
 
-    print('---------')
-    print('config_volume %s' % config_volume)
-    print('puppet_tags %s' % puppet_tags)
-    print('manifest %s' % manifest)
-    print('config_image %s' % config_image)
-    print('volumes %s' % volumes)
-    hostname = short_hostname()
-    sh_script = '/var/lib/docker-puppet/docker-puppet-%s.sh' % config_volume
+    log.debug('config_volume %s' % config_volume)
+    log.debug('puppet_tags %s' % puppet_tags)
+    log.debug('manifest %s' % manifest)
+    log.debug('config_image %s' % config_image)
+    log.debug('volumes %s' % volumes)
+    sh_script = '/var/lib/docker-puppet/docker-puppet.sh'
 
     with open(sh_script, 'w') as script_file:
         os.chmod(script_file.name, 0755)
@@ -148,43 +161,40 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         mkdir -p /etc/puppet
         cp -a /tmp/puppet-etc/* /etc/puppet
         rm -Rf /etc/puppet/ssl # not in use and causes permission errors
-        echo '{"step": %(step)s}' > /etc/puppet/hieradata/docker.json
+        echo "{\\"step\\": $STEP}" > /etc/puppet/hieradata/docker.json
         TAGS=""
-        if [ -n "%(puppet_tags)s" ]; then
-            TAGS='--tags "%(puppet_tags)s"'
+        if [ -n "$PUPPET_TAGS" ]; then
+            TAGS="--tags \"$PUPPET_TAGS\""
         fi
-        FACTER_hostname=%(hostname)s FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
+        FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
 
         # Disables archiving
-        if [ -z "%(no_archive)s" ]; then
-            rm -Rf /var/lib/config-data/%(name)s
+        if [ -z "$NO_ARCHIVE" ]; then
+            rm -Rf /var/lib/config-data/${NAME}
 
             # copying etc should be enough for most services
-            mkdir -p /var/lib/config-data/%(name)s/etc
-            cp -a /etc/* /var/lib/config-data/%(name)s/etc/
+            mkdir -p /var/lib/config-data/${NAME}/etc
+            cp -a /etc/* /var/lib/config-data/${NAME}/etc/
 
             if [ -d /root/ ]; then
-              cp -a /root/ /var/lib/config-data/%(name)s/root/
+              cp -a /root/ /var/lib/config-data/${NAME}/root/
             fi
             if [ -d /var/lib/ironic/tftpboot/ ]; then
-              mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
-              cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/tftpboot/
+              mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
+              cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/tftpboot/
             fi
             if [ -d /var/lib/ironic/httpboot/ ]; then
-              mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
-              cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/httpboot/
+              mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
+              cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/httpboot/
             fi
 
             # apache services may have files placed in /var/www/
             if [ -d /var/www/ ]; then
-             mkdir -p /var/lib/config-data/%(name)s/var/www
-             cp -a /var/www/* /var/lib/config-data/%(name)s/var/www/
+             mkdir -p /var/lib/config-data/${NAME}/var/www
+             cp -a /var/www/* /var/lib/config-data/${NAME}/var/www/
             fi
         fi
-        """ % {'puppet_tags': puppet_tags, 'name': config_volume,
-               'hostname': hostname,
-               'no_archive': os.environ.get('NO_ARCHIVE', ''),
-               'step': os.environ.get('STEP', '6')})
+        """)
 
     with tempfile.NamedTemporaryFile() as tmp_man:
         with open(tmp_man.name, 'w') as man_file:
@@ -197,33 +207,55 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         dcmd = ['/usr/bin/docker', 'run',
                 '--user', 'root',
                 '--name', 'docker-puppet-%s' % config_volume,
+                '--env', 'PUPPET_TAGS=%s' % puppet_tags,
+                '--env', 'NAME=%s' % config_volume,
+                '--env', 'HOSTNAME=%s' % short_hostname(),
+                '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
+                '--env', 'STEP=%s' % os.environ.get('STEP', '6'),
                 '--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
                 '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
                 '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
                 '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
                 '--volume', 'tripleo_logs:/var/log/tripleo/',
+                # OpenSSL trusted CA injection
+                '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
+                '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',
+                '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
+                '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
+                # script injection
                 '--volume', '%s:%s:rw' % (sh_script, sh_script) ]
 
         for volume in volumes:
-            dcmd.extend(['--volume', volume])
+            if volume:
+                dcmd.extend(['--volume', volume])
 
         dcmd.extend(['--entrypoint', sh_script])
 
         env = {}
+        # NOTE(flaper87): Always copy the DOCKER_* environment variables as
+        # they contain the access data for the docker daemon.
+        for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
+            env[k] = os.environ.get(k)
+
         if os.environ.get('NET_HOST', 'false') == 'true':
-            print('NET_HOST enabled')
+            log.debug('NET_HOST enabled')
             dcmd.extend(['--net', 'host', '--volume',
                          '/etc/hosts:/etc/hosts:ro'])
         dcmd.append(config_image)
+        log.debug('Running docker command: %s' % ' '.join(dcmd))
 
         subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, env=env)
         cmd_stdout, cmd_stderr = subproc.communicate()
-        print(cmd_stdout)
-        print(cmd_stderr)
+        if cmd_stdout:
+            log.debug(cmd_stdout)
+        if cmd_stderr:
+            log.debug(cmd_stderr)
         if subproc.returncode != 0:
-            print('Failed running docker-puppet.py for %s' % config_volume)
-        rm_container('docker-puppet-%s' % config_volume)
+            log.error('Failed running docker-puppet.py for %s' % config_volume)
+        else:
+            # only delete successful runs, for debugging
+            rm_container('docker-puppet-%s' % config_volume)
         return subproc.returncode
 
 # Holds all the information for each process to consume.
@@ -241,16 +273,25 @@ for config_volume in configs:
     volumes = service[4] if len(service) > 4 else []
 
     if puppet_tags:
-        puppet_tags = "file,file_line,concat,%s" % puppet_tags
+        puppet_tags = "file,file_line,concat,augeas,%s" % puppet_tags
     else:
-        puppet_tags = "file,file_line,concat"
+        puppet_tags = "file,file_line,concat,augeas"
 
     process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
 
 for p in process_map:
-    print '--\n%s' % p
+    log.debug('- %s' % p)
 
 # Fire off processes to perform each configuration.  Defaults
 # to the number of CPUs on the system.
 p = multiprocessing.Pool(process_count)
-p.map(mp_puppet_config, process_map)
+returncodes = list(p.map(mp_puppet_config, process_map))
+config_volumes = [pm[0] for pm in process_map]
+success = True
+for returncode, config_volume in zip(returncodes, config_volumes):
+    if returncode != 0:
+        log.error('ERROR configuring %s' % config_volume)
+        success = False
+
+if not success:
+    sys.exit(1)
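
For orientation when reading the docker-puppet.py hunks above: the file it loads (CONFIG, defaulting to /var/lib/docker-puppet/docker-puppet.json) is a list of positional entries, i.e. config_volume, puppet_tags, manifest, config_image, and an optional list of extra bind-mount volumes, as the configs[config_volume][1..3] and service[4] indexing implies. A minimal sketch of one entry with hypothetical values (the file itself is JSON; YAML is used here only for readability):

    - - keystone                                       # config_volume
      - keystone_config                                # puppet_tags; file,file_line,concat,augeas are appended
      - include ::tripleo::profile::base::keystone     # manifest (the service's step_config)
      - tripleoupstream/centos-binary-keystone:latest  # config_image
      - []                                             # optional extra volumes
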
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
new file mode 100644 (file)
index 0000000..d380191
--- /dev/null
@@ -0,0 +1,356 @@
+# certain initialization steps (run in a container) will occur
+# on the role marked as primary controller or the first role listed
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+    {%- set _ = primary_role.pop() -%}
+    {%- set _ = primary_role.append(role) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
+{% set deploy_steps_max = 6 -%}
+
+heat_template_version: pike
+
+description: >
+  Post-deploy configuration steps via puppet for all roles,
+  as defined in ../roles_data.yaml
+
+parameters:
+  servers:
+    type: json
+    description: Mapping of Role name e.g Controller to a list of servers
+  role_data:
+    type: json
+    description: Mapping of Role name e.g Controller to the per-role data
+  DeployIdentifier:
+    default: ''
+    type: string
+    description: >
+      Setting this to a unique value will re-run any deployment tasks which
+      perform configuration on a Heat stack-update.
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+resources:
+
+  # These utility tasks use docker-puppet.py to execute tasks via puppet
+  # We only execute these on the first node in the primary role
+  {{primary_role_name}}DockerPuppetTasks:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+        yaql:
+          expression:
+            $.data.default_tasks + dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
+          data:
+            docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]}
+            default_tasks:
+{%- for step in range(1, deploy_steps_max) %}
+              step_{{step}}: {}
+{%- endfor %}
+
+# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
+{% for step in range(1, deploy_steps_max) %}
+
+  {{primary_role_name}}DockerPuppetJsonConfig{{step}}:
+      type: OS::Heat::StructuredConfig
+      properties:
+        group: json-file
+        config:
+          /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json:
+            {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']}
+
+  {{primary_role_name}}DockerPuppetJsonDeployment{{step}}:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      server: {get_param: [servers, {{primary_role_name}}, '0']}
+      config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}}
+
+  {{primary_role_name}}DockerPuppetTasksConfig{{step}}:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: {get_file: docker-puppet.py}
+      inputs:
+        - name: CONFIG
+        - name: NET_HOST
+        - name: NO_ARCHIVE
+        - name: STEP
+
+  {{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
+    type: OS::Heat::SoftwareDeployment
+    depends_on:
+      {% for dep in roles %}
+      - {{dep.name}}Deployment_Step{{step}}
+      - {{dep.name}}ContainersDeployment_Step{{step}}
+      {% endfor %}
+      - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+    properties:
+      name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+      server: {get_param: [servers, {{primary_role_name}}, '0']}
+      config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
+      input_values:
+        CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
+        NET_HOST: 'true'
+        NO_ARCHIVE: 'true'
+        STEP: {{step}}
+
+{% endfor %}
+# END primary_role_name docker-puppet-tasks
+
+{% for role in roles %}
+  # Post deployment steps for all roles
+  # A single config is re-applied with an incrementing step number
+  # {{role.name}} Role steps
+  {{role.name}}ArtifactsConfig:
+    type: ../puppet/deploy-artifacts.yaml
+
+  {{role.name}}ArtifactsDeploy:
+    type: OS::Heat::StructuredDeploymentGroup
+    properties:
+      servers:  {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}ArtifactsConfig}
+
+  {{role.name}}PreConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PreConfig
+    properties:
+      servers: {get_param: [servers, {{role.name}}]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}CreateConfigDir:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: {get_file: create-config-dir.sh}
+
+  {{role.name}}CreateConfigDirDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}CreateConfigDir}
+
+  {{role.name}}HostPrepAnsible:
+    type: OS::Heat::Value
+    properties:
+      value:
+        str_replace:
+          template: CONFIG
+          params:
+            CONFIG:
+              - hosts: localhost
+                connection: local
+                tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
+
+  {{role.name}}HostPrepConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: ansible
+      options:
+        modulepath: /usr/share/ansible-modules
+      config: {get_attr: [{{role.name}}HostPrepAnsible, value]}
+
+  {{role.name}}HostPrepDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}HostPrepConfig}
+
+  # this creates a JSON config file for our docker-puppet.py script
+  {{role.name}}GenPuppetConfig:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: json-file
+      config:
+        /var/lib/docker-puppet/docker-puppet.json:
+          {get_param: [role_data, {{role.name}}, puppet_config]}
+
+  {{role.name}}GenPuppetDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}GenPuppetConfig}
+
+  {{role.name}}GenerateConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: {get_file: docker-puppet.py}
+      inputs:
+        - name: NET_HOST
+
+  {{role.name}}GenerateConfigDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment, {{role.name}}HostPrepDeployment]
+    properties:
+      name: {{role.name}}GenerateConfigDeployment
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}GenerateConfig}
+      input_values:
+        NET_HOST: 'true'
+
+  {{role.name}}PuppetStepConfig:
+    type: OS::Heat::Value
+    properties:
+      type: string
+      value:
+        yaql:
+          expression:
+            # select 'step_config' only from services that do not have a docker_config
+            $.data.service_names.zip($.data.step_config, $.data.docker_config).where($[2] = null).where($[1] != null).select($[1]).join("\n")
+          data:
+            service_names: {get_param: [role_data, {{role.name}}, service_names]}
+            step_config: {get_param: [role_data, {{role.name}}, step_config]}
+            docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
+
+  {{role.name}}DockerConfig:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+        yaql:
+          expression:
+            # select 'docker_config' only from services that have it
+            $.data.service_names.zip($.data.docker_config).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {})
+          data:
+            service_names: {get_param: [role_data, {{role.name}}, service_names]}
+            docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
+
+  # Here we are dumping all the docker container startup configuration data
+  # so that we can have access to how they are started outside of heat
+  # and docker-cmd.  This lets us create command line tools to start and
+  # test these containers.
+  {{role.name}}DockerConfigJsonStartupData:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: json-file
+      config:
+        /var/lib/docker-container-startup-configs.json:
+          {get_attr: [{{role.name}}DockerConfig, value]}
+
+  {{role.name}}DockerConfigJsonStartupDataDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      config: {get_resource: {{role.name}}DockerConfigJsonStartupData}
+      servers: {get_param: [servers, {{role.name}}]}
+
+  {{role.name}}KollaJsonConfig:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: json-file
+      config:
+        {get_param: [role_data, {{role.name}}, kolla_config]}
+
+  {{role.name}}KollaJsonDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      name: {{role.name}}KollaJsonDeployment
+      config: {get_resource: {{role.name}}KollaJsonConfig}
+      servers: {get_param: [servers, {{role.name}}]}
+
+  # BEGIN BAREMETAL CONFIG STEPS
+
+  {{role.name}}PreConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PreConfig
+    properties:
+      servers: {get_param: [servers, {{role.name}}]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}Config:
+    type: OS::TripleO::{{role.name}}Config
+    properties:
+      StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
+
+  {% for step in range(1, deploy_steps_max) %}
+
+  {{role.name}}Deployment_Step{{step}}:
+    type: OS::Heat::StructuredDeploymentGroup
+  {% if step == 1 %}
+    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+  {% else %}
+    depends_on:
+      {% for dep in roles %}
+      - {{dep.name}}Deployment_Step{{step -1}}
+      - {{dep.name}}ContainersDeployment_Step{{step -1}}
+      {% endfor %}
+      - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+  {% endif %}
+    properties:
+      name: {{role.name}}Deployment_Step{{step}}
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}Config}
+      input_values:
+        step: {{step}}
+        update_identifier: {get_param: DeployIdentifier}
+
+  {% endfor %}
+  # END BAREMETAL CONFIG STEPS
+
+  # BEGIN CONTAINER CONFIG STEPS
+  {% for step in range(1, deploy_steps_max) %}
+
+  {{role.name}}ContainersConfig_Step{{step}}:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: docker-cmd
+      config:
+        {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]}
+
+  {{role.name}}ContainersDeployment_Step{{step}}:
+    type: OS::Heat::StructuredDeploymentGroup
+  {% if step == 1 %}
+    depends_on:
+      - {{role.name}}KollaJsonDeployment
+      - {{role.name}}GenPuppetDeployment
+      - {{role.name}}GenerateConfigDeployment
+        {%- for dep in roles %}
+      - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
+        {%- endfor %}
+  {% else %}
+    depends_on:
+        {% for dep in roles %}
+        - {{dep.name}}ContainersDeployment_Step{{step -1}}
+        - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
+        - {{dep.name}}Deployment_Step{{step -1}}
+        {% endfor %}
+        - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+  {% endif %}
+    properties:
+      name: {{role.name}}ContainersDeployment_Step{{step}}
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}}
+
+  {% endfor %}
+  # END CONTAINER CONFIG STEPS
+
+  {{role.name}}PostConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PostConfig
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step5
+      - {{primary_role_name}}DockerPuppetTasksDeployment5
+  {% endfor %}
+    properties:
+      servers:  {get_param: servers}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
+  # Note, this should come last, so use depends_on to ensure
+  # this is created after any other resources.
+  {{role.name}}ExtraConfigPost:
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}PostConfig
+  {% endfor %}
+    type: OS::TripleO::NodeExtraConfigPost
+    properties:
+        servers: {get_param: [servers, {{role.name}}]}
+
+{% endfor %}
index b2287e9..8b4c6a0 100755 (executable)
@@ -1,26 +1,8 @@
 #!/bin/bash
 set -eux
-# TODO This would be better in puppet
+# This file contains setup steps that can't be or have not yet been moved to
+# puppet
 
-# TODO remove this when built image includes docker
-if [ ! -f "/usr/bin/docker" ]; then
-    yum -y install docker
-fi
-
-# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is
-# a place holder for text replacement done via heat
-if [ "$docker_namespace_is_registry" = "True" ]; then
-    /usr/bin/systemctl stop docker.service
-    # if namespace is used with local registry, trim all namespacing
-    trim_var=$docker_registry
-    registry_host="${trim_var%%/*}"
-    /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker
-fi
-
-# enable and start docker
-/usr/bin/systemctl enable docker.service
-/usr/bin/systemctl start docker.service
-
-# Disable libvirtd
+# Disable libvirtd since it conflicts with nova_libvirt container
 /usr/bin/systemctl disable libvirtd.service
 /usr/bin/systemctl stop libvirtd.service
index 2f25898..4b061e1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 parameters:
   DockerNamespace:
diff --git a/docker/post-upgrade.j2.yaml b/docker/post-upgrade.j2.yaml
new file mode 100644 (file)
index 0000000..4477f86
--- /dev/null
@@ -0,0 +1,4 @@
+# Note the include here is the same as post.j2.yaml but the data used at
+# the time of rendering is different if any roles disable upgrades
+{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% include 'docker-steps.j2' %}
index 76232d1..fd95621 100644 (file)
@@ -1,325 +1 @@
-# certain initialization steps (run in a container) will occur
-# on the first role listed in the roles file
-{% set primary_role_name = roles[0].name -%}
-
-heat_template_version: ocata
-
-description: >
-  Post-deploy configuration steps via puppet for all roles,
-  as defined in ../roles_data.yaml
-
-parameters:
-  servers:
-    type: json
-    description: Mapping of Role name e.g Controller to a list of servers
-  role_data:
-    type: json
-    description: Mapping of Role name e.g Controller to the per-role data
-  DeployIdentifier:
-    default: ''
-    type: string
-    description: >
-      Setting this to a unique value will re-run any deployment tasks which
-      perform configuration on a Heat stack-update.
-  EndpointMap:
-    default: {}
-    description: Mapping of service endpoint -> protocol. Typically set
-                 via parameter_defaults in the resource registry.
-    type: json
-
-resources:
-
-  # These utility tasks use docker-puppet.py to execute tasks via puppet
-  # We only execute these on the first node in the primary role
-  {{primary_role_name}}DockerPuppetTasks:
-    type: OS::Heat::Value
-    properties:
-      type: json
-      value:
-        yaql:
-          expression:
-            dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
-          data:
-            docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]}
-
-# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
-{% for step in range(1, 6) %}
-
-  {{primary_role_name}}DockerPuppetJsonConfig{{step}}:
-      type: OS::Heat::StructuredConfig
-      properties:
-        group: json-file
-        config:
-          /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json:
-            {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']}
-
-  {{primary_role_name}}DockerPuppetJsonDeployment{{step}}:
-    type: OS::Heat::SoftwareDeployment
-    properties:
-      server: {get_param: [servers, {{primary_role_name}}, '0']}
-      config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}}
-
-  {{primary_role_name}}DockerPuppetTasksConfig{{step}}:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: docker-puppet.py}
-      inputs:
-        - name: CONFIG
-        - name: NET_HOST
-        - name: NO_ARCHIVE
-        - name: STEP
-
-  {{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
-    type: OS::Heat::SoftwareDeployment
-    depends_on:
-      {% for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step}}
-      - {{dep.name}}ContainersDeployment_Step{{step}}
-      {% endfor %}
-      - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
-    properties:
-      name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
-      server: {get_param: [servers, {{primary_role_name}}, '0']}
-      config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
-      input_values:
-        CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
-        NET_HOST: 'true'
-        NO_ARCHIVE: 'true'
-        STEP: {{step}}
-
-{% endfor %}
-# END primary_role_name docker-puppet-tasks
-
-{% for role in roles %}
-  # Post deployment steps for all roles
-  # A single config is re-applied with an incrementing step number
-  # {{role.name}} Role steps
-  {{role.name}}ArtifactsConfig:
-    type: ../puppet/deploy-artifacts.yaml
-
-  {{role.name}}ArtifactsDeploy:
-    type: OS::Heat::StructuredDeploymentGroup
-    properties:
-      servers:  {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}ArtifactsConfig}
-
-  {{role.name}}PreConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PreConfig
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  {{role.name}}CreateConfigDir:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: create-config-dir.sh}
-
-  {{role.name}}CreateConfigDirDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}CreateConfigDir}
-
-  # this creates a JSON config file for our docker-puppet.py script
-  {{role.name}}GenPuppetConfig:
-    type: OS::Heat::StructuredConfig
-    properties:
-      group: json-file
-      config:
-        /var/lib/docker-puppet/docker-puppet.json:
-          {get_param: [role_data, {{role.name}}, puppet_config]}
-
-  {{role.name}}GenPuppetDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}GenPuppetConfig}
-
-  {{role.name}}GenerateConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: docker-puppet.py}
-
-  {{role.name}}GenerateConfigDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment]
-    properties:
-      name: {{role.name}}GenerateConfigDeployment
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}GenerateConfig}
-
-  {{role.name}}PuppetStepConfig:
-    type: OS::Heat::Value
-    properties:
-      type: string
-      value:
-        yaql:
-          expression:
-            # select 'step_config' only from services that do not have a docker_image
-            $.data.service_names.zip($.data.step_config, $.data.docker_image).where($[2] = null).where($[1] != null).select($[1]).join("\n")
-          data:
-            service_names: {get_param: [role_data, {{role.name}}, service_names]}
-            step_config: {get_param: [role_data, {{role.name}}, step_config]}
-            docker_image: {get_param: [role_data, {{role.name}}, docker_image]}
-
-  {{role.name}}DockerConfig:
-    type: OS::Heat::Value
-    properties:
-      type: json
-      value:
-        yaql:
-          expression:
-            # select 'docker_config' only from services that have a docker_image
-            $.data.service_names.zip($.data.docker_config, $.data.docker_image).where($[2] != null).select($[1]).reduce($1.mergeWith($2), {})
-          data:
-            service_names: {get_param: [role_data, {{role.name}}, service_names]}
-            docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
-            docker_image: {get_param: [role_data, {{role.name}}, docker_image]}
-
-  # Here we are dumping all the docker container startup configuration data
-  # so that we can have access to how they are started outside of heat
-  # and docker-cmd.  This lets us create command line tools to start and
-  # test these containers.
-  {{role.name}}DockerConfigJsonStartupData:
-    type: OS::Heat::StructuredConfig
-    properties:
-      group: json-file
-      config:
-        /var/lib/docker-container-startup-configs.json:
-          {get_attr: [{{role.name}}DockerConfig, value]}
-
-  {{role.name}}DockerConfigJsonStartupDataDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: {{role.name}}DockerConfigJsonStartupData}
-      servers: {get_param: [servers, {{role.name}}]}
-
-  {{role.name}}KollaJsonConfig:
-    type: OS::Heat::StructuredConfig
-    properties:
-      group: json-file
-      config:
-        {get_param: [role_data, {{role.name}}, kolla_config]}
-
-  {{role.name}}KollaJsonDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      name: {{role.name}}KollaJsonDeployment
-      config: {get_resource: {{role.name}}KollaJsonConfig}
-      servers: {get_param: [servers, {{role.name}}]}
-
-  # BEGIN BAREMETAL CONFIG STEPS
-
-  {% if role.name == 'Controller' %}
-  ControllerPrePuppet:
-    type: OS::TripleO::Tasks::ControllerPrePuppet
-    properties:
-      servers: {get_param: [servers, Controller]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-  {% endif %}
-
-  {{role.name}}Config:
-    type: OS::TripleO::{{role.name}}Config
-    properties:
-      StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
-
-  {% for step in range(1, 6) %}
-
-  {{role.name}}Deployment_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-  {% if step == 1 %}
-    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
-  {% else %}
-    depends_on:
-      {% for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step -1}}
-      - {{dep.name}}ContainersDeployment_Step{{step -1}}
-      {% endfor %}
-      - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
-  {% endif %}
-    properties:
-      name: {{role.name}}Deployment_Step{{step}}
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}Config}
-      input_values:
-        step: {{step}}
-        update_identifier: {get_param: DeployIdentifier}
-
-  {% endfor %}
-  # END BAREMETAL CONFIG STEPS
-
-  # BEGIN CONTAINER CONFIG STEPS
-  {% for step in range(1, 6) %}
-
-  {{role.name}}ContainersConfig_Step{{step}}:
-    type: OS::Heat::StructuredConfig
-    properties:
-      group: docker-cmd
-      config:
-        {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]}
-
-  {{role.name}}ContainersDeployment_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-  {% if step == 1 %}
-    depends_on:
-      - {{role.name}}PreConfig
-      - {{role.name}}KollaJsonDeployment
-      - {{role.name}}GenPuppetDeployment
-      - {{role.name}}GenerateConfigDeployment
-  {% else %}
-    depends_on:
-        {% for dep in roles %}
-        - {{dep.name}}ContainersDeployment_Step{{step -1}}
-        - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
-        - {{dep.name}}Deployment_Step{{step -1}}
-        {% endfor %}
-        - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
-  {% endif %}
-    properties:
-      name: {{role.name}}ContainersDeployment_Step{{step}}
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}}
-
-  {% endfor %}
-  # END CONTAINER CONFIG STEPS
-
-  {{role.name}}PostConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PostConfig
-    depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}Deployment_Step5
-      - {{primary_role_name}}DockerPuppetTasksDeployment5
-  {% endfor %}
-    properties:
-      servers:  {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  {{role.name}}ExtraConfigPost:
-    depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}PostConfig
-  {% endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-        servers: {get_param: [servers, {{role.name}}]}
-
-  {% if role.name == 'Controller' %}
-  ControllerPostPuppet:
-    depends_on:
-      - ControllerExtraConfigPost
-    type: OS::TripleO::Tasks::ControllerPostPuppet
-    properties:
-      servers: {get_param: [servers, Controller]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-  {% endif %}
-
-{% endfor %}
+{% include 'docker-steps.j2' %}
index 881a2a3..84ac842 100644 (file)
@@ -23,7 +23,7 @@ puppet (our configuration tool of choice) into the Kolla base images. The
 undercloud nova-scheduler also requires openstack-tripleo-common to
 provide custom filters.
 
-To build Kolla images for TripleO adjust your kolla config to build your
+To build Kolla images for TripleO adjust your kolla config [*]_ to build your
 centos base image with puppet using the example below:
 
 .. code-block::
@@ -37,6 +37,10 @@ kolla-build --base centos --template-override template-overrides.j2
 
 ..
 
+.. [*] See the
+   `override file <https://github.com/openstack/tripleo-common/blob/master/contrib/tripleo_kolla_template_overrides.j2>`_
+   which can be used to build Kolla packages that work with TripleO, and an
+   `example build script <https://github.com/dprince/undercloud_containers/blob/master/build_kolla.sh>`_.
 
 Docker settings
 ---------------
@@ -58,27 +62,34 @@ are re-asserted when applying latter ones.
    the container itself at the /var/lib/kolla/config_files/config.json
    location and drives how kolla's external config mechanisms work.
 
- * docker_image: The full name of the docker image that will be used.
-
  * docker_config: Data that is passed to the docker-cmd hook to configure
    a container, or step of containers at each step. See the available steps
    below and the related docker-cmd hook documentation in the heat-agents
    project.
 
- * puppet_tags: Puppet resource tag names that are used to generate config
-   files with puppet. Only the named config resources are used to generate
-   a config file. Any service that specifies tags will have the default
-   tags of 'file,concat,file_line' appended to the setting.
-   Example: keystone_config
-
- * config_volume: The name of the volume (directory) where config files
-   will be generated for this service. Use this as the location to
-   bind mount into the running Kolla container for configuration.
-
- * config_image: The name of the docker image that will be used for
-   generating configuration files. This is often the same value as
-   'docker_image' above but some containers share a common set of
-   config files which are generated in a common base container.
+ * puppet_config: This section is a nested set of key value pairs
+   that drive the creation of config files using puppet.
+   Required parameters include:
+
+     * puppet_tags: Puppet resource tag names that are used to generate config
+       files with puppet. Only the named config resources are used to generate
+       a config file. Any service that specifies tags will have the default
+       tags of 'file,concat,file_line,augeas' appended to the setting.
+       Example: keystone_config
+
+     * config_volume: The name of the volume (directory) where config files
+       will be generated for this service. Use this as the location to
+       bind mount into the running Kolla container for configuration.
+
+     * config_image: The name of the docker image that will be used for
+       generating configuration files. This is often the same container
+       that the runtime service uses. Some services share a common set of
+       config files which are generated in a common base container.
+
+     * step_config: This setting controls the manifest that is used to
+       create docker config files via puppet. The puppet_tags above are
+       used along with this manifest to generate a config directory for
+       this container.
 
  * docker_puppet_tasks: This section provides data to drive the
    docker-puppet.py tool directly. The task is executed only once
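
Putting these sections together: a containerized service template emits them from its role_data output, roughly as in the minimal sketch below (hypothetical "example" service and image names; the aodh-api.yaml template added later in this change is a complete, real instance):

.. code-block:: yaml

   outputs:
     role_data:
       value:
         service_name: example
         puppet_config:
           config_volume: example
           puppet_tags: example_config
           step_config: 'include ::tripleo::profile::base::example'
           config_image: tripleoupstream/centos-binary-example:latest
         kolla_config:
           /var/lib/kolla/config_files/example.json:
             command: /usr/bin/example-server
             permissions:
               - path: /var/log/example
                 owner: example:example
                 recurse: true
         docker_config:
           step_4:
             example:
               image: tripleoupstream/centos-binary-example:latest
               net: host
               restart: always
               volumes:
                 - /var/lib/kolla/config_files/example.json:/var/lib/kolla/config_files/config.json:ro
                 - /var/lib/config-data/example/etc/example/:/etc/example/:ro
               environment:
                 - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS

..
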
diff --git a/docker/services/aodh-api.yaml b/docker/services/aodh-api.yaml
new file mode 100644 (file)
index 0000000..8a02d8f
--- /dev/null
@@ -0,0 +1,147 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized aodh service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerAodhApiImage:
+    description: image
+    default: 'centos-binary-aodh-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  AodhApiPuppetBase:
+      type: ../../puppet/services/aodh-api.yaml
+      properties:
+        EndpointMap: {get_param: EndpointMap}
+        ServiceNetMap: {get_param: ServiceNetMap}
+        DefaultPasswords: {get_param: DefaultPasswords}
+        RoleName: {get_param: RoleName}
+        RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Aodh API role.
+    value:
+      service_name: {get_attr: [AodhApiPuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [AodhApiPuppetBase, role_data, config_settings]
+          - apache::default_vhost: false
+      step_config: &step_config
+        get_attr: [AodhApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [AodhApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: aodh
+        puppet_tags: aodh_api_paste_ini,aodh_config
+        step_config: *step_config
+        config_image: &aodh_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/aodh-api.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+          permissions:
+            - path: /var/log/aodh
+              owner: aodh:aodh
+              recurse: true
+      docker_config:
+        # db sync runs before permissions set by kolla_config
+        step_3:
+          aodh_init_log:
+            start_order: 0
+            image: *aodh_image
+            user: root
+            volumes:
+              - /var/log/containers/aodh:/var/log/aodh
+            command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
+          aodh_db_sync:
+            start_order: 1
+            image: *aodh_image
+            net: host
+            privileged: false
+            detach: false
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+                  - /var/log/containers/aodh:/var/log/aodh
+            command: /usr/bin/aodh-dbsync
+        step_4:
+          aodh_api:
+            image: *aodh_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+                  - /var/lib/config-data/aodh/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/aodh/var/www/:/var/www/:ro
+                  - /var/log/containers/aodh:/var/log/aodh
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                      - ''
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                      - ''
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+      - name: create persistent logs directory
+        file:
+          path: /var/log/containers/aodh
+          state: directory
+      upgrade_tasks:
+        - name: Stop and disable aodh service (running under httpd)
+          tags: step2
+          service: name=httpd state=stopped enabled=no
+      metadata_settings:
+        get_attr: [AodhApiPuppetBase, role_data, metadata_settings]
diff --git a/docker/services/aodh-evaluator.yaml b/docker/services/aodh-evaluator.yaml
new file mode 100644 (file)
index 0000000..9d514d0
--- /dev/null
@@ -0,0 +1,103 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Aodh Evaluator service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerAodhEvaluatorImage:
+    description: image
+    default: 'centos-binary-aodh-evaluator:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  AodhEvaluatorBase:
+    type: ../../puppet/services/aodh-evaluator.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Aodh Evaluator role.
+    value:
+      service_name: {get_attr: [AodhEvaluatorBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [AodhEvaluatorBase, role_data, config_settings]
+      step_config: &step_config
+        get_attr: [AodhEvaluatorBase, role_data, step_config]
+      service_config_settings: {get_attr: [AodhEvaluatorBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: aodh
+        puppet_tags: aodh_config
+        step_config: *step_config
+        config_image: &aodh_evaluator_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/aodh-evaluator.json:
+          command: /usr/bin/aodh-evaluator
+          permissions:
+            - path: /var/log/aodh
+              owner: aodh:aodh
+              recurse: true
+      docker_config:
+        step_4:
+          aodh_evaluator:
+            image: *aodh_evaluator_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+                  - /var/log/containers/aodh:/var/log/aodh
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/aodh
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable openstack-aodh-evaluator service
+          tags: step2
+          service: name=openstack-aodh-evaluator.service state=stopped enabled=no
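As a rough illustration of how the image reference above is assembled (the registry address below is a made-up example, not part of this change): list_join concatenates DockerNamespace and DockerAodhEvaluatorImage with '/', and the &aodh_evaluator_image anchor reuses the result for both the config_image and the running container.

  parameter_defaults:
    DockerNamespace: 192.168.24.1:8787/tripleoupstream        # hypothetical local registry
    DockerAodhEvaluatorImage: centos-binary-aodh-evaluator:latest
  # resolves to: 192.168.24.1:8787/tripleoupstream/centos-binary-aodh-evaluator:latest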
diff --git a/docker/services/aodh-listener.yaml b/docker/services/aodh-listener.yaml
new file mode 100644 (file)
index 0000000..dac6108
--- /dev/null
@@ -0,0 +1,103 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Aodh Listener service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerAodhListenerImage:
+    description: image
+    default: 'centos-binary-aodh-listener:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  AodhListenerBase:
+    type: ../../puppet/services/aodh-listener.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Aodh Listener role.
+    value:
+      service_name: {get_attr: [AodhListenerBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [AodhListenerBase, role_data, config_settings]
+      step_config: &step_config
+        get_attr: [AodhListenerBase, role_data, step_config]
+      service_config_settings: {get_attr: [AodhListenerBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: aodh
+        puppet_tags: aodh_config
+        step_config: *step_config
+        config_image: &aodh_listener_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/aodh-listener.json:
+          command: /usr/bin/aodh-listener
+          permissions:
+            - path: /var/log/aodh
+              owner: aodh:aodh
+              recurse: true
+      docker_config:
+        step_4:
+          aodh_listener:
+            image: *aodh_listener_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/aodh-listener.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+                  - /var/log/containers/aodh:/var/log/aodh
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/aodh
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable openstack-aodh-listener service
+          tags: step2
+          service: name=openstack-aodh-listener.service state=stopped enabled=no
diff --git a/docker/services/aodh-notifier.yaml b/docker/services/aodh-notifier.yaml
new file mode 100644 (file)
index 0000000..a22ae85
--- /dev/null
@@ -0,0 +1,103 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Aodh Notifier service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerAodhNotifierImage:
+    description: image
+    default: 'centos-binary-aodh-notifier:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  AodhNotifierBase:
+    type: ../../puppet/services/aodh-notifier.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Aodh Notifier role.
+    value:
+      service_name: {get_attr: [AodhNotifierBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [AodhNotifierBase, role_data, config_settings]
+      step_config: &step_config
+        get_attr: [AodhNotifierBase, role_data, step_config]
+      service_config_settings: {get_attr: [AodhNotifierBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: aodh
+        puppet_tags: aodh_config
+        step_config: *step_config
+        config_image: &aodh_notifier_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/aodh-notifier.json:
+          command: /usr/bin/aodh-notifier
+          permissions:
+            - path: /var/log/aodh
+              owner: aodh:aodh
+              recurse: true
+      docker_config:
+        step_4:
+          aodh_notifier:
+            image: *aodh_notifier_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/aodh-notifier.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+                  - /var/log/containers/aodh:/var/log/aodh
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/aodh
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable openstack-aodh-notifier service
+          tags: step2
+          service: name=openstack-aodh-notifier.service state=stopped enabled=no
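For orientation, a sketch of what the kolla_config map above becomes on disk (the exact writer sits outside this diff, so treat the layout as an assumption): the host file /var/lib/kolla/config_files/aodh-notifier.json is bind-mounted into the container as /var/lib/kolla/config_files/config.json, and with KOLLA_CONFIG_STRATEGY=COPY_ALWAYS the image's start script re-reads it on every start, roughly:

  {
    "command": "/usr/bin/aodh-notifier",
    "permissions": [
      {"path": "/var/log/aodh", "owner": "aodh:aodh", "recurse": true}
    ]
  }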
diff --git a/docker/services/ceilometer-agent-central.yaml b/docker/services/ceilometer-agent-central.yaml
new file mode 100644 (file)
index 0000000..94caded
--- /dev/null
@@ -0,0 +1,113 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Ceilometer Agent Central service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCeilometerCentralImage:
+    description: image
+    default: 'centos-binary-ceilometer-central:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CeilometerAgentCentralBase:
+    type: ../../puppet/services/ceilometer-agent-central.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Ceilometer Agent Central role.
+    value:
+      service_name: {get_attr: [CeilometerAgentCentralBase, role_data, service_name]}
+      config_settings: {get_attr: [CeilometerAgentCentralBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CeilometerAgentCentralBase, role_data, step_config]
+      service_config_settings: {get_attr: [CeilometerAgentCentralBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: ceilometer
+        puppet_tags: ceilometer_config
+        step_config: *step_config
+        config_image: &ceilometer_agent_central_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerCentralImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/ceilometer-agent-central.json:
+          command: /usr/bin/ceilometer-polling --polling-namespaces central
+      docker_config:
+        step_3:
+          ceilometer_init_log:
+            start_order: 0
+            image: *ceilometer_agent_central_image
+            user: root
+            command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+            volumes:
+              - /var/log/containers/ceilometer:/var/log/ceilometer
+        step_4:
+          ceilometer_agent_central:
+            image: *ceilometer_agent_central_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ceilometer-agent-central.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+        step_5:
+          ceilometer_gnocchi_upgrade:
+            start_order: 1
+            image: *ceilometer_agent_central_image
+            net: host
+            detach: false
+            privileged: false
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+                  - /var/log/containers/ceilometer:/var/log/ceilometer
+            command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
+      upgrade_tasks:
+        - name: Stop and disable ceilometer agent central service
+          tags: step2
+          service: name=openstack-ceilometer-central state=stopped enabled=no
diff --git a/docker/services/ceilometer-agent-compute.yaml b/docker/services/ceilometer-agent-compute.yaml
new file mode 100644 (file)
index 0000000..9033cf4
--- /dev/null
@@ -0,0 +1,91 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Ceilometer Agent Compute service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCeilometerComputeImage:
+    description: image
+    default: 'centos-binary-ceilometer-compute:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CeilometerAgentComputeBase:
+    type: ../../puppet/services/ceilometer-agent-compute.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Ceilometer Agent Compute role.
+    value:
+      service_name: {get_attr: [CeilometerAgentComputeBase, role_data, service_name]}
+      config_settings: {get_attr: [CeilometerAgentComputeBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CeilometerAgentComputeBase, role_data, step_config]
+      service_config_settings: {get_attr: [CeilometerAgentComputeBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: ceilometer
+        puppet_tags: ceilometer_config
+        step_config: *step_config
+        config_image: &ceilometer_agent_compute_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerComputeImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/ceilometer-agent-compute.json:
+          command: /usr/bin/ceilometer-polling --polling-namespaces compute
+      docker_config:
+        step_4:
+          ceilometer_agent_compute:
+            image: *ceilometer_agent_compute_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      upgrade_tasks:
+        - name: Stop and disable ceilometer-agent-compute service
+          tags: step2
+          service: name=openstack-ceilometer-compute state=stopped enabled=no
diff --git a/docker/services/ceilometer-agent-notification.yaml b/docker/services/ceilometer-agent-notification.yaml
new file mode 100644 (file)
index 0000000..79df330
--- /dev/null
@@ -0,0 +1,113 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Ceilometer Agent Notification service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCeilometerNotificationImage:
+    description: image
+    default: 'centos-binary-ceilometer-notification:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CeilometerAgentNotificationBase:
+    type: ../../puppet/services/ceilometer-agent-notification.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Ceilometer Agent Notification role.
+    value:
+      service_name: {get_attr: [CeilometerAgentNotificationBase, role_data, service_name]}
+      config_settings: {get_attr: [CeilometerAgentNotificationBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CeilometerAgentNotificationBase, role_data, step_config]
+      service_config_settings: {get_attr: [CeilometerAgentNotificationBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: ceilometer
+        puppet_tags: ceilometer_config
+        step_config: *step_config
+        config_image: &ceilometer_agent_notification_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerNotificationImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/ceilometer-agent-notification.json:
+          command: /usr/bin/ceilometer-agent-notification
+      docker_config:
+        step_3:
+          ceilometer_init_log:
+            start_order: 0
+            image: *ceilometer_agent_notification_image
+            user: root
+            command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+            volumes:
+              - /var/log/containers/ceilometer:/var/log/ceilometer
+        step_4:
+          ceilometer_agent_notification:
+            image: *ceilometer_agent_notification_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ceilometer-agent-notification.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+        step_5:
+          ceilometer_gnocchi_upgrade:
+            start_order: 1
+            image: *ceilometer_agent_notification_image
+            net: host
+            detach: false
+            privileged: false
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+                  - /var/log/containers/ceilometer:/var/log/ceilometer
+            command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
+      upgrade_tasks:
+        - name: Stop and disable ceilometer agent notification service
+          tags: step2
+          service: name=openstack-ceilometer-notification state=stopped enabled=no
diff --git a/docker/services/containers-common.yaml b/docker/services/containers-common.yaml
new file mode 100644 (file)
index 0000000..a4ebe54
--- /dev/null
@@ -0,0 +1,18 @@
+heat_template_version: pike
+
+description: >
+  Contains a static list of common things necessary for containers
+
+outputs:
+  volumes:
+    description: Common volumes for the containers.
+    value:
+      - /etc/hosts:/etc/hosts:ro
+      - /etc/localtime:/etc/localtime:ro
+      # OpenSSL trusted CAs
+      - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
+      - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
+      - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
+      - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
+      # Syslog socket
+      - /dev/log:/dev/log
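To make the list_concat pattern used by the service templates concrete, this is roughly what an aodh container ends up mounting once the common list above is prepended to its service-specific entries (illustrative only; Heat computes the real value):

  volumes:
    - /etc/hosts:/etc/hosts:ro
    - /etc/localtime:/etc/localtime:ro
    - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
    - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
    - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
    - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
    - /dev/log:/dev/log
    - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro
    - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
    - /var/log/containers/aodh:/var/log/aodh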
index 3c4146f..96a02f9 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   MongoDB service deployment using puppet and docker
@@ -26,6 +26,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
@@ -35,6 +43,8 @@ resources:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -50,29 +60,25 @@ outputs:
           - "\n"
           - - "['Mongodb_database', 'Mongodb_user', 'Mongodb_replset'].each |String $val| { noop_resource($val) }"
             - {get_attr: [MongodbPuppetBase, role_data, step_config]}
-      upgrade_tasks: {get_attr: [MongodbPuppetBase, role_data, upgrade_tasks]}
       # BEGIN DOCKER SETTINGS #
-      docker_image: &mongodb_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
       puppet_config:
         config_volume: mongodb
         puppet_tags: file # set this even though file is the default
         step_config: *step_config
-        config_image: *mongodb_image
+        config_image: &mongodb_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
       kolla_config:
         /var/lib/kolla/config_files/mongodb.json:
           command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run
-          config_files:
-          - dest: /etc/mongod.conf
-            source: /var/lib/kolla/config_files/src/etc/mongod.conf
-            owner: mongodb
-            perm: '0600'
-          - dest: /etc/mongos.conf
-            source: /var/lib/kolla/config_files/src/etc/mongos.conf
-            owner: mongodb
-            perm: '0600'
+          permissions:
+            - path: /var/lib/mongodb
+              owner: mongodb:mongodb
+              recurse: true
+            - path: /var/log/mongodb
+              owner: mongodb:mongodb
+              recurse: true
       docker_config:
         step_2:
           mongodb:
@@ -81,10 +87,10 @@ outputs:
             privileged: false
             volumes: &mongodb_volumes
               - /var/lib/kolla/config_files/mongodb.json:/var/lib/kolla/config_files/config.json
-              - /var/lib/config-data/mongodb/:/var/lib/kolla/config_files/src:ro
+              - /var/lib/config-data/mongodb/etc/:/etc/:ro
               - /etc/localtime:/etc/localtime:ro
-              - logs:/var/log/kolla
-              - mongodb:/var/lib/mongodb/
+              - /var/log/containers/mongodb:/var/log/mongodb
+              - /var/lib/mongodb:/var/lib/mongodb
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
       docker_puppet_tasks:
@@ -93,10 +99,19 @@ outputs:
           config_volume: 'mongodb_init_tasks'
           puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset'
           step_config: 'include ::tripleo::profile::base::database::mongodb'
-          config_image:
-            list_join:
-            - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+          config_image: *mongodb_image
           volumes:
-          - "mongodb:/var/lib/mongodb/"
-          - "logs:/var/log/kolla:ro"
+            - /var/lib/mongodb:/var/lib/mongodb
+            - /var/log/containers/mongodb:/var/log/mongodb
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/mongodb
+            - /var/lib/mongodb
+      upgrade_tasks:
+        - name: Stop and disable mongodb service
+          tags: step2
+          service: name=mongod state=stopped enabled=no
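The host_prep_tasks added above are plain Ansible tasks executed on the host before the containers start; as a stand-alone sketch (the host group and privilege escalation are assumptions, and the real invocation is driven by the deployment framework):

  - hosts: overcloud        # hypothetical group name
    become: true
    tasks:
      - name: create persistent directories
        file:
          path: "{{ item }}"
          state: directory
        with_items:
          - /var/log/containers/mongodb
          - /var/lib/mongodb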
index fd92e99..73578e1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   MySQL service deployment using puppet
@@ -26,6 +26,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   MysqlRootPassword:
     type: string
     hidden: true
@@ -39,6 +47,8 @@ resources:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -59,48 +69,53 @@ outputs:
           - "\n"
           - - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }"
             - {get_attr: [MysqlPuppetBase, role_data, step_config]}
-      upgrade_tasks: {get_attr: [MysqlPuppetBase, role_data, upgrade_tasks]}
       # BEGIN DOCKER SETTINGS #
-      docker_image: &mysql_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
       puppet_config:
         config_volume: mysql
         puppet_tags: file # set this even though file is the default
         step_config: *step_config
-        config_image: *mysql_image
+        config_image: &mysql_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
       kolla_config:
         /var/lib/kolla/config_files/mysql.json:
           command: /usr/bin/mysqld_safe
-          config_files:
-          - dest: /etc/mysql/my.cnf
-            source: /var/lib/kolla/config_files/src/etc/my.cnf
-            owner: mysql
-            perm: '0644'
-          - dest: /etc/my.cnf.d/galera.cnf
-            source: /var/lib/kolla/config_files/src/etc/my.cnf.d/galera.cnf
-            owner: mysql
-            perm: '0644'
+          permissions:
+            - path: /var/lib/mysql
+              owner: mysql:mysql
+              recurse: true
       docker_config:
+        # Kolla_bootstrap runs before permissions set by kolla_config
         step_2:
-          mysql_bootstrap:
+          mysql_init_logs:
             start_order: 0
+            image: *mysql_image
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/mysql:/var/log/mariadb
+            command: ['/bin/bash', '-c', 'chown -R mysql:mysql /var/log/mariadb']
+          mysql_bootstrap:
+            start_order: 1
             detach: false
             image: *mysql_image
             net: host
+            # Kolla bootstraps aren't idempotent, explicitly checking if bootstrap was done
+            command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
             volumes: &mysql_volumes
               - /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json
-              - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro
+              - /var/lib/config-data/mysql/etc/:/etc/:ro
               - /etc/localtime:/etc/localtime:ro
               - /etc/hosts:/etc/hosts:ro
-              - mariadb:/var/lib/mysql/
+              - /var/lib/mysql:/var/lib/mysql
+              - /var/log/containers/mysql:/var/log/mariadb
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
               - KOLLA_BOOTSTRAP=True
               # NOTE(mandre) skip wsrep cluster status check
               - KOLLA_KUBERNETES=True
-              - 
+              -
                 list_join:
                   - '='
                   - - 'DB_ROOT_PASSWORD'
@@ -112,7 +127,7 @@ outputs:
                             - {get_param: MysqlRootPassword}
                             - {get_param: [DefaultPasswords, mysql_root_password]}
           mysql:
-            start_order: 1
+            start_order: 2
             image: *mysql_image
             restart: always
             net: host
@@ -125,10 +140,20 @@ outputs:
           config_volume: 'mysql_init_tasks'
           puppet_tags: 'mysql_database,mysql_grant,mysql_user'
           step_config: 'include ::tripleo::profile::base::database::mysql'
-          config_image:
-            list_join:
-              - '/'
-              - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+          config_image: *mysql_image
           volumes:
-            - "mariadb:/var/lib/mysql/:ro"
-            - "/var/lib/config-data/mysql/root:/root:ro" #provides .my.cnf
+            - /var/lib/mysql:/var/lib/mysql/:ro
+            - /var/log/containers/mysql:/var/log/mariadb
+            - /var/lib/config-data/mysql/root:/root:ro #provides .my.cnf
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/mysql
+            - /var/lib/mysql
+      upgrade_tasks:
+        - name: Stop and disable mysql service
+          tags: step2
+          service: name=mariadb state=stopped enabled=no
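A condensed, annotated reading of the MariaDB step_2 ordering introduced above (a reading aid only; the authoritative definitions are in the diff, and kolla_start being the image's bootstrap entry point is inferred from the guard command):

  step_2:
    mysql_init_logs:     # start_order 0: chown the bind-mounted log dir as root
      command: ['/bin/bash', '-c', 'chown -R mysql:mysql /var/log/mariadb']
    mysql_bootstrap:     # start_order 1: one-shot; only bootstraps an empty datadir
      detach: false
      command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
    mysql:               # start_order 2: the long-running database container
      restart: always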
diff --git a/docker/services/database/redis.yaml b/docker/services/database/redis.yaml
new file mode 100644 (file)
index 0000000..73df96c
--- /dev/null
@@ -0,0 +1,101 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Redis services
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerRedisImage:
+    description: image
+    default: 'centos-binary-redis:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  RedisBase:
+    type: ../../../puppet/services/database/redis.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Redis role.
+    value:
+      service_name: {get_attr: [RedisBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - {get_attr: [RedisBase, role_data, config_settings]}
+          - redis::daemonize: false
+      step_config: &step_config
+        get_attr: [RedisBase, role_data, step_config]
+      service_config_settings: {get_attr: [RedisBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: 'redis'
+        # NOTE: we need the exec tag to copy /etc/redis.conf.puppet to
+        # /etc/redis.conf
+        # https://github.com/arioch/puppet-redis/commit/1c004143223e660cbd433422ff8194508aab9763
+        puppet_tags: 'exec'
+        step_config: *step_config
+        config_image: &redis_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerRedisImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/redis.json:
+          command: /usr/bin/redis-server /etc/redis.conf
+          permissions:
+            - path: /var/run/redis
+              owner: redis:redis
+              recurse: true
+      docker_config:
+        step_1:
+          redis:
+            image: *redis_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              - /run:/run
+              - /var/lib/kolla/config_files/redis.json:/var/lib/kolla/config_files/config.json:ro
+              - /var/lib/config-data/redis/etc/:/etc/:ro
+              - /etc/localtime:/etc/localtime:ro
+              - logs:/var/log/kolla
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create /var/run/redis
+          file:
+            path: /var/run/redis
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable redis service
+          tags: step2
+          service: name=redis state=stopped enabled=no
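These new docker/services templates are presumably enabled by remapping the corresponding service entries in the resource registry to them; a minimal sketch (the registry key and relative path follow the usual TripleO layout and are not shown in this diff):

  resource_registry:
    OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml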
diff --git a/docker/services/etcd.yaml b/docker/services/etcd.yaml
new file mode 100644 (file)
index 0000000..e5a7096
--- /dev/null
@@ -0,0 +1,113 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized etcd services
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerEtcdImage:
+    description: image
+    default: 'centos-binary-etcd:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EtcdInitialClusterToken:
+    description: Initial cluster token for the etcd cluster during bootstrap.
+    type: string
+    hidden: true
+
+resources:
+
+  EtcdPuppetBase:
+    type: ../../puppet/services/etcd.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EtcdInitialClusterToken: {get_param: EtcdInitialClusterToken}
+
+outputs:
+  role_data:
+    description: Role data for the etcd role.
+    value:
+      service_name: {get_attr: [EtcdPuppetBase, role_data, service_name]}
+      step_config: &step_config
+        list_join:
+          - "\n"
+          - - "['Etcd_key'].each |String $val| { noop_resource($val) }"
+            - get_attr: [EtcdPuppetBase, role_data, step_config]
+      config_settings:
+        map_merge:
+          - {get_attr: [EtcdPuppetBase, role_data, config_settings]}
+          - etcd::manage_service: false
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: etcd
+        step_config: *step_config
+        config_image: &etcd_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerEtcdImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/etcd.json:
+          command: /usr/bin/etcd --config-file /etc/etcd/etcd.yml
+          permissions:
+            - path: /var/lib/etcd
+              owner: etcd:etcd
+              recurse: true
+      docker_config:
+        step_2:
+          etcd:
+            image: *etcd_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              - /var/lib/etcd:/var/lib/etcd
+              - /etc/localtime:/etc/localtime:ro
+              - /var/lib/kolla/config_files/etcd.json:/var/lib/kolla/config_files/config.json:ro
+              - /var/lib/config-data/etcd/etc/etcd/etcd.yml:/etc/etcd/etcd.yml:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      docker_puppet_tasks:
+        # Etcd keys initialization occurs only on single node
+        step_2:
+          config_volume: 'etcd_init_tasks'
+          puppet_tags: 'etcd_key'
+          step_config: 'include ::tripleo::profile::base::etcd'
+          config_image: *etcd_image
+          volumes:
+            - /var/lib/config-data/etcd/etc/:/etc
+            - /var/lib/etcd:/var/lib/etcd:ro
+      host_prep_tasks:
+        - name: create /var/lib/etcd
+          file:
+            path: /var/lib/etcd
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable etcd service
+          tags: step2
+          service: name=etcd state=stopped enabled=no
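The list_join in the etcd step_config above effectively builds a small manifest that no-ops Etcd_key resources during per-node config generation, leaving key creation to the single-node step_2 docker_puppet_tasks entry. Assuming the base template's step_config is the same include line echoed in that entry, the concatenated result is roughly:

  step_config: |
    ['Etcd_key'].each |String $val| { noop_resource($val) }
    include ::tripleo::profile::base::etcd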
index ab62f7e..ef1e00e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Glance service configured with Puppet
@@ -26,15 +26,36 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   GlanceApiPuppetBase:
     type: ../../puppet/services/glance-api.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -49,51 +70,82 @@ outputs:
         get_attr: [GlanceApiPuppetBase, role_data, step_config]
       service_config_settings: {get_attr: [GlanceApiPuppetBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS #
-      docker_image: &glance_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
       puppet_config:
         config_volume: glance_api
         puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
         step_config: *step_config
-        config_image: *glance_image
+        config_image: &glance_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/glance-api.json:
-           command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
-           config_files:
-           - dest: /etc/glance/glance-api.conf
-             owner: glance
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/glance/glance-api.conf
-           - dest: /etc/glance/glance-swift.conf
-             owner: glance
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/glance/glance-swift.conf
+        /var/lib/kolla/config_files/glance-api.json:
+          command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
+        /var/lib/kolla/config_files/glance_api_tls_proxy.json:
+          command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
+        # Kolla_bootstrap/db_sync runs before permissions set by kolla_config
         step_3:
+          glance_init_logs:
+            start_order: 0
+            image: *glance_image
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/glance:/var/log/glance
+            command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
           glance_api_db_sync:
+            start_order: 1
             image: *glance_image
             net: host
             privileged: false
             detach: false
             volumes: &glance_volumes
-              - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
-              - /etc/localtime:/etc/localtime:ro
-              - /lib/modules:/lib/modules:ro
-              - /var/lib/config-data/glance_api/:/var/lib/kolla/config_files/src:ro
-              - /run:/run
-              - /dev:/dev
-              - /etc/hosts:/etc/hosts:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/config-data/glance_api/etc/glance/:/etc/glance/:ro
+                  - /var/log/containers/glance:/var/log/glance
             environment:
               - KOLLA_BOOTSTRAP=True
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
         step_4:
-          glance_api:
-            image: *glance_image
-            net: host
-            privileged: false
-            restart: always
-            volumes: *glance_volumes
-            environment:
-              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+          map_merge:
+            - glance_api:
+                start_order: 2
+                image: *glance_image
+                net: host
+                privileged: false
+                restart: always
+                volumes: *glance_volumes
+                environment:
+                  - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+            - if:
+                - internal_tls_enabled
+                - glance_api_tls_proxy:
+                    start_order: 2
+                    image: *glance_image
+                    net: host
+                    user: root
+                    restart: always
+                    volumes:
+                      list_concat:
+                        - {get_attr: [ContainersCommon, volumes]}
+                        -
+                          - /var/lib/kolla/config_files/glance_api_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+                          - /var/lib/config-data/glance_api/etc/httpd/:/etc/httpd/:ro
+                          - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                          - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                    environment:
+                      - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+                - {}
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/glance
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable glance_api service
+          tags: step2
+          service: name=openstack-glance-api state=stopped enabled=no
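The map_merge/if construct above only adds the glance_api_tls_proxy container when internal TLS is switched on; with EnableInternalTLS left at its default of false the if branch evaluates to {} and step_4 contains glance_api alone. A minimal sketch of turning it on (provisioning the certificates under /etc/pki/tls/{certs,private}/httpd is outside this diff):

  parameter_defaults:
    EnableInternalTLS: true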
diff --git a/docker/services/gnocchi-api.yaml b/docker/services/gnocchi-api.yaml
new file mode 100644 (file)
index 0000000..9b47473
--- /dev/null
@@ -0,0 +1,147 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Gnocchi API service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerGnocchiApiImage:
+    description: image
+    default: 'centos-binary-gnocchi-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  GnocchiApiPuppetBase:
+    type: ../../puppet/services/gnocchi-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Gnocchi API role.
+    value:
+      service_name: {get_attr: [GnocchiApiPuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [GnocchiApiPuppetBase, role_data, config_settings]
+          - apache::default_vhost: false
+      step_config: &step_config
+        get_attr: [GnocchiApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [GnocchiApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: gnocchi
+        puppet_tags: gnocchi_api_paste_ini,gnocchi_config
+        step_config: *step_config
+        config_image: &gnocchi_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/gnocchi-api.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+          permissions:
+            - path: /var/log/gnocchi
+              owner: gnocchi:gnocchi
+              recurse: true
+      docker_config:
+        # db sync runs before permissions set by kolla_config
+        step_3:
+          gnocchi_init_log:
+            start_order: 0
+            image: *gnocchi_image
+            user: root
+            volumes:
+              - /var/log/containers/gnocchi:/var/log/gnocchi
+            command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
+          gnocchi_db_sync:
+            start_order: 1
+            image: *gnocchi_image
+            net: host
+            detach: false
+            privileged: false
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+                  - /var/log/containers/gnocchi:/var/log/gnocchi
+            command: ["/usr/bin/gnocchi-upgrade", "--skip-storage"]
+        step_4:
+          gnocchi_api:
+            image: *gnocchi_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/gnocchi-api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+                  - /var/lib/config-data/gnocchi/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/gnocchi/var/www/:/var/www/:ro
+                  - /var/log/containers/gnocchi:/var/log/gnocchi
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                      - ''
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                      - ''
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/gnocchi
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable httpd service
+          tags: step2
+          service: name=httpd state=stopped enabled=no
+      metadata_settings:
+        get_attr: [GnocchiApiPuppetBase, role_data, metadata_settings]
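Unlike the glance template, which gates a whole container, the gnocchi API template applies if per volume entry; with internal TLS enabled the tail of the volume list resolves to the two certificate mounts, and with it disabled those entries become empty strings (illustrative only; how empty entries are handled downstream is not shown in this diff):

  # internal_tls_enabled true:
  - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
  - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
  # internal_tls_enabled false:
  - ''
  - ''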
diff --git a/docker/services/gnocchi-metricd.yaml b/docker/services/gnocchi-metricd.yaml
new file mode 100644 (file)
index 0000000..2724805
--- /dev/null
@@ -0,0 +1,101 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Gnocchi Metricd service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerGnocchiMetricdImage:
+    description: image
+    default: 'centos-binary-gnocchi-metricd:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  GnocchiMetricdBase:
+    type: ../../puppet/services/gnocchi-metricd.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Gnocchi Metricd role.
+    value:
+      service_name: {get_attr: [GnocchiMetricdBase, role_data, service_name]}
+      config_settings: {get_attr: [GnocchiMetricdBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [GnocchiMetricdBase, role_data, step_config]
+      service_config_settings: {get_attr: [GnocchiMetricdBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: gnocchi
+        puppet_tags: gnocchi_config
+        step_config: *step_config
+        config_image: &gnocchi_metricd_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/gnocchi-metricd.json:
+          command: /usr/bin/gnocchi-metricd
+          permissions:
+            - path: /var/log/gnocchi
+              owner: gnocchi:gnocchi
+              recurse: true
+      docker_config:
+        step_4:
+          gnocchi_metricd:
+            image: *gnocchi_metricd_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/gnocchi-metricd.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+                  - /var/log/containers/gnocchi:/var/log/gnocchi
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/gnocchi
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable openstack-gnocchi-metricd service
+          tags: step2
+          service: name=openstack-gnocchi-metricd.service state=stopped enabled=no
diff --git a/docker/services/gnocchi-statsd.yaml b/docker/services/gnocchi-statsd.yaml
new file mode 100644 (file)
index 0000000..305971f
--- /dev/null
@@ -0,0 +1,101 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Gnocchi Statsd service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerGnocchiStatsdImage:
+    description: image
+    default: 'centos-binary-gnocchi-statsd:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  GnocchiStatsdBase:
+    type: ../../puppet/services/gnocchi-statsd.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Gnocchi Statsd role.
+    value:
+      service_name: {get_attr: [GnocchiStatsdBase, role_data, service_name]}
+      config_settings: {get_attr: [GnocchiStatsdBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [GnocchiStatsdBase, role_data, step_config]
+      service_config_settings: {get_attr: [GnocchiStatsdBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: gnocchi
+        puppet_tags: gnocchi_config
+        step_config: *step_config
+        config_image: &gnocchi_statsd_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/gnocchi-statsd.json:
+          command: /usr/bin/gnocchi-statsd
+          permissions:
+            - path: /var/log/gnocchi
+              owner: gnocchi:gnocchi
+              recurse: true
+      docker_config:
+        step_4:
+          gnocchi_statsd:
+            image: *gnocchi_statsd_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/gnocchi-statsd.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+                  - /var/log/containers/gnocchi:/var/log/gnocchi
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/gnocchi
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable openstack-gnocchi-statsd service
+          tags: step2
+          service: name=openstack-gnocchi-statsd.service state=stopped enabled=no
index fbaacbe..2631928 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Heat API CFN service
@@ -12,10 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-heat-api-cfn:latest'
     type: string
-  # we configure all heat services in the same heat engine container
-  DockerHeatEngineImage:
+  # puppet needs the heat-wsgi-api-cfn binary from centos-binary-heat-api-cfn
+  DockerHeatConfigImage:
     description: image
-    default: 'centos-binary-heat-engine:latest'
+    default: 'centos-binary-heat-api-cfn:latest'
     type: string
   EndpointMap:
     default: {}
@@ -31,16 +31,35 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
 
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   HeatBase:
     type: ../../puppet/services/heat-api-cfn.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -55,39 +74,61 @@ outputs:
         get_attr: [HeatBase, role_data, step_config]
       service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &heat_api_cfn_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ]
       puppet_config:
-        config_volume: heat
+        config_volume: heat_api_cfn
         puppet_tags: heat_config,file,concat,file_line
         step_config: *step_config
         config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/heat_api_cfn.json:
-           command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
-           config_files:
-           - dest: /etc/heat/heat.conf
-             owner: heat
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+        /var/lib/kolla/config_files/heat_api_cfn.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+          permissions:
+            - path: /var/log/heat
+              owner: heat:heat
+              recurse: true
       docker_config:
         step_4:
           heat_api_cfn:
-            image: *heat_api_cfn_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ]
             net: host
             privileged: false
             restart: always
+            # NOTE(mandre) the kolla image changes the user to 'heat'; we need
+            # it to be root to run httpd
+            user: root
             volumes:
-              - /run:/run
-              - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/heat_api_cfn/etc/heat/:/etc/heat/:ro
+                  - /var/lib/config-data/heat_api_cfn/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/heat_api_cfn/var/www/:/var/www/:ro
+                  - /var/log/containers/heat:/var/log/heat
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                      - ''
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                      - ''
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/heat
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable heat_api_cfn service
+          tags: step2
+          service: name=httpd state=stopped enabled=no
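The new internal_tls_enabled condition above only decides which extra bind mounts are appended: when EnableInternalTLS is true, the two /etc/pki/tls paths for httpd are mounted read-only, otherwise each if expression collapses to an empty string. A minimal sketch of the environment override, using only the parameter name declared in this template:

  parameter_defaults:
    # mount the httpd TLS certificate and key directories into the container
    EnableInternalTLS: true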
index df9160f..b2f4eb6 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Heat API service
@@ -12,10 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-heat-api:latest'
     type: string
-  # we configure all heat services in the same heat engine container
-  DockerHeatEngineImage:
+  # puppet needs the heat-wsgi-api binary from centos-binary-heat-api
+  DockerHeatConfigImage:
     description: image
-    default: 'centos-binary-heat-engine:latest'
+    default: 'centos-binary-heat-api:latest'
     type: string
   EndpointMap:
     default: {}
@@ -31,16 +31,35 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
 
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   HeatBase:
     type: ../../puppet/services/heat-api.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -55,39 +74,61 @@ outputs:
         get_attr: [HeatBase, role_data, step_config]
       service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &heat_api_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ]
       puppet_config:
-        config_volume: heat
+        config_volume: heat_api
         puppet_tags: heat_config,file,concat,file_line
         step_config: *step_config
         config_image:
           list_join:
             - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+            - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/heat_api.json:
-           command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
-           config_files:
-           - dest: /etc/heat/heat.conf
-             owner: heat
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+        /var/lib/kolla/config_files/heat_api.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+          permissions:
+            - path: /var/log/heat
+              owner: heat:heat
+              recurse: true
       docker_config:
         step_4:
           heat_api:
-            image: *heat_api_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ]
             net: host
             privileged: false
             restart: always
+            # NOTE(mandre) the kolla image changes the user to 'heat'; we need
+            # it to be root to run httpd
+            user: root
             volumes:
-              - /run:/run
-              - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/heat_api/etc/heat/:/etc/heat/:ro
+                  - /var/lib/config-data/heat_api/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/heat_api/var/www/:/var/www/:ro
+                  - /var/log/containers/heat:/var/log/heat
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                      - ''
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                      - ''
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/heat
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable heat_api service
+          tags: step2
+          service: name=httpd state=stopped enabled=no
index c8259f9..8c554a5 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Heat Engine service
@@ -26,16 +26,29 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   HeatBase:
     type: ../../puppet/services/heat-engine.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -50,34 +63,43 @@ outputs:
         get_attr: [HeatBase, role_data, step_config]
       service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &heat_engine_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
       puppet_config:
         config_volume: heat
         puppet_tags: heat_config,file,concat,file_line
         step_config: *step_config
-        config_image: *heat_engine_image
+        config_image: &heat_engine_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/heat_engine.json:
-           command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
-           config_files:
-           - dest: /etc/heat/heat.conf
-             owner: heat
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+        /var/lib/kolla/config_files/heat_engine.json:
+          command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+          permissions:
+            - path: /var/log/heat
+              owner: heat:heat
+              recurse: true
       docker_config:
+        # db sync runs before permissions set by kolla_config
         step_3:
+          heat_init_log:
+            start_order: 0
+            image: *heat_engine_image
+            user: root
+            volumes:
+              - /var/log/containers/heat:/var/log/heat
+            command: ['/bin/bash', '-c', 'chown -R heat:heat /var/log/heat']
           heat_engine_db_sync:
+            start_order: 1
             image: *heat_engine_image
             net: host
             privileged: false
             detach: false
             volumes:
-              - /var/lib/config-data/heat/etc/heat:/etc/heat:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
+                  - /var/log/containers/heat:/var/log/heat
             command: ['heat-manage', 'db_sync']
         step_4:
           heat_engine:
@@ -86,10 +108,20 @@ outputs:
             privileged: false
             restart: always
             volumes:
-              - /run:/run
-              - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
+                  - /var/log/containers/heat:/var/log/heat
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/heat
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable heat_engine service
+          tags: step2
+          service: name=openstack-heat-engine state=stopped enabled=no
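The list_concat/ContainersCommon pattern used here (and in the other converted services) replaces the old hand-maintained volume lists with a shared list from containers-common.yaml plus per-service mounts. A rough sketch of what the heat_engine volumes resolve to; the shared entries are a guess based on the mounts the old templates carried, and the authoritative list lives in containers-common.yaml:

  volumes:
    # shared mounts contributed by ContainersCommon (illustrative, not exhaustive)
    - /etc/hosts:/etc/hosts:ro
    - /etc/localtime:/etc/localtime:ro
    # per-service mounts appended by this template
    - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
    - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
    - /var/log/containers/heat:/var/log/heat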
index 97ba970..300aa0b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Ironic API service
@@ -30,15 +30,28 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   IronicApiBase:
     type: ../../puppet/services/ironic-api.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -52,10 +65,6 @@ outputs:
         get_attr: [IronicApiBase, role_data, step_config]
       service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &ironic_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
       puppet_config:
         config_volume: ironic
         puppet_tags: ironic_config
@@ -65,26 +74,38 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/ironic_api.json:
-           command: /usr/bin/ironic-api
-           config_files:
-           - dest: /etc/ironic/ironic.conf
-             owner: ironic
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+        /var/lib/kolla/config_files/ironic_api.json:
+          command: /usr/bin/ironic-api
+          permissions:
+            - path: /var/log/ironic
+              owner: ironic:ironic
+              recurse: true
       docker_config:
+        # db sync runs before permissions set by kolla_config
         step_3:
+          ironic_init_logs:
+            start_order: 0
+            image: &ironic_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/ironic:/var/log/ironic
+            command: ['/bin/bash', '-c', 'chown -R ironic:ironic /var/log/ironic']
           ironic_db_sync:
+            start_order: 1
             image: *ironic_image
             net: host
             privileged: false
             detach: false
             volumes:
-              - /var/lib/config-data/ironic/etc/:/etc/:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-            environment:
-              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/ironic/etc/:/etc/:ro
+                  - /var/log/containers/ironic:/var/log/ironic
             command: ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf']
         step_4:
           ironic_api:
@@ -94,9 +115,20 @@ outputs:
             privileged: false
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ironic/etc/:/etc/:ro
+                  - /var/log/containers/ironic:/var/log/ironic
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/ironic
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable ironic_api service
+          tags: step2
+          service: name=openstack-ironic-api state=stopped enabled=no
index 2cc2905..360eb66 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Ironic Conductor service
@@ -30,15 +30,28 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   IronicConductorBase:
     type: ../../puppet/services/ironic-conductor.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -50,6 +63,7 @@ outputs:
           - get_attr: [IronicConductorBase, role_data, config_settings]
           # to avoid hard linking errors we store these on the same
           # volume/device as the ironic master_path
+          # https://github.com/docker/docker/issues/7457
           - ironic::drivers::pxe::tftp_root: /var/lib/ironic/tftpboot
           - ironic::drivers::pxe::tftp_master_path: /var/lib/ironic/tftpboot/master_images
           - ironic::pxe::tftp_root: /var/lib/ironic/tftpboot
@@ -59,10 +73,6 @@ outputs:
         get_attr: [IronicConductorBase, role_data, step_config]
       service_config_settings: {get_attr: [IronicConductorBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &ironic_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ]
       puppet_config:
         config_volume: ironic
         puppet_tags: ironic_config
@@ -72,43 +82,77 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/ironic_conductor.json:
-           command: /usr/bin/ironic-conductor
-           config_files:
-           - dest: /etc/ironic/ironic.conf
-             owner: ironic
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
-           permissions:
-           - path: /var/lib/ironic/httpboot
-             owner: ironic:ironic
-             recurse: true
-           - path: /var/lib/ironic/tftpboot
-             owner: ironic:ironic
-             recurse: true
+        /var/lib/kolla/config_files/ironic_conductor.json:
+          command: /usr/bin/ironic-conductor
+          permissions:
+            - path: /var/lib/ironic
+              owner: ironic:ironic
+              recurse: true
+            - path: /var/log/ironic
+              owner: ironic:ironic
+              recurse: true
       docker_config:
         step_4:
-          ironic-init-dirs:
-            image: *ironic_image
-            user: root
-            command: ['/bin/bash', '-c', 'mkdir /var/lib/ironic/httpboot && mkdir /var/lib/ironic/tftpboot']
-            volumes:
-              - ironic:/var/lib/ironic
           ironic_conductor:
             start_order: 80
-            image: *ironic_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ]
             net: host
             privileged: true
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /lib/modules:/lib/modules:ro
-              - /sys:/sys
-              - /dev:/dev
-              - /run:/run #shared?
-              - ironic:/var/lib/ironic
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+                  - /lib/modules:/lib/modules:ro
+                  - /sys:/sys
+                  - /dev:/dev
+                  - /run:/run #shared?
+                  - /var/lib/ironic:/var/lib/ironic
+                  - /var/log/containers/ironic:/var/log/ironic
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/ironic
+            - /var/lib/ironic
+        - name: stat /httpboot
+          stat: path=/httpboot
+          register: stat_httpboot
+        - name: stat /tftpboot
+          stat: path=/tftpboot
+          register: stat_tftpboot
+        - name: stat /var/lib/ironic/httpboot
+          stat: path=/var/lib/ironic/httpboot
+          register: stat_ironic_httpboot
+        - name: stat /var/lib/ironic/tftpboot
+          stat: path=/var/lib/ironic/tftpboot
+          register: stat_ironic_tftpboot
+        # cannot use the 'copy' module, as it doesn't support recursion with 'remote_src'
+        - name: migrate /httpboot to containerized (if applicable)
+          command: /bin/cp -R /httpboot /var/lib/ironic/httpboot
+          when: stat_httpboot.stat.exists and not stat_ironic_httpboot.stat.exists
+        - name: migrate /tftpboot to containerized (if applicable)
+          command: /bin/cp -R /tftpboot /var/lib/ironic/tftpboot
+          when: stat_tftpboot.stat.exists and not stat_ironic_tftpboot.stat.exists
+        # Even if there was nothing to copy from original locations,
+        # we need to create the dirs before starting the containers
+        - name: ensure ironic pxe directories exist
+          file:
+            path: /var/lib/ironic/{{ item }}
+            state: directory
+          with_items:
+            - httpboot
+            - tftpboot
+      upgrade_tasks:
+        - name: Stop and disable ironic_conductor service
+          tags: step2
+          service: name=openstack-ironic-conductor state=stopped enabled=no
index 2550519..bc828e6 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Ironic PXE service
@@ -30,6 +30,19 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
 
 outputs:
   role_data:
@@ -40,10 +53,6 @@ outputs:
       step_config: &step_config ''
       service_config_settings: {}
       # BEGIN DOCKER SETTINGS
-      docker_image: &ironic_pxe_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ]
       puppet_config:
         config_volume: ironic
         puppet_tags: ironic_config
@@ -53,67 +62,43 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/ironic_pxe_http.json:
-           command: /usr/sbin/httpd -DFOREGROUND
-           config_files:
-           - dest: /etc/ironic/ironic.conf
-             owner: ironic
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
-           - dest: /etc/httpd/conf.d/10-ipxe_vhost.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-ipxe_vhost.conf
-           - dest: /etc/httpd/conf/httpd.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
-           - dest: /etc/httpd/conf/ports.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
-         /var/lib/kolla/config_files/ironic_pxe_tftp.json:
-           command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot
-           config_files:
-           - dest: /etc/ironic/ironic.conf
-             owner: ironic
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
-           - dest: /var/lib/ironic/tftpboot/chain.c32
-             owner: ironic
-             perm: '0744'
-             source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/chain.c32
-           - dest: /var/lib/ironic/tftpboot/pxelinux.0
-             owner: ironic
-             perm: '0744'
-             source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/pxelinux.0
-           - dest: /var/lib/ironic/tftpboot/ipxe.efi
-             owner: ironic
-             perm: '0744'
-             source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/ipxe.efi
-           - dest: /var/lib/ironic/tftpboot/undionly.kpxe
-             owner: ironic
-             perm: '0744'
-             source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/undionly.kpxe
-           - dest: /var/lib/ironic/tftpboot/map-file
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/map-file
+        /var/lib/kolla/config_files/ironic_pxe_http.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+        /var/lib/kolla/config_files/ironic_pxe_tftp.json:
+          command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot
+          permissions:
+            - path: /var/log/ironic
+              owner: ironic:ironic
+              recurse: true
       docker_config:
         step_4:
           ironic_pxe_tftp:
             start_order: 90
-            image: *ironic_pxe_image
+            image: &ironic_pxe_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ]
             net: host
             privileged: false
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /dev/log:/dev/log
-              - ironic:/var/lib/ironic/
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+                  # TODO(mandre) check how docker handles mounting in a bind-mounted tree
+                  # This directory may contain migrated data from BM
+                  - /var/lib/ironic:/var/lib/ironic/
+                  # These files were generated by puppet inside the config container
+                  # TODO(mandre) check the mount permission (ro/rw)
+                  - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/chain.c32:/var/lib/ironic/tftpboot/chain.c32
+                  - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/pxelinux.0:/var/lib/ironic/tftpboot/pxelinux.0
+                  - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/ipxe.efi:/var/lib/ironic/tftpboot/ipxe.efi
+                  - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/undionly.kpxe:/var/lib/ironic/tftpboot/undionly.kpxe
+                  - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/map-file:/var/lib/ironic/tftpboot/map-file
+                  - /dev/log:/dev/log
+                  - /var/log/containers/ironic:/var/log/ironic
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           ironic_pxe_http:
@@ -123,11 +108,22 @@ outputs:
             privileged: false
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/ironic/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - ironic:/var/lib/ironic/
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+                  - /var/lib/config-data/ironic/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/ironic/var/www/:/var/www/:ro
+                  - /var/lib/ironic:/var/lib/ironic/
+                  - /var/log/containers/ironic:/var/log/ironic
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/lib/ironic
+            - /var/log/containers/ironic
index f9b94a7..ca64374 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Keystone service
@@ -26,19 +26,45 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   AdminPassword:
     description: The password for the keystone admin account, used for monitoring, querying neutron etc.
     type: string
     hidden: true
+  KeystoneTokenProvider:
+    description: The keystone token format
+    type: string
+    default: 'fernet'
+    constraints:
+      - allowed_values: ['uuid', 'fernet']
+  EnableInternalTLS:
+    type: boolean
+    default: false
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   KeystoneBase:
     type: ../../puppet/services/keystone.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 outputs:
   role_data:
@@ -56,64 +82,27 @@ outputs:
             - {get_attr: [KeystoneBase, role_data, step_config]}
       service_config_settings: {get_attr: [KeystoneBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &keystone_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
       puppet_config:
         config_volume: keystone
         puppet_tags: keystone_config
         step_config: *step_config
-        config_image: *keystone_image
+        config_image: &keystone_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/keystone.json:
-           command: /usr/sbin/httpd -DFOREGROUND
-           config_files:
-           - dest: /etc/keystone/keystone.conf
-             owner: keystone
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/keystone/keystone.conf
-           - dest: /etc/keystone/credential-keys/0
-             owner: keystone
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/0
-           - dest: /etc/keystone/credential-keys/1
-             owner: keystone
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/1
-           - dest: /etc/httpd/conf.d/10-keystone_wsgi_admin.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_admin.conf
-           - dest: /etc/httpd/conf.d/10-keystone_wsgi_main.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_main.conf
-           - dest: /etc/httpd/conf/httpd.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
-           - dest: /etc/httpd/conf/ports.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
-           - dest: /var/www/cgi-bin/keystone/keystone-admin
-             owner: keystone
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-admin
-           - dest: /var/www/cgi-bin/keystone/keystone-public
-             owner: keystone
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-public
+        /var/lib/kolla/config_files/keystone.json:
+          command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
+        # Kolla_bootstrap/db sync runs before permissions set by kolla_config
         step_3:
-          keystone-init-log:
+          keystone_init_log:
             start_order: 0
             image: *keystone_image
             user: root
-            command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/keystone && chown keystone:keystone /var/log/keystone']
+            command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
             volumes:
-              - logs:/var/log
+              - /var/log/containers/keystone:/var/log/keystone
           keystone_db_sync:
             start_order: 1
             image: *keystone_image
@@ -121,12 +110,24 @@ outputs:
             privileged: false
             detach: false
             volumes: &keystone_volumes
-              - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/keystone/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/keystone/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - logs:/var/log
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/keystone/var/www/:/var/www/:ro
+                  - /var/lib/config-data/keystone/etc/keystone/:/etc/keystone/:ro
+                  - /var/lib/config-data/keystone/etc/httpd/:/etc/httpd/:ro
+                  - /var/log/containers/keystone:/var/log/keystone
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                      - ''
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                      - ''
             environment:
               - KOLLA_BOOTSTRAP=True
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
@@ -150,7 +151,15 @@ outputs:
           config_volume: 'keystone_init_tasks'
           puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
           step_config: 'include ::tripleo::profile::base::keystone'
-          config_image:
-            list_join:
-            - '/'
-            - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
+          config_image: *keystone_image
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/keystone
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable keystone service (running under httpd)
+          tags: step2
+          service: name=httpd state=stopped enabled=no
+      metadata_settings:
+        get_attr: [KeystoneBase, role_data, metadata_settings]
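The KeystoneTokenProvider parameter added above defaults to 'fernet' and is constrained to 'uuid' or 'fernet'. A deployment that still needs UUID tokens would override it in the usual way; a sketch reusing only the parameter name and allowed values from this template:

  parameter_defaults:
    # fall back to UUID tokens instead of the fernet default
    KeystoneTokenProvider: uuid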
index 3e51f5b..d85a087 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Memcached services
@@ -26,15 +26,28 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   MemcachedBase:
     type: ../../puppet/services/memcached.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -46,27 +59,42 @@ outputs:
         get_attr: [MemcachedBase, role_data, step_config]
       service_config_settings: {get_attr: [MemcachedBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &memcached_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
       puppet_config:
         config_volume: 'memcached'
         puppet_tags: 'file'
         step_config: *step_config
-        config_image: *memcached_image
+        config_image: &memcached_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
       kolla_config: {}
       docker_config:
         step_1:
+          memcached_init_logs:
+            start_order: 0
+            image: *memcached_image
+            privileged: false
+            user: root
+            volumes:
+               - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
+               - /var/log/memcached.log:/var/log/memcached.log
+            command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; chown ${USER} /var/log/memcached.log']
           memcached:
+            start_order: 1
             image: *memcached_image
             net: host
             privileged: false
             restart: always
             volumes:
-              - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
+            # TODO(bogdando) capture memcached syslog logs from a container
             command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS']
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      upgrade_tasks:
+        - name: Stop and disable memcached service
+          tags: step2
+          service: name=memcached state=stopped enabled=no
index 843d5b2..3b256fd 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Mistral API service
@@ -30,15 +30,28 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   MistralApiBase:
     type: ../../puppet/services/mistral-api.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -52,10 +65,6 @@ outputs:
         get_attr: [MistralApiBase, role_data, step_config]
       service_config_settings: {get_attr: [MistralApiBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &mistral_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
       puppet_config:
         config_volume: mistral
         puppet_tags: mistral_config
@@ -65,15 +74,26 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/mistral_api.json:
-           command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api
-           config_files:
-           - dest: /etc/mistral/mistral.conf
-             owner: mistral
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+        /var/lib/kolla/config_files/mistral_api.json:
+          command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api
+          permissions:
+            - path: /var/log/mistral
+              owner: mistral:mistral
+              recurse: true
       docker_config:
+        # db sync runs before permissions set by kolla_config
         step_3:
+          mistral_init_logs:
+            start_order: 0
+            image: &mistral_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/mistral:/var/log/mistral
+            command: ['/bin/bash', '-c', 'chown -R mistral:mistral /var/log/mistral']
           mistral_db_sync:
             start_order: 1
             image: *mistral_image
@@ -81,11 +101,11 @@ outputs:
             privileged: false
             detach: false
             volumes:
-              - /var/lib/config-data/mistral/etc/:/etc/:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-            environment:
-              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/mistral/etc/:/etc/:ro
+                  - /var/log/containers/mistral:/var/log/mistral
             command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head']
           mistral_db_populate:
             start_order: 2
@@ -94,11 +114,11 @@ outputs:
             privileged: false
             detach: false
             volumes:
-              - /var/lib/config-data/mistral/etc/:/etc/:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-            environment:
-              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/mistral/etc/:/etc/:ro
+                  - /var/log/containers/mistral:/var/log/mistral
             # NOTE: dprince this requires that we install openstack-tripleo-common into
             # the Mistral API image so that we get tripleo* actions
             command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate']
@@ -110,9 +130,20 @@ outputs:
             privileged: false
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+                  - /var/log/containers/mistral:/var/log/mistral
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/mistral
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable mistral_api service
+          tags: step2
+          service: name=openstack-mistral-api state=stopped enabled=no
index e50fc0a..d60d847 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Mistral Engine service
@@ -30,16 +30,29 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   MistralBase:
     type: ../../puppet/services/mistral-engine.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -53,10 +66,6 @@ outputs:
         get_attr: [MistralBase, role_data, step_config]
       service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &mistral_engine_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ]
       puppet_config:
         config_volume: mistral
         puppet_tags: mistral_config
@@ -66,25 +75,38 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/mistral_engine.json:
-           command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine
-           config_files:
-           - dest: /etc/mistral/mistral.conf
-             owner: mistral
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+        /var/lib/kolla/config_files/mistral_engine.json:
+          command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine
+          permissions:
+            - path: /var/log/mistral
+              owner: mistral:mistral
+              recurse: true
       docker_config:
         step_4:
           mistral_engine:
-            image: *mistral_engine_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ]
             net: host
             privileged: false
             restart: always
             volumes:
-              - /run:/run
-              - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /run:/run
+                  - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+                  - /var/log/containers/mistral:/var/log/mistral
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/mistral
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable mistral_engine service
+          tags: step2
+          service: name=openstack-mistral-engine state=stopped enabled=no
index 8833154..76ae052 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Mistral Executor service
@@ -30,16 +30,29 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   MistralBase:
     type: ../../puppet/services/mistral-executor.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -53,10 +66,6 @@ outputs:
         get_attr: [MistralBase, role_data, step_config]
       service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &mistral_executor_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ]
       puppet_config:
         config_volume: mistral
         puppet_tags: mistral_config
@@ -66,29 +75,42 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/mistral_executor.json:
-           command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor
-           config_files:
-           - dest: /etc/mistral/mistral.conf
-             owner: mistral
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+        /var/lib/kolla/config_files/mistral_executor.json:
+          command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor
+          permissions:
+            - path: /var/log/mistral
+              owner: mistral:mistral
+              recurse: true
       docker_config:
         step_4:
           mistral_executor:
-            image: *mistral_executor_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ]
             net: host
             privileged: false
             restart: always
             volumes:
-              - /run:/run
-              - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              # FIXME: this is required in order for Nova cells
-              # initialization workflows on the Undercloud. Need to
-              # exclude this on the overcloud for security reasons.
-              - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+                  - /run:/run
+                  # FIXME: this is required for the Nova cells initialization
+                  # workflows to run on the Undercloud. We need to exclude
+                  # this on the overcloud for security reasons.
+                  - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
+                  - /var/log/containers/mistral:/var/log/mistral
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/mistral
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable mistral_executor service
+          tags: step2
+          service: name=openstack-mistral-executor state=stopped enabled=no
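
Reviewer note: the recurring change in this and the following templates is that each container's bind mounts are now built with list_concat, combining the volumes exposed by the shared containers-common.yaml resource with service-specific mounts, instead of repeating /etc/hosts and /etc/localtime in every list. A minimal sketch of what containers-common.yaml could expose, inferred from the mounts that were removed above (the real file may add more entries):

    heat_template_version: pike

    description: >
      Sketch of the shared container configuration template

    outputs:
      volumes:
        description: Common volumes to bind-mount into every container
        value:
          - /etc/hosts:/etc/hosts:ro
          - /etc/localtime:/etc/localtime:ro
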
diff --git a/docker/services/neutron-api.yaml b/docker/services/neutron-api.yaml
index bf19586..748371d 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Neutron API service
@@ -15,7 +15,7 @@ parameters:
   # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
     description: image
-    default: 'centos-binary-neutron-openvswitch-agent:latest'
+    default: 'centos-binary-neutron-server:latest'
     type: string
   EndpointMap:
     default: {}
@@ -31,15 +31,35 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   NeutronBase:
     type: ../../puppet/services/neutron-api.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -53,10 +73,6 @@ outputs:
         get_attr: [NeutronBase, role_data, step_config]
       service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &neutron_api_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
       puppet_config:
         config_volume: neutron
         puppet_tags: neutron_config,neutron_api_config
@@ -66,20 +82,30 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/neutron_api.json:
-           command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
-           config_files:
-           - dest: /etc/neutron/neutron.conf
-             owner: neutron
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
-           - dest: /etc/neutron/plugin.ini
-             owner: neutron
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
+        /var/lib/kolla/config_files/neutron_api.json:
+          command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
+          permissions:
+            - path: /var/log/neutron
+              owner: neutron:neutron
+              recurse: true
+        /var/lib/kolla/config_files/neutron_server_tls_proxy.json:
+          command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
+        # db sync runs before permissions set by kolla_config
         step_3:
+          neutron_init_logs:
+            start_order: 0
+            image: &neutron_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/neutron:/var/log/neutron
+            command: ['/bin/bash', '-c', 'chown -R neutron:neutron /var/log/neutron']
           neutron_db_sync:
+            start_order: 1
             image: *neutron_api_image
             net: host
             privileged: false
@@ -88,21 +114,53 @@ outputs:
             # and run as neutron user
             user: root
             volumes:
-              - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
-              - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
+                  - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
+                  - /var/log/containers/neutron:/var/log/neutron
             command: ['neutron-db-manage', 'upgrade', 'heads']
         step_4:
-          neutron_api:
-            image: *neutron_api_image
-            net: host
-            privileged: false
-            restart: always
-            volumes:
-              - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-            environment:
-              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+          map_merge:
+            - neutron_api:
+                image: *neutron_api_image
+                net: host
+                privileged: false
+                restart: always
+                volumes:
+                  list_concat:
+                    - {get_attr: [ContainersCommon, volumes]}
+                    -
+                      - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
+                      - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+                      - /var/log/containers/neutron:/var/log/neutron
+                environment:
+                  - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+            - if:
+                - internal_tls_enabled
+                - neutron_server_tls_proxy:
+                    image: *neutron_api_image
+                    net: host
+                    user: root
+                    restart: always
+                    volumes:
+                      list_concat:
+                        - {get_attr: [ContainersCommon, volumes]}
+                        -
+                          - /var/lib/kolla/config_files/neutron_server_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+                          - /var/lib/config-data/neutron/etc/httpd/:/etc/httpd/:ro
+                          - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                          - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                    environment:
+                      - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+                - {}
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/neutron
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable neutron_api service
+          tags: step2
+          service: name=neutron-server state=stopped enabled=no
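
Reviewer note: the neutron-api template above gates an extra neutron_server_tls_proxy container on the internal_tls_enabled condition via map_merge plus if. A hedged example of the environment snippet that would flip that condition (the parameter name comes from the template; the file name is illustrative):

    # environments/neutron-internal-tls.yaml (illustrative name)
    parameter_defaults:
      EnableInternalTLS: true
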
diff --git a/docker/services/neutron-dhcp.yaml b/docker/services/neutron-dhcp.yaml
index 15f3055..d14f525 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Neutron DHCP service
@@ -8,14 +8,14 @@ parameters:
     description: namespace
     default: 'tripleoupstream'
     type: string
-  DockerNeutronApiImage:
+  DockerNeutronDHCPImage:
     description: image
     default: 'centos-binary-neutron-dhcp-agent:latest'
     type: string
   # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
     description: image
-    default: 'centos-binary-neutron-openvswitch-agent:latest'
+    default: 'centos-binary-neutron-server:latest'
     type: string
   EndpointMap:
     default: {}
@@ -31,15 +31,28 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   NeutronBase:
     type: ../../puppet/services/neutron-dhcp.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -53,10 +66,6 @@ outputs:
         get_attr: [NeutronBase, role_data, step_config]
       service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &neutron_dhcp_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
       puppet_config:
         config_volume: neutron
         puppet_tags: neutron_config,neutron_dhcp_agent_config
@@ -66,31 +75,40 @@ outputs:
             - '/'
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/neutron_dhcp.json:
-           command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
-           config_files:
-           - dest: /etc/neutron/neutron.conf
-             owner: neutron
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
-           - dest: /etc/neutron/dhcp_agent.ini
-             owner: neutron
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/neutron/dhcp_agent.ini
+        /var/lib/kolla/config_files/neutron_dhcp.json:
+          command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
+          permissions:
+            - path: /var/log/neutron
+              owner: neutron:neutron
+              recurse: true
       docker_config:
         step_4:
           neutron_dhcp:
-            image: *neutron_dhcp_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNeutronDHCPImage} ]
             net: host
             pid: host
             privileged: true
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /lib/modules:/lib/modules:ro
-              - /run/:/run
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+                  - /lib/modules:/lib/modules:ro
+                  - /run/:/run
+                  - /var/log/containers/neutron:/var/log/neutron
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/neutron
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable neutron_dhcp service
+          tags: step2
+          service: name=neutron-dhcp-agent state=stopped enabled=no
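
Reviewer note: every image reference stays split into DockerNamespace plus a per-service image parameter joined with list_join, and the hunk above also renames DockerNeutronApiImage to DockerNeutronDHCPImage so the agent no longer pulls the API image. A sketch of overriding both pieces from an environment file (the registry address is made up for illustration):

    parameter_defaults:
      DockerNamespace: 192.168.24.1:8787/tripleoupstream   # example local registry
      DockerNeutronDHCPImage: centos-binary-neutron-dhcp-agent:latest
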
diff --git a/docker/services/neutron-l3.yaml b/docker/services/neutron-l3.yaml
index c74ab4f..97901bc 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Neutron L3 agent
@@ -15,7 +15,7 @@ parameters:
   # we configure all neutron services in the same neutron
   DockerNeutronConfigImage:
     description: image
-    default: 'centos-binary-neutron-openvswitch-agent:latest'
+    default: 'centos-binary-neutron-server:latest'
     type: string
   ServiceNetMap:
     default: {}
@@ -26,6 +26,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -34,12 +42,17 @@ parameters:
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   NeutronL3Base:
     type: ../../puppet/services/neutron-l3.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -47,42 +60,47 @@ outputs:
     value:
       service_name: {get_attr: [NeutronL3Base, role_data, service_name]}
       config_settings: {get_attr: [NeutronL3Base, role_data, config_settings]}
-      step_config: {get_attr: [NeutronL3Base, role_data, step_config]}
-      docker_image: &neutron_l3_agent_image
-        list_join:
-        - '/'
-        - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ]
-      puppet_tags: neutron_config,neutron_l3_agent_config
-      config_volume: neutron
-      config_image:
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+      step_config: &step_config
+        get_attr: [NeutronL3Base, role_data, step_config]
+      puppet_config:
+        puppet_tags: neutron_config,neutron_l3_agent_config
+        config_volume: neutron
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron-l3-agent.json:
-           command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
-           config_files:
-           - dest: /etc/neutron/neutron.conf
-             owner: neutron
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
-           - dest: /etc/neutron/l3_agent.ini
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/neutron/l3_agent.ini
+          command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
+          permissions:
+            - path: /var/log/neutron
+              owner: neutron:neutron
+              recurse: true
       docker_config:
         step_4:
           neutronl3agent:
-            image: *neutron_l3_agent_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ]
             net: host
             pid: host
             privileged: true
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /lib/modules:/lib/modules:ro
-              - /run:/run
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+                  - /lib/modules:/lib/modules:ro
+                  - /run:/run
+                  - /var/log/containers/neutron:/var/log/neutron
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/neutron
+            state: directory
diff --git a/docker/services/neutron-metadata.yaml b/docker/services/neutron-metadata.yaml
new file mode 100644
index 0000000..88b2ca5
--- /dev/null
+++ b/docker/services/neutron-metadata.yaml
@@ -0,0 +1,110 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Neutron Metadata agent
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerNeutronMetadataImage:
+    description: image
+    default: 'centos-binary-neutron-metadata-agent:latest'
+    type: string
+  # we configure all neutron services in the same neutron
+  DockerNeutronConfigImage:
+    description: image
+    default: 'centos-binary-neutron-server:latest'
+    type: string
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  NeutronMetadataBase:
+    type: ../../puppet/services/neutron-metadata.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for Neutron Metadata agent
+    value:
+      service_name: {get_attr: [NeutronMetadataBase, role_data, service_name]}
+      config_settings: {get_attr: [NeutronMetadataBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [NeutronMetadataBase, role_data, step_config]
+      puppet_config:
+        puppet_tags: neutron_config,neutron_metadata_agent_config
+        config_volume: neutron
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/neutron-metadata-agent.json:
+          command: /usr/bin/neutron-metadata-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-metadata-agent
+          permissions:
+            - path: /var/log/neutron
+              owner: neutron:neutron
+              recurse: true
+      docker_config:
+        step_4:
+          neutron_metadata_agent:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNeutronMetadataImage} ]
+            net: host
+            pid: host
+            privileged: true
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/neutron-metadata-agent.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+                  - /lib/modules:/lib/modules:ro
+                  - /run:/run
+                  - /var/log/containers/neutron:/var/log/neutron
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/neutron
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable neutron_metadata service
+          tags: step2
+          service: name=neutron-metadata-agent state=stopped enabled=no
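
Reviewer note: neutron-metadata.yaml is a new template, so it only takes effect once the resource registry points the metadata-agent service at it. A sketch, assuming the registry key used for this service elsewhere in tripleo-heat-templates (verify against the environments/docker.yaml change in this same merge):

    resource_registry:
      # assumed key name; check environments/docker.yaml for the exact wiring
      OS::TripleO::Services::NeutronMetadataAgent: ../docker/services/neutron-metadata.yaml
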
diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml
index f6b2929..89bf866 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron openvswitch service
@@ -12,6 +12,10 @@ parameters:
     description: image
     default: 'centos-binary-neutron-openvswitch-agent:latest'
     type: string
+  DockerNeutronConfigImage:
+    description: image
+    default: 'centos-binary-neutron-server:latest'
+    type: string
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -21,6 +25,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,12 +41,17 @@ parameters:
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   NeutronOvsAgentBase:
     type: ../../puppet/services/neutron-ovs-agent.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -44,44 +61,49 @@ outputs:
       config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]}
       step_config: &step_config
         get_attr: [NeutronOvsAgentBase, role_data, step_config]
-      docker_image: &neutron_ovs_agent_image
-        list_join:
-        - '/'
-        - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
       puppet_config:
         config_volume: neutron
         puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
         step_config: *step_config
-        config_image: *neutron_ovs_agent_image
+        config_image:
+          list_join:
+          - '/'
+          - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron-openvswitch-agent.json:
-           command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
-           config_files:
-           - dest: /etc/neutron/neutron.conf
-             owner: neutron
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
-           - dest: /etc/neutron/plugins/ml2/openvswitch_agent.ini
-             owner: neutron
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/openvswitch_agent.ini
-           - dest: /etc/neutron/plugins/ml2/ml2_conf.ini
-             owner: neutron
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
+          command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+          permissions:
+            - path: /var/log/neutron
+              owner: neutron:neutron
+              recurse: true
       docker_config:
         step_4:
           neutronovsagent:
-            image: *neutron_ovs_agent_image
+            image: &neutron_ovs_agent_image
+              list_join:
+              - '/'
+              - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
             net: host
             pid: host
             privileged: true
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /lib/modules:/lib/modules:ro
-              - /run:/run
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+                  - /lib/modules:/lib/modules:ro
+                  - /run:/run
+                  - /var/log/containers/neutron:/var/log/neutron
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/neutron
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable neutron_ovs_agent service
+          tags: step2
+          service: name=neutron-openvswitch-agent state=stopped enabled=no
diff --git a/docker/services/neutron-plugin-ml2.yaml b/docker/services/neutron-plugin-ml2.yaml
index 5d1a348..1739a5b 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Neutron ML2 Plugin configured with Puppet
@@ -21,11 +21,19 @@ parameters:
     type: string
   DockerNeutronConfigImage:
     description: image
-    default: 'centos-binary-neutron-openvswitch-agent:latest'
+    default: 'centos-binary-neutron-server:latest'
     type: string
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
@@ -35,6 +43,8 @@ resources:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -48,14 +58,13 @@ outputs:
         get_attr: [NeutronBase, role_data, step_config]
       service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &docker_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       puppet_config:
         config_volume: 'neutron'
         puppet_tags: ''
         step_config: *step_config
-        config_image: *docker_image
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config: {}
       docker_config: {}
diff --git a/docker/services/nova-api.yaml b/docker/services/nova-api.yaml
index 28b3a9d..d571b21 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Nova API service
@@ -12,7 +12,7 @@ parameters:
     description: image
     default: 'centos-binary-nova-api:latest'
     type: string
-  DockerNovaBaseImage:
+  DockerNovaConfigImage:
     description: image
     default: 'centos-binary-nova-base:latest'
     type: string
@@ -30,15 +30,28 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   NovaApiBase:
     type: ../../puppet/services/nova-api.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -50,13 +63,12 @@ outputs:
           - get_attr: [NovaApiBase, role_data, config_settings]
           - apache::default_vhost: false
       step_config: &step_config
-        get_attr: [NovaApiBase, role_data, step_config]
+        list_join:
+          - "\n"
+          - - "['Nova_cell_v2'].each |String $val| { noop_resource($val) }"
+            - {get_attr: [NovaApiBase, role_data, step_config]}
       service_config_settings: {get_attr: [NovaApiBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &nova_api_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ]
       puppet_config:
         config_volume: nova
         puppet_tags: nova_config
@@ -64,26 +76,40 @@ outputs:
         config_image:
           list_join:
           - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/nova_api.json:
-           command: /usr/bin/nova-api
-           config_files:
-           - dest: /etc/nova/nova.conf
-             owner: nova
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+        /var/lib/kolla/config_files/nova_api.json:
+          command: /usr/bin/nova-api
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
       docker_config:
+        # db sync runs before permissions set by kolla_config
         step_3:
+          nova_init_logs:
+            start_order: 0
+            image: &nova_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/nova:/var/log/nova
+            command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
           nova_api_db_sync:
             start_order: 1
             image: *nova_api_image
             net: host
             detach: false
             volumes: &nova_api_volumes
-              - /var/lib/config-data/nova/etc/:/etc/:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+                  - /var/log/containers/nova:/var/log/nova
             command: ['/usr/bin/nova-manage', 'api_db', 'sync']
           # FIXME: we probably want to wait on the 'cell_v2 update' in order for this
           # to be capable of upgrading a baremetal setup. This is to ensure the name
@@ -128,15 +154,12 @@ outputs:
             user: nova
             privileged: true
             restart: always
-            volumes:
-              - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+            volumes: *nova_api_volumes
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+        step_5:
           nova_api_discover_hosts:
-            start_order: 3
+            start_order: 1
             image: *nova_api_image
             net: host
             detach: false
@@ -145,3 +168,12 @@ outputs:
               - '/usr/bin/nova-manage'
               - 'cell_v2'
               - 'discover_hosts'
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/nova
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable nova_api service
+          tags: step2
+          service: name=openstack-nova-api state=stopped enabled=no
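
Reviewer note: the nova-api hunk above prepends a noop_resource() call to the inherited step_config, so the Puppet run inside the config container skips Nova_cell_v2 resources; cell setup is instead driven by the nova-manage containers in steps 3 to 5. After the list_join resolves, the generated step_config looks roughly like the following (the include line stands in for whatever puppet/services/nova-api.yaml actually emits):

    step_config: |
      ['Nova_cell_v2'].each |String $val| { noop_resource($val) }
      include ::tripleo::profile::base::nova::api
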
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
index 9f4e353..4f10a1a 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Nova Compute service
@@ -21,6 +21,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ parameters:
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
 
   NovaComputeBase:
     type: ../../puppet/services/nova-compute.yaml
@@ -36,36 +46,43 @@ resources:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
     description: Role data for the Nova Compute service.
     value:
       service_name: {get_attr: [NovaComputeBase, role_data, service_name]}
-      config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]}
+      config_settings:
+        map_merge:
+          - get_attr: [NovaComputeBase, role_data, config_settings]
+          # FIXME: we need to disable migration for now as the
+          # hieradata is common for all services, and this means nova
+          # and nova_placement puppet runs also try to configure
+          # libvirt, and they fail. We can remove this override when
+          # we have hieradata separation between containers.
+          - tripleo::profile::base::nova::manage_migration: false
       step_config: &step_config
         get_attr: [NovaComputeBase, role_data, step_config]
-      docker_image: &nova_compute_image
-        list_join:
-        - '/'
-        - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
       puppet_config:
         config_volume: nova_libvirt
         puppet_tags: nova_config,nova_paste_api_ini
         step_config: *step_config
-        config_image: *nova_compute_image
+        config_image: &nova_compute_image
+          list_join:
+          - '/'
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
       kolla_config:
         /var/lib/kolla/config_files/nova-compute.json:
-           command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
-           config_files:
-           - dest: /etc/nova/nova.conf
-             owner: nova
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
-           - dest: /etc/nova/rootwrap.conf
-             owner: nova
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
+          command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
+            - path: /var/lib/nova
+              owner: nova:nova
+              recurse: true
       docker_config:
         # FIXME: run discover hosts here
         step_4:
@@ -76,14 +93,30 @@ outputs:
             user: root
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
-              - /dev:/dev
-              - /etc/iscsi:/etc/iscsi
-              - /etc/localtime:/etc/localtime:ro
-              - /lib/modules:/lib/modules:ro
-              - /run:/run
-              - /var/lib/nova:/var/lib/nova
-              - libvirtd:/var/lib/libvirt
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova_libvirt/etc/nova/:/etc/nova/:ro
+                  - /dev:/dev
+                  - /etc/iscsi:/etc/iscsi
+                  - /lib/modules:/lib/modules:ro
+                  - /run:/run
+                  - /var/lib/nova:/var/lib/nova
+                  - /var/lib/libvirt:/var/lib/libvirt
+                  - /var/log/containers/nova:/var/log/nova
             environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/nova
+            - /var/lib/nova
+            - /var/lib/libvirt
+      upgrade_tasks:
+        - name: Stop and disable nova-compute service
+          tags: step2
+          service: name=openstack-nova-compute state=stopped enabled=no
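
Reviewer note: host_prep_tasks are plain Ansible tasks that the deployment framework runs on the host before starting containers; the nova-compute entry above pre-creates the bind-mounted directories so the named docker volumes used previously (libvirtd, nova_compute) are no longer needed. A roughly equivalent standalone play, with an illustrative host group:

    - hosts: Compute        # illustrative group name
      become: true
      tasks:
        - name: create persistent directories
          file:
            path: "{{ item }}"
            state: directory
          with_items:
            - /var/log/containers/nova
            - /var/lib/nova
            - /var/lib/libvirt
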
diff --git a/docker/services/nova-conductor.yaml b/docker/services/nova-conductor.yaml
index 73acd0a..131355d 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Nova Conductor service
@@ -12,7 +12,7 @@ parameters:
     description: image
     default: 'centos-binary-nova-conductor:latest'
     type: string
-  DockerNovaBaseImage:
+  DockerNovaConfigImage:
     description: image
     default: 'centos-binary-nova-base:latest'
     type: string
@@ -30,16 +30,29 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   NovaConductorBase:
     type: ../../puppet/services/nova-conductor.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -51,10 +64,6 @@ outputs:
         get_attr: [NovaConductorBase, role_data, step_config]
       service_config_settings: {get_attr: [NovaConductorBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &nova_conductor_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
       puppet_config:
         config_volume: nova
         puppet_tags: nova_config
@@ -62,27 +71,39 @@ outputs:
         config_image:
           list_join:
           - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/nova_conductor.json:
-           command: /usr/bin/nova-conductor
-           config_files:
-           - dest: /etc/nova/nova.conf
-             owner: nova
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+        /var/lib/kolla/config_files/nova_conductor.json:
+          command: /usr/bin/nova-conductor
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
       docker_config:
         step_4:
           nova_conductor:
-            image: *nova_conductor_image
+            image: &nova_conductor_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
             net: host
             privileged: false
             restart: always
             volumes:
-              - /run:/run
-              - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+                  - /var/log/containers/nova:/var/log/nova
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/nova
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable nova_conductor service
+          tags: step2
+          service: name=openstack-nova-conductor state=stopped enabled=no
diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml
index 5b46010..be0dd11 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Nova Ironic Compute service
@@ -12,7 +12,7 @@ parameters:
     description: image
     default: 'centos-binary-nova-compute-ironic:latest'
     type: string
-  DockerNovaBaseImage:
+  DockerNovaConfigImage:
     description: image
     default: 'centos-binary-nova-base:latest'
     type: string
@@ -25,6 +25,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -33,11 +41,17 @@ parameters:
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
 
   NovaIronicBase:
     type: ../../puppet/services/nova-ironic.yaml
     properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -47,10 +61,6 @@ outputs:
       config_settings: {get_attr: [NovaIronicBase, role_data, config_settings]}
       step_config: &step_config
         get_attr: [NovaIronicBase, role_data, step_config]
-      docker_image: &nova_ironic_image
-        list_join:
-        - '/'
-        - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
       puppet_config:
         config_volume: nova
         puppet_tags: nova_config,nova_paste_api_ini
@@ -58,34 +68,50 @@ outputs:
         config_image:
           list_join:
           - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/nova_ironic.json:
-           command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
-           config_files:
-           - dest: /etc/nova/nova.conf
-             owner: nova
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
-           - dest: /etc/nova/rootwrap.conf
-             owner: nova
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
+          command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
+            - path: /var/lib/nova
+              owner: nova:nova
+              recurse: true
       docker_config:
         step_5:
           novacompute:
-            image: *nova_ironic_image
+            image:
+              list_join:
+              - '/'
+              - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
             net: host
             privileged: true
             user: root
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/nova:/var/lib/kolla/config_files/src:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - /dev:/dev
-              - /etc/iscsi:/etc/iscsi
-              - nova_compute:/var/lib/nova/
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+                  - /run:/run
+                  - /dev:/dev
+                  - /etc/iscsi:/etc/iscsi
+                  - /var/lib/nova/:/var/lib/nova
+                  - /var/log/containers/nova:/var/log/nova
             environment:
              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/nova
+            - /var/lib/nova
+      upgrade_tasks:
+        - name: Stop and disable nova-compute service
+          tags: step2
+          service: name=openstack-nova-compute state=stopped enabled=no
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index ed54f3d..ebf0da7 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Libvirt Service
@@ -14,7 +14,7 @@ parameters:
     type: string
   # we configure libvirt via the nova-compute container due to coupling
   # in the puppet modules
-  DockerNovaComputeImage:
+  DockerNovaConfigImage:
     description: image
     default: 'centos-binary-nova-compute:latest'
     type: string
@@ -27,6 +27,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -35,25 +43,34 @@ parameters:
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   NovaLibvirtBase:
     type: ../../puppet/services/nova-libvirt.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
     description: Role data for the Libvirt service.
     value:
       service_name: {get_attr: [NovaLibvirtBase, role_data, service_name]}
-      config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]}
+      config_settings:
+        map_merge:
+          - get_attr: [NovaLibvirtBase, role_data, config_settings]
+          # FIXME: we need to disable migration for now as the
+          # hieradata is common for all services, and this means nova
+          # and nova_placement puppet runs also try to configure
+          # libvirt, and they fail. We can remove this override when
+          # we have hieradata separation between containers.
+          - tripleo::profile::base::nova::manage_migration: false
       step_config: &step_config
         get_attr: [NovaLibvirtBase, role_data, step_config]
-      docker_image: &libvirt_image
-        list_join:
-        - '/'
-        - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
       puppet_config:
         config_volume: nova_libvirt
         puppet_tags: nova_config
@@ -61,35 +78,53 @@ outputs:
         config_image:
           list_join:
           - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/nova-libvirt.json:
-           command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
-           config_files:
-           - dest: /etc/libvirt/libvirtd.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/libvirt/libvirtd.conf
+          command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
       docker_config:
         step_3:
           nova_libvirt:
-            image: *libvirt_image
+            image:
+              list_join:
+              - '/'
+              - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
             net: host
             pid: host
             privileged: true
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
-              - /dev:/dev
-              - /etc/localtime:/etc/localtime:ro
-              - /lib/modules:/lib/modules:ro
-              - /run:/run
-              - /sys/fs/cgroup:/sys/fs/cgroup
-              - /var/lib/nova:/var/lib/nova
-              # Needed to use host's virtlogd
-              - /var/run/libvirt:/var/run/libvirt
-              - libvirtd:/var/lib/libvirt
-              - nova_libvirt_qemu:/etc/libvirt/qemu
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova_libvirt/etc/libvirt/:/etc/libvirt/:ro
+                  - /lib/modules:/lib/modules:ro
+                  - /dev:/dev
+                  - /run:/run
+                  - /sys/fs/cgroup:/sys/fs/cgroup
+                  - /var/lib/nova:/var/lib/nova
+                  # Needed to use host's virtlogd
+                  - /var/run/libvirt:/var/run/libvirt
+                  - /var/lib/libvirt:/var/lib/libvirt
+                  - /etc/libvirt/qemu:/etc/libvirt/qemu
+                  - /var/log/containers/nova:/var/log/nova
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create libvirt persistent data directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /etc/libvirt/qemu
+            - /var/lib/libvirt
+            - /var/log/containers/nova
+      upgrade_tasks:
+        - name: Stop and disable libvirtd service
+          tags: step2
+          service: name=libvirtd state=stopped enabled=no
diff --git a/docker/services/nova-metadata.yaml b/docker/services/nova-metadata.yaml
index 90c4c1c..e158d3b 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Nova Metadata service
@@ -18,7 +18,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
-
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
@@ -28,6 +35,8 @@ resources:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -41,7 +50,6 @@ outputs:
         get_attr: [NovaMetadataBase, role_data, step_config]
       service_config_settings: {get_attr: [NovaMetadataBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: ''
       puppet_config:
         config_volume: ''
         puppet_tags: ''
diff --git a/docker/services/nova-placement.yaml b/docker/services/nova-placement.yaml
index 5446979..ae4ccf6 100644
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Nova Placement API service
@@ -10,7 +10,7 @@ parameters:
     type: string
   DockerNovaPlacementImage:
     description: image
-    default: 'centos-binary-nova-placement-api'
+    default: 'centos-binary-nova-placement-api:latest'
     type: string
   EndpointMap:
     default: {}
@@ -26,15 +26,28 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   NovaPlacementBase:
     type: ../../puppet/services/nova-placement.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -49,42 +62,21 @@ outputs:
         get_attr: [NovaPlacementBase, role_data, step_config]
       service_config_settings: {get_attr: [NovaPlacementBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &nova_placement_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
       puppet_config:
         config_volume: nova_placement
         puppet_tags: nova_config
         step_config: *step_config
-        config_image:
+        config_image: &nova_placement_image
           list_join:
           - '/'
           - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/nova_placement.json:
-           command: /usr/sbin/httpd -DFOREGROUND
-           config_files:
-           - dest: /etc/nova/nova.conf
-             owner: nova
-             perm: '0640'
-             source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
-           - dest: /etc/httpd/conf.d/10-placement_wsgi.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-placement_wsgi.conf
-           - dest: /etc/httpd/conf/httpd.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
-           - dest: /etc/httpd/conf/ports.conf
-             owner: root
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
-           - dest: /var/www/cgi-bin/nova/nova-placement-api
-             owner: nova
-             perm: '0644'
-             source: /var/lib/kolla/config_files/src/var/www/cgi-bin/nova/nova-placement-api
+        /var/lib/kolla/config_files/nova_placement.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
       docker_config:
         # start this early so it is up before computes start reporting
         step_3:
@@ -95,10 +87,22 @@ outputs:
             user: root
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/nova_placement/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/nova_placement/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova_placement/etc/nova/:/etc/nova/:ro
+                  - /var/lib/config-data/nova_placement/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/nova_placement/var/www/:/var/www/:ro
+                  - /var/log/containers/nova:/var/log/nova
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/nova
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable nova_placement service (running under httpd)
+          tags: step2
+          service: name=httpd state=stopped enabled=no
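For reference, the volume handling in these converted templates follows one shared pattern: a common ./containers-common.yaml resource exposes the base bind mounts every container needs (hosts file, localtime, and so on), and each service concatenates its own read-only config-data mounts onto that list with list_concat. A minimal sketch of the pattern, using a hypothetical "example" service in place of nova_placement:

  resources:
    ContainersCommon:
      type: ./containers-common.yaml

  # inside the service's docker_config entry:
  volumes:
    list_concat:
      - {get_attr: [ContainersCommon, volumes]}
      -
        - /var/lib/kolla/config_files/example.json:/var/lib/kolla/config_files/config.json:ro
        - /var/lib/config-data/example/etc/example/:/etc/example/:ro
        - /var/log/containers/example:/var/log/example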
index e3955a2..6285e98 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Nova Scheduler service
@@ -12,7 +12,7 @@ parameters:
     description: image
     default: 'centos-binary-nova-scheduler:latest'
     type: string
-  DockerNovaBaseImage:
+  DockerNovaConfigImage:
     description: image
     default: 'centos-binary-nova-base:latest'
     type: string
@@ -30,15 +30,28 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   NovaSchedulerBase:
     type: ../../puppet/services/nova-scheduler.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -50,38 +63,47 @@ outputs:
         get_attr: [NovaSchedulerBase, role_data, step_config]
       service_config_settings: {get_attr: [NovaSchedulerBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &nova_scheduler_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ]
       puppet_config:
-      config_volume: nova
-      puppet_tags: nova_config
-      step_config: *step_config
-      config_image:
-        list_join:
-        - '/'
-        - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+        config_volume: nova
+        puppet_tags: nova_config
+        step_config: *step_config
+        config_image:
+          list_join:
+          - '/'
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
       kolla_config:
-         /var/lib/kolla/config_files/nova_scheduler.json:
-           command: /usr/bin/nova-scheduler
-           config_files:
-           - dest: /etc/nova/nova.conf
-             owner: nova
-             perm: '0600'
-             source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+        /var/lib/kolla/config_files/nova_scheduler.json:
+          command: /usr/bin/nova-scheduler
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
       docker_config:
         step_4:
           nova_scheduler:
-            image: *nova_scheduler_image
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ]
             net: host
             privileged: false
             restart: always
             volumes:
-              - /run:/run
-              - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+                  - /run:/run
+                  - /var/log/containers/nova:/var/log/nova
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/nova
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable nova_scheduler service
+          tags: step2
+          service: name=openstack-nova-scheduler state=stopped enabled=no
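The host_prep_tasks and upgrade_tasks blocks added to each service are plain Ansible task lists that are aggregated per role later in the stack (see the services.yaml change further down). A condensed sketch of their shape, with the service name purely illustrative:

  host_prep_tasks:
    - name: create persistent logs directory
      file:
        path: /var/log/containers/example
        state: directory
  upgrade_tasks:
    - name: Stop and disable the baremetal service before it is containerized
      tags: step2
      service: name=openstack-example state=stopped enabled=no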
diff --git a/docker/services/panko-api.yaml b/docker/services/panko-api.yaml
new file mode 100644 (file)
index 0000000..46cfa5a
--- /dev/null
@@ -0,0 +1,145 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Panko service configured with docker.
+  Note: this service is deprecated in the Pike release and
+  will be disabled in future releases.
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerPankoApiImage:
+    description: image
+    default: 'centos-binary-panko-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  PankoApiPuppetBase:
+    type: ../../puppet/services/panko-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Panko API role.
+    value:
+      service_name: {get_attr: [PankoApiPuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [PankoApiPuppetBase, role_data, config_settings]
+          - apache::default_vhost: false
+      step_config: &step_config
+        get_attr: [PankoApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [PankoApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: panko
+        puppet_tags: panko_api_paste_ini,panko_config
+        step_config: *step_config
+        config_image: &panko_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/panko-api.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+          permissions:
+            - path: /var/log/panko
+              owner: panko:panko
+              recurse: true
+      docker_config:
+        step_3:
+          panko-init-log:
+            start_order: 0
+            image: *panko_image
+            user: root
+            volumes:
+              - /var/log/containers/panko:/var/log/panko
+            command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
+          panko_db_sync:
+            start_order: 1
+            image: *panko_image
+            net: host
+            detach: false
+            privileged: false
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/panko/etc/panko:/etc/panko:ro
+                  - /var/log/containers/panko:/var/log/panko
+            command: /usr/bin/panko-dbsync
+        step_4:
+          panko_api:
+            start_order: 2
+            image: *panko_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/panko-api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/panko/etc/panko/:/etc/panko/:ro
+                  - /var/lib/config-data/panko/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/panko/var/www/:/var/www/:ro
+                  - /var/log/containers/panko:/var/log/panko
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                      - ''
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                      - ''
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/panko
+            state: directory
+      metadata_settings:
+        get_attr: [PankoApiPuppetBase, role_data, metadata_settings]
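The TLS-related mounts above use Heat's if intrinsic inside list_concat: when internal_tls_enabled is false, the entry resolves to an empty string placeholder, which is presumably dropped before the container is started. A reduced sketch of the construct, with an illustrative config file name:

  conditions:
    internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}

  volumes:
    list_concat:
      -
        - /var/lib/kolla/config_files/example.json:/var/lib/kolla/config_files/config.json:ro
      -
        - if:
            - internal_tls_enabled
            - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
            - ''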
index aa60443..e2f8228 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Rabbitmq service
@@ -26,6 +26,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   RabbitCookie:
     type: string
     default: ''
@@ -33,69 +41,77 @@ parameters:
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   RabbitmqBase:
     type: ../../puppet/services/rabbitmq.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
     description: Role data for the Rabbitmq API role.
     value:
       service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
-      config_settings: {get_attr: [RabbitmqBase, role_data, config_settings]}
+      # RabbitMQ plugin initialization occurs on every node
+      config_settings:
+        map_merge:
+          - {get_attr: [RabbitmqBase, role_data, config_settings]}
+          - rabbitmq::admin_enable: false
       step_config: &step_config
-        get_attr: [RabbitmqBase, role_data, step_config]
+        list_join:
+          - "\n"
+          - - "['Rabbitmq_policy', 'Rabbitmq_user'].each |String $val| { noop_resource($val) }"
+            - get_attr: [RabbitmqBase, role_data, step_config]
       service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &rabbitmq_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
       puppet_config:
         config_volume: rabbitmq
-        puppet_tags: file
         step_config: *step_config
-        config_image: *rabbitmq_image
+        config_image: &rabbitmq_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
       kolla_config:
         /var/lib/kolla/config_files/rabbitmq.json:
           command: /usr/lib/rabbitmq/bin/rabbitmq-server
-          config_files:
-          - dest: /etc/rabbitmq/rabbitmq.config
-            owner: root
-            perm: '0644'
-            source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq.config
-          - dest: /etc/rabbitmq/enabled_plugins
-            owner: root
-            perm: '0644'
-            source: /var/lib/kolla/config_files/src/etc/rabbitmq/enabled_plugins
-          - dest: /etc/rabbitmq/rabbitmq-env.conf
-            owner: root
-            perm: '0644'
-            source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq-env.conf
-          - dest: /etc/rabbitmq/rabbitmqadmin.conf
-            owner: root
-            perm: '0644'
-            source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmqadmin.conf
+          permissions:
+            - path: /var/lib/rabbitmq
+              owner: rabbitmq:rabbitmq
+              recurse: true
       docker_config:
+        # Kolla_bootstrap runs before the permissions from kolla_config are applied
         step_1:
-          rabbitmq_bootstrap:
+          rabbitmq_init_logs:
             start_order: 0
             image: *rabbitmq_image
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/rabbitmq:/var/log/rabbitmq
+            command: ['/bin/bash', '-c', 'chown -R rabbitmq:rabbitmq /var/log/rabbitmq']
+          rabbitmq_bootstrap:
+            start_order: 1
+            image: *rabbitmq_image
             net: host
             privileged: false
             volumes:
-              - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - rabbitmq:/var/lib/rabbitmq/
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
+                  - /var/lib/rabbitmq:/var/lib/rabbitmq
+                  - /var/log/containers/rabbitmq:/var/log/rabbitmq
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
               - KOLLA_BOOTSTRAP=True
-              - 
+              -
                 list_join:
                   - '='
                   - - 'RABBITMQ_CLUSTER_COOKIE'
@@ -107,16 +123,40 @@ outputs:
                             - {get_param: RabbitCookie}
                             - {get_param: [DefaultPasswords, rabbit_cookie]}
           rabbitmq:
-            start_order: 1
+            start_order: 2
             image: *rabbitmq_image
             net: host
             privileged: false
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - rabbitmq:/var/lib/rabbitmq/
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
+                  - /var/lib/rabbitmq:/var/lib/rabbitmq
+                  - /var/log/containers/rabbitmq:/var/log/rabbitmq
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      docker_puppet_tasks:
+        # RabbitMQ user and policy initialization occurs only on a single node
+        step_1:
+          config_volume: 'rabbit_init_tasks'
+          puppet_tags: 'rabbitmq_policy,rabbitmq_user'
+          step_config: 'include ::tripleo::profile::base::rabbitmq'
+          config_image: *rabbitmq_image
+          volumes:
+            - /var/lib/config-data/rabbitmq/etc/:/etc/
+            - /var/lib/rabbitmq:/var/lib/rabbitmq:ro
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/rabbitmq
+            - /var/lib/rabbitmq
+      upgrade_tasks:
+        - name: Stop and disable rabbitmq service
+          tags: step2
+          service: name=rabbitmq-server state=stopped enabled=no
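The RabbitMQ conversion splits the Puppet work in two: the per-node step_config no-ops the Rabbitmq_policy and Rabbitmq_user resources, while a docker_puppet_tasks entry applies exactly those tags once against the running cluster. A condensed sketch of that split, reusing the values from the hunk above:

  step_config:
    list_join:
      - "\n"
      - - "['Rabbitmq_policy', 'Rabbitmq_user'].each |String $val| { noop_resource($val) }"
        - {get_attr: [RabbitmqBase, role_data, step_config]}

  docker_puppet_tasks:
    step_1:
      config_volume: 'rabbit_init_tasks'
      puppet_tags: 'rabbitmq_policy,rabbitmq_user'
      step_config: 'include ::tripleo::profile::base::rabbitmq'
      config_image: *rabbitmq_image   # anchor defined on the puppet_config block above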
index 125c446..2ad3b63 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Utility stack to convert an array of services into a set of combined
@@ -26,6 +26,14 @@ parameters:
     description: Mapping of service -> default password. Used to help
                  pass top level passwords managed by Heat into services.
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
@@ -36,6 +44,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       EndpointMap: {get_param: EndpointMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
   ServiceChain:
     type: OS::Heat::ResourceChain
@@ -46,6 +56,8 @@ resources:
         ServiceNetMap: {get_param: ServiceNetMap}
         EndpointMap: {get_param: EndpointMap}
         DefaultPasswords: {get_param: DefaultPasswords}
+        RoleName: {get_param: RoleName}
+        RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -67,7 +79,6 @@ outputs:
         {get_attr: [PuppetServices, role_data, global_config_settings]}
       step_config:
         {get_attr: [ServiceChain, role_data, step_config]}
-      docker_image: {get_attr: [ServiceChain, role_data, docker_image]}
       puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
       kolla_config:
         map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
@@ -75,3 +86,20 @@ outputs:
         {get_attr: [ServiceChain, role_data, docker_config]}
       docker_puppet_tasks:
         {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
+      host_prep_tasks:
+        yaql:
+          # Note we use distinct() here to filter any identical tasks
+          expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
+          data: {get_attr: [ServiceChain, role_data]}
+      upgrade_tasks:
+        yaql:
+          # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
+          expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
+          data: {get_attr: [ServiceChain, role_data]}
+      upgrade_batch_tasks:
+        yaql:
+          # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
+          expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
+          data: {get_attr: [ServiceChain, role_data]}
+      service_metadata_settings:
+        get_attr: [PuppetServices, role_data, service_metadata_settings]
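The yaql expressions introduced here collapse the per-service task lists into one de-duplicated, role-level list. A worked illustration of what one of them computes, using made-up task names as the input data:

  upgrade_tasks:
    yaql:
      expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
      data:
        - {upgrade_tasks: [taskA, taskB]}
        - null
        - {upgrade_tasks: [taskB]}
      # result: [taskA, taskB] -- nulls are filtered out and duplicates collapsed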
index cdfefe2..60972f9 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized swift proxy service
@@ -26,15 +26,35 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   SwiftProxyBase:
     type: ../../puppet/services/swift-proxy.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -46,34 +66,72 @@ outputs:
         get_attr: [SwiftProxyBase, role_data, step_config]
       service_config_settings: {get_attr: [SwiftProxyBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &swift_proxy_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
       puppet_config:
         config_volume: swift
         puppet_tags: swift_proxy_config
         step_config: *step_config
-        config_image: *swift_proxy_image
+        config_image: &swift_proxy_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
       kolla_config:
         /var/lib/kolla/config_files/swift_proxy.json:
           command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
+          permissions:
+            - path: /var/log/swift
+              owner: swift:swift
+              recurse: true
+        /var/lib/kolla/config_files/swift_proxy_tls_proxy.json:
+          command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
         step_4:
-          swift_proxy:
-            image: *swift_proxy_image
-            net: host
-            user: swift
-            restart: always
-            # I'm mounting /etc/swift as rw.  Are the rings written to at all during runtime?
-            volumes:
-              - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
-            environment:
-              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+          map_merge:
+            - swift_proxy:
+                image: *swift_proxy_image
+                net: host
+                user: swift
+                restart: always
+                volumes:
+                  list_concat:
+                    - {get_attr: [ContainersCommon, volumes]}
+                    -
+                      - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
+                      # FIXME I'm mounting /etc/swift as rw.  Are the rings written to
+                      # at all during runtime?
+                      - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                      - /run:/run
+                      - /srv/node:/srv/node
+                      - /dev:/dev
+                      - /var/log/containers/swift:/var/log/swift
+                environment:
+                  - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+            - if:
+                - internal_tls_enabled
+                - swift_proxy_tls_proxy:
+                    image: *swift_proxy_image
+                    net: host
+                    user: root
+                    restart: always
+                    volumes:
+                      list_concat:
+                        - {get_attr: [ContainersCommon, volumes]}
+                        -
+                          - /var/lib/kolla/config_files/swift_proxy_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+                          - /var/lib/config-data/swift/etc/httpd/:/etc/httpd/:ro
+                          - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                          - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                    environment:
+                      - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+                - {}
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/swift
+            - /srv/node
+      upgrade_tasks:
+        - name: Stop and disable swift_proxy service
+          tags: step2
+          service: name=openstack-swift-proxy state=stopped enabled=no
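Optional containers follow a map_merge pattern: the unconditional container map is merged with either a second container definition or an empty map, depending on the condition. A stripped-down sketch, with placeholder service and image names:

  docker_config:
    step_4:
      map_merge:
        - example_proxy:
            image: centos-binary-example:latest   # placeholder; the real templates build this via list_join
            net: host
            restart: always
        - if:
            - internal_tls_enabled
            - example_tls_proxy:
                image: centos-binary-example:latest
                net: host
                user: root
                restart: always
            - {}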
index 027a695..bfd445d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Swift Ringbuilder
@@ -21,6 +21,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -59,6 +67,8 @@ resources:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -70,14 +80,13 @@ outputs:
         get_attr: [SwiftRingbuilderBase, role_data, step_config]
       service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &docker_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
       puppet_config:
         config_volume: 'swift'
         puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
         step_config: *step_config
-        config_image: *docker_image
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
       kolla_config: {}
       docker_config: {}
index ab9946f..017fb12 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Swift Storage services.
@@ -32,6 +32,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -41,12 +49,17 @@ parameters:
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   SwiftStorageBase:
     type: ../../puppet/services/swift-storage.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -58,15 +71,14 @@ outputs:
         get_attr: [SwiftStorageBase, role_data, step_config]
       service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &swift_proxy_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
       puppet_config:
         config_volume: swift
         puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
         step_config: *step_config
-        config_image: *swift_proxy_image
+        config_image: &swift_proxy_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
       kolla_config:
         /var/lib/kolla/config_files/swift_account_auditor.json:
           command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf
@@ -94,96 +106,92 @@ outputs:
           command: /usr/bin/swift-object-updater /etc/swift/object-server.conf
         /var/lib/kolla/config_files/swift_object_server.json:
           command: /usr/bin/swift-object-server /etc/swift/object-server.conf
+          permissions:
+            - path: /var/log/swift
+              owner: swift:swift
+              recurse: true
       docker_config:
         step_3:
           # The puppet config sets this up but we don't have a way to mount the named
           # volume during the configuration stage.  We just need to create this
           # directory and make sure it's owned by swift.
           swift_setup_srv:
-            image:
+            image: &swift_account_image
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
             user: root
-            command: ['/bin/bash', '-c', 'mkdir /srv/node && chown swift:swift /srv/node']
+            command: ['chown', '-R', 'swift:', '/srv/node']
             volumes:
-              - swift-srv:/srv
+              - /srv/node:/srv/node
         step_4:
           swift_account_auditor:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+            image: *swift_account_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: &kolla_env
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           swift_account_reaper:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+            image: *swift_account_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_account_replicator:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+            image: *swift_account_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_account_server:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+            image: *swift_account_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_container_auditor:
-            image:
+            image: &swift_container_image
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
@@ -191,71 +199,66 @@ outputs:
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_container_replicator:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+            image: *swift_container_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_container_updater:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+            image: *swift_container_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_container_server:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+            image: *swift_container_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_object_auditor:
-            image:
+            image: &swift_object_image
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
@@ -263,84 +266,102 @@ outputs:
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_object_expirer:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+            image: *swift_proxy_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_object_replicator:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+            image: *swift_object_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_object_updater:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+            image: *swift_object_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
           swift_object_server:
-            image:
-              list_join:
-                - '/'
-                - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+            image: *swift_object_image
             net: host
             user: swift
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
-              - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
-              - /run:/run
-              - swift-srv:/srv
-              - /dev:/dev
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/swift
+            - /srv/node
+      upgrade_tasks:
+        - name: Stop and disable swift storage services
+          tags: step2
+          service: name={{ item }} state=stopped enabled=no
+          with_items:
+            - openstack-swift-account-auditor
+            - openstack-swift-account-reaper
+            - openstack-swift-account-replicator
+            - openstack-swift-account
+            - openstack-swift-container-auditor
+            - openstack-swift-container-replicator
+            - openstack-swift-container-updater
+            - openstack-swift-container
+            - openstack-swift-object-auditor
+            - openstack-swift-object-replicator
+            - openstack-swift-object-updater
+            - openstack-swift-object
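Much of the line-count reduction in this file comes from plain YAML anchors: the first container to reference an image defines it with &name and later containers reuse it with *name, so the list_join that builds the image reference appears only once. A tiny standalone example of the mechanism (the parameter name is a placeholder):

  step_4:
    example_auditor:
      image: &example_image
        list_join:
          - '/'
          - [ {get_param: DockerNamespace}, {get_param: DockerExampleImage} ]
    example_server:
      image: *example_image   # alias resolves to the same list_join node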
index 1704278..594df69 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack containerized Zaqar services
@@ -26,15 +26,28 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 resources:
 
+  ContainersCommon:
+    type: ./containers-common.yaml
+
   ZaqarBase:
     type: ../../puppet/services/zaqar.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -46,34 +59,23 @@ outputs:
        get_attr: [ZaqarBase, role_data, step_config]
       service_config_settings: {get_attr: [ZaqarBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
-      docker_image: &zaqar_image
-        list_join:
-          - '/'
-          - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
       puppet_config:
         config_volume: zaqar
         puppet_tags: zaqar_config
         step_config: *step_config
-        config_image: *zaqar_image
+        config_image: &zaqar_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
       kolla_config:
         /var/lib/kolla/config_files/zaqar.json:
-          command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf
-          config_files:
-          - dest: /etc/zaqar/zaqar.conf
-            owner: zaqar
-            perm: '0640'
-            source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
+          command: /usr/sbin/httpd -DFOREGROUND
         /var/lib/kolla/config_files/zaqar_websocket.json:
           command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf
-          config_files:
-          - dest: /etc/zaqar/zaqar.conf
-            owner: zaqar
-            perm: '0640'
-            source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
-          - dest: /etc/zaqar/1.conf
-            owner: zaqar
-            perm: '0640'
-            source: /var/lib/kolla/config_files/src/etc/zaqar/1.conf
+          permissions:
+            - path: /var/log/zaqar
+              owner: zaqar:zaqar
+              recurse: true
       docker_config:
         step_4:
           zaqar:
@@ -81,11 +83,18 @@ outputs:
             net: host
             privileged: false
             restart: always
+            # NOTE(mandre) the kolla image changes the user to 'zaqar'; we need it
+            # to be root to run httpd
+            user: root
             volumes:
-              - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
+                  - /var/lib/config-data/zaqar/var/www/:/var/www/:ro
+                  - /var/lib/config-data/zaqar/etc/httpd/:/etc/httpd/:ro
+                  - /var/log/containers/zaqar:/var/log/zaqar
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
           zaqar_websocket:
@@ -94,9 +103,22 @@ outputs:
             privileged: false
             restart: always
             volumes:
-              - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
-              - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
-              - /etc/hosts:/etc/hosts:ro
-              - /etc/localtime:/etc/localtime:ro
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
+                  - /var/lib/config-data/zaqar/var/www/:/var/www/:ro
+                  - /var/lib/config-data/zaqar/etc/httpd/:/etc/httpd/:ro
+                  - /var/log/containers/zaqar:/var/log/zaqar
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/zaqar
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable zaqar service
+          tags: step2
+          service: name=httpd state=stopped enabled=no
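Across these services the kolla_config JSON shrinks to just a command and, where log files need fixing up, a permissions stanza; the old per-file copy lists are gone because the generated configuration directories are now bind mounted read-only into the container. A minimal sketch of such a file's content, with placeholder paths:

  /var/lib/kolla/config_files/example.json:
    command: /usr/bin/example-server --config-file /etc/example/example.conf
    permissions:
      - path: /var/log/example
        owner: example:example
        recurse: true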
diff --git a/environments/cadf.yaml b/environments/cadf.yaml
new file mode 100644 (file)
index 0000000..af5c7fd
--- /dev/null
@@ -0,0 +1,2 @@
+parameter_defaults:
+  KeystoneNotificationFormat: cadf
index b9a8434..dfd1589 100644 (file)
@@ -1,7 +1,7 @@
 # A Heat environment file which can be used to enable a
 # Cinder NetApp backend, configured via puppet
 resource_registry:
-  OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+  OS::TripleO::Services::CinderBackendNetApp: ../puppet/services/cinder-backend-netapp.yaml
 
 parameter_defaults:
   CinderEnableNetappBackend: true
diff --git a/environments/cinder-pure-config.yaml b/environments/cinder-pure-config.yaml
new file mode 100644 (file)
index 0000000..84820d1
--- /dev/null
@@ -0,0 +1,13 @@
+# A Heat environment file which can be used to enable a
+# Cinder Pure Storage FlashArray iSCSI backend, configured via puppet
+resource_registry:
+  OS::TripleO::Services::CinderBackendPure: ../puppet/services/cinder-backend-pure.yaml
+
+parameter_defaults:
+  CinderEnablePureBackend: true
+  CinderPureBackendName: 'tripleo_pure'
+  CinderPureStorageProtocol: 'iSCSI'
+  CinderPureSanIp: ''
+  CinderPureAPIToken: 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
+  CinderPureUseChap: false
+  CinderPureMultipathXfer: true
index 7780530..e40aedf 100644 (file)
@@ -3,8 +3,36 @@ resource_registry:
 
 # parameter_defaults:
 #
-## You can specify additional plugins to load using the
-## CollectdExtraPlugins key:
+## Collectd server configuration
+#   CollectdServer: collectd0.example.com
+#
+################
+#### Other config parameters; the values shown here are the defaults
+################
+#
+#   CollectdServerPort: 25826
+#   CollectdSecurityLevel: None
+#
+################
+#### If CollectdSecurityLevel is set to Encrypt or Sign
+#### the following parameters are also needed
+###############
+#
+#   CollectdUsername: user
+#   CollectdPassword: password
+#
+## CollectdDefaultPlugins: the default plugins used by collectd
+#
+#   CollectdDefaultPlugins:
+#     - disk
+#     - interface
+#     - load
+#     - memory
+#     - processes
+#     - tcpconns
+#
+## Extra plugins can be enabled by the CollectdExtraPlugins parameter.
+## All the available plugins are:
 #
 #   CollectdExtraPlugins:
 #     - disk
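Taken together, the commented guidance above corresponds to an environment file along these lines (all values are illustrative only):

  parameter_defaults:
    CollectdServer: collectd0.example.com
    CollectdServerPort: 25826
    CollectdSecurityLevel: Sign
    CollectdUsername: user
    CollectdPassword: password
    CollectdExtraPlugins:
      - disk
      - interface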
index 1e64f91..cca9bea 100644 (file)
@@ -8,7 +8,7 @@ resource_registry:
 
 parameter_defaults:
   ControlPlaneSubnetCidr: '24'
-  ControlPlaneDefaultRoute: 192.0.2.254
+  ControlPlaneDefaultRoute: 192.168.24.254
   InternalApiNetCidr: 10.0.0.0/24
   InternalApiAllocationPools: [{'start': '10.0.0.10', 'end': '10.0.0.200'}]
   InternalApiDefaultRoute: 10.0.0.1
@@ -17,7 +17,7 @@ parameter_defaults:
   ManagementInterfaceDefaultRoute: 10.1.0.1
   ExternalNetCidr: 10.2.0.0/24
   ExternalAllocationPools: [{'start': '10.2.0.10', 'end': '10.2.0.200'}]
-  EC2MetadataIp: 192.0.2.1  # Generally the IP of the Undercloud
+  EC2MetadataIp: 192.168.24.1  # Generally the IP of the Undercloud
   DnsServers: ["8.8.8.8","8.8.4.4"]
   VrouterPhysicalInterface: eth1
   VrouterGateway: 10.0.0.1
index 3007638..a5f0eca 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Software Config to drive os-net-config to configure multiple interfaces
index 5f6c469..d6d6f29 100644 (file)
@@ -29,6 +29,7 @@
   CountDefault: 1
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephMds
     - OS::TripleO::Services::CephMon
     - OS::TripleO::Services::CephExternal
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephClient
     - OS::TripleO::Services::CephExternal
     - OS::TripleO::Services::Timezone
 - name: BlockStorage
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::BlockStorageCinderVolume
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::SwiftStorage
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephOSD
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
 - name: ContrailController
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::ContrailConfig
     - OS::TripleO::Services::ContrailControl
     - OS::TripleO::Services::ContrailDatabase
 - name: ContrailAnalytics
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::ContrailAnalytics
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
 - name: ContrailAnalyticsDatabase
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::ContrailAnalyticsDatabase
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
 - name: ContrailTsn
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::ContrailTsn
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
diff --git a/environments/deployed-server-environment.j2.yaml b/environments/deployed-server-environment.j2.yaml
new file mode 100644 (file)
index 0000000..327934d
--- /dev/null
@@ -0,0 +1,11 @@
+resource_registry:
+  OS::TripleO::Server: ../deployed-server/deployed-server.yaml
+  OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
+  OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
+
+{% for role in roles %}
+  # Default nic config mappings
+  OS::TripleO::{{role.name}}::Net::SoftwareConfig: ../net-config-static.yaml
+{% endfor %}
+
+  OS::TripleO::ControllerDeployedServer::Net::SoftwareConfig: ../net-config-static-bridge.yaml
diff --git a/environments/deployed-server-environment.yaml b/environments/deployed-server-environment.yaml
deleted file mode 100644 (file)
index 7bc1bd9..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-resource_registry:
-  OS::TripleO::Server: ../deployed-server/deployed-server.yaml
-  OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
-  OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
index 85fa7d2..cc9ea99 100644 (file)
@@ -1,4 +1,4 @@
 resource_registry:
-  OS::TripleO::Tasks::ControllerDeployedServerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
-  OS::TripleO::Tasks::ControllerDeployedServerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerDeployedServerPreConfig: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerDeployedServerPostConfig: ../extraconfig/tasks/post_puppet_pacemaker.yaml
   OS::TripleO::Tasks::ControllerDeployedServerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
diff --git a/environments/docker-services-tls-everywhere.yaml b/environments/docker-services-tls-everywhere.yaml
new file mode 100644 (file)
index 0000000..e37f251
--- /dev/null
@@ -0,0 +1,52 @@
+# This environment contains the services that can work with TLS-everywhere.
+resource_registry:
+  # This can be used when you don't want to run puppet on the host,
+  # e.g. Atomic, but it has been replaced by OS::TripleO::Services::Docker
+  # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+  OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+  # The compute node still needs extra initialization steps
+  OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+
+  # NOTE: add roles to be docker enabled as we support them.
+  OS::TripleO::Services::AodhApi: ../docker/services/aodh-api.yaml
+  OS::TripleO::Services::AodhEvaluator: ../docker/services/aodh-evaluator.yaml
+  OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
+  OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml
+  OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
+  OS::TripleO::Services::GlanceApi: ../docker/services/glance-api.yaml
+  OS::TripleO::Services::GnocchiApi: ../docker/services/gnocchi-api.yaml
+  OS::TripleO::Services::GnocchiMetricd: ../docker/services/gnocchi-metricd.yaml
+  OS::TripleO::Services::GnocchiStatsd: ../docker/services/gnocchi-statsd.yaml
+  OS::TripleO::Services::HeatApi: ../docker/services/heat-api.yaml
+  OS::TripleO::Services::HeatApiCfn: ../docker/services/heat-api-cfn.yaml
+  OS::TripleO::Services::HeatEngine: ../docker/services/heat-engine.yaml
+  OS::TripleO::Services::Keystone: ../docker/services/keystone.yaml
+  OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
+  OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
+  OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
+  OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
+  OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
+  OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
+  OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
+  OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
+  OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml
+  OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
+
+  OS::TripleO::PostDeploySteps: ../docker/post.yaml
+  OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
+
+  OS::TripleO::Services: ../docker/services/services.yaml
+
+parameter_defaults:
+  # Defaults to 'tripleoupstream'.  Specify a local docker registry
+  # Example: 192.168.24.1:8787/tripleoupstream
+  DockerNamespace: tripleoupstream
+  DockerNamespaceIsRegistry: false
+
+  ComputeServices:
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
+    - OS::TripleO::Services::NovaCompute
+    - OS::TripleO::Services::NovaLibvirt
+    - OS::TripleO::Services::ComputeNeutronOvsAgent
+    - OS::TripleO::Services::Docker
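
A rough sketch of how this environment is meant to be consumed; the internal-TLS and endpoint environments named here are assumptions about the wider TLS-everywhere setup (FreeIPA/novajoin enrollment) rather than part of this change, and the exact ordering may vary per deployment.

    openstack overcloud deploy --templates \
      -e environments/docker.yaml \
      -e environments/enable-internal-tls.yaml \
      -e environments/tls-everywhere-endpoints-dns.yaml \
      -e environments/docker-services-tls-everywhere.yaml
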
index cb13c5c..94b4bcd 100644 (file)
@@ -1,5 +1,10 @@
 resource_registry:
-  OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+  # This can be used when you don't want to run puppet on the host,
+  # e.g. Atomic, but it has been replaced by OS::TripleO::Services::Docker
+  # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+  OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+  # The compute node still needs extra initialization steps
+  OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
 
   #NOTE (dprince) add roles to be docker enabled as we support them
   OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
@@ -14,30 +19,36 @@ resource_registry:
   OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
   OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
   OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
-  # FIXME: these need to go into a environments/services-docker dir?
-  OS::TripleO::Services::NovaIronic: ../docker/services/nova-ironic.yaml
-  OS::TripleO::Services::IronicApi: ../docker/services/ironic-api.yaml
-  OS::TripleO::Services::IronicConductor: ../docker/services/ironic-conductor.yaml
-  OS::TripleO::Services::IronicPxe: ../docker/services/ironic-pxe.yaml
   OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
   OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
   OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
+  OS::TripleO::Services::NeutronMetadataAgent: ../docker/services/neutron-metadata.yaml
   OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
   OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
   OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
   OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
-  OS::TripleO::Services::MistralApi: ../docker/services/mistral-api.yaml
-  OS::TripleO::Services::MistralEngine: ../docker/services/mistral-engine.yaml
-  OS::TripleO::Services::MistralExecutor: ../docker/services/mistral-executor.yaml
-  OS::TripleO::Services::Zaqar: ../docker/services/zaqar.yaml
   OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
   OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
+  OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
   OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml
   OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
   OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
   OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml
+  OS::TripleO::Services::GnocchiApi: ../docker/services/gnocchi-api.yaml
+  OS::TripleO::Services::GnocchiMetricd: ../docker/services/gnocchi-metricd.yaml
+  OS::TripleO::Services::GnocchiStatsd: ../docker/services/gnocchi-statsd.yaml
+  OS::TripleO::Services::AodhApi: ../docker/services/aodh-api.yaml
+  OS::TripleO::Services::AodhEvaluator: ../docker/services/aodh-evaluator.yaml
+  OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml
+  OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
+  OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
+  OS::TripleO::Services::CeilometerAgentCentral: ../docker/services/ceilometer-agent-central.yaml
+  OS::TripleO::Services::CeilometerAgentCompute: ../docker/services/ceilometer-agent-compute.yaml
+  OS::TripleO::Services::CeilometerAgentNotification: ../docker/services/ceilometer-agent-notification.yaml
 
   OS::TripleO::PostDeploySteps: ../docker/post.yaml
+  OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
+
   OS::TripleO::Services: ../docker/services/services.yaml
 
 parameter_defaults:
@@ -50,3 +61,5 @@ parameter_defaults:
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
     - OS::TripleO::Services::ComputeNeutronOvsAgent
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::CeilometerAgentCompute
index ff4ecfb..2fdecb4 100644 (file)
@@ -2,15 +2,17 @@
 # a TLS for the internal network via certmonger
 parameter_defaults:
   EnableInternalTLS: true
+  RabbitClientUseSSL: true
 
   # Required for novajoin to enroll the overcloud nodes
   ServerMetadata:
     ipa_enroll: True
 
 resource_registry:
+  OS::TripleO::Services::CertmongerUser: ../puppet/services/certmonger-user.yaml
+
   OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
-  OS::TripleO::Services::ApacheTLS: ../puppet/services/apache-internal-tls-certmonger.yaml
-  OS::TripleO::Services::MySQLTLS: ../puppet/services/database/mysql-internal-tls-certmonger.yaml
+
   # We use apache as a TLS proxy
   OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml
 
index fbd1fb9..c8375fc 100644 (file)
@@ -1,4 +1,9 @@
 resource_registry:
+  OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
+  OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
+  OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
+  OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip_v6.yaml
   OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool_v6.yaml
   OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool_v6.yaml
   OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool_v6.yaml
@@ -13,7 +18,7 @@ parameter_defaults:
   # to control your VIPs (currently one per network)
   # NOTE: we will eventually move to one VIP per service
   #
-  ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+  ControlFixedIPs: [{'ip_address':'192.168.24.251'}]
   PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
   InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
   StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
index 1759c04..33f145d 100644 (file)
@@ -1,4 +1,9 @@
 resource_registry:
+  OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+  OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+  OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+  OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
   OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
   OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
   OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
@@ -12,7 +17,7 @@ parameter_defaults:
   # to control your VIPs (currently one per network)
   # NOTE: we will eventually move to one VIP per service
   #
-  ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+  ControlFixedIPs: [{'ip_address':'192.168.24.251'}]
   PublicVirtualFixedIPs: [{'ip_address':'10.0.0.251'}]
   InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.251'}]
   StorageVirtualFixedIPs: [{'ip_address':'172.16.1.251'}]
diff --git a/environments/fixed-ip-vips-v6.yaml b/environments/fixed-ip-vips-v6.yaml
new file mode 100644 (file)
index 0000000..c288d7b
--- /dev/null
@@ -0,0 +1,21 @@
+# This template allows the IPs to be preselected for each VIP. Note that
+# this template should be included after other templates which affect the
+# network such as network-isolation.yaml.
+
+resource_registry:
+  OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
+  OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
+  OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
+  OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+parameter_defaults:
+  # Set the IP addresses of the VIPs here.
+  # NOTE: we will eventually move to one VIP per service
+  #
+  ControlFixedIPs: [{'ip_address':'192.168.24.240'}]
+  PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
+  InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
+  StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
+  StorageMgmtVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:4000:0000:0000:0000:0005'}]
+  RedisVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0006'}]
diff --git a/environments/fixed-ip-vips.yaml b/environments/fixed-ip-vips.yaml
new file mode 100644 (file)
index 0000000..3860f41
--- /dev/null
@@ -0,0 +1,21 @@
+# This template allows the IPs to be preselected for each VIP. Note that
+# this template should be included after other templates which affect the
+# network such as network-isolation.yaml.
+
+resource_registry:
+  OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+  OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+  OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+  OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+parameter_defaults:
+  # Set the IP addresses of the VIPs here.
+  # NOTE: we will eventually move to one VIP per service
+  #
+  ControlFixedIPs: [{'ip_address':'192.168.24.240'}]
+  PublicVirtualFixedIPs: [{'ip_address':'10.0.0.240'}]
+  InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.240'}]
+  StorageVirtualFixedIPs: [{'ip_address':'172.16.1.240'}]
+  StorageMgmtVirtualFixedIPs: [{'ip_address':'172.16.3.240'}]
+  RedisVirtualFixedIPs: [{'ip_address':'172.16.2.241'}]
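
Because these registry entries override the VIP port resources, ordering matters; as the header comment says, the file is meant to come after the network environments. A sketch, where network-environment.yaml stands in for your own network settings file:

    openstack overcloud deploy --templates \
      -e environments/network-isolation.yaml \
      -e network-environment.yaml \
      -e environments/fixed-ip-vips.yaml
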
index f59b041..6fd7101 100644 (file)
@@ -6,12 +6,14 @@ resource_registry:
 parameter_defaults:
   ComputeServices:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephClient
     - OS::TripleO::Services::CephExternal
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Securetty
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
     - OS::TripleO::Services::Kernel
@@ -30,4 +32,6 @@ parameter_defaults:
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::CephOSD
     - OS::TripleO::Services::Vpp
+    - OS::TripleO::Services::NeutronVppAgent
     - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Docker
index c583ca7..ae8bd7b 100644 (file)
@@ -18,7 +18,7 @@ resource_registry:
 ## (note the use of port 24284 for ssl connections)
 #
 # LoggingServers:
-#   - host: 192.0.2.11
+#   - host: 192.168.24.11
 #     port: 24284
 # LoggingUsesSSL: true
 # LoggingSharedKey: secret
diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml
deleted file mode 100644 (file)
index 4283b21..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-resource_registry:
-  OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
diff --git a/environments/major-upgrade-aodh-migration.yaml b/environments/major-upgrade-aodh-migration.yaml
deleted file mode 100644 (file)
index 9d6ce73..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-resource_registry:
-  # aodh data migration
-  OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
-
-  # no-op the rest
-  OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml b/environments/major-upgrade-ceilometer-wsgi-mitaka-newton.yaml
deleted file mode 100644 (file)
index 6798c25..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-resource_registry:
-
-  # This initiates the upgrades for ceilometer api to run under apache wsgi
-  OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
-
-  # no-op the rest
-  OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-composable-steps-docker.yaml b/environments/major-upgrade-composable-steps-docker.yaml
new file mode 100644 (file)
index 0000000..24eedf8
--- /dev/null
@@ -0,0 +1,12 @@
+resource_registry:
+  # FIXME(shardy) do we need to break major_upgrade_steps.yaml apart to
+  # enable docker-specific logic, or is just overriding PostUpgradeSteps
+  # enough (as we want to share the ansible tasks, steps, etc.)?
+  OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
+parameter_defaults:
+  EnableConfigPurge: false
+  StackUpdateType: UPGRADE
+  UpgradeLevelNovaCompute: auto
+  UpgradeInitCommonCommand: |
+    #!/bin/bash
+    # Ocata to Pike, put any needed host-level workarounds here
index 9ecc225..5a69517 100644 (file)
@@ -1,13 +1,14 @@
 resource_registry:
   OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
 parameter_defaults:
+  EnableConfigPurge: true
+  StackUpdateType: UPGRADE
   UpgradeLevelNovaCompute: auto
   UpgradeInitCommonCommand: |
     #!/bin/bash
     # Newton to Ocata, we need to remove old hiera hook data and
     # install ansible heat agents and ansible-pacemaker
     set -eu
-    yum install -y openstack-heat-agents
     yum install -y python-heat-agent-*
     yum install -y ansible-pacemaker
     rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml
diff --git a/environments/major-upgrade-converge-docker.yaml b/environments/major-upgrade-converge-docker.yaml
new file mode 100644 (file)
index 0000000..163d1de
--- /dev/null
@@ -0,0 +1,10 @@
+# Use this to reset any mappings only used for upgrades after the
+# update of all nodes is completed
+resource_registry:
+  OS::TripleO::PostDeploySteps: ../docker/post.yaml
+parameter_defaults:
+  EnableConfigPurge: false
+  StackUpdateType: ''
+  UpgradeLevelNovaCompute: ''
+  UpgradeInitCommonCommand: ''
+  UpgradeInitCommand: ''
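
Taken together with major-upgrade-composable-steps-docker.yaml above, the intended flow is a two-pass stack update; a sketch with other environments omitted (the exact list depends on the deployment):

    # Pass 1: run the composable upgrade steps
    openstack overcloud deploy --templates \
      -e environments/docker.yaml \
      -e environments/major-upgrade-composable-steps-docker.yaml

    # Pass 2: converge and reset the upgrade-only mappings
    openstack overcloud deploy --templates \
      -e environments/docker.yaml \
      -e environments/major-upgrade-converge-docker.yaml
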
index f09fb20..d222fb8 100644 (file)
@@ -3,5 +3,8 @@
 resource_registry:
   OS::TripleO::PostDeploySteps: ../puppet/post.yaml
 parameter_defaults:
+  EnableConfigPurge: false
+  StackUpdateType: ''
   UpgradeLevelNovaCompute: ''
   UpgradeInitCommonCommand: ''
+  UpgradeInitCommand: ''
diff --git a/environments/major-upgrade-pacemaker-converge.yaml b/environments/major-upgrade-pacemaker-converge.yaml
deleted file mode 100644 (file)
index e9a5f9b..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-parameter_defaults:
-  UpgradeLevelNovaCompute: ''
-
-resource_registry:
-  OS::TripleO::Services::SaharaApi: ../puppet/services/sahara-api.yaml
-  OS::TripleO::Services::SaharaEngine: ../puppet/services/sahara-engine.yaml
diff --git a/environments/major-upgrade-pacemaker-init.yaml b/environments/major-upgrade-pacemaker-init.yaml
deleted file mode 100644 (file)
index f4f361d..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-parameter_defaults:
-  UpgradeLevelNovaCompute: mitaka
-
-resource_registry:
-  OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
-  OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-pacemaker.yaml b/environments/major-upgrade-pacemaker.yaml
deleted file mode 100644 (file)
index 9fb51a4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-parameter_defaults:
-  UpgradeLevelNovaCompute: mitaka
-
-resource_registry:
-  OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml
-  OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-remove-sahara.yaml b/environments/major-upgrade-remove-sahara.yaml
deleted file mode 100644 (file)
index e0aaf13..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-parameter_defaults:
-  KeepSaharaServicesOnUpgrade: false
-resource_registry:
-  OS::TripleO::Services::SaharaApi: OS::Heat::None
-  OS::TripleO::Services::SaharaEngine: OS::Heat::None
-
index 5632d8d..0cc8fb7 100644 (file)
@@ -14,4 +14,4 @@ parameter_defaults:
   ManilaCephFSNativeCephFSConfPath: '/etc/ceph/ceph.conf'
   ManilaCephFSNativeCephFSAuthId: 'manila'
   ManilaCephFSNativeCephFSClusterName: 'ceph'
-  ManilaCephFSNativeCephFSEnableSnapshots: true
+  ManilaCephFSNativeCephFSEnableSnapshots: false
index 210b6b0..3de5dba 100644 (file)
@@ -18,8 +18,8 @@ parameter_defaults:
   # CIDR subnet mask length for provisioning network
   ControlPlaneSubnetCidr: '24'
   # Gateway router for the provisioning network (or Undercloud IP)
-  ControlPlaneDefaultRoute: 192.0.2.254
-  EC2MetadataIp: 192.0.2.1  # Generally the IP of the Undercloud
+  ControlPlaneDefaultRoute: 192.168.24.254
+  EC2MetadataIp: 192.168.24.1  # Generally the IP of the Undercloud
   # Customize the IP subnets to match the local environment
   InternalApiNetCidr: 172.17.0.0/24
   StorageNetCidr: 172.18.0.0/24
diff --git a/environments/neutron-bgpvpn.yaml b/environments/neutron-bgpvpn.yaml
new file mode 100644 (file)
index 0000000..2a63248
--- /dev/null
@@ -0,0 +1,16 @@
+# A Heat environment file that can be used to deploy Neutron BGPVPN service
+#
+# Currently there are four service providers for Neutron BGPVPN.
+# The default option is a dummy driver that allows the API to be enabled.
+# To enable another backend, replace the value of BgpvpnServiceProvider with one of:
+#
+# - Bagpipe: BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default
+# - OpenContrail: BGPVPN:OpenContrail:networking_bgpvpn.neutron.services.service_drivers.opencontrail.opencontrail.OpenContrailBGPVPNDriver:default
+# - OpenDaylight: BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default
+# - Nuage: BGPVPN:Nuage:nuage_neutron.bgpvpn.services.service_drivers.driver.NuageBGPVPNDriver:default
+resource_registry:
+  OS::TripleO::Services::NeutronBgpVpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
+
+parameter_defaults:
+  NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+  BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
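
To switch from the dummy driver to a real backend, the provider string is overridden in a later environment; a sketch using the BaGPipe driver string listed above (the local file name is hypothetical):

    cat > bgpvpn-bagpipe.yaml <<'EOF'
    parameter_defaults:
      BgpvpnServiceProvider: 'BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default'
    EOF

    openstack overcloud deploy --templates \
      -e environments/neutron-bgpvpn.yaml \
      -e bgpvpn-bagpipe.yaml
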
diff --git a/environments/neutron-l2gw.yaml b/environments/neutron-l2gw.yaml
new file mode 100644 (file)
index 0000000..bba0968
--- /dev/null
@@ -0,0 +1,27 @@
+# A Heat environment file that can be used to deploy Neutron L2 Gateway service
+#
+# Currently there are only two service providers for Neutron L2 Gateway.
+# The default option is a dummy driver that allows the API to be enabled.
+# To enable another backend, replace the value of L2gwServiceProvider with one of:
+#
+# - L2 gateway agent: L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.rpc_l2gw.L2gwRpcDriver:default
+# - OpenDaylight: L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default
+resource_registry:
+  OS::TripleO::Services::NeutronL2gwApi: ../puppet/services/neutron-l2gw-api.yaml
+  OS::TripleO::Services::NeutronL2gwAgent: ../puppet/services/neutron-l2gw-agent.yaml
+
+parameter_defaults:
+  NeutronServicePlugins: "networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin"
+  L2gwServiceProvider: ['L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default']
+
+  # Optional
+  # L2gwServiceDefaultInterfaceName: "FortyGigE1/0/1"
+  # L2gwServiceDefaultDeviceName: "Switch1"
+  # L2gwServiceQuotaL2Gateway: 10
+  # L2gwServicePeriodicMonitoringInterval: 5
+  # L2gwAgentOvsdbHosts: ["ovsdb1:127.0.0.1:6632"]
+  # L2gwAgentEnableManager: False
+  # L2gwAgentManagerTableListeningPort: "6633"
+  # L2gwAgentPeriodicInterval: 20
+  # L2gwAgentMaxConnectionRetries: 10
+  # L2gwAgentSocketTimeout: 30
index 750d3c4..8a4a144 100644 (file)
@@ -3,12 +3,17 @@
 resource_registry:
   OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
   OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+  OS::TripleO::NeutronBigswitchAgent: ../puppet/services/neutron-bigswitch-agent.yaml
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
 
 parameter_defaults:
   # Required to fill in:
   NeutronBigswitchRestproxyServers:
   NeutronBigswitchRestproxyServerAuth:
-  NeutronMechanismDrivers: bsn_ml2
+  NeutronMechanismDrivers: openvswitch,bsn_ml2
+  NeutronServicePlugins: bsn_l3,bsn_service_plugin
+  KeystoneNotificationDriver: messaging
 
   # Optional:
   # NeutronBigswitchRestproxyAutoSyncOnFailure:
@@ -19,3 +24,9 @@ parameter_defaults:
   # NeutronBigswitchAgentEnabled:
   # NeutronBigswitchLLDPEnabled:
 
+  ControllerExtraConfig:
+    neutron::agents::l3::enabled: false
+    neutron::agents::dhcp::enable_force_metadata: true
+    neutron::agents::dhcp::enable_isolated_metadata: true
+    neutron::agents::dhcp::enable_metadata_network: false
+    neutron::server::l3_ha: false
index 651e956..8d46e1c 100644 (file)
@@ -5,7 +5,7 @@ resource_registry:
   OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
 
 parameter_defaults:
-  N1000vVSMIP: '192.0.2.50'
-  N1000vMgmtGatewayIP: '192.0.2.1'
+  N1000vVSMIP: '192.168.24.50'
+  N1000vMgmtGatewayIP: '192.168.24.1'
   N1000vVSMDomainID: '100'
   N1000vVSMHostMgmtIntf: 'br-ex'
index ad11175..f5a0a39 100644 (file)
@@ -2,6 +2,8 @@
 # a Cisco Neutron plugin.
 resource_registry:
   OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
 
 parameter_defaults:
   NetworkUCSMIp: '127.0.0.1'
diff --git a/environments/neutron-ml2-vpp.yaml b/environments/neutron-ml2-vpp.yaml
new file mode 100644 (file)
index 0000000..1dec395
--- /dev/null
@@ -0,0 +1,22 @@
+# Environment file used to enable networking-vpp ML2 mechanism driver
+
+resource_registry:
+  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronVppAgent: ../puppet/services/neutron-vpp-agent.yaml
+  OS::TripleO::Services::Etcd: ../puppet/services/etcd.yaml
+  OS::TripleO::Services::Vpp: ../puppet/services/vpp.yaml
+
+parameter_defaults:
+  # Comma-delimited list of <physical_network>:<VPP Interface>.
+  # Example: "datacentre:GigabitEthernet2/2/0"
+  #NeutronVPPAgentPhysnets: ""
+
+  NeutronMechanismDrivers: vpp
+  NeutronNetworkType: vlan
+  NeutronServicePlugins: router
+  NeutronTypeDrivers: vlan,flat
+  ExtraConfig:
+    # Use Linux Bridge driver for DHCP and L3 agent.
+    neutron::agents::dhcp::interface_driver: "neutron.agent.linux.interface.BridgeInterfaceDriver"
+    neutron::agents::l3::interface_driver: "neutron.agent.linux.interface.BridgeInterfaceDriver"
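
NeutronVPPAgentPhysnets has no usable default, so deployments are expected to supply it; a sketch reusing the interface name from the comment above (your physnet and NIC names will differ):

    cat > vpp-physnets.yaml <<'EOF'
    parameter_defaults:
      NeutronVPPAgentPhysnets: "datacentre:GigabitEthernet2/2/0"
    EOF

    openstack overcloud deploy --templates \
      -e environments/neutron-ml2-vpp.yaml \
      -e vpp-physnets.yaml
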
diff --git a/environments/neutron-nsx.yaml b/environments/neutron-nsx.yaml
new file mode 100644 (file)
index 0000000..eb1dcec
--- /dev/null
@@ -0,0 +1,15 @@
+# A Heat environment that can be used to deploy NSX Services
+# extensions, configured via puppet
+resource_registry:
+  # NSX doesn't require dhcp, l3, metadata, and ovs agents
+  OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+  OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+  # Override the Neutron core plugin to use NSX
+  OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginNSX
+  OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+
+parameter_defaults:
+  NeutronCorePlugin: vmware_nsx.plugin.NsxV3Plugin
index 7489924..601554a 100644 (file)
@@ -10,7 +10,6 @@ resource_registry:
   OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-nuage.yaml
 
 parameter_defaults:
-  NeutronNuageOSControllerIp: '0.0.0.0'
   NeutronNuageNetPartitionName: 'default_name'
   NeutronNuageVSDIp: '0.0.0.0:0'
   NeutronNuageVSDUsername: 'username'
index ed7292b..4644725 100644 (file)
@@ -3,6 +3,7 @@ resource_registry:
   OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+  OS::TripleO::Services::NeutronCorePlugin: ../puppet/services/neutron-plugin-ml2-odl.yaml
   OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
   OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
   OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
diff --git a/environments/nova-api-policy.yaml b/environments/nova-api-policy.yaml
new file mode 100644 (file)
index 0000000..681bd01
--- /dev/null
@@ -0,0 +1,10 @@
+# A Heat environment file which can be used to configure access policies for
+# Nova API resources. It is provided as an example and covers only Nova,
+# not all services.
+# While editing policy.json files is supported, modifying policies can have
+# unexpected side effects and is not encouraged.
+
+parameter_defaults:
+  # The target is "compute:get_all", the "list all instances" API of the Compute service.
+  # The rule is an empty string meaning "always". This policy allows anybody to list instances.
+  NovaApiPolicies: { nova-context_is_admin: { key: 'compute:get_all', value: '' } }
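
For clarity, the NovaApiPolicies entry above is expected to render as a single policy rule; a hypothetical post-deployment check (the policy.json path is an assumption about where puppet-nova writes it) could be:

    # Expected rendered rule from the entry above:
    #   "compute:get_all": ""
    sudo grep 'compute:get_all' /etc/nova/policy.json
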
index da607a7..21a51f6 100644 (file)
@@ -1,8 +1,8 @@
 # An environment which enables configuration of an
 # Overcloud controller with Pacemaker.
 resource_registry:
-  OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
-  OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPreConfig: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostConfig: ../extraconfig/tasks/post_puppet_pacemaker.yaml
   OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
 
   # custom pacemaker services
diff --git a/environments/securetty.yaml b/environments/securetty.yaml
new file mode 100644 (file)
index 0000000..cdadf37
--- /dev/null
@@ -0,0 +1,12 @@
+resource_registry:
+  OS::TripleO::Services::Securetty: ../puppet/services/securetty.yaml
+
+parameter_defaults:
+  TtyValues:
+    - console
+    - tty1
+    - tty2
+    - tty3
+    - tty4
+    - tty5
+    - tty6
diff --git a/environments/services-docker/etcd.yaml b/environments/services-docker/etcd.yaml
new file mode 100644 (file)
index 0000000..c4201cf
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::Etcd: ../../docker/services/etcd.yaml
diff --git a/environments/services-docker/ironic.yaml b/environments/services-docker/ironic.yaml
new file mode 100644 (file)
index 0000000..e927ecb
--- /dev/null
@@ -0,0 +1,5 @@
+resource_registry:
+  OS::TripleO::Services::IronicApi: ../../docker/services/ironic-api.yaml
+  OS::TripleO::Services::IronicConductor: ../../docker/services/ironic-conductor.yaml
+  OS::TripleO::Services::IronicPxe: ../../docker/services/ironic-pxe.yaml
+  OS::TripleO::Services::NovaIronic: ../../docker/services/nova-ironic.yaml
diff --git a/environments/services-docker/mistral.yaml b/environments/services-docker/mistral.yaml
new file mode 100644 (file)
index 0000000..a215d2a
--- /dev/null
@@ -0,0 +1,4 @@
+resource_registry:
+  OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+  OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+  OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
diff --git a/environments/services-docker/undercloud-aodh.yaml b/environments/services-docker/undercloud-aodh.yaml
new file mode 100644 (file)
index 0000000..95d4a87
--- /dev/null
@@ -0,0 +1,5 @@
+resource_registry:
+  OS::TripleO::Services::UndercloudAodhApi: ../../docker/services/aodh-api.yaml
+  OS::TripleO::Services::UndercloudAodhEvaluator: ../../docker/services/aodh-evaluator.yaml
+  OS::TripleO::Services::UndercloudAodhNotifier: ../../docker/services/aodh-notifier.yaml
+  OS::TripleO::Services::UndercloudAodhListener: ../../docker/services/aodh-listener.yaml
diff --git a/environments/services-docker/undercloud-ceilometer.yaml b/environments/services-docker/undercloud-ceilometer.yaml
new file mode 100644 (file)
index 0000000..07a61c2
--- /dev/null
@@ -0,0 +1,3 @@
+resource_registry:
+  OS::TripleO::Services::UndercloudCeilometerAgentCentral: ../../docker/services/ceilometer-agent-central.yaml
+  OS::TripleO::Services::UndercloudCeilometerAgentNotification: ../../docker/services/ceilometer-agent-notification.yaml
diff --git a/environments/services-docker/undercloud-gnocchi.yaml b/environments/services-docker/undercloud-gnocchi.yaml
new file mode 100644 (file)
index 0000000..4b898cb
--- /dev/null
@@ -0,0 +1,4 @@
+resource_registry:
+  OS::TripleO::Services::UndercloudGnocchiApi: ../../docker/services/gnocchi-api.yaml
+  OS::TripleO::Services::UndercloudGnocchiMetricd: ../../docker/services/gnocchi-metricd.yaml
+  OS::TripleO::Services::UndercloudGnocchiStatsd: ../../docker/services/gnocchi-statsd.yaml
diff --git a/environments/services-docker/undercloud-panko.yaml b/environments/services-docker/undercloud-panko.yaml
new file mode 100644 (file)
index 0000000..8384f31
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::UndercloudPankoApi: ../../docker/services/panko-api.yaml
diff --git a/environments/services-docker/zaqar.yaml b/environments/services-docker/zaqar.yaml
new file mode 100644 (file)
index 0000000..ca0b3b1
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
diff --git a/environments/services/ceilometer-api.yaml b/environments/services/ceilometer-api.yaml
new file mode 100644 (file)
index 0000000..1e37e73
--- /dev/null
@@ -0,0 +1,6 @@
+resource_registry:
+  OS::TripleO::Services::CeilometerApi: ../../puppet/services/ceilometer-api.yaml
+
+parameter_defaults:
+  CeilometerApiEndpoint: true
+
diff --git a/environments/services/ceilometer-collector.yaml b/environments/services/ceilometer-collector.yaml
new file mode 100644 (file)
index 0000000..4cc765f
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::CeilometerCollector: ../../puppet/services/ceilometer-collector.yaml
diff --git a/environments/services/ceilometer-expirer.yaml b/environments/services/ceilometer-expirer.yaml
new file mode 100644 (file)
index 0000000..d20ee25
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::CeilometerExpirer: ../../puppet/services/ceilometer-expirer.yaml
diff --git a/environments/services/disable-ceilometer-api.yaml b/environments/services/disable-ceilometer-api.yaml
deleted file mode 100644 (file)
index 94cd8d5..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-resource_registry:
-  OS::TripleO::Services::CeilometerApi: OS::Heat::None
diff --git a/environments/services/keystone_domain_specific_ldap_backend.yaml b/environments/services/keystone_domain_specific_ldap_backend.yaml
new file mode 100644 (file)
index 0000000..3cc9c7b
--- /dev/null
@@ -0,0 +1,18 @@
+# This is an example of how to configure keystone domain-specific LDAP
+# backends. It configures a domain called tripleoldap with the attributes
+# specified.
+parameter_defaults:
+  KeystoneLDAPDomainEnable: true
+  KeystoneLDAPBackendConfigs:
+    tripleoldap:
+      url: ldap://192.168.24.251
+      user: cn=openstack,ou=Users,dc=tripleo,dc=example,dc=com
+      password: Secrete
+      suffix: dc=tripleo,dc=example,dc=com
+      user_tree_dn: ou=Users,dc=tripleo,dc=example,dc=com
+      user_filter: "(memberOf=cn=OSuser,ou=Groups,dc=tripleo,dc=example,dc=com)"
+      user_objectclass: person
+      user_id_attribute: cn
+      user_allow_create: false
+      user_allow_update: false
+      user_allow_delete: false
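
A quick post-deployment sanity check, using standard openstackclient commands that are not part of this change, might look like:

    # The LDAP-backed domain should appear alongside the default domain,
    # and its users should be listable through Keystone.
    openstack domain list
    openstack user list --domain tripleoldap
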
diff --git a/environments/services/panko.yaml b/environments/services/panko.yaml
deleted file mode 100644 (file)
index 28bf99f..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-resource_registry:
-  OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
diff --git a/environments/services/qdr.yaml b/environments/services/qdr.yaml
new file mode 100644 (file)
index 0000000..e4ad87b
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::Qdr: ../../puppet/services/qdr.yaml
index 041c099..894bf1c 100644 (file)
@@ -1,6 +1,3 @@
-resource_registry:
-  OS::TripleO::Services::Sshd: ../puppet/services/sshd.yaml
-
 parameter_defaults:
   BannerText: |
     ******************************************************************
@@ -11,3 +8,6 @@ parameter_defaults:
     * evidence of criminal activity, system personnel may provide    *
     * the evidence from such monitoring to law enforcement officials.*
     ******************************************************************
+  MessageOfTheDay: |
+    ALERT! You are entering into a secured area!
+    This service is restricted to authorized users only.
diff --git a/environments/swift-external.yaml b/environments/swift-external.yaml
new file mode 100644 (file)
index 0000000..0bf0d39
--- /dev/null
@@ -0,0 +1,12 @@
+resource_registry:
+  OS::TripleO::Services::ExternalSwiftProxy: ../puppet/services/external-swift-proxy.yaml
+  OS::TripleO::Services::SwiftProxy: OS::Heat::None
+  OS::TripleO::Services::SwiftStorage: OS::Heat::None
+  OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+
+parameter_defaults:
+  ExternalPublicUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+  ExternalInternalUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+  ExternalAdminUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+  ExternalSwiftUserTenant: 'service'
+
index 2540fbe..7a2716d 100644 (file)
@@ -11,6 +11,7 @@ parameter_defaults:
   NeutronBridgeMappings: ctlplane:br-ctlplane
   NeutronAgentExtensions: []
   NeutronFlatNetworks: '*'
+  NeutronDnsDomain: ''
   NovaSchedulerAvailableFilters: 'tripleo_common.filters.list.tripleo_filters'
   NovaSchedulerDefaultFilters: ['RetryFilter', 'TripleOCapabilitiesFilter', 'ComputeCapabilitiesFilter', 'AvailabilityZoneFilter', 'RamFilter', 'DiskFilter', 'ComputeFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
   NeutronDhcpAgentsPerNetwork: 2
diff --git a/environments/updates/update-from-192_0_2-subnet.yaml b/environments/updates/update-from-192_0_2-subnet.yaml
new file mode 100644 (file)
index 0000000..1813e7b
--- /dev/null
@@ -0,0 +1,3 @@
+parameter_defaults:
+  ControlPlaneDefaultRoute: 192.0.2.1
+  EC2MetadataIp: 192.0.2.1
index fcf022a..79794f9 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Example extra config for cluster config
index 77d4b38..b954e72 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Example extra config for cluster config
index b6fef79..9e3713b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Template file to add a swap partition to a node.
 
index 044f817..e19fc21 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Template file to add a swap file to a node.
 
index c66e646..59b8e7f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Generates the relevant service principals for a server'
 
 parameters:
@@ -46,7 +46,7 @@ resources:
           # Filter null values and values that don't contain
           # 'metadata_settings', get the values from that key and get the
           # unique ones.
-          expression: list($.data.where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
+          expression: list(coalesce($.data, []).where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
           data: {get_param: RoleData}
 
   # Generates entries for nova metadata with the following format:
@@ -57,7 +57,7 @@ resources:
     properties:
       value:
         yaql:
-          expression: let(fqdns => $.data.fqdns) -> dict($.data.metadata.where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
+          expression: let(fqdns => $.data.fqdns) -> dict(coalesce($.data.metadata, []).where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
           data:
             metadata: {get_attr: [IncomingMetadataSettings, value]}
             fqdns:
@@ -72,7 +72,7 @@ resources:
     properties:
       value:
         yaql:
-          expression: dict($.data.where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
+          expression: dict(coalesce($.data, []).where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
           data: {get_attr: [IncomingMetadataSettings, value]}
 
 outputs:
index 4da54ea..4a0e06e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Extra Post Deployment Config'
 parameters:
   servers:
index 8ac7eb7..ee5a830 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Example extra config for post-deployment
index 738e263..346a1d7 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Example extra config for post-deployment, this re-runs every update
index 38a9181..ff1556f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Post-deployment for the TripleO undercloud
@@ -81,13 +81,13 @@ resources:
         auth_url:
           if:
           - ssl_disabled
-          - list_join:
-            - ''
-            - - 'http://'
-              - {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
-              - ':5000/v2.0'
-          - list_join:
-            - ''
-            - - 'https://'
-              - {get_param: [DeployedServerPortMap, 'public_virtual_ip', fixed_ips, 0, ip_address]}
-              - ':13000/v2.0'
+          - make_url:
+              scheme: http
+              host: {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
+              port: 5000
+              path: /v2.0
+          - make_url:
+              scheme: https
+              host: {get_param: [DeployedServerPortMap, 'public_virtual_ip', fixed_ips, 0, ip_address]}
+              port: 13000
+              path: /v2.0
index e8316c5..96632bc 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   RHEL Registration and unregistration software deployments.
@@ -53,6 +53,12 @@ parameters:
     type: string
   rhel_reg_http_proxy_password:
     type: string
+  UpdateOnRHELRegistration:
+    type: boolean
+    default: false
+    description: |
+      When enabled, the system will perform a yum update after performing the
+      RHEL Registration process.
 
 resources:
 
@@ -134,6 +140,37 @@ resources:
       input_values:
         REG_METHOD: {get_param: rhel_reg_method}
 
+  YumUpdateConfigurationAfterRHELRegistration:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: |
+        #!/bin/bash
+        set -x
+        num_updates=$(yum list -q updates | wc -l)
+        if [ "$num_updates" -eq "0" ]; then
+           echo "No packages require updating"
+           exit 0
+        fi
+        full_command="yum -q -y update"
+        echo "Running: $full_command"
+        result=$($full_command)
+        return_code=$?
+        echo "$result"
+        echo "yum return code: $return_code"
+        exit $return_code
+
+  UpdateDeploymentAfterRHELRegistration:
+    type: OS::Heat::SoftwareDeployment
+    depends_on: RHELRegistrationDeployment
+    conditions:
+      update_requested: {get_param: UpdateOnRHELRegistration}
+    properties:
+      name: UpdateDeploymentAfterRHELRegistration
+      config: {get_resource: YumUpdateConfigurationAfterRHELRegistration}
+      server:  {get_param: server}
+      actions: ['CREATE'] # Only do this on CREATE
+
 outputs:
   deploy_stdout:
     description: Deployment reference, used to trigger puppet apply on changes
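
A sketch of opting in to the post-registration update, assuming UpdateOnRHELRegistration is fed through parameter_defaults like the other rhel_reg_* values and passed alongside the usual registration environments:

    cat > rhel-reg-update.yaml <<'EOF'
    parameter_defaults:
      UpdateOnRHELRegistration: true
    EOF

    openstack overcloud deploy --templates \
      -e environments/rhel-registration/environment-rhel-registration.yaml \
      -e environments/rhel-registration/rhel-registration-resource-registry.yaml \
      -e rhel-reg-update.yaml
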
index 6f83cc4..d14ed73 100644 (file)
@@ -11,7 +11,7 @@ if [ -e $OK ] ; then
     exit 0
 fi
 
-retryCount=0
+retry_max_count=10
 opts=
 config_opts=
 attach_opts=
@@ -157,27 +157,41 @@ else
 fi
 
 function retry() {
-  if [[ $retryCount < 3 ]]; then
-    $@
-    if ! [[ $? == 0 ]]; then
-      retryCount=$(echo $retryCount + 1 | bc)
-      echo "WARN: Failed to connect when running '$@', retrying..."
-      retry $@
-    else
-      retryCount=0
+    # Inhibit -e since we want to retry without exiting..
+    set +e
+    # Retry delay (seconds)
+    retry_delay=2.0
+    retry_count=0
+    mycli="$@"
+    while [ $retry_count -lt ${retry_max_count} ]
+    do
+        echo "INFO: Sleeping ${retry_delay} ..."
+        sleep ${retry_delay}
+        echo "INFO: Executing '${mycli}' ..."
+        ${mycli}
+        if [ $? -eq 0 ]; then
+            echo "INFO: Ran '${mycli}' successfully, not retrying..."
+            break
+        else
+            echo "WARN: Failed to connect when running '${mycli}', retrying (attempt #$retry_count )..."
+            retry_count=$(echo $retry_count + 1 | bc)
+        fi
+    done
+
+    if [ $retry_count -ge ${retry_max_count} ]; then
+        echo "ERROR: Failed to connect after ${retry_max_count} attempts when running '${mycli}'"
+        exit 1
     fi
-  else
-    echo "ERROR: Failed to connect after 3 attempts when running '$@'"
-    exit 1
-  fi
+    # Re-enable -e when exiting retry()
+    set -e
 }
 
 function detect_satellite_version {
     ping_api=$REG_SAT_URL/katello/api/ping
-    if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+    if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
         echo Satellite 6 detected at $REG_SAT_URL
         satellite_version=6
-    elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+    elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
         echo Satellite 5 detected at $REG_SAT_URL
         satellite_version=5
     else
@@ -220,16 +234,15 @@ case "${REG_METHOD:-}" in
         detect_satellite_version
         if [ "$satellite_version" = "6" ]; then
             repos="$repos --enable ${satellite_repo}"
-            curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+            curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
             rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
             retry subscription-manager register $opts
             retry subscription-manager $repos
             retry yum install -y katello-agent || true # needed for errata reporting to satellite6
             katello-package-upload
-            retry subscription-manager repos --disable ${satellite_repo}
         else
             pushd /usr/share/rhn/
-            curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+            curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
             popd
             retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
         fi
index bb0b951..48ba526 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Do some configuration, then reboot - sometimes needed for early-boot
index 4ad53cb..658fea7 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Do some configuration, then reboot - sometimes needed for early-boot
diff --git a/extraconfig/tasks/aodh_data_migration.sh b/extraconfig/tasks/aodh_data_migration.sh
deleted file mode 100644 (file)
index d4c2967..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# This delivers the aodh data migration script to be invoked as part of the tripleo
-# major upgrade workflow to migrate all the alarm data from mongodb to mysql.
-# This needs to run post controller node upgrades so new aodh mysql db configured and
-# running.
-#
-set -eu
-
-#Get existing mongodb connection
-MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)"
-
-# Get the aodh database string from hiera data
-MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)"
-
-#Run migration
-/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION
-
-
diff --git a/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml b/extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
deleted file mode 100644 (file)
index cf5d7a8..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-heat_template_version: ocata
-
-description: >
-  Software-config for ceilometer configuration under httpd during upgrades
-
-parameters:
-  servers:
-    type: json
-  input_values:
-    type: json
-    description: input values for the software deployments
-resources:
-  CeilometerWsgiMitakaNewtonPreUpgradeConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: puppet
-      config:
-        get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp
-
-  CeilometerWsgiMitakaNewtonUpgradeConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        list_join:
-          - ''
-          - - "#!/bin/bash\n\nset -e\n\n"
-            - get_file: pacemaker_common_functions.sh
-            - get_file: major_upgrade_pacemaker_migrations.sh
-            - "disable_standalone_ceilometer_api\n\n"
-
-  CeilometerWsgiMitakaNewtonPostUpgradeConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: |
-        #!/bin/bash
-        set -e
-        /usr/bin/systemctl reload httpd
-
-  CeilometerWsgiMitakaNewtonPreUpgradeDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
-      servers: {get_param: [servers, Controller]}
-      config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig}
-
-  CeilometerWsgiMitakaNewtonUpgradeConfigDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
-    properties:
-      name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
-      servers: {get_param: [servers, Controller]}
-      config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig}
-
-  CeilometerWsgiMitakaNewtonPostUpgradeDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
-    properties:
-      name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment
-      servers: {get_param: [servers, Controller]}
-      config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig}
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
deleted file mode 100755 (executable)
index 8bdff5e..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster()
-{
-    if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
-        echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
-        exit 1
-    fi
-}
-
-check_pcsd()
-{
-    if pcs status 2>&1 | grep -E 'Offline'; then
-        echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
-        exit 1
-    fi
-}
-
-mysql_need_update()
-{
-    # Shall we upgrade mysql data directory during the stack upgrade?
-    if [ "$mariadb_do_major_upgrade" = "auto" ]; then
-        ret=$(is_mysql_upgrade_needed)
-        if [ $ret = "1" ]; then
-            DO_MYSQL_UPGRADE=1
-        else
-            DO_MYSQL_UPGRADE=0
-        fi
-        echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
-    elif [ "$mariadb_do_major_upgrade" = "no" ]; then
-        DO_MYSQL_UPGRADE=0
-    else
-        DO_MYSQL_UPGRADE=1
-    fi
-}
-
-check_disk_for_mysql_dump()
-{
-    # Where to backup current database if mysql need to be upgraded
-    MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
-    MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
-    # Spare disk ratio for extra safety
-    MYSQL_BACKUP_SIZE_RATIO=1.2
-
-    mysql_need_update
-
-    if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
-        if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-
-            if [ -d "$MYSQL_BACKUP_DIR" ]; then
-                echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
-                exit 1
-            fi
-            mkdir "$MYSQL_BACKUP_DIR"
-            if [ $? -ne 0 ]; then
-                echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
-                exit 1
-            fi
-
-            # the /root/.my.cnf is needed because we set the mysql root
-            # password from liberty onwards
-            backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
-            # While not ideal, this step allows us to calculate exactly how much space the dump
-            # will need. Our main goal here is avoiding any chance of corruption due to disk space
-            # exhaustion
-            backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
-            database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
-            free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
-            # we need at least space for a new mysql database + dump of the existing one,
-            # times a small factor for additional safety room
-            # note: bash doesn't do floating point math or floats in if statements,
-            # so use python to apply the ratio and cast it back to integer
-            required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
-            if [ $required_space -ge $free_space ]; then
-                echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
-                exit 1
-            fi
-        fi
-    fi
-}
-
-check_python_rpm()
-{
-    # If for some reason rpm-python is missing we want to error out early enough
-    if ! rpm -q rpm-python &> /dev/null; then
-        echo_error "ERROR: upgrade cannot start without rpm-python installed"
-        exit 1
-    fi
-}
-
-check_clean_cluster()
-{
-    if pcs status | grep -q Stopped:; then
-        echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running."
-        exit 1
-    fi
-}
-
-check_galera_root_password()
-{
-    # BZ: 1357112
-    if [ ! -e /root/.my.cnf ]; then
-        echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
-        exit 1
-    fi
-}
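
A note on the disk-space check being removed above: bash cannot do floating-point arithmetic, so check_disk_for_mysql_dump applies the 1.2 safety ratio by shelling out to python and casting the result back to an integer. A minimal standalone sketch of that pattern, with made-up byte counts standing in for the real mysqldump/du/df measurements:

    #!/bin/bash
    # Placeholder sizes; the removed script derives these from
    # "mysqldump ... | wc -c", "du -cb /var/lib/mysql" and "df -B1".
    database_size=5000000000
    backup_size=1200000000
    ratio=1.2
    # bash has no float math, so delegate the multiplication to python
    required_space=$(python -c "print(int(($database_size + $backup_size) * $ratio))")
    free_space=$(df -B1 --output=avail /var/tmp | tail -1)
    if [ $required_space -ge $free_space ]; then
        echo "Error: not enough free space ($required_space bytes required)" >&2
        exit 1
    fi
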
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
deleted file mode 100755 (executable)
index 080831a..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-check_cluster
-check_pcsd
-if [[ -n $(is_bootstrap_node) ]]; then
-    check_clean_cluster
-fi
-check_python_rpm
-check_galera_root_password
-check_disk_for_mysql_dump
-
-# We want to disable fencing during the cluster --stop as it might fence
-# nodes where a service fails to stop, which could be fatal during an upgrade
-# procedure. So we remember the stonith state. If it was enabled we reenable it
-# at the end of this script
-if [[ -n $(is_bootstrap_node) ]]; then
-    STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
-    # We create this empty file if stonith was set to true so we can reenable stonith in step2
-    rm -f /var/tmp/stonith-true
-    if [ $STONITH_STATE == "true" ]; then
-        touch /var/tmp/stonith-true
-    fi
-    pcs property set stonith-enabled=false
-fi
-
-# Migrate to HA NG and fix up rabbitmq queues
-# We fix up the rabbitmq ha queues after the migration because it will
-# restart the rabbitmq resource. Doing it after the migration means no other
-# services will be restart as there are no other constraints
-if [[ -n $(is_bootstrap_node) ]]; then
-    migrate_full_to_ng_ha
-    rabbitmq_newton_ocata_upgrade
-fi
-
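
The step-1 script above records whether fencing was enabled in a marker file so that a later step can restore it once the cluster is back. A minimal sketch of that save/restore pattern (the real step-2 script writes the property straight into the CIB file rather than the live cluster):

    # step 1: remember the current stonith state, then disable fencing
    STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
    rm -f /var/tmp/stonith-true
    if [ "$STONITH_STATE" = "true" ]; then
        touch /var/tmp/stonith-true
    fi
    pcs property set stonith-enabled=false

    # later step: re-enable fencing only if it was enabled before
    if [ -f /var/tmp/stonith-true ]; then
        pcs property set stonith-enabled=true
    fi
    rm -f /var/tmp/stonith-true
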
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
deleted file mode 100755 (executable)
index 8b90084..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_sync_timeout=1800
-
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitly, otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
-    manage_systemd_service stop "${service%%-clone}"
-    # So the reason for not reusing check_resource_systemd is that
-    # I have observed systemctl is-active returning unknown with at least
-    # one service that was stopped (See LP 1627254)
-    timeout=600
-    tstart=$(date +%s)
-    tend=$(( $tstart + $timeout ))
-    check_interval=3
-    while (( $(date +%s) < $tend )); do
-      if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
-        echo "$service still active, sleeping $check_interval seconds."
-        sleep $check_interval
-      else
-        # we do not care if it is inactive, unknown or failed as long as it is
-        # not running
-        break
-      fi
-
-    done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versioning, but this can be overridden manually
-# to support specific upgrade scenarios
-
-# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
-# later
-mysql_need_update
-
-if [[ -n $(is_bootstrap_node) ]]; then
-    if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-        backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
-        mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
-        cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
-    fi
-
-    pcs resource disable redis
-    check_resource redis stopped 600
-    pcs resource disable rabbitmq
-    check_resource rabbitmq stopped 600
-    pcs resource disable galera
-    check_resource galera stopped 600
-    pcs resource disable openstack-cinder-volume
-    check_resource openstack-cinder-volume stopped 600
-    # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
-    #   https://bugzilla.redhat.com/show_bug.cgi?id=1330688
-    for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
-      pcs resource disable $vip
-      check_resource $vip stopped 60
-    done
-    pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
-    sleep 5
-    tnow=$(date +%s)
-    if (( tnow-tstart > cluster_sync_timeout )) ; then
-        echo_error "ERROR: cluster shutdown timed out"
-        exit 1
-    fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-    if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
-        echo_error "ERROR: mysql backup dir already exists"
-        exit 1
-    fi
-    mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
-yum -y install python-zaqarclient  # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address only needs to be set; its value does not
-# matter because it's overridden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-    # Scripts run via heat have no HOME variable set and this confuses
-    # mysqladmin
-    export HOME=/root
-
-    mkdir /var/lib/mysql || /bin/true
-    chown mysql:mysql /var/lib/mysql
-    chmod 0755 /var/lib/mysql
-    restorecon -R /var/lib/mysql/
-    mysql_install_db --datadir=/var/lib/mysql --user=mysql
-    chown -R mysql:mysql /var/lib/mysql/
-
-    if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
-        mysqld_safe --wsrep-new-cluster &
-        # We have a populated /root/.my.cnf with root/password here so
-        # we need to temporarily rename it because the newly created
-        # db is empty and no root password is set
-        mv /root/.my.cnf /root/.my.cnf.temporary
-        timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
-        mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
-        mv /root/.my.cnf.temporary /root/.my.cnf
-        mysqladmin -u root shutdown
-        # The import was successful so we may remove the folder
-        rm -r "$MYSQL_BACKUP_DIR"
-    fi
-fi
-
-# If we reached here without error we can safely blow away the original
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller? Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-    rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
-    if [ -f /var/tmp/stonith-true ]; then
-        pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
-    fi
-    rm -f /var/tmp/stonith-true
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini  --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini  --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
-
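
Because systemctl is-active can report unknown even for a unit that was just stopped (LP 1627254), the step-2 script above polls until the unit is no longer active instead of reusing check_resource_systemd. A minimal sketch of that bounded wait, with a hypothetical unit name:

    service=openstack-nova-api   # hypothetical unit, for illustration only
    timeout=600
    check_interval=3
    tend=$(( $(date +%s) + timeout ))
    while (( $(date +%s) < tend )); do
        if [ "$(systemctl is-active "$service")" = "active" ]; then
            echo "$service still active, sleeping $check_interval seconds."
            sleep "$check_interval"
        else
            # inactive, unknown or failed all count as "not running"
            break
        fi
    done
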
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
deleted file mode 100755 (executable)
index a3cbd94..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
-
-if [[ -n $(is_bootstrap_node) ]]; then
-    pcs cluster start --all
-
-    tstart=$(date +%s)
-    while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
-        sleep 5
-        tnow=$(date +%s)
-        if (( tnow-tstart > cluster_form_timeout )) ; then
-            echo_error "ERROR: timed out forming the cluster"
-            exit 1
-        fi
-    done
-
-    if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
-        echo_error "ERROR: timed out waiting for cluster to finish transition"
-        exit 1
-    fi
-
-    for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
-      pcs resource enable $vip
-      check_resource_pacemaker $vip started 60
-    done
-fi
-
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod, which is now a systemd service, up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 we are using systemctl directly for now
-systemctl start mongod
-check_resource mongod started 600
-
-if [[ -n $(is_bootstrap_node) ]]; then
-    tstart=$(date +%s)
-    while ! clustercheck; do
-        sleep 5
-        tnow=$(date +%s)
-        if (( tnow-tstart > galera_sync_timeout )) ; then
-            echo_error "ERROR galera sync timed out"
-            exit 1
-        fi
-    done
-
-    # Run all the db syncs
-    # TODO: check if this can be triggered in puppet and removed from here
-    ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
-    cinder-manage db sync
-    glance-manage db_sync
-    heat-manage --config-file /etc/heat/heat.conf db_sync
-    keystone-manage db_sync
-    neutron-db-manage upgrade heads
-    nova-manage db sync
-    nova-manage api_db sync
-    nova-manage db online_data_migrations
-    sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
-fi
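
Step 3 above also shows the pattern used throughout these scripts for waiting on pacemaker: wrap crm_resource --wait in timeout -k so that a hung wait is first sent SIGTERM and, shortly after, SIGKILL. A minimal sketch of that guard:

    cluster_settle_timeout=1800
    # -k 10: send SIGKILL 10 seconds after the initial SIGTERM if crm_resource ignores it
    if ! timeout -k 10 "$cluster_settle_timeout" crm_resource --wait; then
        echo "ERROR: timed out waiting for cluster to finish transition" >&2
        exit 1
    fi
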
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_4.sh
deleted file mode 100755 (executable)
index d2cb955..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
-start_or_enable_service redis
-check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
-
-# start httpd so keystone is available for gnocchi
-# upgrade to run.
-systemctl start httpd
-
-# Swift isn't controlled by pacemaker
-systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_5.sh
deleted file mode 100755 (executable)
index fa95f1f..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-if [[ -n $(is_bootstrap_node) ]]; then
-  # run gnocchi upgrade
-  gnocchi-upgrade
-fi
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_6.sh
deleted file mode 100755 (executable)
index d569084..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# We need to start the systemd services we explicitly stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
-    services=${services%%openstack-sahara*}
-fi
-for service in $services; do
-    manage_systemd_service start "${service%%-clone}"
-    check_resource_systemd "${service%%-clone}" started 600
-done
diff --git a/extraconfig/tasks/major_upgrade_pacemaker.yaml b/extraconfig/tasks/major_upgrade_pacemaker.yaml
deleted file mode 100644 (file)
index 74d3be7..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-heat_template_version: ocata
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
-  servers:
-    type: json
-  input_values:
-    type: json
-    description: input values for the software deployments
-
-  UpgradeLevelNovaCompute:
-    type: string
-    description: Nova Compute upgrade level
-    default: ''
-  MySqlMajorUpgrade:
-    type: string
-    description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade
-    constraints:
-    - allowed_values: ['auto', 'yes', 'no']
-    default: 'auto'
-  KeepSaharaServicesOnUpgrade:
-    type: boolean
-    default: true
-    description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton
-
-
-resources:
-  # TODO(jistr): for Mitaka->Newton upgrades and further we can use
-  # map_merge with input_values instead of feeding params into scripts
-  # via str_replace on bash snippets
-
-  ControllerPacemakerUpgradeConfig_Step1:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        list_join:
-        - ''
-        - - str_replace:
-              template: |
-                #!/bin/bash
-                upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
-              params:
-                UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
-          - str_replace:
-              template: |
-                #!/bin/bash
-                mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
-              params:
-                MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
-          - get_file: pacemaker_common_functions.sh
-          - get_file: major_upgrade_check.sh
-          - get_file: major_upgrade_pacemaker_migrations.sh
-          - get_file: major_upgrade_controller_pacemaker_1.sh
-
-  ControllerPacemakerUpgradeDeployment_Step1:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: [servers, Controller]}
-      config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
-      input_values: {get_param: input_values}
-
-  ControllerPacemakerUpgradeConfig_Step2:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        list_join:
-        - ''
-        - - str_replace:
-              template: |
-                #!/bin/bash
-                upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
-              params:
-                UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
-          - str_replace:
-              template: |
-                #!/bin/bash
-                mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
-              params:
-                MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
-          - get_file: pacemaker_common_functions.sh
-          - get_file: major_upgrade_check.sh
-          - get_file: major_upgrade_pacemaker_migrations.sh
-          - get_file: major_upgrade_controller_pacemaker_2.sh
-
-  ControllerPacemakerUpgradeDeployment_Step2:
-    type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: ControllerPacemakerUpgradeDeployment_Step1
-    properties:
-      servers:  {get_param: [servers, Controller]}
-      config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
-      input_values: {get_param: input_values}
-
-  ControllerPacemakerUpgradeConfig_Step3:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        list_join:
-        - ''
-        - - get_file: pacemaker_common_functions.sh
-          - get_file: major_upgrade_pacemaker_migrations.sh
-          - get_file: major_upgrade_controller_pacemaker_3.sh
-
-  ControllerPacemakerUpgradeDeployment_Step3:
-    type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: ControllerPacemakerUpgradeDeployment_Step2
-    properties:
-      servers:  {get_param: [servers, Controller]}
-      config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
-      input_values: {get_param: input_values}
-
-  ControllerPacemakerUpgradeConfig_Step4:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        list_join:
-        - ''
-        - - get_file: pacemaker_common_functions.sh
-          - get_file: major_upgrade_pacemaker_migrations.sh
-          - get_file: major_upgrade_controller_pacemaker_4.sh
-
-  ControllerPacemakerUpgradeDeployment_Step4:
-    type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: ControllerPacemakerUpgradeDeployment_Step3
-    properties:
-      servers:  {get_param: [servers, Controller]}
-      config: {get_resource: ControllerPacemakerUpgradeConfig_Step4}
-      input_values: {get_param: input_values}
-
-  ControllerPacemakerUpgradeConfig_Step5:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        list_join:
-        - ''
-        - - get_file: pacemaker_common_functions.sh
-          - get_file: major_upgrade_pacemaker_migrations.sh
-          - get_file: major_upgrade_controller_pacemaker_5.sh
-
-  ControllerPacemakerUpgradeDeployment_Step5:
-    type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: ControllerPacemakerUpgradeDeployment_Step4
-    properties:
-      servers:  {get_param: [servers, Controller]}
-      config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
-      input_values: {get_param: input_values}
-
-  ControllerPacemakerUpgradeConfig_Step6:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config:
-        list_join:
-        - ''
-        - - str_replace:
-              template: |
-                #!/bin/bash
-                keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE'
-              params:
-                KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
-          - get_file: pacemaker_common_functions.sh
-          - get_file: major_upgrade_pacemaker_migrations.sh
-          - get_file: major_upgrade_controller_pacemaker_6.sh
-
-  ControllerPacemakerUpgradeDeployment_Step6:
-    type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: ControllerPacemakerUpgradeDeployment_Step5
-    properties:
-      servers:  {get_param: [servers, Controller]}
-      config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
-      input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh b/extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
deleted file mode 100644 (file)
index ae22a1e..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/bin/bash
-
-# Special pieces of upgrade migration logic go into this
-# file. E.g. Pacemaker cluster transitions for existing deployments,
-# matching changes to overcloud_controller_pacemaker.pp (Puppet
-# handles deployment, this file handles migrations).
-#
-# This file shouldn't execute any action on its own, all logic should
-# be wrapped into bash functions. Upgrade scripts will source this
-# file and call the functions defined in this file where appropriate.
-#
-# The migration functions should be idempotent. If the migration has
-# been already applied, it should be possible to call the function
-# again without damaging the deployment or failing the upgrade.
-
-# If the major version of mysql is going to change after the major
-# upgrade, the database must be upgraded on disk to avoid failures
-# due to internal incompatibilities between major mysql versions
-# https://bugs.launchpad.net/tripleo/+bug/1587449
-# This function detects whether a database upgrade is required
-# after a mysql package upgrade. It returns 0 when no major upgrade
-# has to take place, 1 otherwise.
-function is_mysql_upgrade_needed {
-    # The name of the package which provides mysql might differ
-    # after the upgrade. Consider the generic package name, which
-    # should capture the major version change (e.g. 5.5 -> 10.1)
-    local name="mariadb"
-    local output
-    local ret
-    set +e
-    output=$(yum -q check-update $name)
-    ret=$?
-    set -e
-    if [ $ret -ne 100 ]; then
-        # no updates so we exit
-        echo "0"
-        return
-    fi
-
-    local currentepoch=$(rpm -q --qf "%{epoch}" $name)
-    local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2)
-    local currentrelease=$(rpm -q --qf "%{release}" $name)
-    local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name)
-    local newepoch=$(echo "$newoutput" | awk '{ print $1 }')
-    local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2)
-    local newrelease=$(echo "$newoutput" | awk '{ print $3 }')
-
-    # With this we trigger the dump/restore path if we change either epoch or
-    # version in the package. If only the release tag changes we do not do it.
-    # FIXME: we could refine this by trying to parse the mariadb version
-    # into X.Y.Z and trigger the update only if X and/or Y change.
-    output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc")
-    if [ "$output" != "-1" ]; then
-        echo "0"
-        return
-    fi
-    echo "1"
-}
-
-# This function returns the list of services to be migrated away from pacemaker
-# and to systemd. The reason to have these services in a separate function is because
-# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
-# and in the function to migrate the cluster from full HA to HA NG
-function services_to_migrate {
-    # The following PCMK resources are the ones we are going to delete
-    PCMK_RESOURCE_TODELETE="
-    httpd-clone
-    memcached-clone
-    mongod-clone
-    neutron-dhcp-agent-clone
-    neutron-l3-agent-clone
-    neutron-metadata-agent-clone
-    neutron-netns-cleanup-clone
-    neutron-openvswitch-agent-clone
-    neutron-ovs-cleanup-clone
-    neutron-server-clone
-    openstack-aodh-evaluator-clone
-    openstack-aodh-listener-clone
-    openstack-aodh-notifier-clone
-    openstack-ceilometer-central-clone
-    openstack-ceilometer-collector-clone
-    openstack-ceilometer-notification-clone
-    openstack-cinder-api-clone
-    openstack-cinder-scheduler-clone
-    openstack-glance-api-clone
-    openstack-gnocchi-metricd-clone
-    openstack-gnocchi-statsd-clone
-    openstack-heat-api-cfn-clone
-    openstack-heat-api-clone
-    openstack-heat-api-cloudwatch-clone
-    openstack-heat-engine-clone
-    openstack-nova-api-clone
-    openstack-nova-conductor-clone
-    openstack-nova-consoleauth-clone
-    openstack-nova-novncproxy-clone
-    openstack-nova-scheduler-clone
-    openstack-sahara-api-clone
-    openstack-sahara-engine-clone
-    "
-    echo $PCMK_RESOURCE_TODELETE
-}
-
-# This function will migrate a mitaka system where all the resources are managed
-# via pacemaker to a newton setup where only a few services will be managed by pacemaker
-# On a high-level it will operate as follows:
-# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
-#    during the conversion
-# 2. Remove all the colocation constraints and then the ordering constraints, except the
-#    ones related to haproxy/VIPs which exist in Newton as well
-# 3. Take the cluster out of maintenance-mode
-# 4. Remove all the resources that won't be managed by pacemaker in newton. The
-#    outcome will be
-#    that they are stopped and removed from pacemaker's control
-# 5. Do a resource cleanup to make sure the cluster is in a clean state
-function migrate_full_to_ng_ha {
-    if [[ -n $(pcmk_running) ]]; then
-        pcs property set maintenance-mode=true
-
-        # First we go through all the colocation constraints (except the ones
-        # we want to keep, i.e. the haproxy/ip ones) and we remove those
-        COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
-        for constraint in $COL_CONSTRAINTS; do
-            log_debug "Deleting colocation constraint $constraint from CIB"
-            pcs constraint remove "$constraint"
-        done
-
-        # Now we kill all the ordering constraints (except the haproxy/ip ones)
-        ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:"  | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
-        for constraint in $ORD_CONSTRAINTS; do
-            log_debug "Deleting ordering constraint $constraint from CIB"
-            pcs constraint remove "$constraint"
-        done
-        # At this stage all the pacemaker resources are removed from the CIB.
-        # Once we remove the maintenance-mode those systemd resources will keep
-        # on running. They shall be systemd enabled via the puppet converge
-        # step later on
-        pcs property set maintenance-mode=false
-
-        # At this stage there are no constraints whatsoever except the haproxy/ip ones
-        # which we want to keep. We now disable and then delete each resource
-        # that will move to systemd.
-        # We want the systemd resources to be stopped before doing "yum update",
-        # that way "systemctl try-restart <service>" is a no-op because the
-        # service was down already.
-        PCS_STATUS_OUTPUT="$(pcs status)"
-        for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
-             if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
-                 log_debug "Deleting $resource from the CIB"
-                 if ! pcs resource disable "$resource" --wait=600; then
-                     echo_error "ERROR: resource $resource failed to be disabled"
-                     exit 1
-                 fi
-                 pcs resource delete --force "$resource"
-             else
-                 log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
-             fi
-        done
-
-        # We need to do a pcs resource cleanup here + crm_resource --wait to
-        # make sure the cluster is in a clean state before we stop everything,
-        # upgrade and restart everything
-        pcs resource cleanup
-        # We are making sure here that the cluster is stable before proceeding
-        if ! timeout -k 10 600 crm_resource --wait; then
-            echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
-            exit 1
-        fi
-    fi
-}
-
-function disable_standalone_ceilometer_api {
-    if [[ -n $(is_bootstrap_node) ]]; then
-        if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then
-            # Disable pacemaker resources for ceilometer-api
-            manage_pacemaker_service disable openstack-ceilometer-api
-            check_resource_pacemaker openstack-ceilometer-api stopped 600
-            pcs resource delete openstack-ceilometer-api --wait=600
-        fi
-    fi
-}
-
-
-# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton
-# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
-# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
-# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
-# Note that changing an attribute like this makes the rabbitmq resource restart
-function rabbitmq_newton_ocata_upgrade {
-    if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
-        # Number of controllers is obtained by counting how many hostnames we
-        # have in controller_node_names hiera key
-        nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
-        nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
-        if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
-            echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
-            exit 1
-        fi
-        pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
-    fi
-}
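
The rabbitmq_newton_ocata_upgrade function above sizes the HA queue policy as CEIL(N/2) mirrors using only integer arithmetic. A minimal sketch of that calculation, with the controller count hard-coded for illustration (the script derives it from the controller_node_names hiera key):

    nr_controllers=3
    # integer ceiling of N/2: floor(N/2) plus 1 when N is odd
    nr_queues=$(( nr_controllers / 2 + (nr_controllers % 2) ))
    echo "ha-params for $nr_controllers controllers: $nr_queues"   # prints 2
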
diff --git a/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml b/extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
deleted file mode 100644 (file)
index 45933fb..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-heat_template_version: ocata
-
-description: >
-  Software-config for performing aodh data migration
-
-parameters:
-  servers:
-    type: json
-  input_values:
-    type: json
-    description: input values for the software deployments
-resources:
-
-  AodhMysqlMigrationScriptConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: {get_file: aodh_data_migration.sh}
-
-  AodhMysqlMigrationScriptDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: [servers, Controller]}
-      config: {get_resource: AodhMysqlMigrationScriptConfig}
-      input_values: {get_param: input_values}
diff --git a/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp b/extraconfig/tasks/mitaka_to_newton_ceilometer_wsgi_upgrade.pp
deleted file mode 100644 (file)
index a8d4366..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This puppet manifest is to be used only during a Mitaka->Newton upgrade
-# It configures ceilometer to be run under httpd but it makes sure to not
-# restart any services. This snippet needs to be called before init as a
-# pre upgrade migration.
-
-Service <|
-  tag == 'ceilometer-service'
-|> {
-  hasrestart => true,
-  restart    => '/bin/true',
-  start      => '/bin/true',
-  stop       => '/bin/true',
-}
-
-if $::hostname == downcase(hiera('bootstrap_nodeid')) {
-  $pacemaker_master = true
-  $sync_db = true
-} else {
-  $pacemaker_master = false
-  $sync_db = false
-}
-
-include ::tripleo::packages
-
-
-if str2bool(hiera('mongodb::server::ipv6', false)) {
-  $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[')
-  $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
-} else {
-  $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017')
-}
-$mongodb_replset = hiera('mongodb::server::replset')
-$mongo_node_string = join($mongo_node_ips_with_port, ',')
-$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-
-$rabbit_hosts = hiera('rabbitmq_node_ips', undef)
-$rabbit_port  = hiera('ceilometer::rabbit_port', 5672)
-$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}")
-
-class { '::ceilometer' :
-  rabbit_hosts => $rabbit_endpoints,
-}
-
-class {'::ceilometer::db':
-  database_connection => $database_connection,
-}
-
-if $sync_db  {
-  include ::ceilometer::db::sync
-}
-
-include ::ceilometer::config
-
-class { '::ceilometer::api':
-  enabled           => true,
-  service_name      => 'httpd',
-  keystone_password => hiera('ceilometer::keystone::auth::password'),
-  identity_uri      => hiera('ceilometer::keystone::authtoken::auth_url'),
-  auth_uri          => hiera('ceilometer::keystone::authtoken::auth_uri'),
-  keystone_tenant   => hiera('ceilometer::keystone::authtoken::project_name'),
-}
-
-class { '::apache' :
-  service_enable  => false,
-  service_manage  => true,
-  service_restart => '/bin/true',
-  purge_configs   => false,
-  purge_vhost_dir => false,
-}
-
-# To ensure existing ports are not overridden
-class { '::aodh::wsgi::apache':
-  servername => $::hostname,
-  ssl        => false,
-}
-class { '::gnocchi::wsgi::apache':
-  servername => $::hostname,
-  ssl        => false,
-}
-
-class { '::keystone::wsgi::apache':
-  servername => $::hostname,
-  ssl        => false,
-}
-class { '::ceilometer::wsgi::apache':
-  servername => $::hostname,
-  ssl        => false,
-}
index aae4a2d..f17a073 100755 (executable)
@@ -299,9 +299,10 @@ function systemctl_swift {
 }
 
 # Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+# Update condition and add --notriggerun for +bug/1669714
 function special_case_ovs_upgrade_if_needed {
-    if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
-        echo "Manual upgrade of openvswitch - restart in postun detected"
+    if rpm -qa | grep "^openvswitch-2.5.0-14" || rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart" ; then
+        echo "Manual upgrade of openvswitch - ovs-2.5.0-14 or restart in postun detected"
         rm -rf OVS_UPGRADE
         mkdir OVS_UPGRADE && pushd OVS_UPGRADE
         echo "Attempting to downloading latest openvswitch with yumdownloader"
@@ -310,8 +311,8 @@ function special_case_ovs_upgrade_if_needed {
             if rpm -U --test $pkg 2>&1 | grep "already installed" ; then
                 echo "Looks like newer version of $pkg is already installed, skipping"
             else
-                echo "Updating $pkg with nopostun option"
-                rpm -U --replacepkgs --nopostun $pkg
+                echo "Updating $pkg with --nopostun --notriggerun"
+                rpm -U --replacepkgs --nopostun --notriggerun $pkg
             fi
         done
         popd
@@ -321,3 +322,52 @@ function special_case_ovs_upgrade_if_needed {
 
 }
 
+# This code is meant to fix https://bugs.launchpad.net/tripleo/+bug/1686357 on
+# existing setups via a minor update workflow and be idempotent. We need to
+# run this before the yum update because we fix this up even when there are no
+# packages to update on the system (in which case the script exits).
+# This code must be called with set +eu (due to the ocf scripts being sourced)
+function fixup_wrong_ipv6_vip {
+    # This XPath query identifies all the VIPs in pacemaker with netmask /64. Those are IPv6-only resources that have the wrong netmask
+    # This gives the address of the resource in the CIB, one address per line. For example:
+    # /cib/configuration/resources/primitive[@id='ip-2001.db8.ca2.4..10']/instance_attributes[@id='ip-2001.db8.ca2.4..10-instance_attributes']\
+    # /nvpair[@id='ip-2001.db8.ca2.4..10-instance_attributes-cidr_netmask']
+    vip_xpath_query="//resources/primitive[@type='IPaddr2']/instance_attributes/nvpair[@name='cidr_netmask' and @value='64']"
+    vip_xpath_xml_addresses=$(cibadmin --query --xpath "$vip_xpath_query" -e 2>/dev/null)
+    # The following extracts the @id value of the resource
+    vip_resources_to_fix=$(echo -e "$vip_xpath_xml_addresses" | sed -n "s/.*primitive\[@id='\([^']*\)'.*/\1/p")
+    # Running this in a subshell so that sourcing files cannot possibly affect the running script
+    (
+        OCF_PATH="/usr/lib/ocf/lib/heartbeat"
+        if [ -n "$vip_resources_to_fix" -a -f $OCF_PATH/ocf-shellfuncs -a -f $OCF_PATH/findif.sh ]; then
+            source $OCF_PATH/ocf-shellfuncs
+            source $OCF_PATH/findif.sh
+            for resource in $vip_resources_to_fix; do
+                echo "Updating IPv6 VIP $resource with a /128 and a correct addrlabel"
+                # The following will give us something like:
+                # <nvpair id="ip-2001.db8.ca2.4..10-instance_attributes-ip" name="ip" value="2001:db8:ca2:4::10"/>
+                ip_cib_nvpair=$(cibadmin --query --xpath "//resources/primitive[@type='IPaddr2' and @id='$resource']/instance_attributes/nvpair[@name='ip']")
+                # Let's filter out the value of the nvpair to get the ip address
+                ip_address=$(echo $ip_cib_nvpair | xmllint --xpath 'string(//nvpair/@value)' -)
+                OCF_RESKEY_cidr_netmask="64"
+                OCF_RESKEY_ip="$ip_address"
+                # Unfortunately due to https://bugzilla.redhat.com/show_bug.cgi?id=1445628
+                # we need to find out the appropriate nic given the ip address.
+                nic=$(findif $ip_address | awk '{ print $1 }')
+                ret=$?
+                if [ -z "$nic" -o $ret -ne 0 ]; then
+                    echo "NIC autodetection failed for VIP $ip_address, not updating VIPs"
+                    # Only exits the subshell
+                    exit 1
+                fi
+                ocf_run -info pcs resource update --wait "$resource" ip="$ip_address" cidr_netmask=128 nic="$nic" lvs_ipv6_addrlabel=true lvs_ipv6_addrlabel_value=99
+                ret=$?
+                if [ $ret -ne 0 ]; then
+                    echo "pcs resource update for VIP $resource failed, not updating VIPs"
+                    # Only exits the subshell
+                    exit 1
+                fi
+            done
+        fi
+    )
+}
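
The fixup_wrong_ipv6_vip function added above turns the XPath addresses returned by cibadmin --query --xpath ... -e (one per line) into plain resource ids with sed. A minimal sketch of just that extraction step, fed with the example address quoted in the comment:

    xpath_address="/cib/configuration/resources/primitive[@id='ip-2001.db8.ca2.4..10']/instance_attributes[@id='ip-2001.db8.ca2.4..10-instance_attributes']"
    # keep only the value of the primitive's @id attribute
    echo "$xpath_address" | sed -n "s/.*primitive\[@id='\([^']*\)'.*/\1/p"
    # prints: ip-2001.db8.ca2.4..10
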
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Post-Puppet Config for Pacemaker deployments'
 
 parameters:
@@ -10,7 +10,9 @@ parameters:
 
 resources:
 
-  ControllerPostPuppetMaintenanceModeConfig:
+{%- for role in roles -%}
+{% if "controller" in role.tags %}
+  {{role.name}}PostPuppetMaintenanceModeConfig:
     type: OS::Heat::SoftwareConfig
     properties:
       group: script
@@ -22,16 +24,19 @@ resources:
             pcs property set maintenance-mode=false
         fi
 
-  ControllerPostPuppetMaintenanceModeDeployment:
+  {{role.name}}PostPuppetMaintenanceModeDeployment:
     type: OS::Heat::SoftwareDeployments
     properties:
-      servers:  {get_param: servers}
-      config: {get_resource: ControllerPostPuppetMaintenanceModeConfig}
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}PostPuppetMaintenanceModeConfig}
       input_values: {get_param: input_values}
 
-  ControllerPostPuppetRestart:
-    type: OS::TripleO::Tasks::ControllerPostPuppetRestart
-    depends_on: ControllerPostPuppetMaintenanceModeDeployment
+  {{role.name}}PostPuppetRestart:
+    type: OS::TripleO::Tasks::{{role.name}}PostPuppetRestart
+    depends_on: {{role.name}}PostPuppetMaintenanceModeDeployment
     properties:
-      servers:  {get_param: servers}
+      servers: {get_param: [servers, {{role.name}}]}
       input_values: {get_param: input_values}
+{%- endif -%}
+{% endfor %}
+
index 475a668..203ca1f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Post-Puppet restart config for Pacemaker deployments'
 
 parameters:
@@ -23,6 +23,6 @@ resources:
   ControllerPostPuppetRestartDeployment:
     type: OS::Heat::SoftwareDeployments
     properties:
-      servers:  {get_param: servers}
+      servers: {get_param: servers}
       config: {get_resource: ControllerPostPuppetRestartConfig}
       input_values: {get_param: input_values}
index aa7514f..98b37be 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Pre-Puppet Config for Pacemaker deployments'
 
 parameters:
@@ -20,6 +20,6 @@ resources:
   ControllerPrePuppetMaintenanceModeDeployment:
     type: OS::Heat::SoftwareDeployments
     properties:
-      servers:  {get_param: servers}
+      servers: {get_param: servers}
       config: {get_resource: ControllerPrePuppetMaintenanceModeConfig}
       input_values: {get_param: input_values}
index b7771e3..e3f6c49 100755 (executable)
@@ -10,7 +10,10 @@ function run_puppet {
     export FACTER_deploy_config_name="${role}Deployment_Step${step}"
     if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
         set +e
-        puppet apply --detailed-exitcodes "${manifest}"
+        puppet apply --detailed-exitcodes \
+               --modulepath \
+               /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+               "${manifest}"
         rc=$?
         echo "puppet apply exited with exit code $rc"
     else
diff --git a/extraconfig/tasks/ssh/host_public_key.yaml b/extraconfig/tasks/ssh/host_public_key.yaml
new file mode 100644 (file)
index 0000000..e4ba0cc
--- /dev/null
@@ -0,0 +1,42 @@
+heat_template_version: pike
+
+description: >
+  This is a template which will fetch the ssh host public key.
+
+parameters:
+  server:
+    description: ID of the node to apply this config to
+    type: string
+
+resources:
+  SshHostPubKeyConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      outputs:
+      - name: rsa
+      - name: ecdsa
+      - name: ed25519
+      config: |
+        #!/bin/sh -x
+        test -e '/etc/ssh/ssh_host_rsa_key.pub' && cat /etc/ssh/ssh_host_rsa_key.pub > $heat_outputs_path.rsa
+        test -e '/etc/ssh/ssh_host_ecdsa_key.pub' && cat /etc/ssh/ssh_host_ecdsa_key.pub > $heat_outputs_path.ecdsa
+        test -e '/etc/ssh/ssh_host_ed25519_key.pub' && cat /etc/ssh/ssh_host_ed25519_key.pub > $heat_outputs_path.ed25519
+
+  SshHostPubKeyDeployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      config: {get_resource: SshHostPubKeyConfig}
+      server: {get_param: server}
+
+
+outputs:
+  ecdsa:
+    description: Host ssh public key (ecdsa)
+    value:  {get_attr: [SshHostPubKeyDeployment, ecdsa]}
+  rsa:
+    description: Host ssh public key (rsa)
+    value:  {get_attr: [SshHostPubKeyDeployment, rsa]}
+  ed25519:
+    description: Host ssh public key (ed25519)
+    value:  {get_attr: [SshHostPubKeyDeployment, ed25519]}
diff --git a/extraconfig/tasks/ssh/known_hosts_config.yaml b/extraconfig/tasks/ssh/known_hosts_config.yaml
new file mode 100644 (file)
index 0000000..50bde65
--- /dev/null
@@ -0,0 +1,36 @@
+heat_template_version: pike
+description: 'SSH Known Hosts Config'
+
+parameters:
+  known_hosts:
+    type: string
+
+resources:
+
+  SSHKnownHostsConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      inputs:
+        - name: known_hosts
+          default: {get_param: known_hosts}
+      config: |
+        #!/bin/bash
+        set -eux
+        set -o pipefail
+
+        echo "Creating ssh known hosts file"
+
+        if [ ! -z "${known_hosts}" ]; then
+          echo "${known_hosts}"
+          echo -ne "${known_hosts}" > /etc/ssh/ssh_known_hosts
+          chmod 0644 /etc/ssh/ssh_known_hosts
+        else
+          rm -f /etc/ssh/ssh_known_hosts
+          echo "No ssh known hosts"
+        fi
+
+outputs:
+  OS::stack_id:
+    description: The SSHKnownHostsConfig resource.
+    value: {get_resource: SSHKnownHostsConfig}
\ No newline at end of file
diff --git a/extraconfig/tasks/swift-ring-deploy.yaml b/extraconfig/tasks/swift-ring-deploy.yaml
deleted file mode 100644 (file)
index d17f78a..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-heat_template_version: ocata
-
-parameters:
-  servers:
-    type: json
-  SwiftRingGetTempurl:
-    default: ''
-    description: A temporary Swift URL to download rings from.
-    type: string
-
-resources:
-  SwiftRingDeployConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      inputs:
-        - name: swift_ring_get_tempurl
-      config: |
-        #!/bin/sh
-        pushd /
-        curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true
-        popd
-
-  SwiftRingDeploy:
-    type: OS::Heat::SoftwareDeployments
-    properties:
-      name: SwiftRingDeploy
-      config: {get_resource: SwiftRingDeployConfig}
-      servers:  {get_param: servers}
-      input_values:
-        swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
diff --git a/extraconfig/tasks/swift-ring-update.yaml b/extraconfig/tasks/swift-ring-update.yaml
deleted file mode 100644 (file)
index 440c688..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-heat_template_version: ocata
-
-parameters:
-  servers:
-    type: json
-  SwiftRingPutTempurl:
-    default: ''
-    description: A temporary Swift URL to upload rings to.
-    type: string
-
-resources:
-  SwiftRingUpdateConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      inputs:
-        - name: swift_ring_put_tempurl
-      config: |
-        #!/bin/sh
-        TMP_DATA=$(mktemp -d)
-        function cleanup {
-          rm -Rf "$TMP_DATA"
-        }
-        trap cleanup EXIT
-        # sanity check in case rings are not consistent within cluster
-        swift-recon --md5 | grep -q "doesn't match" && exit 1
-        pushd ${TMP_DATA}
-        tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/*
-        resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz`
-        popd
-        if [ "$resp" != "201" ]; then
-            exit 1
-        fi
-
-  SwiftRingUpdate:
-    type: OS::Heat::SoftwareDeployments
-    properties:
-      name: SwiftRingUpdate
-      config: {get_resource: SwiftRingUpdateConfig}
-      servers: {get_param: servers}
-      input_values:
-        swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
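
The removed swift-ring-update task above uploads the ring archive to a Swift tempurl and treats anything other than HTTP 201 as a failure by capturing curl's --write-out status code. A minimal sketch of that check, with a placeholder URL:

    tempurl="https://swift.example.com/tempurl"   # placeholder, normally passed in as an input
    resp=$(curl --insecure --silent -X PUT "$tempurl" \
                --write-out "%{http_code}" --data-binary @swift-rings.tar.gz)
    if [ "$resp" != "201" ]; then
        echo "ring upload failed with HTTP status $resp" >&2
        exit 1
    fi
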
index c256541..1114897 100644 (file)
@@ -28,37 +28,44 @@ SCRIPT_NAME=$(basename $0)
 $(declare -f log_debug)
 $(declare -f manage_systemd_service)
 $(declare -f systemctl_swift)
+$(declare -f special_case_ovs_upgrade_if_needed)
 
 # pin nova messaging +-1 for the nova-compute service
 if [[ -n \$NOVA_COMPUTE ]]; then
     crudini  --set /etc/nova/nova.conf upgrade_levels compute auto
 fi
 
-$(declare -f special_case_ovs_upgrade_if_needed)
 special_case_ovs_upgrade_if_needed
 
-yum -y install python-zaqarclient  # needed for os-collect-config
 if [[ -n \$SWIFT_STORAGE ]]; then
     systemctl_swift stop
 fi
+
 yum -y update
+
 if [[ -n \$SWIFT_STORAGE ]]; then
     systemctl_swift start
 fi
 # Due to bug#1640177 we need to restart compute agent
 if [[ -n \$NOVA_COMPUTE ]]; then
-    echo "Restarting openstack ceilometer agent compute"
+    log_debug "Restarting openstack ceilometer agent compute"
     systemctl restart openstack-ceilometer-compute
+    yum install -y openstack-nova-migration
 fi
 
 # Apply puppet manifest to converge just right after the ${ROLE} upgrade
 $(declare -f run_puppet)
 for step in 1 2 3 4 5 6; do
+    log_debug "Running puppet step \$step for ${ROLE}"
     if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then
-         echo "Puppet failure at step \${step}"
+         log_debug "Puppet failure at step \${step}"
          exit 1
     fi
+    log_debug "Completed puppet step \$step"
 done
+
+log_debug "TripleO upgrade run completed."
+
 ENDOFCAT
 
 # ensure the permissions are OK
index 6bf415b..cb9cc5b 100755 (executable)
@@ -38,39 +38,48 @@ if [[ -a "$timestamp_file" ]]; then
 fi
 touch "$timestamp_file"
 
-command_arguments=${command_arguments:-}
-
-list_updates=$(yum list updates)
-
-if [[ "$list_updates" == "" ]]; then
-    echo "No packages require updating"
-    exit 0
-fi
-
 pacemaker_status=""
-if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then
+# We include word boundaries in order to not match pacemaker_remote
+if hiera -c /etc/puppet/hiera.yaml service_names | grep -q '\bpacemaker\b'; then
     pacemaker_status=$(systemctl is-active pacemaker)
 fi
 
-# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
-# and https://bugs.launchpad.net/tripleo/+bug/1634851
+# (NB: when backporting this s/pacemaker_short_bootstrap_node_name/bootstrap_nodeid)
+# This runs before the yum_update so we are guaranteed to run it even in the absence
+# of packages to update (the check for -z "$update_identifier" guarantees that this
+# is run only on overcloud stack update -i)
 if [[ "$pacemaker_status" == "active" && \
-      "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]] ; then
-    if pcs resource show rabbitmq | grep -E "start.*timeout=100"; then
-        pcs resource update rabbitmq op start timeout=200s
-    fi
-    if pcs resource show rabbitmq | grep -E "stop.*timeout=90"; then
-        pcs resource update rabbitmq op stop timeout=200s
-    fi
-    if pcs resource show redis | grep -E "start.*timeout=120"; then
-        pcs resource update redis op start timeout=200s
-    fi
-    if pcs resource show redis | grep -E "stop.*timeout=120"; then
-        pcs resource update redis op stop timeout=200s
+        "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name)" == "$(facter hostname)" ]] ; then \
+    # OCF scripts don't cope with -eu
+    echo "Verifying if we need to fix up any IPv6 VIPs"
+    set +eu
+    fixup_wrong_ipv6_vip
+    ret=$?
+    set -eu
+    if [ $ret -ne 0 ]; then
+        echo "Fixing up IPv6 VIPs failed. Stopping here. (See https://bugs.launchpad.net/tripleo/+bug/1686357 for more info)"
+        exit 1
     fi
 fi
 
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+command_arguments=${command_arguments:-}
+
+# yum check-update exits 100 if updates are available
+set +e
+check_update=$(yum check-update 2>&1)
+check_update_exit=$?
+set -e
+
+if [[ "$check_update_exit" == "1" ]]; then
+    echo "Failed to check for package updates"
+    echo "$check_update"
+    exit 1
+elif [[ "$check_update_exit" != "100" ]]; then
+    echo "No packages require updating"
+    exit 0
+fi
+
+# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
 special_case_ovs_upgrade_if_needed
 
 if [[ "$pacemaker_status" == "active" ]] ; then
@@ -100,17 +109,6 @@ return_code=$?
 echo "$result"
 echo "yum return code: $return_code"
 
-# Writes any changes caused by alterations to os-net-config and bounces the
-# interfaces *before* restarting the cluster.
-os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
-RETVAL=$?
-if [[ $RETVAL == 2 ]]; then
-    echo "os-net-config: interface configuration files updated successfully"
-elif [[ $RETVAL != 0 ]]; then
-    echo "ERROR: os-net-config configuration failed"
-    exit $RETVAL
-fi
-
 if [[ "$pacemaker_status" == "active" ]] ; then
     echo "Starting cluster node"
     pcs cluster start
@@ -127,15 +125,19 @@ if [[ "$pacemaker_status" == "active" ]] ; then
         fi
     done
 
-    tstart=$(date +%s)
-    while ! clustercheck; do
-        sleep 5
-        tnow=$(date +%s)
-        if (( tnow-tstart > galera_sync_timeout )) ; then
-            echo "ERROR galera sync timed out"
-            exit 1
-        fi
-    done
+    RETVAL=$( pcs resource show galera-master | grep wsrep_cluster_address | grep -q `crm_node -n` ; echo $? )
+
+    if [[ $RETVAL -eq 0 && -e /etc/sysconfig/clustercheck ]]; then
+        tstart=$(date +%s)
+        while ! clustercheck; do
+            sleep 5
+            tnow=$(date +%s)
+            if (( tnow-tstart > galera_sync_timeout )) ; then
+                echo "ERROR galera sync timed out"
+                exit 1
+            fi
+        done
+    fi
 
     echo "Waiting for pacemaker cluster to settle"
     if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
@@ -146,6 +148,7 @@ if [[ "$pacemaker_status" == "active" ]] ; then
     pcs status
 fi
 
-echo "Finished yum_update.sh on server $deploy_server_id at `date`"
+
+echo "Finished yum_update.sh on server $deploy_server_id at `date` with return code: $return_code"
 
 exit $return_code
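
The reworked yum_update.sh above replaces parsing of yum list updates with the documented yum check-update exit codes: 100 means updates are available, 0 means none, and 1 indicates an error. A minimal standalone sketch of that dispatch:

    set +e
    check_update=$(yum check-update 2>&1)
    check_update_exit=$?
    set -e

    case "$check_update_exit" in
        100) echo "updates available, continuing" ;;
        0)   echo "No packages require updating"; exit 0 ;;
        *)   echo "Failed to check for package updates"; echo "$check_update"; exit 1 ;;
    esac
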
index 8cff838..9daa835 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Software-config for performing package updates using yum
index 9400c1d..2ede5be 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'No-op yum update task'
 
 resources:
index e936e60..65e93fe 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 parameters:
   ContrailRepo:
index f82bc19..8bd1c9d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Configure os-net-config mappings for specific nodes
index bc379f4..0951b84 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   This is a default no-op template which provides empty user-data
index d412b93..43e966d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   This is first boot configuration for development purposes. It allows
index a352093..2f03c83 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 # NOTE: You don't need to pass the parameter explicitly from the
 # parent template, it can be specified via the parameter_defaults
index ed8302d..5223f93 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 parameters:
   # Can be overridden via parameter_defaults in the environment
index 63dd5a9..d32f223 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Uses cloud-init to enable root logins and set the root password.
index 5a21171..f0d3c6a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'All Hosts Config'
 
 parameters:
@@ -31,7 +31,7 @@ outputs:
       The content that should be appended to your /etc/hosts if you want to get
       hostname-based access to the deployed nodes (useful for testing without
       setting up a DNS).
-    value: {get_attr: [hostsConfigImpl, config, hosts]}
+    value: {get_param: hosts}
   OS::stack_id:
     description: The ID of the hostsConfigImpl resource.
     value: {get_resource: hostsConfigImpl}
index 3ae09c9..f92f9a1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config with 2 bonded nics on a bridge.
 parameters:
index 10d5388..29646ab 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config for a simple bridge.
 parameters:
index 0466481..6c44e60 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config for a simple bridge.
 parameters:
@@ -33,7 +33,7 @@ parameters:
   ControlPlaneDefaultRoute: # Override this via parameter_defaults
     description: The default route of the control plane network.
     type: string
-    default: 192.0.2.1
+    default: 192.168.24.1
   EC2MetadataIp: # Override this via parameter_defaults
     description: The IP address of the EC2 metadata server.
     type: string
index be05cc1..57f1a19 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Software Config to no-op for os-net-config. Using this will allow you
index 12374a2..cbf282e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
 parameters:
index 50e541b..c778bd8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
 parameters:
index a52e22b..e864be0 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config for a simple bridge.
 parameters:
index 9be51c0..881fbfd 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
 parameters:
index 703fea0..97177c4 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the ceph storage role.
 parameters:
index df15cd6..5456c2c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the cinder storage role.
 parameters:
index 4677241..607d346 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
 parameters:
index f9c926d..448d4e2 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
 parameters:
index ce1e865..8ac5cda 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
 parameters:
index bb4ac27..2579648 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role with IPv6
   on the External network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control
index 9151538..e4b3012 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
 parameters:
index 6d4e368..6371ceb 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the swift storage role.
 parameters:
index 6a78806..3cc4361 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure multiple interfaces for the ceph storage role.
 parameters:
index d238444..fa7d49e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure multiple interfaces for the cinder storage role.
 parameters:
index abfd323..a793912 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure multiple interfaces for the
   compute role with external bridge for DVR.
index 101a08d..5549368 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure multiple interfaces for the compute role.
 parameters:
index 4fae435..477eeaa 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure multiple interfaces for the controller role with IPv6 on the External
   network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
index ba9f8fd..59f16b9 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure multiple interfaces for the controller role.
 parameters:
index 4019012..180f553 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure multiple interfaces for the swift storage role.
 parameters:
index 448df69..6685f2b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the ceph storage role.
 parameters:
index 465555d..ecc57ad 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the cinder storage role.
 parameters:
index a21bc8f..e36afd3 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the compute role.
 parameters:
index bb8bb9c..d405807 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
   IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
index a9689ce..a52a8b8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the controller role.
 parameters:
index c8e4db2..ad154fa 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the swift storage role.
 parameters:
index 0b5eb0c..790e8a7 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the ceph storage role.
 parameters:
index 882d6eb..6dee3be 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the cinder storage role.
 parameters:
index 42cfd78..2201b0b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the compute role.
 parameters:
index 9e0680e..d26de32 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the controller role. No external IP is configured.
 parameters:
index 1f9a67d..8f68760 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
   IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
index 4ac1831..8530118 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the controller role.
 parameters:
index 605b8ee..b4587e0 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: >
   Software Config to drive os-net-config to configure VLANs for the swift storage role.
 parameters:
index 990cbab..42eb118 100755 (executable)
@@ -191,7 +191,7 @@ def template_endpoint_items(config):
 
 def generate_endpoint_map_template(config):
     return collections.OrderedDict([
-        ('heat_template_version', 'ocata'),
+        ('heat_template_version', 'pike'),
         ('description', 'A map of OpenStack endpoints. Since the endpoints '
          'are URLs, we need to have brackets around IPv6 IP addresses. The '
          'inputs to these parameters come from net_ip_uri_map, which will '
index 277bd67..c92ce37 100644 (file)
@@ -225,7 +225,6 @@ Keystone:
         net_param: KeystonePublicApi
         uri_suffixes:
             '': /v2.0
-            EC2: /v2.0/ec2tokens
             V3: /v3
         names:
             EC2: KeystoneEC2
index fecac0a..a3dbe18 100644 (file)
@@ -2,7 +2,7 @@
 ### This file is automatically generated from endpoint_data.yaml
 ### by the script build_endpoint_map.py
 
-heat_template_version: ocata
+heat_template_version: pike
 description: A map of OpenStack endpoints. Since the endpoints are URLs,
   we need to have brackets around IPv6 IP addresses. The inputs to these
   parameters come from net_ip_uri_map, which will include these brackets
@@ -6012,88 +6012,6 @@ outputs:
                         template: NETWORK_uri
             - ':'
             - get_param: [EndpointMap, KeystoneAdmin, port]
-      KeystoneEC2:
-        host:
-          str_replace:
-            template:
-              get_param: [EndpointMap, KeystoneInternal, host]
-            params:
-              CLOUDNAME:
-                get_param:
-                - CloudEndpoints
-                - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
-              IP_ADDRESS:
-                get_param:
-                - NetIpMap
-                - str_replace:
-                    params:
-                      NETWORK:
-                        get_param: [ServiceNetMap, KeystonePublicApiNetwork]
-                    template: NETWORK_uri
-        host_nobrackets:
-          str_replace:
-            template:
-              get_param: [EndpointMap, KeystoneInternal, host]
-            params:
-              CLOUDNAME:
-                get_param:
-                - CloudEndpoints
-                - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
-              IP_ADDRESS:
-                get_param:
-                - NetIpMap
-                - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
-        port:
-          get_param: [EndpointMap, KeystoneInternal, port]
-        protocol:
-          get_param: [EndpointMap, KeystoneInternal, protocol]
-        uri:
-          list_join:
-          - ''
-          - - get_param: [EndpointMap, KeystoneInternal, protocol]
-            - ://
-            - str_replace:
-                template:
-                  get_param: [EndpointMap, KeystoneInternal, host]
-                params:
-                  CLOUDNAME:
-                    get_param:
-                    - CloudEndpoints
-                    - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
-                  IP_ADDRESS:
-                    get_param:
-                    - NetIpMap
-                    - str_replace:
-                        params:
-                          NETWORK:
-                            get_param: [ServiceNetMap, KeystonePublicApiNetwork]
-                        template: NETWORK_uri
-            - ':'
-            - get_param: [EndpointMap, KeystoneInternal, port]
-            - /v2.0/ec2tokens
-        uri_no_suffix:
-          list_join:
-          - ''
-          - - get_param: [EndpointMap, KeystoneInternal, protocol]
-            - ://
-            - str_replace:
-                template:
-                  get_param: [EndpointMap, KeystoneInternal, host]
-                params:
-                  CLOUDNAME:
-                    get_param:
-                    - CloudEndpoints
-                    - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
-                  IP_ADDRESS:
-                    get_param:
-                    - NetIpMap
-                    - str_replace:
-                        params:
-                          NETWORK:
-                            get_param: [ServiceNetMap, KeystonePublicApiNetwork]
-                        template: NETWORK_uri
-            - ':'
-            - get_param: [EndpointMap, KeystoneInternal, port]
       KeystoneInternal:
         host:
           str_replace:
index 21260d3..277c761 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
index 51000bb..e577c1c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
index 793535c..563e6d4 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Internal API network. Used for most APIs, Database, RPC.
index 5395065..05a740b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Internal API network. Used for most APIs, Database, RPC.
index 77fcd4e..41ede5c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Management network. System administration, SSH, DNS, NTP, etc. This network
index e1391ad..a44d34d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Management network. System administration, SSH, DNS, NTP, etc. This network
diff --git a/network/networks.j2.yaml b/network/networks.j2.yaml
new file mode 100644 (file)
index 0000000..f19e2c0
--- /dev/null
@@ -0,0 +1,17 @@
+heat_template_version: pike
+
+description: Create networks to split out Overcloud traffic
+
+resources:
+
+  {%- for network in networks %}
+    {%- if network.name != 'InternalApi' %}
+  {{network.name}}Network:
+    {%- else  %}
+  InternalNetwork:
+    {%- endif %}
+    type: OS::TripleO::Network::{{network.name}}
+  {%- endfor %}
+
+  NetworkExtraConfig:
+    type: OS::TripleO::Network::ExtraConfig
diff --git a/network/networks.yaml b/network/networks.yaml
deleted file mode 100644 (file)
index 26033ee..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-heat_template_version: ocata
-
-description: Create networks to split out Overcloud traffic
-
-resources:
-
-  ExternalNetwork:
-    type: OS::TripleO::Network::External
-
-  InternalNetwork:
-    type: OS::TripleO::Network::InternalApi
-
-  StorageMgmtNetwork:
-    type: OS::TripleO::Network::StorageMgmt
-
-  StorageNetwork:
-    type: OS::TripleO::Network::Storage
-
-  TenantNetwork:
-    type: OS::TripleO::Network::Tenant
-
-  ManagementNetwork:
-    type: OS::TripleO::Network::Management
-
-  NetworkExtraConfig:
-    type: OS::TripleO::Network::ExtraConfig
index 0f21e3e..386520c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port for a VIP on the undercloud ctlplane network.
index c33643e..a02cc28 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the external network. The IP address will be chosen
index 893b26d..d2610c6 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs
index c67789a..e5fe8d7 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs. This version is for IPv6
index 905974f..12d61cc 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the external network. The IP address will be chosen
index 69a887e..64fdce6 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a service mapped list of IPs
index c9673dd..2735a69 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a service mapped list of IPv6 IPs
index 1f96e3f..f258080 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the internal_api network.
index 3f16f30..cb87fd5 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs
index b36ef23..12a0731 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs. This version is for IPv6
index e236156..46e6e18 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the internal_api network.
index b626bc2..dd62033 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the management network. The IP address will be chosen
index 05fedb9..188be68 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs
index 64758bf..b5d4425 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs. This version is for IPv6
index 9e6a35b..977502a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the management network. The IP address will be chosen
index 83d875e..c3734af 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 parameters:
   ControlPlaneIpList:
index c974d72..75818bf 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 parameters:
   ControlPlaneIp:
index 58f96e6..018bf2b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 parameters:
   # Set these via parameter defaults to configure external VIPs
index 12db8d2..aa40cf1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 parameters:
   # Set these via parameter defaults to configure external VIPs
index e2004cb..8030bfc 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns the control plane port (provisioning network) as the ip_address.
index 8040041..5c1aba1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the storage network.
index dfab49a..ca5993f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs
index a6cde5f..ec7cd2f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs. This version is for IPv6
index b96fbd0..94b058a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the storage_mgmt API network.
index 6ec3dba..63b2e15 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs
index 2f3ea19..6d0b879 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs This version is for IPv6
index 01e4c31..3d70c69 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the storage_mgmt API network.
index 1dd7619..6137d24 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the storage network.
index f6929b8..a56b0f4 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the tenant network.
index c72b227..03ff6d1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs
index bc056fa..d45faf0 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Returns an IP from a network mapped list of IPs
index 8410182..d23e91f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port on the tenant network.
index d996d03..70b4482 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port for a VIP on the isolated network NetworkName.
index 7a45756..09f646a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Creates a port for a VIP on the isolated network NetworkName.
index a1042eb..c14d52f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Mapping of service_name_network -> network name
@@ -54,6 +54,7 @@ parameters:
       HeatApiCfnNetwork: internal_api
       HeatApiCloudwatchNetwork: internal_api
       NovaApiNetwork: internal_api
+      NovaColdMigrationNetwork: ctlplane
       NovaPlacementNetwork: internal_api
       NovaMetadataNetwork: internal_api
       NovaVncProxyNetwork: internal_api
@@ -67,6 +68,7 @@ parameters:
       HorizonNetwork: internal_api
       MemcachedNetwork: internal_api
       RabbitmqNetwork: internal_api
+      QdrNetwork: internal_api
       RedisNetwork: internal_api
       MysqlNetwork: internal_api
       CephClusterNetwork: storage_mgmt
index 0a704ea..0fb9cc0 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Storage network.
index c711716..9869f0d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Storage management network. Storage replication, etc.
index 2b06519..d6b1652 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Storage management network. Storage replication, etc.
index 777e616..0ec34ad 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Storage network.
index 33055fe..4881308 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Tenant network.
index 0bf5d2f..bbc2b6b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Tenant IPv6 network.
diff --git a/network_data.yaml b/network_data.yaml
new file mode 100644 (file)
index 0000000..6d62605
--- /dev/null
@@ -0,0 +1,30 @@
+# List of networks, used for j2 templating of enabled networks
+#
+# Supported values:
+#
+# name: Name of the network (mandatory)
+# name_lower: lowercase version of name used for filenames
+#             (optional, defaults to name.lower())
+# vlan: vlan for the network (optional)
+# gateway: gateway for the network (optional)
+# enabled: Is the network enabled (optional, defaults to true)
+# vip: Enable creation of a virtual IP on this network
+# [TODO] (dsneddon@redhat.com) - Enable dynamic creation of VIP ports, to support
+# VIPs on non-default networks. See https://bugs.launchpad.net/tripleo/+bug/1667104
+#
+- name: External
+  vip: true
+- name: InternalApi
+  name_lower: internal_api
+  vip: true
+- name: Storage
+  vip: true
+- name: StorageMgmt
+  name_lower: storage_mgmt
+  vip: true
+- name: Tenant
+  vip: false  # Tenant network does not use VIPs
+- name: Management
+  # Management network is disabled by default
+  enabled: false
+  vip: false  # Management network does not use VIPs
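The Jinja2 templates touched in this change (network/networks.j2.yaml above and the network loops in the resource registry below) are rendered against this list. A minimal sketch of that rendering, assuming jinja2 and PyYAML are installed, the script runs from the repository root, and the files live at the paths shown in this diff; filtering on 'enabled' follows the comment above, and the plain Environment is illustrative rather than the exact setup tripleo uses:

    import jinja2
    import yaml

    # Network list that drives the j2 templating (path from this diff).
    with open('network_data.yaml') as f:
        networks = [n for n in yaml.safe_load(f) if n.get('enabled', True)]

    with open('network/networks.j2.yaml') as f:
        template = jinja2.Environment().from_string(f.read())

    # Expected shape: one OS::TripleO::Network::<Name> resource per enabled
    # network (InternalApi emitted as InternalNetwork for compatibility),
    # plus the NetworkExtraConfig resource.
    print(template.render(networks=networks))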
index b811a5a..7b8fe23 100644 (file)
@@ -5,15 +5,14 @@ resource_registry:
   OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
   OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
   OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
+  OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
+  OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
   OS::TripleO::DefaultPasswords: default_passwords.yaml
 
   # Tasks (for internal TripleO usage)
   OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
   OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
 
-  OS::TripleO::Tasks::SwiftRingDeploy: extraconfig/tasks/swift-ring-deploy.yaml
-  OS::TripleO::Tasks::SwiftRingUpdate: extraconfig/tasks/swift-ring-update.yaml
-
 {% for role in roles %}
   OS::TripleO::{{role.name}}::PreNetworkConfig: OS::Heat::None
   OS::TripleO::{{role.name}}PostDeploySteps: puppet/post.yaml
@@ -23,22 +22,16 @@ resource_registry:
   OS::TripleO::Tasks::{{role.name}}PostConfig: OS::Heat::None
   OS::TripleO::{{role.name}}ExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
   # Port assignments for the {{role.name}} role
+  {%- if role.name != 'ObjectStorage' %}
+    {%- for network in networks %}
+  OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: network/ports/noop.yaml
+    {%- endfor %}
+  {%- else %}
   # Note we have to special-case ObjectStorage for backwards compatibility
-  {% if role.name != 'ObjectStorage' %}
-  OS::TripleO::{{role.name}}::Ports::ExternalPort: network/ports/noop.yaml
-  OS::TripleO::{{role.name}}::Ports::InternalApiPort: network/ports/noop.yaml
-  OS::TripleO::{{role.name}}::Ports::StoragePort: network/ports/noop.yaml
-  OS::TripleO::{{role.name}}::Ports::StorageMgmtPort: network/ports/noop.yaml
-  OS::TripleO::{{role.name}}::Ports::TenantPort: network/ports/noop.yaml
-  OS::TripleO::{{role.name}}::Ports::ManagementPort: network/ports/noop.yaml
-  {% else %}
-  OS::TripleO::SwiftStorage::Ports::ExternalPort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::TenantPort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::ManagementPort: network/ports/noop.yaml
-  {% endif %}
+    {%- for network in networks %}
+  OS::TripleO::SwiftStorage::Ports::{{network.name}}Port: network/ports/noop.yaml
+    {%- endfor %}
+  {%- endif %}
   OS::TripleO::{{role.name}}::Net::SoftwareConfig: net-config-noop.yaml
 {% endfor %}
 
@@ -49,6 +42,9 @@ resource_registry:
   OS::TripleO::ServiceServerMetadataHook: OS::Heat::None
 
   OS::TripleO::Server: OS::Nova::Server
+{% for role in roles %}
+  OS::TripleO::{{role.name}}Server: OS::TripleO::Server
+{% endfor %}
 
   # This creates the "heat-admin" user for all OS images by default
   # To disable, replace with firstboot/userdata_default.yaml
@@ -69,11 +65,6 @@ resource_registry:
   OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
   OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
 
-{% for role in roles %}
-  OS::TripleO::Tasks::{{role.name}}PrePuppet: OS::Heat::None
-  OS::TripleO::Tasks::{{role.name}}PostPuppet: OS::Heat::None
-{% endfor %}
-
   # "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
   # phase, e.g when puppet is applied, but after the pre_deploy phase.  Useful when
   # configuration with knowledge of all nodes in the cluster is required vs single
@@ -84,12 +75,9 @@ resource_registry:
   # TripleO overcloud networks
   OS::TripleO::Network: network/networks.yaml
 
-  OS::TripleO::Network::External: OS::Heat::None
-  OS::TripleO::Network::InternalApi: OS::Heat::None
-  OS::TripleO::Network::StorageMgmt: OS::Heat::None
-  OS::TripleO::Network::Storage: OS::Heat::None
-  OS::TripleO::Network::Tenant: OS::Heat::None
-  OS::TripleO::Network::Management: OS::Heat::None
+  {%- for network in networks %}
+  OS::TripleO::Network::{{network.name}}: OS::Heat::None
+  {%- endfor %}
 
   OS::TripleO::Network::ExtraConfig: OS::Heat::None
 
@@ -98,10 +86,10 @@ resource_registry:
   OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
 
   # Port assignments for the VIPs
-  OS::TripleO::Network::Ports::ExternalVipPort: network/ports/noop.yaml
-  OS::TripleO::Network::Ports::InternalApiVipPort: network/ports/noop.yaml
-  OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml
-  OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
+  {%- for network in networks if network.vip|default(false) %}
+  OS::TripleO::Network::Ports::{{network.name}}VipPort: network/ports/noop.yaml
+  {%- endfor %}
+
   OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
   OS::TripleO::Network::Ports::ControlPlaneVipPort: OS::Neutron::Port
 
@@ -120,7 +108,6 @@ resource_registry:
   # services
   OS::TripleO::Services: puppet/services/services.yaml
   OS::TripleO::Services::Apache: puppet/services/apache.yaml
-  OS::TripleO::Services::ApacheTLS: OS::Heat::None
   OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
   OS::TripleO::Services::CephMds: OS::Heat::None
   OS::TripleO::Services::CephMon: OS::Heat::None
@@ -144,9 +131,11 @@ resource_registry:
   OS::TripleO::Services::HeatEngine: puppet/services/heat-engine.yaml
   OS::TripleO::Services::Kernel: puppet/services/kernel.yaml
   OS::TripleO::Services::MySQL: puppet/services/database/mysql.yaml
-  OS::TripleO::Services::MySQLTLS: OS::Heat::None
+  OS::TripleO::Services::NeutronBgpVpnApi: OS::Heat::None
   OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
+  OS::TripleO::Services::NeutronL2gwApi: OS::Heat::None
   OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
+  OS::TripleO::Services::NeutronL2gwAgent: OS::Heat::None
   OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
   # FIXME(shardy) the duplicate NeutronServer line can be removed when we've updated
   # the multinode job ControllerServices after this patch merges
@@ -160,6 +149,7 @@ resource_registry:
   OS::TripleO::Services::NeutronCorePluginML2OVN: puppet/services/neutron-plugin-ml2-ovn.yaml
   OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml
   OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml
+  OS::TripleO::Services::NeutronCorePluginNSX: puppet/services/neutron-plugin-nsx.yaml
   OS::TripleO::Services::OVNDBs: OS::Heat::None
 
   OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml
@@ -169,6 +159,7 @@ resource_registry:
   OS::TripleO::Services::PacemakerRemote: OS::Heat::None
   OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
   OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
+  OS::TripleO::Services::Qdr: OS::Heat::None
   OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
   OS::TripleO::Services::HAProxyPublicTLS: OS::Heat::None
   OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None
@@ -176,7 +167,8 @@ resource_registry:
   OS::TripleO::Services::Memcached: puppet/services/memcached.yaml
   OS::TripleO::Services::SaharaApi: OS::Heat::None
   OS::TripleO::Services::SaharaEngine: OS::Heat::None
-  OS::TripleO::Services::Sshd: OS::Heat::None
+  OS::TripleO::Services::Securetty: OS::Heat::None
+  OS::TripleO::Services::Sshd: puppet/services/sshd.yaml
   OS::TripleO::Services::Redis: puppet/services/database/redis.yaml
   OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
   OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
@@ -190,22 +182,31 @@ resource_registry:
   OS::TripleO::Services::NovaLibvirt: puppet/services/nova-libvirt.yaml
   OS::TripleO::Services::Ntp: puppet/services/time/ntp.yaml
   OS::TripleO::Services::SwiftProxy: puppet/services/swift-proxy.yaml
+  OS::TripleO::Services::ExternalSwiftProxy: OS::Heat::None
   OS::TripleO::Services::SwiftStorage: puppet/services/swift-storage.yaml
   OS::TripleO::Services::SwiftRingBuilder: puppet/services/swift-ringbuilder.yaml
   OS::TripleO::Services::Snmp: puppet/services/snmp.yaml
   OS::TripleO::Services::Tacker: OS::Heat::None
   OS::TripleO::Services::Timezone: puppet/services/time/timezone.yaml
   OS::TripleO::Services::CeilometerApi: puppet/services/ceilometer-api.yaml
-  OS::TripleO::Services::CeilometerCollector: puppet/services/ceilometer-collector.yaml
-  OS::TripleO::Services::CeilometerExpirer: puppet/services/ceilometer-expirer.yaml
+  OS::TripleO::Services::CeilometerCollector: puppet/services/disabled/ceilometer-collector.yaml
+  OS::TripleO::Services::CeilometerExpirer: puppet/services/disabled/ceilometer-expirer.yaml
   OS::TripleO::Services::CeilometerAgentCentral: puppet/services/ceilometer-agent-central.yaml
   OS::TripleO::Services::CeilometerAgentNotification: puppet/services/ceilometer-agent-notification.yaml
   OS::TripleO::Services::ComputeCeilometerAgent: puppet/services/ceilometer-agent-compute.yaml
+  OS::TripleO::Services::CeilometerAgentIpmi: puppet/services/ceilometer-agent-ipmi.yaml
   OS::TripleO::Services::Horizon: puppet/services/horizon.yaml
+  # Undercloud Telemetry services
+  OS::TripleO::Services::UndercloudCeilometerAgentCentral: OS::Heat::None
+  OS::TripleO::Services::UndercloudCeilometerAgentNotification: OS::Heat::None
+
   #Gnocchi services
   OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
   OS::TripleO::Services::GnocchiMetricd: puppet/services/gnocchi-metricd.yaml
   OS::TripleO::Services::GnocchiStatsd: puppet/services/gnocchi-statsd.yaml
+  OS::TripleO::Services::UndercloudGnocchiApi: OS::Heat::None
+  OS::TripleO::Services::UndercloudGnocchiMetricd: OS::Heat::None
+  OS::TripleO::Services::UndercloudGnocchiStatsd: OS::Heat::None
   # Services that are disabled by default (use relevant environment files):
   OS::TripleO::Services::FluentdClient: OS::Heat::None
   OS::TripleO::Services::Collectd: OS::Heat::None
@@ -223,7 +224,12 @@ resource_registry:
   OS::TripleO::Services::AodhEvaluator: puppet/services/aodh-evaluator.yaml
   OS::TripleO::Services::AodhNotifier: puppet/services/aodh-notifier.yaml
   OS::TripleO::Services::AodhListener: puppet/services/aodh-listener.yaml
+  OS::TripleO::Services::UndercloudAodhApi: OS::Heat::None
+  OS::TripleO::Services::UndercloudAodhEvaluator: OS::Heat::None
+  OS::TripleO::Services::UndercloudAodhNotifier: OS::Heat::None
+  OS::TripleO::Services::UndercloudAodhListener: OS::Heat::None
   OS::TripleO::Services::PankoApi: puppet/services/panko-api.yaml
+  OS::TripleO::Services::UndercloudPankoApi: OS::Heat::None
   OS::TripleO::Services::MistralEngine: OS::Heat::None
   OS::TripleO::Services::MistralApi: OS::Heat::None
   OS::TripleO::Services::MistralExecutor: OS::Heat::None
@@ -239,6 +245,10 @@ resource_registry:
   OS::TripleO::Services::Zaqar: OS::Heat::None
   OS::TripleO::Services::NeutronML2FujitsuCfab: OS::Heat::None
   OS::TripleO::Services::NeutronML2FujitsuFossw: OS::Heat::None
+  OS::TripleO::Services::CinderBackendDellPs: OS::Heat::None
+  OS::TripleO::Services::CinderBackendDellSc: OS::Heat::None
+  OS::TripleO::Services::CinderBackendNetApp: OS::Heat::None
+  OS::TripleO::Services::CinderBackendScaleIO: OS::Heat::None
   OS::TripleO::Services::CinderHPELeftHandISCSI: OS::Heat::None
   OS::TripleO::Services::Etcd: OS::Heat::None
   OS::TripleO::Services::Ec2Api: OS::Heat::None
@@ -249,6 +259,9 @@ resource_registry:
   OS::TripleO::Services::OctaviaWorker: OS::Heat::None
   OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml
   OS::TripleO::Services::Vpp: OS::Heat::None
+  OS::TripleO::Services::NeutronVppAgent: OS::Heat::None
+  OS::TripleO::Services::Docker: OS::Heat::None
+  OS::TripleO::Services::CertmongerUser: OS::Heat::None
 
 parameter_defaults:
   EnablePackageInstall: false
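The hard-coded per-network Port and VipPort registrations removed above are now generated from the same network list. A short sketch of what the loop over networks with vip set expands to, using the entries from network_data.yaml earlier in this diff:

    # Networks as declared in network_data.yaml in this change.
    networks = [
        {'name': 'External', 'vip': True},
        {'name': 'InternalApi', 'vip': True},
        {'name': 'Storage', 'vip': True},
        {'name': 'StorageMgmt', 'vip': True},
        {'name': 'Tenant', 'vip': False},
        {'name': 'Management', 'vip': False, 'enabled': False},
    ]

    # Equivalent of: {%- for network in networks if network.vip|default(false) %}
    for net in networks:
        if net.get('vip', False):
            print('OS::TripleO::Network::Ports::%sVipPort: '
                  'network/ports/noop.yaml' % net['name'])

    # Prints the same four VipPort mappings that were previously hard-coded:
    # External, InternalApi, Storage and StorageMgmt.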
index e99f770..e1c70dc 100644 (file)
@@ -1,5 +1,13 @@
-{% set primary_role_name = roles[0].name -%}
-heat_template_version: ocata
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+    {%- set _ = primary_role.pop() -%}
+    {%- set _ = primary_role.append(role) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
+heat_template_version: pike
 
 description: >
   Deploy an OpenStack environment, consisting of several node types (roles),
@@ -43,7 +51,9 @@ parameters:
     type: string
   ControlFixedIPs:
     default: []
-    description: Should be used for arbitrary ips.
+    description: >
+        Control the IP allocation for the ControlVirtualIP port. E.g.
+        [{'ip_address':'1.2.3.4'}]
     type: json
   InternalApiVirtualFixedIPs:
     default: []
@@ -114,6 +124,11 @@ parameters:
     description: What interface to add to the HypervisorNeutronPhysicalBridge.
     type: string
 
+  NodeCreateBatchSize:
+    default: 30
+    description: Maximum batch size for creating nodes
+    type: number
+
   # Jinja loop for Role in role_data.yaml
 {% for role in roles %}
   # Parameters generated for {{role.name}} Role
@@ -156,6 +171,11 @@ parameters:
     type: json
     description: Optional scheduler hints to pass to nova
     default: {}
+
+  {{role.name}}Parameters:
+    type: json
+    description: Optional Role Specific parameters to be provided to service
+    default: {}
 {% endfor %}
 
   # Identifiers to trigger tasks on nodes
@@ -249,6 +269,16 @@ resources:
       type: json
       value: {get_attr: [EndpointMap, endpoint_map]}
 
+  SshKnownHostsConfig:
+    type: OS::TripleO::Ssh::KnownHostsConfig
+    properties:
+      known_hosts:
+        list_join:
+          - ''
+          {% for role in roles %}
+          - {get_attr: [{{role.name}}, known_hosts_entry]}
+          {% endfor %}
+
   # Jinja loop for Role in roles_data.yaml
 {% for role in roles %}
   # Resources generated for {{role.name}} Role
@@ -260,6 +290,8 @@ resources:
       ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
       EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
       DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
+      RoleName: {{role.name}}
+      RoleParameters: {get_param: {{role.name}}Parameters}
 
   # Filter any null/None service_names which may be present due to mapping
   # of services to OS::Heat::None
@@ -280,6 +312,13 @@ resources:
       config: {get_attr: [hostsConfig, config_id]}
       servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
 
+  {{role.name}}SshKnownHostsDeployment:
+    type: OS::Heat::StructuredDeployments
+    properties:
+      name: {{role.name}}SshKnownHostsDeployment
+      config: {get_resource: SshKnownHostsConfig}
+      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+
   {{role.name}}AllNodesDeployment:
     type: OS::Heat::StructuredDeployments
     depends_on:
@@ -339,6 +378,9 @@ resources:
   {{role.name}}:
     type: OS::Heat::ResourceGroup
     depends_on: Networks
+    update_policy:
+      batch_create:
+        max_batch_size: {get_param: NodeCreateBatchSize}
     properties:
       count: {get_param: {{role.name}}Count}
       removal_policies: {get_param: {{role.name}}RemovalPolicies}
@@ -398,7 +440,7 @@ resources:
         -
 {% for role in roles %}
           - list_join:
-            - "\n"
+            - ""
             - {get_attr: [{{role.name}}, hosts_entry]}
 {% endfor %}
 
@@ -579,12 +621,24 @@ resources:
       PingTestIps:
         list_join:
         - ' '
-        - - {get_attr: [{{primary_role_name}}, resource.0.external_ip_address]}
-          - {get_attr: [{{primary_role_name}}, resource.0.internal_api_ip_address]}
-          - {get_attr: [{{primary_role_name}}, resource.0.storage_ip_address]}
-          - {get_attr: [{{primary_role_name}}, resource.0.storage_mgmt_ip_address]}
-          - {get_attr: [{{primary_role_name}}, resource.0.tenant_ip_address]}
-          - {get_attr: [{{primary_role_name}}, resource.0.management_ip_address]}
+        - - yaql:
+              expression: coalesce($.data, []).first(null)
+              data: {get_attr: [{{primary_role_name}}, external_ip_address]}
+          - yaql:
+              expression: coalesce($.data, []).first(null)
+              data: {get_attr: [{{primary_role_name}}, internal_api_ip_address]}
+          - yaql:
+              expression: coalesce($.data, []).first(null)
+              data: {get_attr: [{{primary_role_name}}, storage_ip_address]}
+          - yaql:
+              expression: coalesce($.data, []).first(null)
+              data: {get_attr: [{{primary_role_name}}, storage_mgmt_ip_address]}
+          - yaql:
+              expression: coalesce($.data, []).first(null)
+              data: {get_attr: [{{primary_role_name}}, tenant_ip_address]}
+          - yaql:
+              expression: coalesce($.data, []).first(null)
+              data: {get_attr: [{{primary_role_name}}, management_ip_address]}
 
   UpdateWorkflow:
     type: OS::TripleO::Tasks::UpdateWorkflow
@@ -620,6 +674,7 @@ resources:
   AllNodesDeploySteps:
     type: OS::TripleO::PostDeploySteps
     depends_on:
+      - AllNodesExtraConfig
 {% for role in roles %}
       - {{role.name}}AllNodesDeployment
 {% endfor %}
@@ -671,4 +726,10 @@ outputs:
     value:
 {% for role in roles %}
       {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+{% endfor %}
+  RoleNetIpMap:
+    description: Mapping of each network to a list of IPs for each role
+    value:
+{% for role in roles %}
+      {{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]}
 {% endfor %}
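The PingTestIps hunk above replaces the resource.0.<network>_ip_address lookups with yaql expressions, presumably so the output still resolves when the primary role has no address on a given network. A plain-Python sketch of what coalesce($.data, []).first(null) evaluates to; the function name is illustrative:

    def first_or_none(addresses):
        # coalesce($.data, []) treats a missing/None attribute as an empty
        # list; .first(null) then yields the first element, or null (None
        # in Python) when the list is empty.
        addresses = addresses or []
        return addresses[0] if addresses else None

    print(first_or_none(['192.168.24.10', '192.168.24.11']))  # 192.168.24.10
    print(first_or_none(None))                                # None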
index f629eff..1f9c821 100644 (file)
@@ -1,5 +1,8 @@
-version: 1.0\r
-\r
-template: overcloud.yaml\r
-environments:\r
--  path: overcloud-resource-registry-puppet.yaml\r
+version: 1.0
+
+name: overcloud
+description: >
+  Default Deployment plan
+template: overcloud.yaml
+environments:
+  - path: overcloud-resource-registry-puppet.yaml
index 7edf17a..baafe03 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'All Nodes Config for Puppet'
 
 parameters:
@@ -55,9 +55,18 @@ parameters:
   StackAction:
     type: string
     description: >
-      Heat action on performed top-level stack.
+      Heat action performed on the top-level stack. Note StackUpdateType is
+      set to UPGRADE when a major-version upgrade is in progress.
     constraints:
     - allowed_values: ['CREATE', 'UPDATE']
+  StackUpdateType:
+    type: string
+    description: >
+      Type of update, to differentiate between UPGRADE and UPDATE cases
+      when StackAction is UPDATE (both are the same stack action).
+    constraints:
+    - allowed_values: ['', 'UPGRADE']
+    default: ''
   # NOTE(jaosorior): This is being set as IPA as it's the first
   # CA we'll actually be testing out. But we can change this if
   # people request it.
@@ -170,6 +179,7 @@ resources:
                 deploy_identifier: {get_param: DeployIdentifier}
                 update_identifier: {get_param: UpdateIdentifier}
                 stack_action: {get_param: StackAction}
+                stack_update_type: {get_param: StackUpdateType}
           vip_data:
             map_merge:
               # Dynamically generate per-service VIP data based on enabled_services
index 51f9aba..3fc663f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'OpenStack cinder storage configured by Puppet'
 parameters:
   BlockStorageImage:
@@ -109,6 +109,15 @@ parameters:
     type: string
     description: Command which will be run whenever configuration data changes
     default: os-refresh-config --timeout 14400
+  ConfigCollectSplay:
+    type: number
+    default: 30
+    description: |
+      Maximum amount of time to possibly delay configuration collection
+      polling. Defaults to 30 seconds. Set to 0 to disable it, which will
+      cause the configuration collection to occur as soon as the collection
+      process starts. This setting is used to prevent the configuration
+      collection processes from all polling at the exact same time.
   UpgradeInitCommand:
     type: string
     description: |
@@ -126,10 +135,11 @@ parameters:
 
 resources:
   BlockStorage:
-    type: OS::TripleO::Server
+    type: OS::TripleO::BlockStorageServer
     metadata:
       os-collect-config:
         command: {get_param: ConfigCommand}
+        splay: {get_param: ConfigCollectSplay}
     properties:
       image:
         {get_param: BlockStorageImage}
@@ -457,6 +467,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: BlockStorageDeployment
+    properties:
+        server: {get_resource: BlockStorage}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -504,6 +520,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [BlockStorage, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the block storage server
     value:
index d7d7f47..295e64f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'OpenStack ceph storage node configured by Puppet'
 parameters:
   OvercloudCephStorageFlavor:
@@ -115,6 +115,15 @@ parameters:
     type: string
     description: Command which will be run whenever configuration data changes
     default: os-refresh-config --timeout 14400
+  ConfigCollectSplay:
+    type: number
+    default: 30
+    description: |
+      Maximum amount of time to possibly delay configuration collection
+      polling. Defaults to 30 seconds. Set to 0 to disable it, which will
+      cause the configuration collection to occur as soon as the collection
+      process starts. This setting is used to prevent the configuration
+      collection processes from all polling at the exact same time.
   UpgradeInitCommand:
     type: string
     description: |
@@ -132,10 +141,11 @@ parameters:
 
 resources:
   CephStorage:
-    type: OS::TripleO::Server
+    type: OS::TripleO::CephStorageServer
     metadata:
       os-collect-config:
         command: {get_param: ConfigCommand}
+        splay: {get_param: ConfigCollectSplay}
     properties:
       image: {get_param: CephStorageImage}
       image_update_policy: {get_param: ImageUpdatePolicy}
@@ -468,6 +478,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: CephStorageDeployment
+    properties:
+        server: {get_resource: CephStorage}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -515,6 +531,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [CephStorage, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the ceph storage server
     value:
index ebdd762..05318f3 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack hypervisor node configured via Puppet.
@@ -127,6 +127,15 @@ parameters:
     type: string
     description: Command which will be run whenever configuration data changes
     default: os-refresh-config --timeout 14400
+  ConfigCollectSplay:
+    type: number
+    default: 30
+    description: |
+      Maximum amount of time to possibly delay configuration collection
+      polling. Defaults to 30 seconds. Set to 0 to disable it, which will
+      cause the configuration collection to occur as soon as the collection
+      process starts. This setting is used to prevent the configuration
+      collection processes from all polling at the exact same time.
   UpgradeInitCommand:
     type: string
     description: |
@@ -145,10 +154,11 @@ parameters:
 resources:
 
   NovaCompute:
-    type: OS::TripleO::Server
+    type: OS::TripleO::ComputeServer
     metadata:
       os-collect-config:
         command: {get_param: ConfigCommand}
+        splay: {get_param: ConfigCollectSplay}
     properties:
       image: {get_param: NovaImage}
       image_update_policy:
@@ -492,6 +502,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: NovaComputeDeployment
+    properties:
+        server: {get_resource: NovaCompute}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -559,7 +575,38 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [NovaCompute, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the Nova compute server
     value:
-      {get_resource: NovaCompute}
+      {get_resource: NovaCompute}
\ No newline at end of file
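The known_hosts_entry output above renders to a single standard SSH known_hosts
line: all of the node's IPs and short/FQDN hostnames, comma separated, followed
by the host's ECDSA public key collected by SshHostPubKey. A sketch of the
rendered value, with purely illustrative addresses, hostnames and key material:

    192.168.24.7,overcloud-novacompute-0.example.com,overcloud-novacompute-0,10.0.0.7,... ecdsa-sha2-nistp256 AAAA...
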
index 7337d06..832656b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   A software config which runs puppet on the {{role}} role
@@ -38,7 +38,7 @@ resources:
           - ''
           - list_join:
             - ','
-            - ['file,concat,file_line', {get_param: PuppetTags}]
+            - ['file,concat,file_line,augeas', {get_param: PuppetTags}]
       outputs:
       - name: result
       inputs:
index 2f4f583..163ba57 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack controller node configured by Puppet.
@@ -141,6 +141,15 @@ parameters:
     type: string
     description: Command which will be run whenever configuration data changes
     default: os-refresh-config --timeout 14400
+  ConfigCollectSplay:
+    type: number
+    default: 30
+    description: |
+      Maximum amount of time to possibly delay configuration collection
+      polling. Defaults to 30 seconds. Set to 0 to disable it, which will
+      cause the configuration collection to occur as soon as the collection
+      process starts. This setting is used to prevent the configuration
+      collection processes from all polling at the exact same time.
   UpgradeInitCommand:
     type: string
     description: |
@@ -165,10 +174,11 @@ parameter_groups:
 resources:
 
   Controller:
-    type: OS::TripleO::Server
+    type: OS::TripleO::ControllerServer
     metadata:
       os-collect-config:
         command: {get_param: ConfigCommand}
+        splay: {get_param: ConfigCollectSplay}
     properties:
       image: {get_param: controllerImage}
       image_update_policy: {get_param: ImageUpdatePolicy}
@@ -467,7 +477,6 @@ resources:
           - all_nodes # provided by allNodesConfig
           - vip_data # provided by allNodesConfig
           - '"%{::osfamily}"'
-          - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
           - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
           - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
           - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
@@ -532,6 +541,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: ControllerDeployment
+    properties:
+        server: {get_resource: Controller}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -599,6 +614,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [Controller, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the Nova compute server
     value:
index 5e89405..3a7a73c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Software Config to install deployment artifacts (tarball's and/or
index b6d1239..e1c464b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Configure hieradata for all MidoNet nodes
 
index b05fa63..313c126 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Configure hieradata for Network Cisco configuration
 
index 533c0ee..93408dd 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Configure hieradata for Big Switch agents on compute node
 
@@ -27,6 +27,15 @@ resources:
             mapped_data:
               neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
               neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+              # NOTE(aschultz): required for the puppet module but we don't
+              # actually want them defined on the compute nodes so we're
+              # relying on the puppet module's handling of <SERVICE DEFAULT>
+              # to just not set these but still accept that they were defined.
+              # This should be fixed in puppet-neutron and removed here, but
+              # for backportability, we need to define something.
+              neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
+              neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
+
 
   NeutronBigswitchDeployment:
     type: OS::Heat::StructuredDeployment
index 1d16e90..ea2fd71 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Configure hieradata for Nuage configuration on the Compute
 
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
deleted file mode 100644 (file)
index 378f7f9..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-heat_template_version: ocata
-
-description: Configure hieradata for Cinder Netapp configuration
-
-parameters:
-  server:
-    description: ID of the controller node to apply this config to
-    type: string
-
-  # Config specific parameters, to be provided via parameter_defaults
-  CinderEnableNetappBackend:
-    type: boolean
-    default: true
-  CinderNetappBackendName:
-    type: string
-    default: 'tripleo_netapp'
-  CinderNetappLogin:
-    type: string
-  CinderNetappPassword:
-    type: string
-    hidden: true
-  CinderNetappServerHostname:
-    type: string
-  CinderNetappServerPort:
-    type: string
-    default: '80'
-  CinderNetappSizeMultiplier:
-    type: string
-    default: '1.2'
-  CinderNetappStorageFamily:
-    type: string
-    default: 'ontap_cluster'
-  CinderNetappStorageProtocol:
-    type: string
-    default: 'nfs'
-  CinderNetappTransportType:
-    type: string
-    default: 'http'
-  CinderNetappVfiler:
-    type: string
-    default: ''
-  CinderNetappVolumeList:
-    type: string
-    default: ''
-  CinderNetappVserver:
-    type: string
-    default: ''
-  CinderNetappPartnerBackendName:
-    type: string
-    default: ''
-  CinderNetappNfsShares:
-    type: string
-    default: ''
-  CinderNetappNfsSharesConfig:
-    type: string
-    default: '/etc/cinder/shares.conf'
-  CinderNetappNfsMountOptions:
-    type: string
-    default: ''
-  CinderNetappCopyOffloadToolPath:
-    type: string
-    default: ''
-  CinderNetappControllerIps:
-    type: string
-    default: ''
-  CinderNetappSaPassword:
-    type: string
-    default: ''
-    hidden: true
-  CinderNetappStoragePools:
-    type: string
-    default: ''
-  CinderNetappHostType:
-    type: string
-    default: ''
-  CinderNetappWebservicePath:
-    type: string
-    default: '/devmgr/v2'
-  # DEPRECATED options for compatibility with older versions
-  CinderNetappEseriesHostType:
-    type: string
-    default: 'linux_dm_mp'
-
-parameter_groups:
-- label: deprecated
-  description: Do not use deprecated params, they will be removed.
-  parameters:
-  - CinderNetappEseriesHostType
-
-resources:
-  CinderNetappConfig:
-    type: OS::Heat::StructuredConfig
-    properties:
-      group: hiera
-      config:
-        datafiles:
-          cinder_netapp_data:
-            mapped_data:
-              tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend}
-              cinder::backend::netapp::title: {get_input: NetappBackendName}
-              cinder::backend::netapp::netapp_login: {get_input: NetappLogin}
-              cinder::backend::netapp::netapp_password: {get_input: NetappPassword}
-              cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname}
-              cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort}
-              cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier}
-              cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily}
-              cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol}
-              cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType}
-              cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler}
-              cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList}
-              cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver}
-              cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName}
-              cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares}
-              cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig}
-              cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions}
-              cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath}
-              cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps}
-              cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword}
-              cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools}
-              cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType}
-              cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath}
-
-  CinderNetappDeployment:
-    type: OS::Heat::StructuredDeployment
-    properties:
-      name: CinderNetappDeployment
-      config: {get_resource: CinderNetappConfig}
-      server: {get_param: server}
-      input_values:
-        EnableNetappBackend: {get_param: CinderEnableNetappBackend}
-        NetappBackendName: {get_param: CinderNetappBackendName}
-        NetappLogin: {get_param: CinderNetappLogin}
-        NetappPassword: {get_param: CinderNetappPassword}
-        NetappServerHostname: {get_param: CinderNetappServerHostname}
-        NetappServerPort: {get_param: CinderNetappServerPort}
-        NetappSizeMultiplier: {get_param: CinderNetappSizeMultiplier}
-        NetappStorageFamily: {get_param: CinderNetappStorageFamily}
-        NetappStorageProtocol: {get_param: CinderNetappStorageProtocol}
-        NetappTransportType: {get_param: CinderNetappTransportType}
-        NetappVfiler: {get_param: CinderNetappVfiler}
-        NetappVolumeList: {get_param: CinderNetappVolumeList}
-        NetappVserver: {get_param: CinderNetappVserver}
-        NetappPartnerBackendName: {get_param: CinderNetappPartnerBackendName}
-        NetappNfsShares: {get_param: CinderNetappNfsShares}
-        NetappNfsSharesConfig: {get_param: CinderNetappNfsSharesConfig}
-        NetappNfsMountOptions: {get_param: CinderNetappNfsMountOptions}
-        NetappCopyOffloadToolPath: {get_param: CinderNetappCopyOffloadToolPath}
-        NetappControllerIps: {get_param: CinderNetappControllerIps}
-        NetappSaPassword: {get_param: CinderNetappSaPassword}
-        NetappStoragePools: {get_param: CinderNetappStoragePools}
-        NetappHostType: {get_param: CinderNetappHostType}
-        NetappWebservicePath: {get_param: CinderNetappWebservicePath}
-
-outputs:
-  deploy_stdout:
-    description: Deployment reference, used to trigger puppet apply on changes
-    value: {get_attr: [CinderNetappDeployment, deploy_stdout]}
index d3d546d..69cd703 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Extra Pre-Deployment Config, multiple'
 parameters:
   server:
index 1456337..71a915d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Configure hieradata for Neutron Big Switch configuration
 
@@ -6,6 +6,14 @@ parameters:
   server:
     description: ID of the controller node to apply this config to
     type: string
+  NeutronBigswitchAgentEnabled:
+    description: The state of the neutron-bsn-agent service.
+    type: boolean
+    default: true
+  NeutronBigswitchLLDPEnabled:
+    description: The state of the neutron-bsn-lldp service.
+    type: boolean
+    default: false
   NeutronBigswitchRestproxyServers:
     description: 'Big Switch controllers ("IP:port,IP:port")'
     type: string
@@ -43,6 +51,8 @@ resources:
         datafiles:
           neutron_bigswitch_data:
             mapped_data:
+              neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+              neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
               neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
               neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
               neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
@@ -58,6 +68,8 @@ resources:
       config: {get_resource: NeutronBigswitchConfig}
       server: {get_param: server}
       input_values:
+        neutron_enable_bigswitch_agent: {get_param: NeutronBigswitchAgentEnabled}
+        neutron_enable_bigswitch_lldp: {get_param: NeutronBigswitchLLDPEnabled}
         restproxy_servers: {get_param: NeutronBigswitchRestproxyServers}
         restproxy_server_auth: {get_param: NeutronBigswitchRestproxyServerAuth }
         restproxy_auto_sync_on_failure: {get_param: NeutronBigswitchRestproxyAutoSyncOnFailure}
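The two parameters added above default to the agent being enabled and LLDP
being disabled; either can be flipped per deployment from a user environment
file. A minimal sketch:

    parameter_defaults:
      NeutronBigswitchAgentEnabled: false
      NeutronBigswitchLLDPEnabled: true
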
index bca6010..b346bbd 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Configure hieradata for Cisco N1KV configuration
 
@@ -10,7 +10,7 @@ parameters:
   # Config specific parameters, to be provided via parameter_defaults
   N1000vVSMIP:
     type: string
-    default: '192.0.2.50'
+    default: '192.168.24.50'
   N1000vVSMDomainID:
     type: number
     default: 100
@@ -62,7 +62,7 @@ parameters:
     default: '255.255.255.0'
   N1000vMgmtGatewayIP:
     type: string
-    default: '192.0.2.1'
+    default: '192.168.24.1'
   N1000vPacemakerControl:
     type: boolean
     default: true
index 5da07f8..f14e13f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Noop Extra Pre-Deployment Config'
 parameters:
   server:
index 65113f6..7fb67d8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Configure hieradata overrides for specific nodes
 
index 04b5ccf..11f2769 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   This is a template which will inject the trusted anchor.
index 7ce1506..9c6a402 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Enroll nodes to FreeIPA
 
index 2a61afc..8cba435 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   This is a template which will build the TLS Certificates necessary
index 6f2dd68..8420f99 100644 (file)
@@ -2,7 +2,7 @@
 {% set batch_upgrade_steps_max = 3 -%}
 {% set upgrade_steps_max = 6 -%}
 {% set deliver_script = {'deliver': False} -%}
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Upgrade steps for all roles'
 
 parameters:
@@ -32,21 +32,6 @@ parameters:
     type: string
     hidden: true
 
-conditions:
-  # Conditions to disable any steps where the task list is empty
-{%- for role in roles %}
-  {{role.name}}UpgradeBatchConfigEnabled:
-    not:
-      equals:
-        - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
-        - []
-  {{role.name}}UpgradeConfigEnabled:
-    not:
-      equals:
-        - {get_param: [role_data, {{role.name}}, upgrade_tasks]}
-        - []
-{%- endfor %}
-
 resources:
 
 {% for role in roles if role.disable_upgrade_deployment|default(false) %}
@@ -65,18 +50,22 @@ resources:
           - "  crudini --set /etc/nova/nova.conf placement project_domain_name Default\n\n"
           - "  crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n"
           - "  crudini --set /etc/nova/nova.conf placement project_name service\n\n"
-          - "  systemctl restart openstack-nova-compute\n\n"
-          - "fi\n\n"
+          - "  crudini --set /etc/nova/nova.conf placement os_interface internal\n\n"
           - str_replace:
               template: |
                 crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD'
-                crudini --set /etc/nova/nova.conf placement region_name 'REGION_NAME'
+                crudini --set /etc/nova/nova.conf placement os_region_name 'REGION_NAME'
                 crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL'
-                ROLE='ROLE_NAME'
               params:
                 SERVICE_PASSWORD: { get_param: NovaPassword }
                 REGION_NAME: { get_param: KeystoneRegion }
                 AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+          - "  systemctl restart openstack-nova-compute\n\n"
+          - "fi\n\n"
+          - str_replace:
+              template: |
+                ROLE='ROLE_NAME'
+              params:
                 ROLE_NAME: {{role.name}}
           - get_file: ../extraconfig/tasks/pacemaker_common_functions.sh
           - get_file: ../extraconfig/tasks/run_puppet.sh
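After the reordered crudini calls above run on a role with
disable_upgrade_deployment set, the [placement] section of nova.conf ends up
roughly as follows (a sketch; the bracketed values are placeholders, the option
names are the ones set by the script):

    [placement]
    project_domain_name = Default
    user_domain_name = Default
    project_name = service
    os_interface = internal
    os_region_name = <KeystoneRegion>
    password = <NovaPassword>
    auth_url = <KeystoneAdmin uri_no_suffix>
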
@@ -99,23 +88,22 @@ resources:
   {%- for role in roles %}
   {{role.name}}UpgradeBatchConfig_Step{{step}}:
     type: OS::TripleO::UpgradeConfig
-  {%- if step > 0 %}
-    condition: {{role.name}}UpgradeBatchConfigEnabled
-    {% if role.name in enabled_roles %}
+    {%- if step > 0 %}
     depends_on:
-      - {{role.name}}UpgradeBatch_Step{{step -1}}
-    {%- endif %}
-  {% else %}
+      {%- for role_inside in enabled_roles %}
+      - {{role_inside.name}}UpgradeBatch_Step{{step -1}}
+      {%- endfor %}
+    {% else %}
     {% for role in roles if role.disable_upgrade_deployment|default(false) %}
       {% if deliver_script.update({'deliver': True}) %} {% endif %}
     {% endfor %}
     {% if deliver_script.deliver %}
     depends_on:
-    {% endif %}
       {% for dep in roles if dep.disable_upgrade_deployment|default(false) %}
       - {{dep.name}}DeliverUpgradeScriptDeployment
       {% endfor %}
-  {% endif %}
+    {% endif %}
+    {% endif %}
     properties:
       UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
       step: {{step}}
@@ -125,21 +113,28 @@ resources:
   {%- for role in enabled_roles %}
   {{role.name}}UpgradeBatch_Step{{step}}:
     type: OS::Heat::SoftwareDeploymentGroup
-    condition: {{role.name}}UpgradeBatchConfigEnabled
     {%- if step > 0 %}
     depends_on:
-      - {{role.name}}UpgradeBatch_Step{{step -1}}
+      {%- for role_inside in enabled_roles %}
+      - {{role_inside.name}}UpgradeBatch_Step{{step -1}}
+      {%- endfor %}
     {% else %}
+    {% for role in roles if role.disable_upgrade_deployment|default(false) %}
+      {% if deliver_script.update({'deliver': True}) %} {% endif %}
+    {% endfor %}
+    {% if deliver_script.deliver %}
     depends_on:
-      - {{role.name}}UpgradeBatchConfig_Step{{step}}
-    {%- endif %}
+      {% for dep in roles if dep.disable_upgrade_deployment|default(false) %}
+      - {{dep.name}}DeliverUpgradeScriptDeployment
+      {% endfor %}
+    {% endif %}
+    {% endif %}
     update_policy:
       batch_create:
         max_batch_size: {{role.upgrade_batch_size|default(1)}}
       rolling_update:
         max_batch_size: {{role.upgrade_batch_size|default(1)}}
     properties:
-      name: {{role.name}}UpgradeBatch_Step{{step}}
       servers: {get_param: [servers, {{role.name}}]}
       config: {get_resource: {{role.name}}UpgradeBatchConfig_Step{{step}}}
       input_values:
@@ -180,17 +175,18 @@ resources:
   {%- for role in roles %}
   {{role.name}}UpgradeConfig_Step{{step}}:
     type: OS::TripleO::UpgradeConfig
-  # The UpgradeConfig resources could actually be created without
-  # serialization, but the event output is easier to follow if we
-  # do, and there should be minimal performance hit (creating the
-  # config is cheap compared to the time to apply the deployment).
-  {%- if step > 0 %}
-    condition: {{role.name}}UpgradeConfigEnabled
-    {% if role.name in enabled_roles %}
+    # The UpgradeConfig resources could actually be created without
+    # serialization, but the event output is easier to follow if we
+    # do, and there should be minimal performance hit (creating the
+    # config is cheap compared to the time to apply the deployment).
     depends_on:
-      - {{role.name}}Upgrade_Step{{step -1}}
-    {% endif %}
-  {%- endif %}
+      {%- for role_inside in enabled_roles %}
+      {%- if step > 0 %}
+      - {{role_inside.name}}Upgrade_Step{{step -1}}
+      {%- else %}
+      - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+      {%- endif %}
+      {%- endfor %}
     properties:
       UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
       step: {{step}}
@@ -200,13 +196,15 @@ resources:
   {%- for role in enabled_roles %}
   {{role.name}}Upgrade_Step{{step}}:
     type: OS::Heat::SoftwareDeploymentGroup
-    {%- if step > 0 %}
-    condition: {{role.name}}UpgradeConfigEnabled
     depends_on:
-      - {{role.name}}Upgrade_Step{{step -1}}
-    {%- endif %}
+      {%- for role_inside in enabled_roles %}
+      {%- if step > 0 %}
+      - {{role_inside.name}}Upgrade_Step{{step -1}}
+      {%- else %}
+      - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+      {%- endif %}
+      {%- endfor %}
     properties:
-      name: {{role.name}}Upgrade_Step{{step}}
       servers: {get_param: [servers, {{role.name}}]}
       config: {get_resource: {{role.name}}UpgradeConfig_Step{{step}}}
       input_values:
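To make the dependency rework concrete, here is a sketch of what the template
above renders to for step 1 with two enabled roles (role names illustrative):
each deployment group now waits on every enabled role's previous step instead
of only its own.

    ControllerUpgrade_Step1:
      type: OS::Heat::SoftwareDeploymentGroup
      depends_on:
        - ControllerUpgrade_Step0
        - ComputeUpgrade_Step0
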
index 6ee06d7..7ee12b1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'OpenStack swift storage node configured by Puppet'
 parameters:
   OvercloudSwiftStorageFlavor:
@@ -109,6 +109,15 @@ parameters:
     type: string
     description: Command which will be run whenever configuration data changes
     default: os-refresh-config --timeout 14400
+  ConfigCollectSplay:
+    type: number
+    default: 30
+    description: |
+      Maximum amount of time to possibly delay configuration collection
+      polling. Defaults to 30 seconds. Set to 0 to disable it, which will
+      cause the configuration collection to occur as soon as the collection
+      process starts. This setting is used to prevent the configuration
+      collection processes from all polling at the exact same time.
   UpgradeInitCommand:
     type: string
     description: |
@@ -127,10 +136,11 @@ parameters:
 resources:
 
   SwiftStorage:
-    type: OS::Nova::Server
+    type: OS::TripleO::ObjectStorageServer
     metadata:
       os-collect-config:
         command: {get_param: ConfigCommand}
+        splay: {get_param: ConfigCollectSplay}
     properties:
       image: {get_param: SwiftStorageImage}
       flavor: {get_param: OvercloudSwiftStorageFlavor}
@@ -455,6 +465,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: SwiftStorageHieraDeploy
+    properties:
+        server: {get_resource: SwiftStorage}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -502,6 +518,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [SwiftStorage, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the swift storage server
     value:
index b84039d..c51b6e1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Post-upgrade configuration steps via puppet for all roles
index 2120277..3a15cec 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Post-deploy configuration steps via puppet for all roles,
index 581c4f0..360c633 100644 (file)
     properties:
       StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
 
-  {{role.name}}PrePuppet:
-    type: OS::TripleO::Tasks::{{role.name}}PrePuppet
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  {% if role.name in ['Controller', 'ObjectStorage'] %}
-  {{role.name}}SwiftRingDeploy:
-    type: OS::TripleO::Tasks::SwiftRingDeploy
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-  {% endif %}
-
   # Step through a series of configuration steps
 {% for step in range(1, 6) %}
   {{role.name}}Deployment_Step{{step}}:
@@ -65,7 +51,7 @@
       - {{dep.name}}Deployment_Step5
   {% endfor %}
     properties:
-      servers:  {get_param: servers}
+      servers: {get_param: servers}
       input_values:
         update_identifier: {get_param: DeployIdentifier}
 
     properties:
         servers: {get_param: [servers, {{role.name}}]}
 
-  {{role.name}}PostPuppet:
-    depends_on:
-      - {{role.name}}ExtraConfigPost
-    type: OS::TripleO::Tasks::{{role.name}}PostPuppet
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  {% if role.name in ['Controller', 'ObjectStorage'] %}
-  {{role.name}}SwiftRingUpdate:
-    type: OS::TripleO::Tasks::SwiftRingUpdate
-    depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}Deployment_Step5
-  {% endfor %}
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-  {% endif %}
 {% endfor %}
index 1f68f41..dbb517f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'OpenStack {{role}} node configured by Puppet'
 parameters:
   Overcloud{{role}}Flavor:
@@ -125,6 +125,15 @@ parameters:
     type: string
     description: Command which will be run whenever configuration data changes
     default: os-refresh-config --timeout 14400
+  ConfigCollectSplay:
+    type: number
+    default: 30
+    description: |
+      Maximum amount of time to possibly delay configuration collection
+      polling. Defaults to 30 seconds. Set to 0 to disable it, which will
+      cause the configuration collection to occur as soon as the collection
+      process starts. This setting is used to prevent the configuration
+      collection processes from all polling at the exact same time.
   LoggingSources:
     type: json
     default: []
@@ -148,10 +157,11 @@ parameters:
 
 resources:
   {{role}}:
-    type: OS::TripleO::Server
+    type: OS::TripleO::{{role.name}}Server
     metadata:
       os-collect-config:
         command: {get_param: ConfigCommand}
+        splay: {get_param: ConfigCollectSplay}
     properties:
       image: {get_param: {{role}}Image}
       image_update_policy: {get_param: ImageUpdatePolicy}
@@ -483,12 +493,19 @@ resources:
     type: OS::Heat::SoftwareDeployment
     depends_on: NetworkDeployment
     properties:
+      name: UpdateDeployment
       config: {get_resource: UpdateConfig}
       server: {get_resource: {{role}}}
       input_values:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: {{role}}Deployment
+    properties:
+        server: {get_resource: {{role}}}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -536,6 +553,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [{{role}}, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for {{role}} server
     value:
index f19b6cc..7a18ef0 100644 (file)
@@ -16,6 +16,39 @@ Each service may define its own input parameters and defaults.
 Operators will use the parameter_defaults section of any Heat
 environment to set per service parameters.
 
+Apart from service-specific inputs, there are a few default parameters that
+are common to all services. The default parameters are:
+
+ * ServiceNetMap: Mapping of service_name -> network name. Default mappings
+   for service to network names are defined in
+   ../network/service_net_map.j2.yaml, which may be overridden via
+   ServiceNetMap values added to a user environment file via
+   parameter_defaults.
+
+ * EndpointMap: Mapping of service endpoint -> protocol. Contains a mapping of
+   endpoint data generated for all services, based on the data included in
+   ../network/endpoints/endpoint_data.yaml.
+
+ * DefaultPasswords: Mapping of service -> default password. Used to pass some
+   passwords from the parent templates, this is a legacy interface and should
+   not be used by new services.
+
+ * RoleName: Name of the role on which this service is deployed. A service can
+   be deployed in multiple roles. This is an internal parameter (it should not
+   be set via an environment file); it is fetched from the name attribute of
+   the roles_data.yaml template.
+
+ * RoleParameters: Parameters specific to the role on which the service is
+   applied. Using the format "<RoleName>Parameters" in the parameter_defaults
+   of a user environment file, parameters can be provided for a specific
+   role. For example, to provide a parameter specific to the "Compute" role,
+   use the following format::
+
+      parameter_defaults:
+        ComputeParameters:
+          Param1: value
+
+
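As noted in the ServiceNetMap item above, individual entries of that map can be
overridden from a user environment file as well; a minimal sketch (the service
key shown is illustrative)::

      parameter_defaults:
        ServiceNetMap:
          AodhApiNetwork: internal_api
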
 Config Settings
 ---------------
 
index d7c87b6..561b48c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Aodh API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -24,6 +32,12 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  AodhApiPolicies:
+    description: |
+      A hash of policies to configure for Aodh API.
+      e.g. { aodh-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
   AodhBase:
@@ -32,6 +46,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
   ApacheServiceBase:
     type: ./apache.yaml
@@ -39,6 +55,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       EnableInternalTLS: {get_param: EnableInternalTLS}
 
 outputs:
@@ -61,6 +79,7 @@ outputs:
             aodh::wsgi::apache::wsgi_process_display_name: 'aodh_wsgi'
             aodh::api::service_name: 'httpd'
             aodh::api::enable_proxy_headers_parsing: true
+            aodh::policy::policies: {get_param: AodhApiPolicies}
             tripleo.aodh_api.firewall_rules:
               '128 aodh-api':
                 dport:
@@ -86,6 +105,12 @@ outputs:
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: Stop aodh_api service (running under httpd)
-          tags: step1
-          service: name=httpd state=stopped
+        yaql:
+          expression: $.data.apache_upgrade + $.data.aodh_api_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            aodh_api_upgrade:
+              - name: Stop aodh_api service (running under httpd)
+                tags: step1
+                service: name=httpd state=stopped
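The yaql expression above simply concatenates the two lists, so the rendered
upgrade_tasks become the Apache service's tasks followed by the aodh-api
specific task; a sketch of the merged result (Apache tasks abbreviated):

    upgrade_tasks:
      - name: Check if httpd is deployed
        command: systemctl is-enabled httpd
        tags: common
        ignore_errors: True
        register: httpd_enabled
      # ... remaining Apache upgrade tasks ...
      - name: Stop aodh_api service (running under httpd)
        tags: step1
        service: name=httpd state=stopped
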
index c2c2d02..331fe9a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Aodh service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -61,15 +69,15 @@ outputs:
       config_settings:
         aodh_redis_password: {get_param: RedisPassword}
         aodh::db::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://aodh:'
-              - {get_param: AodhPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/aodh'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: aodh
+            password: {get_param: AodhPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /aodh
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
         aodh::debug: {get_param: Debug}
         aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         aodh::rabbit_userid: {get_param: RabbitUserName}
@@ -77,11 +85,13 @@ outputs:
         aodh::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         aodh::rabbit_port: {get_param: RabbitClientPort}
         aodh::keystone::authtoken::project_name: 'service'
+        aodh::keystone::authtoken::user_domain_name: 'Default'
+        aodh::keystone::authtoken::project_domain_name: 'Default'
         aodh::keystone::authtoken::password: {get_param: AodhPassword}
-        aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+        aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         aodh::auth::auth_password: {get_param: AodhPassword}
-        aodh::auth::auth_region: 'regionOne'
+        aodh::auth::auth_region: {get_param: KeystoneRegion}
         aodh::auth::auth_tenant_name: 'service'
       service_config_settings:
         keystone:
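Functionally the make_url block above produces the same connection string the
removed list_join built; a sketch of the rendered value (scheme and host shown
are illustrative):

    mysql+pymysql://aodh:<AodhPassword>@192.168.24.10/aodh?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo
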
index b8be4a9..669c11d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Aodh Evaluator service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index f5c9330..17710ec 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Aodh Listener service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 84c50dd..2eed1b7 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Aodh Notifier service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
diff --git a/puppet/services/apache-internal-tls-certmonger.yaml b/puppet/services/apache-internal-tls-certmonger.yaml
deleted file mode 100644 (file)
index 4c94f44..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-heat_template_version: ocata
-
-description: >
-  Apache service TLS configurations.
-
-parameters:
-  ServiceNetMap:
-    default: {}
-    description: Mapping of service_name -> network name. Typically set
-                 via parameter_defaults in the resource registry.  This
-                 mapping overrides those in ServiceNetMapDefaults.
-    type: json
-  # The following parameters are not needed by the template but are
-  # required to pass the pep8 tests
-  DefaultPasswords:
-    default: {}
-    type: json
-  EndpointMap:
-    default: {}
-    description: Mapping of service endpoint -> protocol. Typically set
-                 via parameter_defaults in the resource registry.
-    type: json
-
-resources:
-
-  ApacheNetworks:
-    type: OS::Heat::Value
-    properties:
-      value:
-        # NOTE(jaosorior) Get unique network names to create
-        # certificates for those. We skip the tenant network since
-        # we don't need a certificate for that, and the external
-        # network will be handled in another template.
-        yaql:
-          expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
-          data:
-            map:
-              get_param: ServiceNetMap
-
-outputs:
-  role_data:
-    description: Role data for the Apache role.
-    value:
-      service_name: apache_internal_tls_certmonger
-      config_settings:
-        generate_service_certificates: true
-        apache_certificates_specs:
-          map_merge:
-            repeat:
-              template:
-                httpd-NETWORK:
-                  service_certificate: '/etc/pki/tls/certs/httpd-NETWORK.crt'
-                  service_key: '/etc/pki/tls/private/httpd-NETWORK.key'
-                  hostname: "%{hiera('fqdn_NETWORK')}"
-                  principal: "HTTP/%{hiera('fqdn_NETWORK')}"
-              for_each:
-                NETWORK: {get_attr: [ApacheNetworks, value]}
-      metadata_settings:
-        repeat:
-          template:
-            - service: HTTP
-              network: $NETWORK
-              type: node
-          for_each:
-            $NETWORK: {get_attr: [ApacheNetworks, value]}
-      upgrade_tasks:
-        - name: Check if httpd is deployed
-          command: systemctl is-enabled httpd
-          tags: common
-          ignore_errors: True
-          register: httpd_enabled
-        - name: "PreUpgrade step0,validation: Check service httpd is running"
-          shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
-          when: httpd_enabled.rc == 0
-          tags: step0,validation
index 2d95015..23fcab9 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Apache service configured with Puppet. Note this is typically included
@@ -22,6 +22,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -30,14 +38,31 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
 
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 resources:
 
-  ApacheTLS:
-    type: OS::TripleO::Services::ApacheTLS
+  ApacheNetworks:
+    type: OS::Heat::Value
     properties:
-      ServiceNetMap: {get_param: ServiceNetMap}
+      value:
+        # NOTE(jaosorior) Get unique network names to create
+        # certificates for those. We skip the tenant network since
+        # we don't need a certificate for that, and the external
+        # is for HAProxy so it isn't used for apache either.
+        yaql:
+          expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
+          data:
+            map:
+              get_param: ServiceNetMap
 
 outputs:
   role_data:
@@ -46,13 +71,13 @@ outputs:
       service_name: apache
       config_settings:
         map_merge:
-          - get_attr: [ApacheTLS, role_data, config_settings]
           -
             # for the given network; replacement examples (eg. for internal_api):
             # internal_api -> IP
             # internal_api_uri -> [IP]
             # internal_api_subnet - > IP/CIDR
             apache::ip: {get_param: [ServiceNetMap, ApacheNetwork]}
+            apache::default_vhost: false
             apache::server_signature: 'Off'
             apache::server_tokens: 'Prod'
             apache_remote_proxy_ips_network:
@@ -64,8 +89,37 @@ outputs:
             apache::mod::prefork::serverlimit: { get_param: ApacheServerLimit }
             apache::mod::remoteip::proxy_ips:
               - "%{hiera('apache_remote_proxy_ips_network')}"
+          - if:
+            - internal_tls_enabled
+            -
+              generate_service_certificates: true
+              apache::mod::ssl::ssl_ca: {get_param: InternalTLSCAFile}
+              tripleo::certmonger::apache_dirs::certificate_dir: '/etc/pki/tls/certs/httpd'
+              tripleo::certmonger::apache_dirs::key_dir: '/etc/pki/tls/private/httpd'
+              apache_certificates_specs:
+                map_merge:
+                  repeat:
+                    template:
+                      httpd-NETWORK:
+                        service_certificate: '/etc/pki/tls/certs/httpd/httpd-NETWORK.crt'
+                        service_key: '/etc/pki/tls/private/httpd/httpd-NETWORK.key'
+                        hostname: "%{hiera('fqdn_NETWORK')}"
+                        principal: "HTTP/%{hiera('fqdn_NETWORK')}"
+                    for_each:
+                      NETWORK: {get_attr: [ApacheNetworks, value]}
+            - {}
       metadata_settings:
-        get_attr: [ApacheTLS, role_data, metadata_settings]
+        if:
+          - internal_tls_enabled
+          -
+            repeat:
+              template:
+                - service: HTTP
+                  network: $NETWORK
+                  type: node
+              for_each:
+                $NETWORK: {get_attr: [ApacheNetworks, value]}
+          - null
       upgrade_tasks:
         - name: Check if httpd is deployed
           command: systemctl is-enabled httpd
@@ -76,3 +130,6 @@ outputs:
           shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
           when: httpd_enabled.rc == 0
           tags: step0,validation
+        - name: Ensure mod_ssl package is installed
+          tags: step3
+          yum: name=mod_ssl state=latest
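For a single network the repeat/for_each block above expands to one concrete
certificate spec; a sketch for the internal_api network (the network name is
illustrative, taken from the usual ServiceNetMap values):

    httpd-internal_api:
      service_certificate: '/etc/pki/tls/certs/httpd/httpd-internal_api.crt'
      service_key: '/etc/pki/tls/private/httpd/httpd-internal_api.key'
      hostname: "%{hiera('fqdn_internal_api')}"
      principal: "HTTP/%{hiera('fqdn_internal_api')}"
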
index 8085ac8..3eff534 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   AuditD configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index cba9241..53fba63 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Barbican API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -55,6 +63,12 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  BarbicanPolicies:
+    description: |
+      A hash of policies to configure for Barbican.
+      e.g. { barbican-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
 
@@ -64,6 +78,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -74,9 +90,10 @@ outputs:
         map_merge:
           - get_attr: [ApacheServiceBase, role_data, config_settings]
           - barbican::keystone::authtoken::password: {get_param: BarbicanPassword}
-            barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+            barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             barbican::keystone::authtoken::project_name: 'service'
+            barbican::policy::policies: {get_param: BarbicanPolicies}
             barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]}
             barbican::api::db_auto_create: false
             barbican::api::enabled_certificate_plugins: ['simple_certificate']
@@ -97,15 +114,15 @@ outputs:
                 params:
                   $NETWORK: {get_param: [ServiceNetMap, BarbicanApiNetwork]}
             barbican::db::database_connection:
-              list_join:
-                - ''
-                - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                  - '://barbican:'
-                  - {get_param: BarbicanPassword}
-                  - '@'
-                  - {get_param: [EndpointMap, MysqlInternal, host]}
-                  - '/barbican'
-                  - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+              make_url:
+                scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+                username: barbican
+                password: {get_param: BarbicanPassword}
+                host: {get_param: [EndpointMap, MysqlInternal, host]}
+                path: /barbican
+                query:
+                  read_default_file: /etc/my.cnf.d/tripleo.cnf
+                  read_default_group: tripleo
             tripleo.barbican_api.firewall_rules:
               '117 barbican':
                 dport:
@@ -135,27 +152,33 @@ outputs:
           nova::compute::barbican_endpoint:
             get_param: [EndpointMap, BarbicanInternal, uri]
           nova::compute::barbican_auth_endpoint:
-            get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+            get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]
         cinder_api:
           cinder::api::keymgr_api_class: >
             castellan.key_manager.barbican_key_manager.BarbicanKeyManager
           cinder::api::keymgr_encryption_api_url:
             get_param: [EndpointMap, BarbicanInternal, uri]
           cinder::api::keymgr_encryption_auth_url:
-            get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+            get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: Check if barbican_api is deployed
-          command: systemctl is-enabled openstack-barbican-api
-          tags: common
-          ignore_errors: True
-          register: barbican_api_enabled
-        - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
-          shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
-          when: barbican_api_enabled.rc == 0
-          tags: step0,validation
-        - name: Install openstack-barbican-api package if it was disabled
-          tags: step3
-          yum: name=openstack-barbican-api state=latest
-          when: barbican_api_enabled.rc != 0
+        yaql:
+          expression: $.data.apache_upgrade + $.data.barbican_api_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            barbican_api_upgrade:
+              - name: Check if barbican_api is deployed
+                command: systemctl is-enabled openstack-barbican-api
+                tags: common
+                ignore_errors: True
+                register: barbican_api_enabled
+              - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
+                shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
+                when: barbican_api_enabled.rc == 0
+                tags: step0,validation
+              - name: Install openstack-barbican-api package if it was disabled
+                tags: step3
+                yum: name=openstack-barbican-api state=latest
+                when: barbican_api_enabled.rc != 0
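
For readers not yet familiar with make_url: it assembles the same database URI that the removed list_join built by hand. A minimal sketch of the resulting hiera value, assuming the EndpointMap protocol resolves to mysql+pymysql and using placeholder host and password:

# Illustrative only; the real host and password come from EndpointMap and BarbicanPassword.
barbican::db::database_connection: mysql+pymysql://barbican:secret@192.0.2.10/barbican?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo
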
index 735e6dd..6249c1a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   HAproxy service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
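
The RoleName/RoleParameters pair added here (and to every service template below) lets role-specific settings reach the service profiles. A hedged sketch of how such values might be supplied, assuming the per-role <RoleName>Parameters convention; the parameter name inside is purely illustrative:

parameter_defaults:
  # Hypothetical role-scoped override applied only to the Compute role.
  ComputeParameters:
    ExampleTunable: 42
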
index 8082352..2dbaf55 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ceilometer Central Agent service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -38,6 +46,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -53,6 +63,8 @@ outputs:
           - get_attr: [CeilometerServiceBase, role_data, config_settings]
           - ceilometer_redis_password: {get_param: RedisPassword}
             central_namespace: true
+      service_config_settings:
+        get_attr: [CeilometerServiceBase, role_data, service_config_settings]
       step_config: |
         include ::tripleo::profile::base::ceilometer::agent::polling
       upgrade_tasks:
index 546bcd9..c453a43 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ceilometer Compute Agent service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -35,6 +43,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -47,6 +57,8 @@ outputs:
           - get_attr: [CeilometerServiceBase, role_data, config_settings]
           - ceilometer::agent::compute::instance_discovery_method: {get_param: InstanceDiscoveryMethod}
             compute_namespace: true
+      service_config_settings:
+        get_attr: [CeilometerServiceBase, role_data, service_config_settings]
       step_config: |
         include ::tripleo::profile::base::ceilometer::agent::polling
       upgrade_tasks:
diff --git a/puppet/services/ceilometer-agent-ipmi.yaml b/puppet/services/ceilometer-agent-ipmi.yaml
new file mode 100644 (file)
index 0000000..7dd1e78
--- /dev/null
@@ -0,0 +1,87 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Ceilometer IPMI Agent service configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  RedisPassword:
+    description: The password for the redis service account.
+    type: string
+    hidden: true
+  MonitoringSubscriptionCeilometerIpmi:
+    default: 'overcloud-ceilometer-agent-ipmi'
+    type: string
+  CeilometerAgentIpmiLoggingSource:
+    type: json
+    default:
+      tag: openstack.ceilometer.agent.ipmi
+      path: /var/log/ceilometer/ipmi.log
+
+resources:
+  CeilometerServiceBase:
+    type: ./ceilometer-base.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Ceilometer Agent IPMI role.
+    value:
+      service_name: ceilometer_agent_ipmi
+      monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerIpmi}
+      logging_source: {get_param: CeilometerAgentIpmiLoggingSource}
+      logging_groups:
+        - ceilometer
+      config_settings:
+        map_merge:
+          - get_attr: [CeilometerServiceBase, role_data, config_settings]
+          - ceilometer_redis_password: {get_param: RedisPassword}
+            ipmi_namespace: true
+      step_config: |
+        include ::tripleo::profile::base::ceilometer::agent::polling
+      upgrade_tasks:
+        - name: Check if ceilometer-agent-ipmi is deployed
+          command: systemctl is-enabled openstack-ceilometer-ipmi
+          tags: common
+          ignore_errors: True
+          register: ceilometer_ipmi_enabled
+        - name: "PreUpgrade step0,validation: Check if openstack-ceilometer-ipmi is running"
+          shell: >
+            /usr/bin/systemctl show 'openstack-ceilometer-ipmi' --property ActiveState |
+            grep '\bactive\b'
+          when: ceilometer_ipmi_enabled.rc == 0
+          tags: step0,validation
+        - name: Stop openstack-ceilometer-ipmi service
+          tags: step1
+          when: ceilometer_ipmi_enabled.rc == 0
+          service: name=openstack-ceilometer-ipmi state=stopped
+        - name: Install openstack-ceilometer-ipmi package if it was disabled
+          tags: step3
+          yum: name=openstack-ceilometer-ipmi state=latest
+          when: ceilometer_ipmi_enabled.rc != 0
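
The new IPMI agent is not enabled by itself; a minimal environment sketch that wires it in, assuming the usual OS::TripleO::Services::CeilometerAgentIpmi registry key (confirm against the release's resource registry):

resource_registry:
  # Assumed key name; path is relative to the environments directory.
  OS::TripleO::Services::CeilometerAgentIpmi: ../puppet/services/ceilometer-agent-ipmi.yaml
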
index 4ee43f4..6e89356 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ceilometer Notification Agent service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -35,6 +43,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -47,6 +57,8 @@ outputs:
         - ceilometer
       config_settings:
         get_attr: [CeilometerServiceBase, role_data, config_settings]
+      service_config_settings:
+        get_attr: [CeilometerServiceBase, role_data, service_config_settings]
       step_config: |
         include ::tripleo::profile::base::ceilometer::agent::notification
       upgrade_tasks:
index f5ee9d4..74b0c3d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ceilometer API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,12 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  CeilometerApiPolicies:
+    description: |
+      A hash of policies to configure for Ceilometer API.
+      e.g. { ceilometer-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
   CeilometerServiceBase:
@@ -37,6 +51,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
   ApacheServiceBase:
     type: ./apache.yaml
@@ -44,6 +60,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       EnableInternalTLS: {get_param: EnableInternalTLS}
 
 outputs:
@@ -78,6 +96,7 @@ outputs:
                   "%{hiera('fqdn_$NETWORK')}"
                 params:
                   $NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
+            ceilometer::policy::policies: {get_param: CeilometerApiPolicies}
             ceilometer::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
             ceilometer::wsgi::apache::ssl: {get_param: EnableInternalTLS}
             ceilometer::wsgi::apache::servername:
@@ -93,6 +112,12 @@ outputs:
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: Stop ceilometer_api service (running under httpd)
-          tags: step1
-          service: name=httpd state=stopped
+        yaql:
+          expression: $.data.apache_upgrade + $.data.ceilometer_api_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            ceilometer_api_upgrade:
+              - name: Stop ceilometer_api service (running under httpd)
+                tags: step1
+                service: name=httpd state=stopped
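
The yaql output above is plain list concatenation: the Apache base tasks run alongside the service's own task. Conceptually, the merged value looks like this (Apache tasks abbreviated to a comment, since they come from apache.yaml):

upgrade_tasks:
  # ...tasks contributed by [ApacheServiceBase, role_data, upgrade_tasks]...
  - name: Stop ceilometer_api service (running under httpd)
    tags: step1
    service: name=httpd state=stopped
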
index 874c689..b3e2c3a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ceilometer service configured with Puppet
@@ -13,15 +13,19 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
-  CeilometerBackend:
-    default: 'mongodb'
-    description: The ceilometer backend type.
-    type: string
   CeilometerMeteringSecret:
     description: Secret shared by the ceilometer services.
     type: string
@@ -30,25 +34,31 @@ parameters:
     description: The password for the ceilometer service account.
     type: string
     hidden: true
-  CeilometerMeterDispatcher:
-    default: ['gnocchi']
-    description: Comma-seperated list of Dispatcher to process meter data
-    type: comma_delimited_list
-    constraints:
-    - allowed_values: ['gnocchi', 'database']
-  CeilometerEventDispatcher:
-    default: ['gnocchi']
-    description: Comma-separated list of Dispatchers to process events data
-    type: comma_delimited_list
-    constraints:
-    - allowed_values: ['panko', 'gnocchi', 'database']
   CeilometerWorkers:
     default: 0
     description: Number of workers for Ceilometer service.
     type: number
+  ManageEventPipeline:
+    default: false
+    description: Whether to manage event_pipeline.yaml.
+    type: boolean
   EventPipelinePublishers:
-    default: ['notifier://?topic=alarm.all']
-    description: A list of publishers to put in event_pipeline.yaml.
+    default: ['gnocchi://']
+    description: >
+        A list of publishers to put in event_pipeline.yaml. When the
+        collector is used, override this with the notifier:// publisher.
+        Set ManageEventPipeline to true for the override to take effect.
+    type: comma_delimited_list
+  ManagePipeline:
+    default: false
+    description: Whether to manage pipeline.yaml.
+    type: boolean
+  PipelinePublishers:
+    default: ['gnocchi://']
+    description: >
+        A list of publishers to put in pipeline.yaml. When the
+        collector is used, override this with the notifier:// publisher.
+        Set ManagePipeline to true for the override to take effect.
     type: comma_delimited_list
   Debug:
     default: ''
@@ -76,6 +86,19 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
+  CeilometerApiEndpoint:
+    default: false
+    description: Whether to create the API endpoint. Set this to false
+        if you choose to disable the Ceilometer API service.
+    type: boolean
+  SnmpdReadonlyUserName:
+    default: ro_snmp_user
+    description: The user name for SNMPd with readonly rights running on all Overcloud nodes
+    type: string
+  SnmpdReadonlyUserPassword:
+    description: The user password for SNMPd with readonly rights running on all Overcloud nodes
+    type: string
+    hidden: true
 
 outputs:
   role_data:
@@ -84,31 +107,23 @@ outputs:
       service_name: ceilometer_base
       config_settings:
         ceilometer::debug: {get_param: Debug}
-        ceilometer::db::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://ceilometer:'
-              - {get_param: CeilometerPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/ceilometer'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
-        ceilometer_backend: {get_param: CeilometerBackend}
-        # we include db_sync class in puppet-tripleo
-        ceilometer::db::sync_db: false
         ceilometer::keystone::authtoken::project_name: 'service'
+        ceilometer::keystone::authtoken::user_domain_name: 'Default'
+        ceilometer::keystone::authtoken::project_domain_name: 'Default'
         ceilometer::keystone::authtoken::password: {get_param: CeilometerPassword}
-        ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+        ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
         ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
+        ceilometer::agent::notification::manage_event_pipeline: {get_param: ManageEventPipeline}
         ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
+        ceilometer::agent::notification::manage_pipeline: {get_param: ManagePipeline}
+        ceilometer::agent::notification::pipeline_publishers: {get_param: PipelinePublishers}
         ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
         ceilometer::agent::auth::auth_tenant_name: 'service'
+        ceilometer::agent::auth::auth_user_domain_name: 'Default'
+        ceilometer::agent::auth::auth_project_domain_name: 'Default'
         ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
-        ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
-        ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
         ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]}
         ceilometer::dispatcher::gnocchi::filter_project: 'service'
         ceilometer::dispatcher::gnocchi::archive_policy: 'low'
@@ -118,17 +133,19 @@ outputs:
         ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         ceilometer::rabbit_port: {get_param: RabbitClientPort}
         ceilometer::rabbit_heartbeat_timeout_threshold: 60
-        ceilometer::db::database_db_max_retries: -1
-        ceilometer::db::database_max_retries: -1
         ceilometer::telemetry_secret: {get_param: CeilometerMeteringSecret}
+        ceilometer::snmpd_readonly_username: {get_param: SnmpdReadonlyUserName}
+        ceilometer::snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
       service_config_settings:
         keystone:
+          ceilometer_auth_enabled: true
           ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
           ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
           ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
           ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
           ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
           ceilometer::keystone::auth::tenant: 'service'
+          ceilometer::keystone::auth::configure_endpoint: {get_param: CeilometerApiEndpoint}
         mysql:
           ceilometer::db::mysql::password: {get_param: CeilometerPassword}
           ceilometer::db::mysql::user: ceilometer
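
With the defaults now pointing at gnocchi:// and pipeline management disabled, deployments that keep the deprecated collector have to opt back in, as the parameter descriptions note. A minimal parameter_defaults sketch:

parameter_defaults:
  ManagePipeline: true
  PipelinePublishers: ['notifier://']
  ManageEventPipeline: true
  EventPipelinePublishers: ['notifier://?topic=alarm.all']
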
index b0ec971..3ec1842 100644 (file)
@@ -1,7 +1,8 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ceilometer Collector service configured with Puppet
+  This service is deprecated and will be removed in future releases.
 
 parameters:
   ServiceNetMap:
@@ -13,11 +14,27 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  CeilometerBackend:
+    default: 'mongodb'
+    description: The ceilometer backend type.
+    type: string
+  CeilometerPassword:
+    description: The password for the ceilometer service account.
+    type: string
+    hidden: true
   MonitoringSubscriptionCeilometerCollector:
     default: 'overcloud-ceilometer-collector'
     type: string
@@ -26,7 +43,32 @@ parameters:
     default:
       tag: openstack.ceilometer.collector
       path: /var/log/ceilometer/collector.log
-
+  CeilometerMeterDispatcher:
+    default: ['gnocchi']
+    description: Comma-separated list of dispatchers to process meter data.
+                 Note that the database option is deprecated and will not be
+                 supported in the future.
+    type: comma_delimited_list
+    constraints:
+    - allowed_values: ['gnocchi', 'database']
+  CeilometerEventDispatcher:
+    default: ['panko', 'gnocchi']
+    description: Comma-separated list of dispatchers to process event data.
+                 Note that the database option is deprecated and will not be
+                 supported in the future.
+    type: comma_delimited_list
+    constraints:
+    - allowed_values: ['panko', 'gnocchi', 'database']
+  CeilometerEventTTL:
+    default: '86400'
+    description: Number of seconds that events are kept in the database
+                 (<= 0 means forever).
+    type: string
+  CeilometerMeteringTTL:
+    default: '86400'
+    description: Number of seconds that samples are kept in the database
+                 (<= 0 means forever).
+    type: string
 resources:
   CeilometerServiceBase:
     type: ./ceilometer-base.yaml
@@ -34,6 +76,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
   MongoDbBase:
     type: ./database/mongodb-base.yaml
@@ -41,6 +85,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -55,6 +101,25 @@ outputs:
         map_merge:
           - get_attr: [MongoDbBase, role_data, config_settings]
           - get_attr: [CeilometerServiceBase, role_data, config_settings]
+          - ceilometer::db::database_connection:
+              make_url:
+                scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+                username: ceilometer
+                password: {get_param: CeilometerPassword}
+                host: {get_param: [EndpointMap, MysqlInternal, host]}
+                path: /ceilometer
+                query:
+                  read_default_file: /etc/my.cnf.d/tripleo.cnf
+                  read_default_group: tripleo
+            ceilometer_backend: {get_param: CeilometerBackend}
+            ceilometer::event_time_to_live: {get_param: CeilometerEventTTL}
+            ceilometer::metering_time_to_live: {get_param: CeilometerMeteringTTL}
+            # we include db_sync class in puppet-tripleo
+            ceilometer::db::sync_db: false
+            ceilometer::db::database_db_max_retries: -1
+            ceilometer::db::database_max_retries: -1
+            ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
+            ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
       service_config_settings:
         get_attr: [CeilometerServiceBase, role_data, service_config_settings]
       step_config: |
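
The TTL parameters above are plain seconds. An illustrative override that keeps samples and events for seven days instead of one (7 * 86400 = 604800):

parameter_defaults:
  CeilometerMeteringTTL: '604800'
  CeilometerEventTTL: '604800'
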
index 714434b..775e921 100644 (file)
@@ -1,7 +1,9 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ceilometer Expirer service configured with Puppet
+  Note: this service is deprecated and will be removed in
+  future releases.
 
 parameters:
   ServiceNetMap:
@@ -13,6 +15,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +39,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 033d3f7..5f19af6 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Ceph base service. Shared by all Ceph services.
@@ -55,6 +55,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index f972e21..ec34fca 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Ceph Client service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 134f47c..599532c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Ceph External service.
@@ -53,6 +53,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index b68567f..270d3a2 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Ceph MDS service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -31,6 +39,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index d589ef8..c36f053 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Ceph Monitor service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -78,6 +86,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index a97fa11..24b2886 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Ceph OSD service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -47,6 +55,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 0153197..ad91b4e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Ceph RadosGW service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -43,6 +51,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -73,7 +83,7 @@ outputs:
           ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
           ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
           ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
-          ceph::rgw::keystone::auth::roles: [ 'admin', 'member', '_member_' ]
+          ceph::rgw::keystone::auth::roles: [ 'admin', 'Member', '_member_' ]
           ceph::rgw::keystone::auth::tenant: service
           ceph::rgw::keystone::auth::user: swift
           ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
diff --git a/puppet/services/certmonger-user.yaml b/puppet/services/certmonger-user.yaml
new file mode 100644 (file)
index 0000000..6ad451a
--- /dev/null
@@ -0,0 +1,36 @@
+heat_template_version: pike
+
+description: >
+  Requests certificates using certmonger through Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+outputs:
+  role_data:
+    description: Role data for the certmonger-user service
+    value:
+      service_name: certmonger_user
+      step_config: |
+        include ::tripleo::profile::base::certmonger_user
index c0ea7aa..036209f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Cinder API service configured with Puppet
@@ -22,6 +22,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -46,6 +54,12 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  CinderApiPolicies:
+    description: |
+      A hash of policies to configure for Cinder API.
+      e.g. { cinder-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 conditions:
   cinder_workers_zero: {equals : [{get_param: CinderWorkers}, 0]}
@@ -58,6 +72,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       EnableInternalTLS: {get_param: EnableInternalTLS}
 
   CinderBase:
@@ -66,6 +82,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -80,10 +98,13 @@ outputs:
         map_merge:
           - get_attr: [CinderBase, role_data, config_settings]
           - get_attr: [ApacheServiceBase, role_data, config_settings]
-          - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+          - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             cinder::keystone::authtoken::password: {get_param: CinderPassword}
             cinder::keystone::authtoken::project_name: 'service'
+            cinder::keystone::authtoken::user_domain_name: 'Default'
+            cinder::keystone::authtoken::project_domain_name: 'Default'
+            cinder::policy::policies: {get_param: CinderApiPolicies}
             cinder::api::enable_proxy_headers_parsing: true
 
             cinder::api::nova_catalog_info: 'compute:nova:internalURL'
@@ -150,25 +171,31 @@ outputs:
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: Check if cinder_api is deployed
-          command: systemctl is-enabled openstack-cinder-api
-          tags: common
-          ignore_errors: True
-          register: cinder_api_enabled
-        - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
-          shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
-          when: cinder_api_enabled.rc == 0
-          tags: step0,validation
-        - name: check for cinder running under apache (post upgrade)
-          tags: step1
-          shell: "apachectl -t -D DUMP_VHOSTS | grep -q cinder"
-          register: cinder_apache
-          ignore_errors: true
-        - name: Stop cinder_api service (running under httpd)
-          tags: step1
-          service: name=httpd state=stopped
-          when: "cinder_apache.rc == 0"
-        - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
-          tags: step1
-          when: cinder_api_enabled.rc == 0
-          service: name=openstack-cinder-api state=stopped enabled=no
+        yaql:
+          expression: $.data.apache_upgrade + $.data.cinder_api_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            cinder_api_upgrade:
+              - name: Check if cinder_api is deployed
+                command: systemctl is-enabled openstack-cinder-api
+                tags: common
+                ignore_errors: True
+                register: cinder_api_enabled
+              - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
+                shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
+                when: cinder_api_enabled.rc == 0
+                tags: step0,validation
+              - name: check for cinder running under apache (post upgrade)
+                tags: step1
+                shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder"
+                register: cinder_apache
+                ignore_errors: true
+              - name: Stop cinder_api service (running under httpd)
+                tags: step1
+                service: name=httpd state=stopped
+                when: cinder_apache.rc == 0
+              - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
+                tags: step1
+                when: cinder_api_enabled.rc == 0
+                service: name=openstack-cinder-api state=stopped enabled=no
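
The new *Policies parameters all take the hash form shown in their descriptions. For example, for CinderApiPolicies:

parameter_defaults:
  CinderApiPolicies:
    cinder-context_is_admin:
      key: context_is_admin
      value: 'role:admin'
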
index 1f15c53..d038253 100644 (file)
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Cinder Dell EMC PS Series backend
@@ -58,6 +58,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     type: json
index 6a6196a..a6b7d5c 100644 (file)
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Cinder Dell EMC Storage Center backend
@@ -58,6 +58,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     type: json
diff --git a/puppet/services/cinder-backend-netapp.yaml b/puppet/services/cinder-backend-netapp.yaml
new file mode 100644 (file)
index 0000000..bddc8e1
--- /dev/null
@@ -0,0 +1,137 @@
+heat_template_version: pike
+
+description: Openstack Cinder Netapp backend
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  CinderEnableNetappBackend:
+    type: boolean
+    default: true
+  CinderNetappBackendName:
+    type: string
+    default: 'tripleo_netapp'
+  CinderNetappLogin:
+    type: string
+  CinderNetappPassword:
+    type: string
+    hidden: true
+  CinderNetappServerHostname:
+    type: string
+  CinderNetappServerPort:
+    type: string
+    default: '80'
+  CinderNetappSizeMultiplier:
+    type: string
+    default: '1.2'
+  CinderNetappStorageFamily:
+    type: string
+    default: 'ontap_cluster'
+  CinderNetappStorageProtocol:
+    type: string
+    default: 'nfs'
+  CinderNetappTransportType:
+    type: string
+    default: 'http'
+  CinderNetappVfiler:
+    type: string
+    default: ''
+  CinderNetappVolumeList:
+    type: string
+    default: ''
+  CinderNetappVserver:
+    type: string
+    default: ''
+  CinderNetappPartnerBackendName:
+    type: string
+    default: ''
+  CinderNetappNfsShares:
+    type: string
+    default: ''
+  CinderNetappNfsSharesConfig:
+    type: string
+    default: '/etc/cinder/shares.conf'
+  CinderNetappNfsMountOptions:
+    type: string
+    default: ''
+  CinderNetappCopyOffloadToolPath:
+    type: string
+    default: ''
+  CinderNetappControllerIps:
+    type: string
+    default: ''
+  CinderNetappSaPassword:
+    type: string
+    default: ''
+    hidden: true
+  CinderNetappStoragePools:
+    type: string
+    default: ''
+  CinderNetappHostType:
+    type: string
+    default: ''
+  CinderNetappWebservicePath:
+    type: string
+    default: '/devmgr/v2'
+  # DEPRECATED options for compatibility with older versions
+  CinderNetappEseriesHostType:
+    type: string
+    default: 'linux_dm_mp'
+
+parameter_groups:
+- label: deprecated
+  description: Do not use deprecated params; they will be removed.
+  parameters:
+  - CinderNetappEseriesHostType
+
+outputs:
+  role_data:
+    description: Role data for the Cinder NetApp backend.
+    value:
+      service_name: cinder_backend_netapp
+      config_settings:
+        tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_param: CinderEnableNetappBackend}
+        cinder::backend::netapp::title: {get_param: CinderNetappBackendName}
+        cinder::backend::netapp::netapp_login: {get_param: CinderNetappLogin}
+        cinder::backend::netapp::netapp_password: {get_param: CinderNetappPassword}
+        cinder::backend::netapp::netapp_server_hostname: {get_param: CinderNetappServerHostname}
+        cinder::backend::netapp::netapp_server_port: {get_param: CinderNetappServerPort}
+        cinder::backend::netapp::netapp_size_multiplier: {get_param: CinderNetappSizeMultiplier}
+        cinder::backend::netapp::netapp_storage_family: {get_param: CinderNetappStorageFamily}
+        cinder::backend::netapp::netapp_storage_protocol: {get_param: CinderNetappStorageProtocol}
+        cinder::backend::netapp::netapp_transport_type: {get_param: CinderNetappTransportType}
+        cinder::backend::netapp::netapp_vfiler: {get_param: CinderNetappVfiler}
+        cinder::backend::netapp::netapp_volume_list: {get_param: CinderNetappVolumeList}
+        cinder::backend::netapp::netapp_vserver: {get_param: CinderNetappVserver}
+        cinder::backend::netapp::netapp_partner_backend_name: {get_param: CinderNetappPartnerBackendName}
+        cinder::backend::netapp::nfs_shares: {get_param: CinderNetappNfsShares}
+        cinder::backend::netapp::nfs_shares_config: {get_param: CinderNetappNfsSharesConfig}
+        cinder::backend::netapp::nfs_mount_options: {get_param: CinderNetappNfsMountOptions}
+        cinder::backend::netapp::netapp_copyoffload_tool_path: {get_param: CinderNetappCopyOffloadToolPath}
+        cinder::backend::netapp::netapp_controller_ips: {get_param: CinderNetappControllerIps}
+        cinder::backend::netapp::netapp_sa_password: {get_param: CinderNetappSaPassword}
+        cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
+        cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
+        cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+      step_config: |
+        include ::tripleo::profile::base::cinder::volume
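
A hedged sketch of an environment that enables this backend; the registry key is assumed to follow the usual OS::TripleO::Services naming, and every credential and hostname below is a placeholder:

resource_registry:
  OS::TripleO::Services::CinderBackendNetapp: ../puppet/services/cinder-backend-netapp.yaml

parameter_defaults:
  CinderEnableNetappBackend: true
  CinderNetappLogin: admin                      # placeholder
  CinderNetappPassword: secret                  # placeholder
  CinderNetappServerHostname: netapp.example.com
  CinderNetappStorageProtocol: nfs
  CinderNetappNfsShares: '192.0.2.20:/cinder'   # placeholder export
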
diff --git a/puppet/services/cinder-backend-pure.yaml b/puppet/services/cinder-backend-pure.yaml
new file mode 100644 (file)
index 0000000..576896a
--- /dev/null
@@ -0,0 +1,76 @@
+# Copyright (c) 2017 Pure Storage Inc, or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+heat_template_version: pike
+
+description: >
+  Openstack Cinder Pure Storage FlashArray backend
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    type: json
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+  CinderEnablePureBackend:
+    type: boolean
+    default: true
+  CinderPureBackendName:
+    type: string
+    default: 'tripleo_pure'
+  CinderPureStorageProtocol:
+    type: string
+    default: 'iSCSI'
+  CinderPureSanIp:
+    type: string
+  CinderPureAPIToken:
+    type: string
+  CinderPureUseChap:
+    type: boolean
+    default: false
+  CinderPureMultipathXfer:
+    type: boolean
+    default: true
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Pure Storage FlashArray backend.
+    value:
+      service_name: cinder_backend_pure
+      config_settings:
+        tripleo::profile::base::cinder::volume::cinder_enable_pure_backend: {get_param: CinderEnablePureBackend}
+        cinder::backend::pure::volume_backend_name: {get_param: CinderPureBackendName}
+        cinder::backend::pure::pure_storage_protocol: {get_param: CinderPureStorageProtocol}
+        cinder::backend::pure::san_ip: {get_param: CinderPureSanIp}
+        cinder::backend::pure::pure_api_token: {get_param: CinderPureAPIToken}
+        cinder::backend::pure::use_chap_auth: {get_param: CinderPureUseChap}
+        cinder::backend::pure::use_multipath_for_image_xfer: {get_param: CinderPureMultipathXfer}
+      step_config: |
+        include ::tripleo::profile::base::cinder::volume
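
As with the NetApp template, a hedged environment sketch; the registry key is assumed and the SAN address and token are placeholders:

resource_registry:
  OS::TripleO::Services::CinderBackendPure: ../puppet/services/cinder-backend-pure.yaml

parameter_defaults:
  CinderEnablePureBackend: true
  CinderPureSanIp: 192.0.2.50
  CinderPureAPIToken: replace-with-array-api-token
  CinderPureStorageProtocol: iSCSI
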
index eb709cd..832cc09 100644 (file)
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Cinder Dell EMC ScaleIO backend
@@ -78,6 +78,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     type: json
@@ -106,6 +114,6 @@ outputs:
         cinder::backend::scaleio::sio_round_volume_capacity: {get_param: CinderScaleIORoundVolumeCapacity}
         cinder::backend::scaleio::sio_unmap_volume_before_deletion: {get_param: CinderScaleIOUnmapVolumeBeforeDeletion}
         cinder::backend::scaleio::sio_max_over_subscription_ratio: {get_param: CinderScaleIOMaxOverSubscriptionRatio}
-        cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOThinProvision}
+        cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOSanThinProvision}
       step_config: |
         include ::tripleo::profile::base::cinder::volume
index 14be07a..629a0f5 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Cinder Backup service configured with Puppet
@@ -25,6 +25,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -42,6 +50,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 88e7edb..2ba5aa5 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Cinder base service. Shared by all Cinder services.
@@ -21,6 +21,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -92,15 +100,15 @@ outputs:
       service_name: cinder_base
       config_settings:
         cinder::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://cinder:'
-              - {get_param: CinderPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/cinder'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: cinder
+            password: {get_param: CinderPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /cinder
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
         cinder::debug: {get_param: Debug}
         cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         cinder::rabbit_userid: {get_param: RabbitUserName}
index ca7d283..3ea0fd8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Configure Cinder HPELeftHandISCSIDriver
@@ -32,6 +32,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     type: json
index f8361f6..806f9bb 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Cinder Scheduler service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -35,6 +43,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index b52955e..fe95222 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Cinder Volume service configured with Puppet
@@ -55,6 +55,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -77,6 +85,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -94,11 +104,7 @@ outputs:
             tripleo::profile::base::cinder::volume::cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend}
             tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
             tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
-            tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers:
-              str_replace:
-                template: SERVERS
-                params:
-                  SERVERS: {get_param: CinderNfsServers}
+            tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
             tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
             tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
             tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
index 8bc9f2e..8fbcd99 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Congress service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -47,6 +55,12 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
+  CongressPolicies:
+    description: |
+      A hash of policies to configure for Congress.
+      e.g. { congress-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 outputs:
   role_data:
@@ -56,15 +70,15 @@ outputs:
       config_settings:
         congress_password: {get_param: CongressPassword}
         congress::db::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://congress:'
-              - {get_param: CongressPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/congress'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: congress
+            password: {get_param: CongressPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /congress
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
         congress::debug: {get_param: Debug}
         congress::rpc_backend: rabbit
         congress::rabbit_userid: {get_param: RabbitUserName}
@@ -73,9 +87,12 @@ outputs:
         congress::rabbit_port: {get_param: RabbitClientPort}
         congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]}
 
+        congress::keystone::authtoken::password: {get_param: CongressPassword}
         congress::keystone::authtoken::project_name: 'service'
-        congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
-        congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+        congress::keystone::authtoken::user_domain_name: 'Default'
+        congress::keystone::authtoken::project_domain_name: 'Default'
+        congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+        congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
 
         congress::db::mysql::password: {get_param: CongressPassword}
         congress::db::mysql::user: congress
@@ -84,10 +101,12 @@ outputs:
         congress::db::mysql::allowed_hosts:
           - '%'
           - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+        congress::policy::policies: {get_param: CongressPolicies}
 
       service_config_settings:
         keystone:
           congress::keystone::auth::tenant: 'service'
+          congress::keystone::auth::region: {get_param: KeystoneRegion}
           congress::keystone::auth::password: {get_param: CongressPassword}
           congress::keystone::auth::public_url: {get_param: [EndpointMap, CongressPublic, uri]}
           congress::keystone::auth::internal_url: {get_param: [EndpointMap, CongressInternal, uri]}
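
The congress hunk above replaces the hand-assembled list_join database URL with Heat's make_url function (a heat_template_version: pike intrinsic, which the version bump at the top of the hunk enables). A minimal standalone sketch of the same pattern; the ExamplePassword parameter and the 'example' user/database names are placeholders, not part of this change:

  heat_template_version: pike

  parameters:
    EndpointMap:
      default: {}
      type: json
    ExamplePassword:
      type: string
      hidden: true

  outputs:
    database_connection:
      description: Example DB connection string assembled by make_url
      value:
        make_url:
          scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
          username: example
          password: {get_param: ExamplePassword}
          host: {get_param: [EndpointMap, MysqlInternal, host]}
          path: /example
          query:
            read_default_file: /etc/my.cnf.d/tripleo.cnf
            read_default_group: tripleo
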
index c27fcb7..b5fced4 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Configuration details for MongoDB service using composable roles
@@ -24,6 +24,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 63ec444..5bd621d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   MongoDb service deployment using puppet
@@ -14,11 +14,23 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  MongodbMemoryLimit:
+    default: '20G'
+    description: Limit the amount of memory mongodb uses with systemd.
+    type: string
   MongoDbLoggingSource:
     type: json
     description: Fluentd logging configuration for mongodb.
@@ -36,6 +48,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -49,6 +63,7 @@ outputs:
         map_merge:
           - get_attr: [MongoDbBase, role_data, config_settings]
           - tripleo::profile::base::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]}
+            tripleo::profile::base::database::mongodb::memory_limit: {get_param: MongodbMemoryLimit}
             mongodb::server::service_manage: True
             tripleo.mongodb.firewall_rules:
               '101 mongodb_config':
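
The MongoDB hunks above show a pattern repeated across almost every service template in this change: each template grows optional RoleName and RoleParameters parameters and forwards them to its nested base template. A trimmed sketch of that wiring, with ./example-base.yaml standing in for the real nested template:

  parameters:
    RoleName:
      default: ''
      description: Role name on which the service is applied
      type: string
    RoleParameters:
      default: {}
      description: Parameters specific to the role
      type: json

  resources:
    ServiceBase:
      type: ./example-base.yaml   # hypothetical nested template; it must declare the same two parameters
      properties:
        RoleName: {get_param: RoleName}
        RoleParameters: {get_param: RoleParameters}
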
index 78456e2..19d732d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Mysql client settings
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -21,6 +29,11 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
 
 outputs:
   role_data:
@@ -30,5 +43,6 @@ outputs:
       config_settings:
         tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
         tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS}
+        tripleo::profile::base::database::mysql::client::ssl_ca: {get_param: InternalTLSCAFile}
       step_config: |
         include ::tripleo::profile::base::database::mysql::client
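
With the new InternalTLSCAFile parameter, the MySQL client profile learns which CA bundle to trust once EnableInternalTLS is set. A hedged example of how a deployment might switch both on through an environment file passed with -e; the CA path shown is simply the parameter's default:

  parameter_defaults:
    EnableInternalTLS: true
    InternalTLSCAFile: /etc/ipa/ca.crt
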
diff --git a/puppet/services/database/mysql-internal-tls-certmonger.yaml b/puppet/services/database/mysql-internal-tls-certmonger.yaml
deleted file mode 100644 (file)
index 9f7eaf5..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-heat_template_version: ocata
-
-description: >
-  MySQL configurations for using TLS via certmonger.
-
-parameters:
-  ServiceNetMap:
-    default: {}
-    description: Mapping of service_name -> network name. Typically set
-                 via parameter_defaults in the resource registry.  This
-                 mapping overrides those in ServiceNetMapDefaults.
-    type: json
-  # The following parameters are not needed by the template but are
-  # required to pass the pep8 tests
-  DefaultPasswords:
-    default: {}
-    type: json
-  EndpointMap:
-    default: {}
-    description: Mapping of service endpoint -> protocol. Typically set
-                 via parameter_defaults in the resource registry.
-    type: json
-
-outputs:
-  role_data:
-    description: MySQL configurations for using TLS via certmonger.
-    value:
-      service_name: mysql_internal_tls_certmonger
-      config_settings:
-        generate_service_certificates: true
-        tripleo::profile::base::database::mysql::certificate_specs:
-          service_certificate: '/etc/pki/tls/certs/mysql.crt'
-          service_key: '/etc/pki/tls/private/mysql.key'
-          hostname:
-            str_replace:
-              template: "%{hiera('cloud_name_NETWORK')}"
-              params:
-                NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
-          principal:
-            str_replace:
-              template: "mysql/%{hiera('cloud_name_NETWORK')}"
-              params:
-                NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
-      metadata_settings:
-        - service: mysql
-          network: {get_param: [ServiceNetMap, MysqlNetwork]}
-          type: vip
index 808f135..2bde903 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   MySQL service deployment using puppet
@@ -14,6 +14,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -23,6 +31,10 @@ parameters:
     description: Configures MySQL max_connections config setting
     type: number
     default: 4096
+  MysqlIncreaseFileLimit:
+    description: Flag to increase MySQL open-files-limit to 16384
+    type: boolean
+    default: true
   MysqlRootPassword:
     type: string
     hidden: true
@@ -38,13 +50,13 @@ parameters:
     description: The password for the nova db account
     type: string
     hidden: true
+  EnableInternalTLS:
+    type: boolean
+    default: false
 
-resources:
+conditions:
 
-  MySQLTLS:
-    type: OS::TripleO::Services::MySQLTLS
-    properties:
-      ServiceNetMap: {get_param: ServiceNetMap}
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 outputs:
   role_data:
@@ -53,7 +65,6 @@ outputs:
       service_name: mysql
       config_settings:
         map_merge:
-          - get_attr: [MySQLTLS, role_data, config_settings]
           -
             # The Galera package should work in cluster and
             # non-cluster modes based on the config file.
@@ -96,10 +107,32 @@ outputs:
                   $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
             tripleo::profile::base::database::mysql::client_bind_address:
               {get_param: [ServiceNetMap, MysqlNetwork]}
+            tripleo::profile::base::database::mysql::generate_dropin_file_limit:
+              {get_param: MysqlIncreaseFileLimit}
+          - generate_service_certificates: true
+            tripleo::profile::base::database::mysql::certificate_specs:
+              service_certificate: '/etc/pki/tls/certs/mysql.crt'
+              service_key: '/etc/pki/tls/private/mysql.key'
+              hostname:
+                str_replace:
+                  template: "%{hiera('cloud_name_NETWORK')}"
+                  params:
+                    NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+              principal:
+                str_replace:
+                  template: "mysql/%{hiera('cloud_name_NETWORK')}"
+                  params:
+                    NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
       step_config: |
         include ::tripleo::profile::base::database::mysql
       metadata_settings:
-        get_attr: [MySQLTLS, role_data, metadata_settings]
+        if:
+          - internal_tls_enabled
+          -
+            - service: mysql
+              network: {get_param: [ServiceNetMap, MysqlNetwork]}
+              type: vip
+          - null
       upgrade_tasks:
         - name: Check for galera root password
           tags: step0
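
The mysql.yaml hunks above drop the nested MySQLTLS helper resource in favour of a template-level condition: metadata_settings is now emitted only when EnableInternalTLS is true. A minimal sketch of that conditional-output pattern, reduced to the parts that matter:

  heat_template_version: pike

  parameters:
    ServiceNetMap:
      default: {}
      type: json
    EnableInternalTLS:
      type: boolean
      default: false

  conditions:
    internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}

  outputs:
    role_data:
      value:
        metadata_settings:
          if:
            - internal_tls_enabled
            -
              - service: mysql
                network: {get_param: [ServiceNetMap, MysqlNetwork]}
                type: vip
            - null
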
index 2b7dd43..89fa806 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Redis service configured with Puppet
@@ -8,6 +8,10 @@ parameters:
     description: The password for Redis
     type: string
     hidden: true
+  RedisFDLimit:
+    description: Configure Redis FD limit
+    type: string
+    default: 10240
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -17,6 +21,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -42,3 +54,5 @@ outputs:
         redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
         redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
         redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
+        redis::sentinel::sentinel_bind: {get_param: [ServiceNetMap, RedisNetwork]}
+        redis::ulimit: {get_param: RedisFDLimit}
index 5ea25ca..df406a8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Redis service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -26,6 +34,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
diff --git a/puppet/services/disabled/ceilometer-collector.yaml b/puppet/services/disabled/ceilometer-collector.yaml
new file mode 100644 (file)
index 0000000..64fd476
--- /dev/null
@@ -0,0 +1,38 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Ceilometer Collector service, disabled since pike
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+outputs:
+  role_data:
+    description: Role data for the disabled Ceilometer Collector role.
+    value:
+      service_name: ceilometer_collector
+      upgrade_tasks:
+        - name: Stop and disable ceilometer_collector service on upgrade
+          tags: step1
+          service: name=openstack-ceilometer-collector state=stopped enabled=no
diff --git a/puppet/services/disabled/ceilometer-expirer.yaml b/puppet/services/disabled/ceilometer-expirer.yaml
new file mode 100644 (file)
index 0000000..182193e
--- /dev/null
@@ -0,0 +1,38 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Ceilometer Expirer service, disabled since pike
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+outputs:
+  role_data:
+    description: Role data for the disabled Ceilometer Expirer role.
+    value:
+      service_name: ceilometer_expirer
+      upgrade_tasks:
+        - name: Stop and disable ceilometer_expirer service on upgrade
+          tags: step1
+          service: name=openstack-ceilometer-expirer state=stopped enabled=no
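
The two new files above follow the "disabled service" convention: the template emits no config_settings at all, only an upgrade task that stops and disables the old daemon. Such a template takes effect when the resource registry points the service alias at it; a hedged example, assuming the conventional OS::TripleO::Services aliases for these two services:

  resource_registry:
    OS::TripleO::Services::CeilometerCollector: ../puppet/services/disabled/ceilometer-collector.yaml
    OS::TripleO::Services::CeilometerExpirer: ../puppet/services/disabled/ceilometer-expirer.yaml
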
index 7bf4a1f..b2cd03e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Glance Registry service, disabled since ocata
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
diff --git a/puppet/services/docker.yaml b/puppet/services/docker.yaml
new file mode 100644 (file)
index 0000000..2be2112
--- /dev/null
@@ -0,0 +1,51 @@
+heat_template_version: pike
+
+description: >
+  Configures docker on the host
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: tripleoupstream
+    type: string
+  DockerNamespaceIsRegistry:
+    type: boolean
+    default: false
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+outputs:
+  role_data:
+    description: Role data for the docker service
+    value:
+      service_name: docker
+      config_settings:
+        tripleo::profile::base::docker::docker_namespace: {get_param: DockerNamespace}
+        tripleo::profile::base::docker::insecure_registry: {get_param: DockerNamespaceIsRegistry}
+      step_config: |
+        include ::tripleo::profile::base::docker
+      upgrade_tasks:
+        - name: Install docker packages on upgrade if missing
+          tags: step3
+          yum: name=docker state=latest
+
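
The new docker.yaml service exposes the container image namespace and an insecure-registry flag as plain parameters. A hedged example of overriding them for a local registry; the registry address is illustrative only:

  parameter_defaults:
    DockerNamespace: 192.168.24.1:8787/tripleoupstream
    DockerNamespaceIsRegistry: true
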
index 10f6d31..aa878a9 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack EC2-API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -30,6 +38,15 @@ parameters:
     type: string
     default: 'regionOne'
     description: Keystone region for endpoint
+  Ec2ApiExternalNetwork:
+    type: string
+    default: ''
+    description: Name of the external network, which is used to connect VPCs to
+                 Internet and to allocate Elastic IPs
+  NovaDefaultFloatingPool:
+    default: 'public'
+    description: Default pool for floating IP addresses
+    type: string
   MonitoringSubscriptionEc2Api:
     default: 'overcloud-ec2-api'
     type: string
@@ -42,10 +59,17 @@ parameters:
     default: 'false'
     description: Set to true to enable package installation via Puppet
     type: boolean
+  Ec2ApiPolicies:
+    description: |
+      A hash of policies to configure for EC2-API.
+      e.g. { ec2api-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 
 conditions:
   nova_workers_zero: {equals : [{get_param: Ec2ApiWorkers}, 0]}
+  external_network_unset: {equals : [{get_param: Ec2ApiExternalNetwork}, '']}
 
 outputs:
   role_data:
@@ -67,6 +91,7 @@ outputs:
           ec2api::keystone::authtoken::password: {get_param: Ec2ApiPassword}
           ec2api::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
           ec2api::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+          ec2api::policy::policies: {get_param: Ec2ApiPolicies}
           ec2api::api::enabled: true
           ec2api::package_manage: {get_param: EnablePackageInstall}
           ec2api::api::ec2api_listen:
@@ -82,21 +107,31 @@ outputs:
               params:
                 $NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]}
           ec2api::db::database_connection:
+            make_url:
+              scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+              username: ec2_api
+              password: {get_param: Ec2ApiPassword}
+              host: {get_param: [EndpointMap, MysqlInternal, host]}
+              path: /ec2_api
+              query:
+                read_default_file: /etc/my.cnf.d/tripleo.cnf
+                read_default_group: tripleo
+          ec2api::api::keystone_ec2_tokens_url:
             list_join:
               - ''
-              - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                - '://ec2_api:'
-                - {get_param: Ec2ApiPassword}
-                - '@'
-                - {get_param: [EndpointMap, MysqlInternal, host]}
-                - '/ec2_api'
-                - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+              - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+                - '/ec2tokens'
         -
           if:
           - nova_workers_zero
           - {}
           - ec2api::api::ec2api_workers: {get_param: Ec2ApiWorkers}
             ec2api::metadata::metadata_workers: {get_param: Ec2ApiWorkers}
+        -
+          if:
+          - external_network_unset
+          - ec2api::api::external_network: {get_param: NovaDefaultFloatingPool}
+          - ec2api::api::external_network: {get_param: Ec2ApiExternalNetwork}
       step_config: |
         include tripleo::profile::base::nova::ec2api
       service_config_settings:
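
In the ec2-api hunk above, Ec2ApiExternalNetwork defaults to an empty string, and the external_network_unset condition makes the template fall back to NovaDefaultFloatingPool when it is left unset. Setting it explicitly is a one-line override; a hedged example, with 'public' as an illustrative network name:

  parameter_defaults:
    Ec2ApiExternalNetwork: public
    # Leaving this at its default ('') means the ec2api external network
    # falls back to the NovaDefaultFloatingPool value instead.
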
index 7cdd845..2e87764 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Etcd service configured with Puppet
@@ -13,18 +13,33 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
   EtcdInitialClusterToken:
-    default: 'etcd-tripleo'
     description: Initial cluster token for the etcd cluster during bootstrap.
     type: string
+    hidden: true
   MonitoringSubscriptionEtcd:
     default: 'overcloud-etcd'
     type: string
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 outputs:
   role_data:
@@ -33,27 +48,47 @@ outputs:
       service_name: etcd
       monitoring_subscription: {get_param: MonitoringSubscriptionEtcd}
       config_settings:
-        etcd::etcd_name:
-          str_replace:
-            template:
-              "%{hiera('fqdn_$NETWORK')}"
-            params:
-              $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
-        # NOTE: bind IP is found in Heat replacing the network name with the local node IP
-        # for the given network; replacement examples (eg. for internal_api):
-        # internal_api -> IP
-        # internal_api_uri -> [IP]
-        # internal_api_subnet - > IP/CIDR
-        tripleo::profile::base::etcd::bind_ip: {get_param: [ServiceNetMap, EtcdNetwork]}
-        tripleo::profile::base::etcd::client_port: '2379'
-        tripleo::profile::base::etcd::peer_port: '2380'
-        etcd::initial_cluster_token: {get_param: EtcdInitialClusterToken}
-        etcd::manage_package: false
-        tripleo.etcd.firewall_rules:
-          '141 etcd':
-            dport:
-              - 2379
-              - 2380
+        map_merge:
+        -
+          etcd::etcd_name:
+            str_replace:
+              template:
+                "%{hiera('fqdn_$NETWORK')}"
+              params:
+                $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+          # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+          # for the given network; replacement examples (eg. for internal_api):
+          # internal_api -> IP
+          # internal_api_uri -> [IP]
+          # internal_api_subnet - > IP/CIDR
+          tripleo::profile::base::etcd::bind_ip: {get_param: [ServiceNetMap, EtcdNetwork]}
+          tripleo::profile::base::etcd::client_port: '2379'
+          tripleo::profile::base::etcd::peer_port: '2380'
+          etcd::initial_cluster_token: {get_param: EtcdInitialClusterToken}
+          etcd::manage_package: false
+          tripleo.etcd.firewall_rules:
+            '141 etcd':
+              dport:
+                - 2379
+                - 2380
+        -
+          if:
+          - internal_tls_enabled
+          - generate_service_certificates: true
+            tripleo::profile::base::etcd::certificate_specs:
+              service_certificate: '/etc/pki/tls/certs/etcd.crt'
+              service_key: '/etc/pki/tls/private/etcd.key'
+              hostname:
+                str_replace:
+                  template: "%{hiera('fqdn_NETWORK')}"
+                  params:
+                    NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+              principal:
+                str_replace:
+                  template: "etcd/%{hiera('fqdn_NETWORK')}"
+                  params:
+                    NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+          - {}
       step_config: |
         include ::tripleo::profile::base::etcd
       upgrade_tasks:
@@ -71,3 +106,11 @@ outputs:
         - name: Stop etcd service
           tags: step2
           service: name=etcd state=stopped
+      metadata_settings:
+        if:
+          - internal_tls_enabled
+          -
+            - service: etcd
+              network: {get_param: [ServiceNetMap, EtcdNetwork]}
+              type: node
+          - null
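
The etcd hunk above restructures config_settings as a map_merge so that the TLS certificate request is merged in only when internal TLS is enabled. A minimal sketch of that map_merge-plus-if shape; the example:: hiera keys are hypothetical placeholders:

  heat_template_version: pike

  parameters:
    EnableInternalTLS:
      type: boolean
      default: false

  conditions:
    internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}

  outputs:
    role_data:
      value:
        config_settings:
          map_merge:
            - example::always_applied: true   # hypothetical key, merged unconditionally
            -
              if:
                - internal_tls_enabled
                - example::tls_only: true      # hypothetical key, merged only when TLS is on
                - {}
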
diff --git a/puppet/services/external-swift-proxy.yaml b/puppet/services/external-swift-proxy.yaml
new file mode 100644 (file)
index 0000000..206536d
--- /dev/null
@@ -0,0 +1,78 @@
+heat_template_version: pike
+
+description: >
+  External Swift Proxy endpoint configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ExternalPublicUrl:
+    description: Public endpoint url for the external swift proxy
+    type: string
+  ExternalInternalUrl:
+    description: Internal endpoint url for the external swift proxy
+    type: string
+  ExternalAdminUrl:
+    description: External endpoint url for the external swift proxy
+    type: string
+  ExternalSwiftUserTenant:
+    description: Tenant where swift user will be set as admin
+    type: string
+    default: 'service'
+  SwiftPassword:
+    description: The password for the swift service account, used by the swift proxy services.
+    type: string
+    hidden: true
+  KeystoneRegion:
+    type: string
+    default: 'regionOne'
+    description: Keystone region for endpoint
+
+resources:
+
+outputs:
+  role_data:
+    description: Role data for External Swift proxy.
+    value:
+      service_name: external_swift_proxy
+      config_settings:
+
+      step_config:
+
+      service_config_settings:
+        keystone:
+          swift::keystone::auth::public_url: {get_param: ExternalPublicUrl}
+          swift::keystone::auth::internal_url: {get_param: ExternalInternalUrl}
+          swift::keystone::auth::admin_url: {get_param: ExternalAdminUrl}
+          swift::keystone::auth::public_url_s3: ''
+          swift::keystone::auth::internal_url_s3: ''
+          swift::keystone::auth::admin_url_s3: ''
+          swift::keystone::auth::password: {get_param: SwiftPassword}
+          swift::keystone::auth::region: {get_param: KeystoneRegion}
+          swift::keystone::auth::tenant: {get_param: ExternalSwiftUserTenant}
+          swift::keystone::auth::configure_s3_endpoint: false
+          swift::keystone::auth::operator_roles:
+            - admin
+            - swiftoperator
+            - ResellerAdmin
+
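
The new external-swift-proxy.yaml only registers Keystone endpoints for a Swift deployment that lives outside the overcloud. A hedged sketch of an environment that could wire it in; the registry aliases follow the usual OS::TripleO::Services naming and the URLs are placeholders:

  resource_registry:
    OS::TripleO::Services::ExternalSwiftProxy: ../puppet/services/external-swift-proxy.yaml
    OS::TripleO::Services::SwiftProxy: OS::Heat::None

  parameter_defaults:
    ExternalPublicUrl: 'https://swift.example.com:8080/v1/AUTH_%(tenant_id)s'
    ExternalInternalUrl: 'http://172.16.2.10:8080/v1/AUTH_%(tenant_id)s'
    ExternalAdminUrl: 'http://172.16.2.10:8080'
    ExternalSwiftUserTenant: service
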
index ce389dc..2815174 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Glance API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -48,9 +56,78 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  CephClientUserName:
+    default: openstack
+    type: string
+  Debug:
+    default: ''
+    description: Set to True to enable debugging on all services.
+    type: string
+  GlanceNotifierStrategy:
+    description: Strategy to use for Glance notification queue
+    type: string
+    default: noop
+  GlanceLogFile:
+    description: The filepath of the file to use for logging messages from Glance.
+    type: string
+    default: ''
+  GlanceBackend:
+    default: swift
+    description: The short name of the Glance backend to use. Should be one
+      of swift, rbd, or file
+    type: string
+    constraints:
+    - allowed_values: ['swift', 'file', 'rbd']
+  GlanceNfsEnabled:
+    default: false
+    description: >
+      When using GlanceBackend 'file', mount NFS share for image storage.
+    type: boolean
+  GlanceNfsShare:
+    default: ''
+    description: >
+      NFS share to mount for image storage (when GlanceNfsEnabled is true)
+    type: string
+  GlanceNfsOptions:
+    default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+    description: >
+      NFS mount options for image storage (when GlanceNfsEnabled is true)
+    type: string
+  GlanceRbdPoolName:
+    default: images
+    type: string
+  RabbitPassword:
+    description: The password for RabbitMQ
+    type: string
+    hidden: true
+  RabbitUserName:
+    default: guest
+    description: The username for RabbitMQ
+    type: string
+  RabbitClientPort:
+    default: 5672
+    description: Set rabbit subscriber port, change this if using SSL
+    type: number
+  RabbitClientUseSSL:
+    default: false
+    description: >
+        Rabbit client subscriber parameter to specify
+        an SSL connection to the RabbitMQ host.
+    type: string
+  KeystoneRegion:
+    type: string
+    default: 'regionOne'
+    description: Keystone region for endpoint
+  GlanceApiPolicies:
+    description: |
+      A hash of policies to configure for Glance API.
+      e.g. { glance-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 conditions:
   use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
+  glance_workers_unset: {equals : [{get_param: GlanceWorkers}, '']}
 
 resources:
 
@@ -60,15 +137,10 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       EnableInternalTLS: {get_param: EnableInternalTLS}
 
-  GlanceBase:
-    type: ./glance-base.yaml
-    properties:
-      ServiceNetMap: {get_param: ServiceNetMap}
-      DefaultPasswords: {get_param: DefaultPasswords}
-      EndpointMap: {get_param: EndpointMap}
-
 outputs:
   role_data:
     description: Role data for the Glance API role.
@@ -80,33 +152,34 @@ outputs:
         - glance
       config_settings:
         map_merge:
-          - get_attr: [GlanceBase, role_data, config_settings]
           - get_attr: [TLSProxyBase, role_data, config_settings]
           - glance::api::database_connection:
-              list_join:
-                - ''
-                - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                  - '://glance:'
-                  - {get_param: GlancePassword}
-                  - '@'
-                  - {get_param: [EndpointMap, MysqlInternal, host]}
-                  - '/glance'
-                  - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+              make_url:
+                scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+                username: glance
+                password: {get_param: GlancePassword}
+                host: {get_param: [EndpointMap, MysqlInternal, host]}
+                path: /glance
+                query:
+                  read_default_file: /etc/my.cnf.d/tripleo.cnf
+                  read_default_group: tripleo
             glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]}
-            glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+            glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
             glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
             glance::api::enable_v1_api: false
             glance::api::enable_v2_api: true
             glance::api::authtoken::password: {get_param: GlancePassword}
             glance::api::enable_proxy_headers_parsing: true
             glance::api::debug: {get_param: Debug}
-            glance::api::workers: {get_param: GlanceWorkers}
+            glance::policy::policies: {get_param: GlanceApiPolicies}
             tripleo.glance_api.firewall_rules:
               '112 glance_api':
                 dport:
                   - 9292
                   - 13292
             glance::api::authtoken::project_name: 'service'
+            glance::keystone::authtoken::user_domain_name: 'Default'
+            glance::keystone::authtoken::project_domain_name: 'Default'
             glance::api::pipeline: 'keystone'
             glance::api::show_image_direct_url: true
             # NOTE: bind IP is found in Heat replacing the network name with the
@@ -132,10 +205,47 @@ outputs:
               - use_tls_proxy
               - 'localhost'
               - {get_param: [ServiceNetMap, GlanceApiNetwork]}
+            glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
+            glance_log_file: {get_param: GlanceLogFile}
+            glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneV3Internal, uri] }
+            glance::backend::swift::swift_store_user: service:glance
+            glance::backend::swift::swift_store_key: {get_param: GlancePassword}
+            glance::backend::swift::swift_store_create_container_on_put: true
+            glance::backend::swift::swift_store_auth_version: 3
+            glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+            glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+            glance_backend: {get_param: GlanceBackend}
+            glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
+            glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
+            glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
+            glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+            glance::notify::rabbitmq::notification_driver: messagingv2
+            tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
+            tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
+            tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
+          -
+            if:
+            - glance_workers_unset
+            - {}
+            - glance::api::workers: {get_param: GlanceWorkers}
+      service_config_settings:
+        keystone:
+          glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
+          glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
+          glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
+          glance::keystone::auth::password: {get_param: GlancePassword }
+          glance::keystone::auth::region: {get_param: KeystoneRegion}
+          glance::keystone::auth::tenant: 'service'
+        mysql:
+          glance::db::mysql::password: {get_param: GlancePassword}
+          glance::db::mysql::user: glance
+          glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          glance::db::mysql::dbname: glance
+          glance::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
       step_config: |
         include ::tripleo::profile::base::glance::api
-      service_config_settings:
-        get_attr: [GlanceBase, role_data, service_config_settings]
       upgrade_tasks:
         - name: Check if glance_api is deployed
           command: systemctl is-enabled openstack-glance-api
diff --git a/puppet/services/glance-base.yaml b/puppet/services/glance-base.yaml
deleted file mode 100644 (file)
index f554898..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-heat_template_version: ocata
-
-description: >
-  OpenStack Glance Common settings with Puppet
-
-parameters:
-  ServiceNetMap:
-    default: {}
-    description: Mapping of service_name -> network name. Typically set
-                 via parameter_defaults in the resource registry.  This
-                 mapping overrides those in ServiceNetMapDefaults.
-    type: json
-  DefaultPasswords:
-    default: {}
-    type: json
-  EndpointMap:
-    default: {}
-    description: Mapping of service endpoint -> protocol. Typically set
-                 via parameter_defaults in the resource registry.
-    type: json
-  CephClientUserName:
-    default: openstack
-    type: string
-  Debug:
-    default: ''
-    description: Set to True to enable debugging on all services.
-    type: string
-  GlanceNotifierStrategy:
-    description: Strategy to use for Glance notification queue
-    type: string
-    default: noop
-  GlanceLogFile:
-    description: The filepath of the file to use for logging messages from Glance.
-    type: string
-    default: ''
-  GlancePassword:
-    description: The password for the glance service and db account, used by the glance services.
-    type: string
-    hidden: true
-  GlanceBackend:
-    default: swift
-    description: The short name of the Glance backend to use. Should be one
-      of swift, rbd, or file
-    type: string
-    constraints:
-    - allowed_values: ['swift', 'file', 'rbd']
-  GlanceNfsEnabled:
-    default: false
-    description: >
-      When using GlanceBackend 'file', mount NFS share for image storage.
-    type: boolean
-  GlanceNfsShare:
-    default: ''
-    description: >
-      NFS share to mount for image storage (when GlanceNfsEnabled is true)
-    type: string
-  GlanceNfsOptions:
-    default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
-    description: >
-      NFS mount options for image storage (when GlanceNfsEnabled is true)
-    type: string
-  GlanceRbdPoolName:
-    default: images
-    type: string
-  RabbitPassword:
-    description: The password for RabbitMQ
-    type: string
-    hidden: true
-  RabbitUserName:
-    default: guest
-    description: The username for RabbitMQ
-    type: string
-  RabbitClientPort:
-    default: 5672
-    description: Set rabbit subscriber port, change this if using SSL
-    type: number
-  RabbitClientUseSSL:
-    default: false
-    description: >
-        Rabbit client subscriber parameter to specify
-        an SSL connection to the RabbitMQ host.
-    type: string
-  KeystoneRegion:
-    type: string
-    default: 'regionOne'
-    description: Keystone region for endpoint
-
-outputs:
-  role_data:
-    description: Role data for the Glance common role.
-    value:
-      service_name: glance_base
-      config_settings:
-        glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
-        glance_log_file: {get_param: GlanceLogFile}
-        glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
-        glance::backend::swift::swift_store_user: service:glance
-        glance::backend::swift::swift_store_key: {get_param: GlancePassword}
-        glance::backend::swift::swift_store_create_container_on_put: true
-        glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
-        glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
-        glance_backend: {get_param: GlanceBackend}
-        glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
-        glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
-        glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
-        glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
-        glance::notify::rabbitmq::notification_driver: messagingv2
-        tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
-        tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
-        tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
-      service_config_settings:
-        keystone:
-          glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
-          glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
-          glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
-          glance::keystone::auth::password: {get_param: GlancePassword }
-          glance::keystone::auth::region: {get_param: KeystoneRegion}
-          glance::keystone::auth::tenant: 'service'
-        mysql:
-          glance::db::mysql::password: {get_param: GlancePassword}
-          glance::db::mysql::user: glance
-          glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-          glance::db::mysql::dbname: glance
-          glance::db::mysql::allowed_hosts:
-            - '%'
-            - "%{hiera('mysql_bind_host')}"
index 08a939a..2411d42 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Gnocchi service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -44,6 +52,12 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  GnocchiApiPolicies:
+    description: |
+      A hash of policies to configure for Gnocchi API.
+      e.g. { gnocchi-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
 
@@ -53,6 +67,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
   ApacheServiceBase:
     type: ./apache.yaml
@@ -60,6 +76,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       EnableInternalTLS: {get_param: EnableInternalTLS}
 
 outputs:
@@ -83,10 +101,13 @@ outputs:
             gnocchi::api::enabled: true
             gnocchi::api::enable_proxy_headers_parsing: true
             gnocchi::api::service_name: 'httpd'
-            gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+            gnocchi::policy::policies: {get_param: GnocchiApiPolicies}
+            gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
             gnocchi::keystone::authtoken::project_name: 'service'
+            gnocchi::keystone::authtoken::user_domain_name: 'Default'
+            gnocchi::keystone::authtoken::project_domain_name: 'Default'
             gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS}
             gnocchi::wsgi::apache::servername:
               str_replace:
@@ -103,10 +124,6 @@ outputs:
             # internal_api_subnet - > IP/CIDR
             gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
             gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi'
-
-            gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
-            gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
-            gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]}
       step_config: |
         include ::tripleo::profile::base::gnocchi::api
       service_config_settings:
@@ -128,6 +145,12 @@ outputs:
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: Stop gnocchi_api service (running under httpd)
-          tags: step1
-          service: name=httpd state=stopped
+        yaql:
+          expression: $.data.apache_upgrade + $.data.gnocchi_api_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            gnocchi_api_upgrade:
+              - name: Stop gnocchi_api service (running under httpd)
+                tags: step1
+                service: name=httpd state=stopped
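
The gnocchi-api hunk above merges the Apache base template's upgrade tasks with the service's own list via a yaql expression that simply concatenates the two lists. A trimmed sketch of that output fragment, assuming an ApacheServiceBase resource like the one in the template; the task shown is illustrative:

  upgrade_tasks:
    yaql:
      expression: $.data.apache_upgrade + $.data.service_upgrade
      data:
        apache_upgrade:
          get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
        service_upgrade:
          - name: Stop example service (running under httpd)   # illustrative task
            tags: step1
            service: name=httpd state=stopped
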
index c631005..d62c349 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Gnocchi service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -22,6 +30,10 @@ parameters:
     default: 'mysql'
     description: The short name of the Gnocchi indexer backend to use.
     type: string
+  MetricProcessingDelay:
+    default: 60
+    description: Delay between processing metrics.
+    type: number
   GnocchiPassword:
     description: The password for the gnocchi service and db account.
     type: string
@@ -32,10 +44,6 @@ parameters:
   CephClientUserName:
     default: openstack
     type: string
-  KeystoneRegion:
-    type: string
-    default: 'regionOne'
-    description: Keystone region for endpoint
   RedisPassword:
     description: The password for the redis service account.
     type: string
@@ -59,19 +67,21 @@ outputs:
         gnocchi_redis_password: {get_param: RedisPassword}
         gnocchi::debug: {get_param: Debug}
         gnocchi::db::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://gnocchi:'
-              - {get_param: GnocchiPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/gnocchi'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: gnocchi
+            password: {get_param: GnocchiPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /gnocchi
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
         gnocchi::db::sync::extra_opts: '--skip-storage'
+        gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
         gnocchi::storage::swift::swift_user: 'service:gnocchi'
-        gnocchi::storage::swift::swift_auth_version: 2
+        gnocchi::storage::swift::swift_auth_version: 3
         gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
+        gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneV3Internal, uri]}
         gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName}
         gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName}
         gnocchi::storage::ceph::ceph_keyring:
index 9d76c2e..5ada99f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Gnocchi service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -33,6 +41,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index bb8d3bc..5ba1dfc 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Gnocchi service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index ae22616..1866bb9 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   HAProxy deployment with TLS enabled, powered by certmonger
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 6013b02..7ebacdb 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   HAProxy deployment with TLS enabled, powered by certmonger
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index bd5b9ef..a71491c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   HAproxy service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -37,6 +45,11 @@ parameters:
   MonitoringSubscriptionHaproxy:
     default: 'overcloud-haproxy'
     type: string
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
 
 resources:
 
@@ -46,6 +59,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
   HAProxyInternalTLS:
     type: OS::TripleO::Services::HAProxyInternalTLS
@@ -53,6 +68,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -71,6 +88,7 @@ outputs:
             tripleo::haproxy::haproxy_stats_user: {get_param: HAProxyStatsUser}
             tripleo::haproxy::haproxy_stats_password: {get_param: HAProxyStatsPassword}
             tripleo::haproxy::redis_password: {get_param: RedisPassword}
+            tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
             tripleo::profile::base::haproxy::certificates_specs:
               map_merge:
                 - get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
@@ -96,8 +114,6 @@ outputs:
           when: haproxy_enabled.rc == 0
           service: name=haproxy state=started
       metadata_settings:
-        yaql:
-          expression: '[].concat(coalesce($.data.internal, []), coalesce($.data.public, []))'
-          data:
-            public: {get_attr: [HAProxyPublicTLS, role_data, metadata_settings]}
-            internal: {get_attr: [HAProxyInternalTLS, role_data, metadata_settings]}
+        list_concat:
+          - {get_attr: [HAProxyPublicTLS, role_data, metadata_settings]}
+          - {get_attr: [HAProxyInternalTLS, role_data, metadata_settings]}
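
The haproxy hunk above swaps a yaql expression for the lighter list_concat function (also a pike-level intrinsic) to join the public and internal TLS metadata_settings. A self-contained sketch of what list_concat evaluates to:

  heat_template_version: pike

  outputs:
    combined:
      description: list_concat joins its argument lists into one
      value:
        list_concat:
          - [alpha, beta]
          - [gamma]
        # resolves to [alpha, beta, gamma]
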
index 483f0a4..92d73cf 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Heat CloudFormation API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -38,14 +46,33 @@ parameters:
     default:
       tag: openstack.heat.api.cfn
       path: /var/log/heat/heat-api-cfn.log
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+  heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
 
 resources:
+
+  ApacheServiceBase:
+    type: ./apache.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
+
   HeatBase:
     type: ./heat-base.yaml
     properties:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -59,19 +86,32 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [HeatBase, role_data, config_settings]
-          - heat::api_cfn::workers: {get_param: HeatWorkers}
-            tripleo.heat_api_cfn.firewall_rules:
+          - get_attr: [ApacheServiceBase, role_data, config_settings]
+          - tripleo.heat_api_cfn.firewall_rules:
               '125 heat_cfn':
                 dport:
                   - 8000
                   - 13800
-            # NOTE: bind IP is found in Heat replacing the network name with the
-            # local node IP for the given network; replacement examples
-            # (eg. for internal_api):
+            heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+            heat::wsgi::apache_api_cfn::ssl: {get_param: EnableInternalTLS}
+            heat::api_cfn::service_name: 'httpd'
+            # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+            # for the given network; replacement examples (eg. for internal_api):
             # internal_api -> IP
             # internal_api_uri -> [IP]
             # internal_api_subnet - > IP/CIDR
-            heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+            heat::wsgi::apache_api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+            heat::wsgi::apache_api_cfn::servername:
+              str_replace:
+                template:
+                  "%{hiera('fqdn_$NETWORK')}"
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+          -
+            if:
+            - heat_workers_zero
+            - {}
+            - heat::wsgi::apache_api_cfn::workers: {get_param: HeatWorkers}
       step_config: |
         include ::tripleo::profile::base::heat::api_cfn
       service_config_settings:
@@ -94,7 +134,16 @@ outputs:
           shell: /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b'
           when: heat_api_cfn_enabled.rc == 0
           tags: step0,validation
-        - name: Stop heat_api_cfn service
+        - name: check for heat_api_cfn running under apache (post upgrade)
           tags: step1
-          when: heat_api_cfn_enabled.rc == 0
-          service: name=openstack-heat-api-cfn state=stopped
+          shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cfn_wsgi"
+          register: heat_api_cfn_apache
+          ignore_errors: true
+        - name: Stop heat_api_cfn service (running under httpd)
+          tags: step1
+          service: name=httpd state=stopped
+          when: heat_api_cfn_apache.rc == 0
+        - name: Stop and disable heat_api_cfn service (pre-upgrade not under httpd)
+          tags: step1
+          when: heat_api_cfn_enabled.rc == 0
+          service: name=openstack-heat-api-cfn state=stopped enabled=no
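
A note on the worker handling introduced above (the same pattern repeats in the heat-api-cloudwatch and heat-api templates that follow): a template condition plus the if intrinsic keeps heat::wsgi::apache_api_cfn::workers out of hiera entirely when HeatWorkers is 0, so the puppet module default applies. A minimal, hypothetical sketch of the pattern with invented key names:

    heat_template_version: pike

    parameters:
      HeatWorkers:
        type: number
        default: 0

    conditions:
      heat_workers_zero: {equals: [{get_param: HeatWorkers}, 0]}

    outputs:
      example_settings:
        value:
          map_merge:
            - example::base::setting: true
            # Merge an empty map when HeatWorkers is 0 so the key is simply
            # absent; otherwise set the worker count explicitly.
            - if:
              - heat_workers_zero
              - {}
              - example::wsgi::workers: {get_param: HeatWorkers}
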
index 8879bcb..a740d20 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Heat CloudWatch API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -30,14 +38,33 @@ parameters:
     default:
       tag: openstack.heat.api.cloudwatch
       path: /var/log/heat/heat-api-cloudwatch.log
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+  heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
 
 resources:
+
+  ApacheServiceBase:
+    type: ./apache.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
+
   HeatBase:
     type: ./heat-base.yaml
     properties:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -51,19 +78,34 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [HeatBase, role_data, config_settings]
-          - heat::api_cloudwatch::workers: {get_param: HeatWorkers}
-            tripleo.heat_api_cloudwatch.firewall_rules:
+          - get_attr: [ApacheServiceBase, role_data, config_settings]
+          - tripleo.heat_api_cloudwatch.firewall_rules:
               '125 heat_cloudwatch':
                 dport:
                   - 8003
                   - 13003
-            # NOTE: bind IP is found in Heat replacing the network name with the
-            # local node IP for the given network; replacement examples
-            # (eg. for internal_api):
+            heat::api_cloudwatch::bind_host:
+              get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+            heat::wsgi::apache_api_cloudwatch::ssl: {get_param: EnableInternalTLS}
+            heat::api_cloudwatch::service_name: 'httpd'
+            # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+            # for the given network; replacement examples (eg. for internal_api):
             # internal_api -> IP
             # internal_api_uri -> [IP]
             # internal_api_subnet -> IP/CIDR
-            heat::api_cloudwatch::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+            heat::wsgi::apache_api_cloudwatch::bind_host:
+              get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+            heat::wsgi::apache_api_cloudwatch::servername:
+              str_replace:
+                template:
+                  "%{hiera('fqdn_$NETWORK')}"
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]}
+          -
+            if:
+            - heat_workers_zero
+            - {}
+            - heat::wsgi::apache_api_cloudwatch::workers: {get_param: HeatWorkers}
       step_config: |
         include ::tripleo::profile::base::heat::api_cloudwatch
       upgrade_tasks:
@@ -76,7 +118,16 @@ outputs:
           shell: /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b'
           when: heat_api_cloudwatch_enabled.rc == 0
           tags: step0,validation
-        - name: Stop heat_api_cloudwatch service
+        - name: check for heat_api_cloudwatch running under apache (post upgrade)
+          tags: step1
+          shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cloudwatch_wsgi"
+          register: heat_api_cloudwatch_apache
+          ignore_errors: true
+        - name: Stop heat_api_cloudwatch service (running under httpd)
+          tags: step1
+          service: name=httpd state=stopped
+          when: heat_api_cloudwatch_apache.rc == 0
+        - name: Stop and disable heat_api_cloudwatch service (pre-upgrade not under httpd)
           tags: step1
           when: heat_api_cloudwatch_enabled.rc == 0
-          service: name=openstack-heat-api-cloudwatch state=stopped
+          service: name=openstack-heat-api-cloudwatch state=stopped enabled=no
index 2464011..ced7f0c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Heat API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -38,14 +46,39 @@ parameters:
     default:
       tag: openstack.heat.api
       path: /var/log/heat/heat-api.log
+  EnableInternalTLS:
+    type: boolean
+    default: false
+  HeatApiPolicies:
+    description: |
+      A hash of policies to configure for Heat API.
+      e.g. { heat-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
+
+conditions:
+  heat_workers_zero: {equals : [{get_param: HeatWorkers}, 0]}
 
 resources:
+
+  ApacheServiceBase:
+    type: ./apache.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
+
   HeatBase:
     type: ./heat-base.yaml
     properties:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -59,19 +92,33 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [HeatBase, role_data, config_settings]
-          - heat::api::workers: {get_param: HeatWorkers}
-            tripleo.heat_api.firewall_rules:
+          - get_attr: [ApacheServiceBase, role_data, config_settings]
+          - tripleo.heat_api.firewall_rules:
               '125 heat_api':
                 dport:
                   - 8004
                   - 13004
-            # NOTE: bind IP is found in Heat replacing the network name with the
-            # local node IP for the given network; replacement examples
-            # (eg. for internal_api):
+            heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+            heat::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+            heat::policy::policies: {get_param: HeatApiPolicies}
+            heat::api::service_name: 'httpd'
+            # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+            # for the given network; replacement examples (eg. for internal_api):
             # internal_api -> IP
             # internal_api_uri -> [IP]
             # internal_api_subnet -> IP/CIDR
-            heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+            heat::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+            heat::wsgi::apache_api::servername:
+              str_replace:
+                template:
+                  "%{hiera('fqdn_$NETWORK')}"
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, HeatApiNetwork]}
+          -
+            if:
+            - heat_workers_zero
+            - {}
+            - heat::wsgi::apache_api::workers: {get_param: HeatWorkers}
       step_config: |
         include ::tripleo::profile::base::heat::api
       service_config_settings:
@@ -94,7 +141,16 @@ outputs:
           shell: /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b'
           when: heat_api_enabled.rc == 0
           tags: step0,validation
-        - name: Stop heat_api service
+        - name: check for heat_api running under apache (post upgrade)
+          tags: step1
+          shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_wsgi"
+          register: heat_api_apache
+          ignore_errors: true
+        - name: Stop heat_api service (running under httpd)
+          tags: step1
+          service: name=httpd state=stopped
+          when: heat_api_apache.rc == 0
+        - name: Stop and disable heat_api service (pre-upgrade not under httpd)
           tags: step1
           when: heat_api_enabled.rc == 0
-          service: name=openstack-heat-api state=stopped
+          service: name=openstack-heat-api state=stopped enabled=no
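
The *Policies parameters added in this commit (HeatApiPolicies here, plus the Ironic, Mistral and Keystone equivalents below) all expect the same hash shape shown in their descriptions: each entry names a policy key and its rule. A hypothetical environment-file sketch; the rules themselves are only examples:

    # environment file sketch (illustrative values only)
    parameter_defaults:
      HeatApiPolicies:
        heat-context_is_admin:
          key: context_is_admin
          value: 'role:admin'
        heat-stacks_global_index:
          key: 'stacks:global_index'
          value: 'rule:deny_everybody'
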
index e83a9ed..dfd823d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Heat base service. Shared for all Heat services.
@@ -39,6 +39,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -125,7 +133,9 @@ outputs:
             value: 'role:admin'
         heat::rabbit_heartbeat_timeout_threshold: 60
         heat::keystone::authtoken::project_name: 'service'
-        heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+        heat::keystone::authtoken::user_domain_name: 'Default'
+        heat::keystone::authtoken::project_domain_name: 'Default'
+        heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         heat::keystone::authtoken::password: {get_param: HeatPassword}
         heat::keystone::domain::domain_name: 'heat_stack'
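
On the auth_uri change above (and the matching ones in the ironic and manila templates further down): in the endpoint map, uri carries a versioned suffix while uri_no_suffix stops at the port, so together with the explicit 'Default' domains this moves keystonemiddleware to an unversioned auth_uri and version discovery. Purely illustrative values, assuming the historical /v2.0 suffix:

    # illustrative only; real values are generated from EndpointMap
    KeystoneInternal:
      uri:           http://192.0.2.10:5000/v2.0
      uri_no_suffix: http://192.0.2.10:5000
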
index a166f3a..1d5f054 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Heat Engine service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -65,6 +73,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -84,35 +94,33 @@ outputs:
             heat::engine::max_nested_stack_depth: 6
             heat::engine::max_resources_per_stack: {get_param: HeatMaxResourcesPerStack}
             heat::engine::heat_metadata_server_url:
-              list_join:
-                - ''
-                - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
-                  - '://'
-                  - {get_param: [EndpointMap, HeatCfnPublic, host]}
-                  - ':'
-                  - {get_param: [EndpointMap, HeatCfnPublic, port]}
+              make_url:
+                scheme: {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+                host: {get_param: [EndpointMap, HeatCfnPublic, host]}
+                port: {get_param: [EndpointMap, HeatCfnPublic, port]}
             heat::engine::heat_waitcondition_server_url:
-              list_join:
-                - ''
-                - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
-                  - '://'
-                  - {get_param: [EndpointMap, HeatCfnPublic, host]}
-                  - ':'
-                  - {get_param: [EndpointMap, HeatCfnPublic, port]}
-                  - '/v1/waitcondition'
+              make_url:
+                scheme: {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+                host: {get_param: [EndpointMap, HeatCfnPublic, host]}
+                port: {get_param: [EndpointMap, HeatCfnPublic, port]}
+                path: /v1/waitcondition
             heat::engine::convergence_engine: {get_param: HeatConvergenceEngine}
             tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge}
             heat::database_connection:
+              make_url:
+                scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+                username: heat
+                password: {get_param: HeatPassword}
+                host: {get_param: [EndpointMap, MysqlInternal, host]}
+                path: /heat
+                query:
+                  read_default_file: /etc/my.cnf.d/tripleo.cnf
+                  read_default_group: tripleo
+            heat::keystone_ec2_uri:
               list_join:
-                - ''
-                - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                  - '://heat:'
-                  - {get_param: HeatPassword}
-                  - '@'
-                  - {get_param: [EndpointMap, MysqlInternal, host]}
-                  - '/heat'
-                  - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
-            heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
+              - ''
+              - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+                - '/ec2tokens'
             heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
             heat::engine::auth_encryption_key:
               yaql:
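
make_url, adopted above and again in ironic-base, keystone, manila-base and mistral-base below, is the pike intrinsic that replaces the hand-rolled list_join URL assembly. A minimal, hypothetical sketch of the database-connection form with placeholder values:

    heat_template_version: pike

    outputs:
      db_connection_example:
        value:
          make_url:
            scheme: mysql+pymysql      # placeholder; normally taken from EndpointMap
            username: heat
            password: example-password # placeholder
            host: 192.0.2.20           # placeholder
            path: /heat
            query:
              read_default_file: /etc/my.cnf.d/tripleo.cnf
              read_default_group: tripleo
        # resolves to roughly:
        # mysql+pymysql://heat:example-password@192.0.2.20/heat?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo
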
index 60b009a..2914ee2 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Horizon service configured with Puppet
@@ -17,6 +17,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -40,6 +48,10 @@ parameters:
     type: string
     hidden: true
     default: ''
+  HorizonSecureCookies:
+    description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon
+    type: boolean
+    default: true
   MemcachedIPv6:
     default: false
     description: Enable IPv6 features in Memcached.
@@ -78,7 +90,7 @@ outputs:
             access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
             options: ['FollowSymLinks','MultiViews']
           horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
-          horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+          horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
           horizon::password_validator: {get_param: [HorizonPasswordValidator]}
           horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]}
           horizon::secret_key:
@@ -88,6 +100,7 @@ outputs:
                 passwords:
                   - {get_param: HorizonSecret}
                   - {get_param: [DefaultPasswords, horizon_secret]}
+          horizon::secure_cookies: {get_param: [HorizonSecureCookies]}
           memcached_ipv6: {get_param: MemcachedIPv6}
         -
           if:
index 7aab6f8..945033a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ironic API configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,12 @@ parameters:
     type: string
     default: 'regionOne'
     description: Keystone region for endpoint
+  IronicApiPolicies:
+    description: |
+      A hash of policies to configure for Ironic API.
+      e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
   IronicBase:
@@ -37,6 +51,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -49,8 +65,10 @@ outputs:
           - get_attr: [IronicBase, role_data, config_settings]
           - ironic::api::authtoken::password: {get_param: IronicPassword}
             ironic::api::authtoken::project_name: 'service'
+            ironic::api::authtoken::user_domain_name: 'Default'
+            ironic::api::authtoken::project_domain_name: 'Default'
             ironic::api::authtoken::username: 'ironic'
-            ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+            ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
             ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             # NOTE: bind IP is found in Heat replacing the network name with the
             # local node IP for the given network; replacement examples
@@ -62,6 +80,7 @@ outputs:
             ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
             # This is used to build links in responses
             ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+            ironic::policy::policies: {get_param: IronicApiPolicies}
             tripleo.ironic_api.firewall_rules:
               '133 ironic api':
                 dport:
index d186b04..da48516 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ironic services configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -52,15 +60,15 @@ outputs:
       service_name: ironic_base
       config_settings:
         ironic::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://ironic:'
-              - {get_param: IronicPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/ironic'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: ironic
+            password: {get_param: IronicPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /ironic
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
         ironic::debug: {get_param: Debug}
         ironic::rabbit_userid: {get_param: RabbitUserName}
         ironic::rabbit_password: {get_param: RabbitPassword}
index f9547be..76b1bf3 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Ironic conductor configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -32,10 +40,30 @@ parameters:
                  created yet) and should be changed to an actual UUID in
                  a post-deployment stack update.
     type: string
+  IronicDefaultBootOption:
+    default: 'local'
+    description: How to boot the bare metal instances. Set to 'local' (the
+                 default) to use the local bootloader (requires grub2 for partition
+                 images). Set to 'netboot' to make the instances boot from
+                 controllers using PXE/iPXE.
+    type: string
+  IronicDefaultNetworkInterface:
+    default: 'flat'
+    description: Network interface implementation to use by default.
+                 Set to "flat" (the default) to use one flat provider network.
+                 Set to "neutron" to make Ironic interact with the Neutron
+                 ML2 driver to enable other network types and certain
+                 advanced networking features. Requires
+                 IronicProvisioningNetwork to be correctly set.
+    type: string
   IronicEnabledDrivers:
     default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo']
     description: Enabled Ironic drivers
     type: comma_delimited_list
+  IronicEnabledHardwareTypes:
+    default: ['ipmi']
+    description: Enabled Ironic hardware types
+    type: comma_delimited_list
   IronicIPXEEnabled:
     default: true
     description: Whether to use iPXE instead of PXE for deployment.
@@ -44,6 +72,19 @@ parameters:
     default: 8088
     description: Port to use for serving images when iPXE is used.
     type: string
+  IronicPassword:
+    description: The password for the Ironic service and db account, used by the Ironic services
+    type: string
+    hidden: true
+  IronicProvisioningNetwork:
+    default: 'provisioning'
+    description: Name or UUID of the *overcloud* network used for provisioning
+                 of bare metal nodes, if IronicDefaultNetworkInterface is
+                 set to "neutron". The default value of "provisioning" can be
+                 left during the initial deployment (when no networks are
+                 created yet) and should be changed to an actual UUID in
+                 a post-deployment stack update.
+    type: string
   MonitoringSubscriptionIronicConductor:
     default: 'overcloud-ironic-conductor'
     type: string
@@ -55,6 +96,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -65,12 +108,13 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [IronicBase, role_data, config_settings]
-          # FIXME: I have no idea why neutron_url is in "api" manifest
-          - ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
-            ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
+          - ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
             ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
             ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
+            ironic::conductor::provisioning_network: {get_param: IronicProvisioningNetwork}
+            ironic::conductor::default_boot_option: {get_param: IronicDefaultBootOption}
             ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
+            ironic::conductor::enabled_hardware_types: {get_param: IronicEnabledHardwareTypes}
             # We need an endpoint containing a real IP, not a VIP here
             ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]}
             ironic::conductor::http_url:
@@ -91,6 +135,9 @@ outputs:
             # NOTE(dtantsur): UEFI only works with iPXE currently for us
             ironic::drivers::pxe::uefi_pxe_config_template: '$pybasedir/drivers/modules/ipxe_config.template'
             ironic::drivers::pxe::uefi_pxe_bootfile_name: 'ipxe.efi'
+            ironic::drivers::interfaces::enabled_console_interfaces: ['ipmitool-socat', 'no-console']
+            ironic::drivers::interfaces::enabled_network_interfaces: ['flat', 'neutron']
+            ironic::drivers::interfaces::default_network_interface: {get_param: IronicDefaultNetworkInterface}
             tripleo.ironic_conductor.firewall_rules:
               '134 ironic conductor TFTP':
                 dport: 69
@@ -104,7 +151,40 @@ outputs:
             # the VIP, but rather a real IP of the host.
             ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
             ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
-
+            # Credentials to access other services
+            ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::glance::username: 'ironic'
+            ironic::glance::password: {get_param: IronicPassword}
+            ironic::glance::project_name: 'service'
+            ironic::glance::user_domain_name: 'Default'
+            ironic::glance::project_domain_name: 'Default'
+            ironic::neutron::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::neutron::username: 'ironic'
+            ironic::neutron::password: {get_param: IronicPassword}
+            ironic::neutron::project_name: 'service'
+            ironic::neutron::user_domain_name: 'Default'
+            ironic::neutron::project_domain_name: 'Default'
+            ironic::service_catalog::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::service_catalog::username: 'ironic'
+            ironic::service_catalog::password: {get_param: IronicPassword}
+            ironic::service_catalog::project_name: 'service'
+            ironic::service_catalog::user_domain_name: 'Default'
+            ironic::service_catalog::project_domain_name: 'Default'
+            ironic::swift::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::swift::username: 'ironic'
+            ironic::swift::password: {get_param: IronicPassword}
+            ironic::swift::project_name: 'service'
+            ironic::swift::user_domain_name: 'Default'
+            ironic::swift::project_domain_name: 'Default'
+            # ironic-inspector support is not implemented, but let's configure
+            # the credentials for consistency.
+            ironic::drivers::inspector::enabled: false
+            ironic::drivers::inspector::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::drivers::inspector::username: 'ironic'
+            ironic::drivers::inspector::password: {get_param: IronicPassword}
+            ironic::drivers::inspector::project_name: 'service'
+            ironic::drivers::inspector::user_domain_name: 'Default'
+            ironic::drivers::inspector::project_domain_name: 'Default'
       step_config: |
         include ::tripleo::profile::base::ironic::conductor
       upgrade_tasks:
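
The new conductor parameters above are the operator-facing switches for moving bare metal provisioning from the flat network model to the neutron one. A hypothetical environment-file sketch (network names are placeholders and would normally be replaced with real UUIDs once the networks exist):

    # environment file sketch (illustrative values only)
    parameter_defaults:
      IronicDefaultBootOption: local
      IronicDefaultNetworkInterface: neutron
      IronicEnabledHardwareTypes:
        - ipmi
      # With the neutron interface these must point at actual overcloud
      # networks (names or UUIDs):
      IronicProvisioningNetwork: provisioning
      IronicCleaningNetwork: provisioning
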
index 38f9f3b..44e6b24 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Keepalived service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -52,12 +60,12 @@ outputs:
         - tripleo.keepalived.firewall_rules:
             '106 keepalived vrrp':
               proto: vrrp
-        - 
+        -
           if:
           - control_iface_empty
           - {}
           - tripleo::keepalived::control_virtual_interface: {get_param: ControlVirtualInterface}
-        - 
+        -
           if:
           - public_iface_empty
           - {}
index fec455d..3f9b0b7 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Load kernel modules with kmod and configure kernel options with sysctl.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -22,6 +30,32 @@ parameters:
     default: 1048576
     description: Configures sysctl kernel.pid_max key
     type: number
+  KernelDisableIPv6:
+    default: 0
+    description: Configures sysctl net.ipv6.{default/all}.disable_ipv6 keys
+    type: number
+  NeighbourGcThreshold1:
+    default: 1024
+    description: Configures sysctl net.ipv4.neigh.default.gc_thresh1 value.
+                 This is the minimum number of entries to keep in the ARP
+                 cache. The garbage collector will not run if there are
+                 fewer than this number of entries in the cache.
+    type: number
+  NeighbourGcThreshold2:
+    default: 2048
+    description: Configures sysctl net.ipv4.neigh.default.gc_thresh2 value.
+                 This is the soft maximum number of entries to keep in the
+                 ARP cache. The garbage collector will allow the number of
+                 entries to exceed this for 5 seconds before collection will
+                 be performed.
+    type: number
+  NeighbourGcThreshold3:
+    default: 4096
+    description: Configures sysctl net.ipv4.neigh.default.gc_thresh3 value.
+                 This is the hard maximum number of entries to keep in the
+                 ARP cache. The garbage collector will always run if there
+                 are more than this number of entries in the cache.
+    type: number
 
 outputs:
   role_data:
@@ -31,7 +65,7 @@ outputs:
       config_settings:
         kernel_modules:
           nf_conntrack: {}
-          ip_conntrack_proto_sctp: {}
+          nf_conntrack_proto_sctp: {}
         sysctl_settings:
           net.ipv4.tcp_keepalive_intvl:
             value: 1
@@ -39,10 +73,28 @@ outputs:
             value: 5
           net.ipv4.tcp_keepalive_time:
             value: 5
+          net.ipv4.conf.default.send_redirects:
+            value: 0
+          net.ipv4.conf.all.send_redirects:
+            value: 0
+          net.ipv4.conf.default.accept_redirects:
+            value: 0
+          net.ipv4.conf.default.secure_redirects:
+            value: 0
+          net.ipv4.conf.all.secure_redirects:
+            value: 0
+          net.ipv4.conf.default.log_martians:
+            value: 1
+          net.ipv4.conf.all.log_martians:
+            value: 1
           net.nf_conntrack_max:
             value: 500000
           net.netfilter.nf_conntrack_max:
             value: 500000
+          net.ipv6.conf.default.disable_ipv6:
+            value: {get_param: KernelDisableIPv6}
+          net.ipv6.conf.all.disable_ipv6:
+            value: {get_param: KernelDisableIPv6}
           # prevent neutron bridges from autoconfiguring ipv6 addresses
           net.ipv6.conf.all.accept_ra:
             value: 0
@@ -52,9 +104,24 @@ outputs:
             value: 0
           net.ipv6.conf.default.autoconf:
             value: 0
+          net.ipv6.conf.default.accept_redirects:
+            value: 0
+          net.ipv6.conf.all.accept_redirects:
+            value: 0
           net.core.netdev_max_backlog:
             value: 10000
           kernel.pid_max:
             value: {get_param: KernelPidMax}
+          kernel.dmesg_restrict:
+            value: 1
+          fs.suid_dumpable:
+            value: 0
+          #avoid neighbour table overflow on large deployments
+          net.ipv4.neigh.default.gc_thresh1:
+            value: {get_param: NeighbourGcThreshold1}
+          net.ipv4.neigh.default.gc_thresh2:
+            value: {get_param: NeighbourGcThreshold2}
+          net.ipv4.neigh.default.gc_thresh3:
+            value: {get_param: NeighbourGcThreshold3}
       step_config: |
         include ::tripleo::profile::base::kernel
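
The three NeighbourGcThreshold parameters map directly onto the kernel's net.ipv4.neigh.default.gc_thresh1/2/3 ARP-cache settings described above. For a large deployment an operator could raise them from an environment file, for example (values illustrative):

    # environment file sketch (illustrative values only)
    parameter_defaults:
      NeighbourGcThreshold1: 4096    # gc does not run below this many entries
      NeighbourGcThreshold2: 8192    # soft limit, may be exceeded for ~5 seconds
      NeighbourGcThreshold3: 16384   # hard limit on ARP cache entries
      KernelDisableIPv6: 0           # leave IPv6 enabled
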
index f40c8d9..7262e47 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Keystone service configured with Puppet
@@ -35,7 +35,7 @@ parameters:
   KeystoneTokenProvider:
     description: The keystone token format
     type: string
-    default: 'uuid'
+    default: 'fernet'
     constraints:
       - allowed_values: ['uuid', 'fernet']
   ServiceNetMap:
@@ -47,6 +47,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -119,27 +127,27 @@ parameters:
         Cron to purge expired tokens - Ensure
     default: 'present'
   KeystoneCronTokenFlushMinute:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Minute
     default: '1'
   KeystoneCronTokenFlushHour:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Hour
-    default: '0'
+    default: '*'
   KeystoneCronTokenFlushMonthday:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Month Day
     default: '*'
   KeystoneCronTokenFlushMonth:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Month
     default: '*'
   KeystoneCronTokenFlushWeekday:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Week Day
     default: '*'
@@ -158,6 +166,22 @@ parameters:
     description: >
         Cron to purge expired tokens - User
     default: 'keystone'
+  KeystonePolicies:
+    description: |
+      A hash of policies to configure for Keystone.
+      e.g. { keystone-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
+  KeystoneLDAPDomainEnable:
+    description: Trigger to call ldap_backend puppet keystone define.
+    type: boolean
+    default: False
+  KeystoneLDAPBackendConfigs:
+    description: Hash containing the configurations for the LDAP backends
+                 configured in keystone.
+    type: json
+    default: {}
+    hidden: true
 
 resources:
 
@@ -167,10 +191,13 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       EnableInternalTLS: {get_param: EnableInternalTLS}
 
 conditions:
   keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
+  keystone_ldap_domain_enabled: {equals: [{get_param: KeystoneLDAPDomainEnable}, True]}
 
 outputs:
   role_data:
@@ -185,18 +212,19 @@ outputs:
         map_merge:
           - get_attr: [ApacheServiceBase, role_data, config_settings]
           - keystone::database_connection:
-              list_join:
-                - ''
-                - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                  - '://keystone:'
-                  - {get_param: AdminToken}
-                  - '@'
-                  - {get_param: [EndpointMap, MysqlInternal, host]}
-                  - '/keystone'
-                  - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+              make_url:
+                scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+                username: keystone
+                password: {get_param: AdminToken}
+                host: {get_param: [EndpointMap, MysqlInternal, host]}
+                path: /keystone
+                query:
+                  read_default_file: /etc/my.cnf.d/tripleo.cnf
+                  read_default_group: tripleo
             keystone::admin_token: {get_param: AdminToken}
             keystone::admin_password: {get_param: AdminPassword}
             keystone::roles::admin::password: {get_param: AdminPassword}
+            keystone::policy::policies: {get_param: KeystonePolicies}
             keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
             keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
             keystone::token_provider: {get_param: KeystoneTokenProvider}
@@ -213,6 +241,7 @@ outputs:
                 content: {get_param: KeystoneFernetKey0}
               '/etc/keystone/fernet-keys/1':
                 content: {get_param: KeystoneFernetKey1}
+            keystone::fernet_replace_keys: false
             keystone::debug: {get_param: Debug}
             keystone::rabbit_userid: {get_param: RabbitUserName}
             keystone::rabbit_password: {get_param: RabbitPassword}
@@ -232,7 +261,7 @@ outputs:
             keystone::cron::token_flush::maxdelay: 3600
             keystone::roles::admin::service_tenant: 'service'
             keystone::roles::admin::admin_tenant: 'admin'
-            keystone::cron::token_flush::destination: '/dev/null'
+            keystone::cron::token_flush::destination: '/var/log/keystone/keystone-tokenflush.log'
             keystone::config::keystone_config:
               ec2/driver:
                 value: 'keystone.contrib.ec2.backends.sql.Ec2'
@@ -293,6 +322,15 @@ outputs:
             keystone::cron::token_flush::maxdelay: {get_param: KeystoneCronTokenFlushMaxDelay}
             keystone::cron::token_flush::destination: {get_param: KeystoneCronTokenFlushDestination}
             keystone::cron::token_flush::user: {get_param: KeystoneCronTokenFlushUser}
+          -
+            if:
+            - keystone_ldap_domain_enabled
+            -
+              tripleo::profile::base::keystone::ldap_backend_enable: True
+              keystone::using_domain_config: True
+              tripleo::profile::base::keystone::ldap_backends_config:
+                get_param: KeystoneLDAPBackendConfigs
+            - {}
 
       step_config: |
         include ::tripleo::profile::base::keystone
@@ -305,10 +343,22 @@ outputs:
           keystone::db::mysql::allowed_hosts:
             - '%'
             - "%{hiera('mysql_bind_host')}"
-      # Ansible tasks to handle upgrade
-      upgrade_tasks:
-        - name: Stop keystone service (running under httpd)
-          tags: step1
-          service: name=httpd state=stopped
+        horizon:
+          if:
+          - keystone_ldap_domain_enabled
+          -
+            horizon::keystone_multidomain_support: true
+            horizon::keystone_default_domain: 'Default'
+          - {}
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
+      upgrade_tasks:
+        yaql:
+          expression: $.data.apache_upgrade + $.data.keystone_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            keystone_upgrade:
+              - name: Stop keystone service (running under httpd)
+                tags: step1
+                service: name=httpd state=stopped
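
KeystoneLDAPBackendConfigs is a hash of per-domain LDAP settings handed to the puppet ldap_backend define when KeystoneLDAPDomainEnable is true. The exact keys accepted are those of the keystone LDAP driver; the sketch below is only an assumed, illustrative shape with placeholder values:

    # environment file sketch (illustrative values only)
    parameter_defaults:
      KeystoneLDAPDomainEnable: true
      KeystoneLDAPBackendConfigs:
        tripleoldap:                               # arbitrary domain name
          url: ldap://ldap.example.com             # placeholder server
          user: cn=lookup,dc=example,dc=com        # placeholder bind DN
          password: example-password               # placeholder
          suffix: dc=example,dc=com
          user_tree_dn: ou=Users,dc=example,dc=com
          user_objectclass: person
          user_id_attribute: cn
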
index 65ad80e..21049a9 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Fluentd base service
 
@@ -12,6 +12,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: >
index 57595b8..e34f31f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Fluentd client configured with Puppet
 
@@ -12,6 +12,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: >
@@ -24,6 +32,8 @@ resources:
     type: ./fluentd-base.yaml
     properties:
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
 
index 68f98af..9ade641 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Fluentd logging configuration
 
@@ -12,6 +12,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: >
index 7b78c82..2710d78 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Manila-api service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -37,6 +45,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -48,9 +58,11 @@ outputs:
         map_merge:
           - get_attr: [ManilaBase, role_data, config_settings]
           - manila::keystone::authtoken::password: {get_param: ManilaPassword}
-            manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+            manila::keystone::authtoken::auth_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
             manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
             manila::keystone::authtoken::project_name: 'service'
+            manila::keystone::authtoken::user_domain_name: 'Default'
+            manila::keystone::authtoken::project_domain_name: 'Default'
             tripleo.manila_api.firewall_rules:
               '150 manila':
                 dport:
index 36ef1ea..f4c7a07 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Manila Cephfs backend
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -39,7 +47,7 @@ parameters:
     default: 'ceph'
   ManilaCephFSNativeCephFSEnableSnapshots:
     type: boolean
-    default: true
+    default: false
   ManilaCephFSDataPoolName:
     default: manila_data
     type: string
index 23831a6..7be9239 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Manila generic backend.
@@ -58,6 +58,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     type: json
index 1f6fcf4..b106848 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Manila netapp backend.
@@ -73,6 +73,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     type: json
index c183bc0..a299fff 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Manila base service. Shared by manila-api/scheduler/share services
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -59,15 +67,15 @@ outputs:
         manila::db::database_db_max_retries: -1
         manila::db::database_max_retries: -1
         manila::sql_connection:
-          list_join:
-          - ''
-          - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-            - '://manila:'
-            - {get_param: ManilaPassword}
-            - '@'
-            - {get_param: [EndpointMap, MysqlInternal, host]}
-            - '/manila'
-            - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: manila
+            password: {get_param: ManilaPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /manila
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
       service_config_settings:
         mysql:
           manila::db::mysql::password: {get_param: ManilaPassword}
index c8114f2..b3d1ffa 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Manila-scheduler service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -41,6 +49,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 6ac0d2c..50d7f7c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Manila-share service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -33,6 +41,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 146cc30..5b98e02 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Memcached service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 49b2d4c..da77ef0 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Collectd client service
 
@@ -17,6 +17,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   CollectdDefaultPlugins:
     default:
       - disk
@@ -70,7 +78,9 @@ parameters:
   CollectdSecurityLevel:
     type: string
     description: >
-      Security level setting for remote collectd connection.
+      Security level setting for remote collectd connection. If it is
+      set to Sign or Encrypt, the CollectdPassword and CollectdUsername
+      parameters need to be set.
     default: 'None'
     constraints:
       - allowed_values:
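
As the amended CollectdSecurityLevel description notes, the Sign and Encrypt levels require credentials. A hypothetical environment-file sketch (values are placeholders):

    # environment file sketch (illustrative values only)
    parameter_defaults:
      CollectdSecurityLevel: Encrypt
      CollectdUsername: collectd-client     # placeholder
      CollectdPassword: example-password    # placeholder
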
index 1c7d6bd..0040673 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Mistral API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -22,6 +30,12 @@ parameters:
     default: 1
     description: The number of workers for the mistral-api.
     type: number
+  MistralApiPolicies:
+    description: |
+      A hash of policies to configure for Mistral API.
+      e.g. { mistral-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
   MistralBase:
@@ -30,6 +44,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -41,6 +57,7 @@ outputs:
           - get_attr: [MistralBase, role_data, config_settings]
           - mistral::api::api_workers: {get_param: MistralWorkers}
             mistral::api::bind_host: {get_param: [ServiceNetMap, MistralApiNetwork]}
+            mistral::policy::policies: {get_param: MistralApiPolicies}
             tripleo.mistral_api.firewall_rules:
               '133 mistral':
                 dport:
index e103034..2e70865 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Mistral base service. Shared for all Mistral services.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -57,15 +65,15 @@ outputs:
       service_name: mistral_base
       config_settings:
         mistral::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://mistral:'
-              - {get_param: MistralPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/mistral'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: mistral
+            password: {get_param: MistralPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /mistral
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
         mistral::rabbit_userid: {get_param: RabbitUserName}
         mistral::rabbit_password: {get_param: RabbitPassword}
         mistral::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -74,7 +82,11 @@ outputs:
         mistral::keystone_password: {get_param: MistralPassword}
         mistral::keystone_tenant: 'service'
         mistral::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
-        mistral::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
+        mistral::keystone_ec2_uri:
+          list_join:
+          - ''
+          - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+            - '/ec2tokens'
         mistral::identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
       service_config_settings:
         keystone:
index 03a2a55..6a0fed1 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Mistral Engine service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -26,6 +34,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 0f6adb0..57f29dd 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Mistral API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -26,6 +34,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
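
RoleName and RoleParameters are threaded through every service template so that role-specific overrides can reach the service level. A hedged sketch, assuming the usual <RoleName>Parameters convention and a hypothetical custom role named ComputeDpdk:

    parameter_defaults:
      ComputeDpdkParameters:           # merged into RoleParameters for that role only
        NovaReservedHostMemory: 4096   # illustrative role-specific override
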
index a8303a5..5e7e994 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Sensu base service
 
@@ -17,6 +17,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   MonitoringRabbitHost:
     description: RabbitMQ host Sensu has to connect to.
     type: string
@@ -29,7 +37,18 @@ parameters:
     default: false
     description: >
         RabbitMQ client subscriber parameter to specify an SSL connection
-        to the RabbitMQ host.
+        to the RabbitMQ host. Set MonitoringRabbitUseSSL to true without
+        specifying a private key or cert chain to use SSL transport
+        without certificate authentication.
+    type: string
+  MonitoringRabbitSSLPrivateKey:
+    default: ''
+    description: Private key to be used by Sensu to connect to the RabbitMQ host.
+    type: string
+  MonitoringRabbitSSLCertChain:
+    default: ''
+    description: >
+      Private SSL cert chain to be used by Sensu to connect to the RabbitMQ host.
     type: string
   MonitoringRabbitPassword:
     description: The RabbitMQ password used for monitoring purposes.
@@ -71,6 +90,8 @@ outputs:
         sensu::rabbitmq_password: {get_param: MonitoringRabbitPassword}
         sensu::rabbitmq_port: {get_param: MonitoringRabbitPort}
         sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL}
+        sensu::rabbitmq_ssl_private_key: {get_param: MonitoringRabbitSSLPrivateKey}
+        sensu::rabbitmq_ssl_cert_chain: {get_param: MonitoringRabbitSSLCertChain}
         sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName}
         sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost}
         sensu::redact: {get_param: SensuRedactVariables}
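
The two new Sensu parameters are passed straight through to puppet-sensu. A minimal sketch of an environment file enabling certificate-based RabbitMQ authentication (the paths are placeholders, and whether a path or inline PEM content is expected is left to puppet-sensu):

    parameter_defaults:
      MonitoringRabbitUseSSL: true
      MonitoringRabbitSSLPrivateKey: /etc/sensu/ssl/client-key.pem
      MonitoringRabbitSSLCertChain: /etc/sensu/ssl/client-cert.pem
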
index aba2b1e..25e2b94 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Sensu client configured with Puppet
 
@@ -12,6 +12,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: >
@@ -38,6 +46,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -81,4 +91,4 @@ outputs:
         - name: Install sensu package if it was disabled
           tags: step3
           yum: name=sensu state=latest
-          when: sensu_client.rc != 0
+          when: sensu_client_enabled.rc != 0
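
The corrected when: clause now references the variable actually registered by the corresponding check task. The full pattern used throughout these upgrade_tasks looks roughly like the following; the check task shown is assumed to mirror the ones added for other services in this change:

    - name: Check if sensu-client is deployed
      command: systemctl is-enabled sensu-client
      tags: common
      ignore_errors: True
      register: sensu_client_enabled
    - name: Install sensu package if it was disabled
      tags: step3
      yum: name=sensu state=latest
      when: sensu_client_enabled.rc != 0
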
index 67341ed..9b78437 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Contrail Analytics Database service deployment using puppet, this YAML file
@@ -16,6 +16,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index e3e0ec4..f85ba7c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Contrail Analytics service deployment using puppet, this YAML file
@@ -16,6 +16,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index bc56a3c..bdcdbb8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Base parameters for all Contrail Services.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 185b609..feda585 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Contrail Config service deployment using puppet, this YAML file
@@ -16,6 +16,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -37,6 +45,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 0964989..f110854 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Contrail Control service deployment using puppet, this YAML file
@@ -16,6 +16,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -37,6 +45,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index b47c2c3..5ce25a2 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Contrail Database service deployment using puppet, this YAML file
@@ -16,6 +16,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 4dfc657..da86714 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Contrail Heat plugin adds Contrail specific heat resources enabling heat
@@ -14,6 +14,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -27,6 +35,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 2f2ceb3..0c5e2a8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Opencontrail plugin
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -30,6 +38,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 765be9a..8918f6d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Provision Contrail services after deployment
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -26,6 +34,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 88adc4a..9d48e0e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Contrail TSN Service
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -41,6 +49,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index db9f083..f03ed9c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Compute OpenContrail plugin
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -27,7 +35,7 @@ parameters:
     description: vRouter physical interface
     type: string
   ContrailVrouterGateway:
-    default: '192.0.2.1'
+    default: '192.168.24.1'
     description: vRouter default gateway
     type: string
   ContrailVrouterNetmask:
@@ -42,6 +50,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
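
The new gateway default follows the 192.168.24.0/24 provisioning network used by current undercloud installs. Deployments on a different ctlplane network still override it in the usual way, for example:

    parameter_defaults:
      ContrailVrouterGateway: 10.8.0.1   # illustrative gateway for a custom ctlplane
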
index 3786cdd..f723e6a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Contrail WebUI service deployment using puppet, this YAML file
@@ -16,6 +16,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index bb191ff..8e1e0b8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Server configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -21,13 +29,13 @@ parameters:
   NeutronWorkers:
     default: ''
     description: |
-      Sets the number of API and RPC workers for the Neutron service. The
-      default value results in the configuration being left unset and a
-      system-dependent default will be chosen (usually the number of
-      processors). Please note that this can result in a large number of
-      processes and memory consumption on systems with a large core count. On
-      such systems it is recommended that a non-default value be selected that
-      matches the load requirements.
+      Sets the number of API and RPC workers for the Neutron service.
+      The default value results in the configuration being left unset
+      and a system-dependent default will be chosen (usually the number
+      of processors). Please note that this can result in a large number
+      of processes and memory consumption on systems with a large core
+      count. On such systems it is recommended that a non-default value
+      be selected that matches the load requirements.
     type: string
   NeutronPassword:
     description: The password for the neutron service and db account, used by neutron agents.
@@ -57,6 +65,15 @@ parameters:
     default:
       tag: openstack.neutron.api
       path: /var/log/neutron/server.log
+  EnableInternalTLS:
+    type: boolean
+    default: false
+  NeutronApiPolicies:
+    description: |
+      A hash of policies to configure for Neutron API.
+      e.g. { neutron-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
   # DEPRECATED: the following options are deprecated and are currently maintained
   # for backwards compatibility. They will be removed in the Ocata cycle.
@@ -71,10 +88,6 @@ parameters:
       removed in Ocata.  Future releases will enable L3 HA by default if it is
       appropriate for the deployment type. Alternate mechanisms will be
       available to override.
-  EnableInternalTLS:
-    type: boolean
-    default: false
-
 parameter_groups:
 - label: deprecated
   description: |
@@ -87,6 +100,7 @@ parameter_groups:
 
 conditions:
   use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
+  neutron_workers_unset: {equals : [{get_param: NeutronWorkers}, '']}
 
 resources:
 
@@ -96,6 +110,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       EnableInternalTLS: {get_param: EnableInternalTLS}
 
   NeutronBase:
@@ -104,6 +120,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -119,27 +137,28 @@ outputs:
           - get_attr: [NeutronBase, role_data, config_settings]
           - get_attr: [TLSProxyBase, role_data, config_settings]
           - neutron::server::database_connection:
-              list_join:
-                - ''
-                - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                  - '://neutron:'
-                  - {get_param: NeutronPassword}
-                  - '@'
-                  - {get_param: [EndpointMap, MysqlInternal, host]}
-                  - '/ovs_neutron'
-                  - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
-            neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+              make_url:
+                scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+                username: neutron
+                password: {get_param: NeutronPassword}
+                host: {get_param: [EndpointMap, MysqlInternal, host]}
+                path: /ovs_neutron
+                query:
+                  read_default_file: /etc/my.cnf.d/tripleo.cnf
+                  read_default_group: tripleo
+            neutron::policy::policies: {get_param: NeutronApiPolicies}
+            neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
             neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
-            neutron::server::api_workers: {get_param: NeutronWorkers}
-            neutron::server::rpc_workers: {get_param: NeutronWorkers}
             neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
             neutron::server::enable_proxy_headers_parsing: true
             neutron::keystone::authtoken::password: {get_param: NeutronPassword}
-            neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] }
+            neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneInternal, uri_no_suffix ] }
             neutron::server::notifications::tenant_name: 'service'
             neutron::server::notifications::project_name: 'service'
             neutron::server::notifications::password: {get_param: NovaPassword}
             neutron::keystone::authtoken::project_name: 'service'
+            neutron::keystone::authtoken::user_domain_name: 'Default'
+            neutron::keystone::authtoken::project_domain_name: 'Default'
             neutron::server::sync_db: true
             tripleo.neutron_api.firewall_rules:
               '114 neutron api':
@@ -147,6 +166,7 @@ outputs:
                   - 9696
                   - 13696
             neutron::server::router_distributed: {get_param: NeutronEnableDVR}
+            neutron::server::enable_dvr: {get_param: NeutronEnableDVR}
             # NOTE: bind IP is found in Heat replacing the network name with the local node IP
             # for the given network; replacement examples (eg. for internal_api):
             # internal_api -> IP
@@ -170,6 +190,12 @@ outputs:
               - 'localhost'
               - {get_param: [ServiceNetMap, NeutronApiNetwork]}
             tripleo::profile::base::neutron::server::l3_ha_override: {get_param: NeutronL3HA}
+          -
+            if:
+            - neutron_workers_unset
+            - {}
+            - neutron::server::api_workers: {get_param: NeutronWorkers}
+              neutron::server::rpc_workers: {get_param: NeutronWorkers}
       step_config: |
         include tripleo::profile::base::neutron::server
       service_config_settings:
@@ -202,3 +228,5 @@ outputs:
           tags: step1
           when: neutron_server_enabled.rc == 0
           service: name=neutron-server state=stopped
+      metadata_settings:
+        get_attr: [TLSProxyBase, role_data, metadata_settings]
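
With the neutron_workers_unset condition, the worker settings are rendered only when NeutronWorkers is explicitly set, and the new policy hook follows the same hash convention as the other services. An illustrative environment file exercising both (values are examples only):

    parameter_defaults:
      NeutronWorkers: '4'              # omit to keep the system-dependent default
      NeutronApiPolicies:
        neutron-context_is_admin:
          key: context_is_admin
          value: 'role:admin'
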
index 5536193..57581b5 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron base service. Shared for all Neutron agents.
@@ -22,10 +22,18 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
+  DatabaseSyncTimeout:
+    default: 300
+    description: Timeout, in seconds, for the Neutron database sync operation.
+    type: number
   NeutronDhcpAgentsPerNetwork:
     type: number
     default: 0
     description: The number of neutron dhcp agents to schedule per network
+  NeutronDnsDomain:
+    type: string
+    default: openstacklocal
+    description: Domain to use for building the hostnames.
   NeutronCorePlugin:
     default: 'ml2'
     description: |
@@ -44,10 +52,10 @@ parameters:
     description: Set to True to enable debugging on all services.
   EnableConfigPurge:
     type: boolean
-    default: true
+    default: false
     description: >
-        Remove configuration that is not generated by TripleO. Setting
-        to false may result in configuration remnants after updates/upgrades.
+        Remove configuration that is not generated by TripleO. Used to avoid
+        configuration remnants after upgrades.
   NeutronGlobalPhysnetMtu:
     type: number
     default: 1500
@@ -66,6 +74,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -91,10 +107,12 @@ outputs:
             neutron::debug: {get_param: Debug}
             neutron::purge_config: {get_param: EnableConfigPurge}
             neutron::allow_overlapping_ips: true
+            neutron::dns_domain: {get_param: NeutronDnsDomain}
             neutron::rabbit_heartbeat_timeout_threshold: 60
             neutron::host: '%{::fqdn}'
             neutron::db::database_db_max_retries: -1
             neutron::db::database_max_retries: -1
+            neutron::db::sync::db_sync_timeout: {get_param: DatabaseSyncTimeout}
             neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
           - if:
             - dhcp_agents_zero
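
Both new base parameters are ordinary parameter_defaults overrides; illustrative values:

    parameter_defaults:
      NeutronDnsDomain: internal.example.com   # used when building instance hostnames
      DatabaseSyncTimeout: 600                 # seconds allowed for the neutron db-sync step
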
diff --git a/puppet/services/neutron-bgpvpn-api.yaml b/puppet/services/neutron-bgpvpn-api.yaml
new file mode 100644 (file)
index 0000000..a70337d
--- /dev/null
@@ -0,0 +1,42 @@
+heat_template_version: pike
+
+description: >
+  BGPVPN API service configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  BgpvpnServiceProvider:
+    default: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
+    description: Backend to use as a service provider for BGPVPN
+    type: string
+
+outputs:
+  role_data:
+    description: Role data for the BGPVPN role.
+    value:
+      service_name: neutron_bgpvpn_api
+      config_settings:
+        neutron::services::bgpvpn::service_providers: {get_param: BgpvpnServiceProvider}
+      step_config: |
+        include ::tripleo::profile::base::neutron::bgpvpn
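
As with other optional Neutron service plugins, this template only takes effect once it is mapped in the resource registry. A hedged sketch of an enabling environment; the registry key name is an assumption based on the usual OS::TripleO::Services naming, and the plugin list value is illustrative:

    resource_registry:
      OS::TripleO::Services::NeutronBgpVpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
    parameter_defaults:
      NeutronServicePlugins: 'router,qos,bgpvpn'
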
diff --git a/puppet/services/neutron-bigswitch-agent.yaml b/puppet/services/neutron-bigswitch-agent.yaml
new file mode 100644 (file)
index 0000000..3faf788
--- /dev/null
@@ -0,0 +1,37 @@
+heat_template_version: pike
+
+description: >
+  Installs the Big Switch agent and enables its services
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+
+outputs:
+  role_data:
+    description: Configure the bigswitch agent services
+    value:
+      service_name: neutron_bigswitch_agent
+      step_config: |
+        include ::tripleo::profile::base::neutron::agents::bigswitch
index 5b6fcca..75b0304 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Compute Midonet plugin
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 04431e2..a165725 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Compute Nuage plugin
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -22,6 +30,10 @@ parameters:
     description: The password for the nova service account, used by nova-api.
     type: string
     hidden: true
+  NuageMetadataPort:
+    description: TCP port on which to listen for metadata server requests
+    type: string
+    default: '9697'
 
 outputs:
   role_data:
@@ -32,5 +44,11 @@ outputs:
         tripleo::profile::base::neutron::agents::nuage::nova_os_tenant_name: 'service'
         tripleo::profile::base::neutron::agents::nuage::nova_os_password: {get_param: NovaPassword}
         tripleo::profile::base::neutron::agents::nuage::nova_auth_ip: {get_param: [EndpointMap, KeystoneInternal, host]}
+        tripleo.neutron_compute_plugin_nuage.firewall_rules:
+          '118 neutron vxlan networks':
+            proto: 'udp'
+            dport: 4789
+          '100 metadata agent':
+            dport: {get_param: NuageMetadataPort}
       step_config: |
         include ::tripleo::profile::base::neutron::agents::nuage
index e3a4da9..b5ce790 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Compute OVN agent
@@ -12,6 +12,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -48,6 +56,7 @@ outputs:
         ovn::controller::ovn_encap_type: {get_param: OVNTunnelEncapType}
         ovn::controller::ovn_encap_ip: {get_param: [ServiceNetMap, NeutronApiNetwork]}
         ovn::controller::ovn_bridge_mappings: {get_param: NeutronBridgeMappings}
+        nova::compute::force_config_drive: true
         tripleo.neutron_compute_plugin_ovn.firewall_rules:
           '118 neutron vxlan networks':
                 proto: 'udp'
index 09aa619..08cecf6 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Compute Plumgrid plugin
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index fe7f9f3..91582db 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron DHCP agent configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -52,6 +60,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
diff --git a/puppet/services/neutron-l2gw-agent.yaml b/puppet/services/neutron-l2gw-agent.yaml
new file mode 100644 (file)
index 0000000..39c443f
--- /dev/null
@@ -0,0 +1,106 @@
+heat_template_version: pike
+
+description: >
+  L2 Gateway agent configured with Puppet
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  L2gwAgentOvsdbHosts:
+    default: ''
+    description: L2 gateway agent OVSDB server list.
+    type: comma_delimited_list
+  L2gwAgentEnableManager:
+    default: false
+    description: Set to true to allow the connection to be initiated by the OVSDB server.
+    type: boolean
+  L2gwAgentManagerTableListeningPort:
+    default: 6632
+    description: Port number on which the L2 gateway agent listens for OVSDB server connections.
+    type: number
+  L2gwAgentPeriodicInterval:
+    default: 20
+    description: The L2 gateway agent checks connection state with the OVSDB
+                 servers. The interval is the number of seconds between attempts.
+    type: number
+  L2gwAgentMaxConnectionRetries:
+    default: 10
+    description: Maximum number of times the L2 gateway agent retries connecting to the OVSDB server.
+    type: number
+  L2gwAgentSocketTimeout:
+    default: 30
+    description: Socket timeout for connections to the OVSDB server.
+    type: number
+  MonitoringSubscriptionNeutronL2gwAgent:
+    default: 'overcloud-neutron-l2gw-agent'
+    type: string
+  NeutronL2gwAgentLoggingSource:
+    type: json
+    default:
+      tag: openstack.neutron.agent.l2gw
+      path: /var/log/neutron/l2gw-agent.log
+
+conditions:
+  internal_manager_enabled: {equals: [{get_param: L2gwAgentEnableManager}, True]}
+
+outputs:
+  role_data:
+    description: Role data for the L2 Gateway role.
+    value:
+      service_name: neutron_l2gw_agent
+      monitoring_subscription: {get_param: MonitoringSubscriptionNeutronL2gwAgent}
+      logging_source: {get_param: NeutronL2gwAgentLoggingSource}
+      logging_groups:
+        - neutron
+      config_settings:
+        map_merge:
+          - neutron::agents::l2gw::ovsdb_hosts: {get_param: L2gwAgentOvsdbHosts}
+            neutron::agents::l2gw::enable_manager: {get_param: L2gwAgentEnableManager}
+            neutron::agents::l2gw::manager_table_listening_port: {get_param: L2gwAgentManagerTableListeningPort}
+            neutron::agents::l2gw::periodic_interval: {get_param: L2gwAgentPeriodicInterval}
+            neutron::agents::l2gw::max_connection_retries: {get_param: L2gwAgentMaxConnectionRetries}
+            neutron::agents::l2gw::socket_timeout: {get_param: L2gwAgentSocketTimeout}
+          -
+            if:
+              - internal_manager_enabled
+              - tripleo.neutron_l2gw_agent.firewall_rules:
+                  '142 neutron l2gw agent input':
+                    proto: 'tcp'
+                    dport: {get_param: L2gwAgentManagerTableListeningPort}
+              - null
+
+      step_config: |
+        include tripleo::profile::base::neutron::agents::l2gw
+      upgrade_tasks:
+        - name: Check if neutron_l2gw_agent is deployed
+          command: systemctl is-enabled neutron-l2gw-agent
+          tags: common
+          ignore_errors: True
+          register: neutron_l2gw_agent_enabled
+        - name: "PreUpgrade step0,validation: Check service neutron-l2gw-agent is running"
+          shell: /usr/bin/systemctl show 'neutron-l2gw-agent' --property ActiveState | grep '\bactive\b'
+          when: neutron_l2gw_agent_enabled.rc == 0
+          tags: step0,validation
+        - name: Stop neutron_l2gw_agent service
+          tags: step1
+          when: neutron_l2gw_agent_enabled.rc == 0
+          service: name=neutron-l2gw-agent state=stopped
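
The conditional firewall rule is only emitted when the agent itself accepts connections from the OVSDB servers; enabling that mode is a matter of flipping the manager flag, for example (the OVSDB host entry is an illustrative name:ip:port triple):

    parameter_defaults:
      L2gwAgentEnableManager: true
      L2gwAgentManagerTableListeningPort: 6632
      L2gwAgentOvsdbHosts:
        - 'ovsdb1:192.0.2.20:6632'
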
diff --git a/puppet/services/neutron-l2gw-api.yaml b/puppet/services/neutron-l2gw-api.yaml
new file mode 100644 (file)
index 0000000..1ad009b
--- /dev/null
@@ -0,0 +1,62 @@
+heat_template_version: pike
+
+description: >
+  L2 Gateway service plugin configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  L2gwServiceDefaultInterfaceName:
+    default: 'FortyGigE1/0/1'
+    description: default interface name of the L2 gateway
+    type: string
+  L2gwServiceDefaultDeviceName:
+    default: 'Switch1'
+    description: default device name of the L2 gateway
+    type: string
+  L2gwServiceQuotaL2Gateway:
+    default: 5
+    description: Quota of L2 gateways allowed per tenant.
+    type: number
+  L2gwServicePeriodicMonitoringInterval:
+    default: 5
+    description: The periodic interval, in seconds, at which the plugin runs its monitoring checks.
+    type: number
+  L2gwServiceProvider:
+    default: ["L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default"]
+    description: Backend to use as a service provider for L2 Gateway
+    type: comma_delimited_list
+
+outputs:
+  role_data:
+    description: Role data for the L2 Gateway role.
+    value:
+      service_name: neutron_l2gw_api
+      config_settings:
+        neutron::services::l2gw::default_interface_name: {get_param: L2gwServiceDefaultInterfaceName}
+        neutron::services::l2gw::default_device_name: {get_param: L2gwServiceDefaultDeviceName}
+        neutron::services::l2gw::quota_l2_gateway: {get_param: L2gwServiceQuotaL2Gateway}
+        neutron::services::l2gw::periodic_monitoring_interval: {get_param: L2gwServicePeriodicMonitoringInterval}
+        neutron::services::l2gw::service_providers: {get_param: L2gwServiceProvider}
+      step_config: |
+        include tripleo::profile::base::neutron::l2gw
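
L2gwServiceProvider is a comma_delimited_list, so several drivers can be supplied. Spelled out as an environment override, the default from this template would be:

    parameter_defaults:
      L2gwServiceProvider:
        - 'L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default'
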
index 1d6a237..1a4a4f6 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron L3 agent for DVR enabled compute nodes
@@ -14,6 +14,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -52,6 +60,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index cd9870b..0598639 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron L3 agent configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -60,6 +68,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 32ef567..593fae4 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Metadata agent configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -46,6 +54,9 @@ parameters:
       tag: openstack.neutron.agent.metadata
       path: /var/log/neutron/metadata-agent.log
 
+conditions:
+  neutron_workers_unset: {equals : [{get_param: NeutronWorkers}, '']}
+
 resources:
 
   NeutronBase:
@@ -54,6 +65,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -68,11 +81,15 @@ outputs:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
           - neutron::agents::metadata::shared_secret: {get_param: NeutronMetadataProxySharedSecret}
-            neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
             neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
             neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
             neutron::agents::metadata::auth_tenant: 'service'
             neutron::agents::metadata::metadata_ip: "%{hiera('nova_metadata_vip')}"
+          -
+            if:
+            - neutron_workers_unset
+            - {}
+            - neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
       step_config: |
         include tripleo::profile::base::neutron::metadata
       upgrade_tasks:
index 9198f35..8ace3e5 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Midonet plugin and services
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 01471ba..76d5c26 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron OVS agent configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -81,6 +89,11 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+  OpenVswitchUpgrade:
+    type: ./openvswitch-upgrade.yaml
 
 outputs:
   role_data:
@@ -121,16 +134,22 @@ outputs:
       step_config: |
         include ::tripleo::profile::base::neutron::ovs
       upgrade_tasks:
-        - name: Check if neutron_ovs_agent is deployed
-          command: systemctl is-enabled neutron-openvswitch-agent
-          tags: common
-          ignore_errors: True
-          register: neutron_ovs_agent_enabled
-        - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
-          shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
-          when: neutron_ovs_agent_enabled.rc == 0
-          tags: step0,validation
-        - name: Stop neutron_ovs_agent service
-          tags: step1
-          when: neutron_ovs_agent_enabled.rc == 0
-          service: name=neutron-openvswitch-agent state=stopped
+        yaql:
+          expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
+          data:
+            ovs_upgrade:
+              get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+            neutron_ovs_upgrade:
+              - name: Check if neutron_ovs_agent is deployed
+                command: systemctl is-enabled neutron-openvswitch-agent
+                tags: common
+                ignore_errors: True
+                register: neutron_ovs_agent_enabled
+              - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
+                shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
+                when: neutron_ovs_agent_enabled.rc == 0
+                tags: step0,validation
+              - name: Stop neutron_ovs_agent service
+                tags: step1
+                when: neutron_ovs_agent_enabled.rc == 0
+                service: name=neutron-openvswitch-agent state=stopped
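
The yaql expression simply concatenates the shared Open vSwitch upgrade tasks with the agent-specific list so both sets run during the upgrade. Conceptually (task names are placeholders):

    yaql:
      expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
      data:
        ovs_upgrade: [ovs_task_a, ovs_task_b]              # from openvswitch-upgrade.yaml
        neutron_ovs_upgrade: [agent_task_a, agent_task_b]  # the list defined inline above
    # result: [ovs_task_a, ovs_task_b, agent_task_a, agent_task_b]
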
index e25bc49..8f3f7b2 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron OVS DPDK configured with Puppet for Compute Role
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -61,6 +69,11 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+  OpenVswitchUpgrade:
+    type: ./openvswitch-upgrade.yaml
 
 outputs:
   role_data:
@@ -69,7 +82,10 @@ outputs:
       service_name: neutron_ovs_dpdk_agent
       config_settings:
         map_merge:
-          - get_attr: [NeutronOvsAgent, role_data, config_settings]
+          - map_replace:
+            - get_attr: [NeutronOvsAgent, role_data, config_settings]
+            - keys:
+                tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
           - neutron::agents::ml2::ovs::enable_dpdk: true
             neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
             neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
@@ -79,3 +95,5 @@ outputs:
             vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory}
             vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType}
       step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
+      upgrade_tasks:
+        get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
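
map_replace with a keys map renames a key while leaving its value untouched, so the firewall rules inherited from the plain OVS agent are re-published under the DPDK agent's own hiera key. Schematically (the rule contents are illustrative):

    map_replace:
      - tripleo.neutron_ovs_agent.firewall_rules:
          '118 neutron vxlan networks': {proto: udp, dport: 4789}
      - keys:
          tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
    # result:
    #   tripleo.neutron_ovs_dpdk_agent.firewall_rules:
    #     '118 neutron vxlan networks': {proto: udp, dport: 4789}
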
index becd25c..a151695 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Configure hieradata for Fujitsu C-Fabric plugin configuration
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -53,6 +61,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 85971f1..c4bf075 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: Configure hieradata for Fujitsu fossw plugin configuration
 
@@ -12,6 +12,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -56,6 +64,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
diff --git a/puppet/services/neutron-plugin-ml2-odl.yaml b/puppet/services/neutron-plugin-ml2-odl.yaml
new file mode 100644 (file)
index 0000000..6424b76
--- /dev/null
@@ -0,0 +1,55 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Neutron ML2/OpenDaylight plugin configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  OpenDaylightPortBindingController:
+    description: OpenDaylight port binding controller
+    type: string
+    default: 'network-topology'
+
+resources:
+
+  NeutronMl2Base:
+    type: ./neutron-plugin-ml2.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron ML2/ODL plugin.
+    value:
+      service_name: neutron_plugin_ml2_odl
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronMl2Base, role_data, config_settings]
+          - neutron::plugins::ml2::opendaylight::port_binding_controller: {get_param: OpenDaylightPortBindingController}
+      step_config: |
+        include ::tripleo::profile::base::neutron::plugins::ml2
index 4d4c390..4cda87b 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron ML2/OVN plugin configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -60,6 +68,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 3abd04f..130f889 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron ML2 Plugin configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -68,6 +76,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
diff --git a/puppet/services/neutron-plugin-nsx.yaml b/puppet/services/neutron-plugin-nsx.yaml
new file mode 100644 (file)
index 0000000..c4088e6
--- /dev/null
@@ -0,0 +1,74 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Neutron NSX
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  DefaultOverlayTz:
+    description: UUID of the default NSX overlay transport zone.
+    type: string
+  DefaultTier0Router:
+    description: UUID of the default tier0 router that will be used for connecting to
+                 tier1 logical routers and configuring external networks.
+    type: string
+  NsxApiManagers:
+    description: IP address of one or more NSX managers separated by commas.
+    type: string
+  NsxApiUser:
+    description: User name of NSX Manager.
+    type: string
+  NsxApiPassword:
+    description: Password of NSX Manager.
+    type: string
+  NativeDhcpMetadata:
+    default: True
+    description: Flag indicating whether native DHCP/Metadata services are used.
+    type: string
+  DhcpProfileUuid:
+    description: This is the UUID of the NSX DHCP Profile that will be used to enable
+                 native DHCP service.
+    type: string
+  MetadataProxyUuid:
+    description: This is the UUID of the NSX Metadata Proxy that will be used to enable
+                 native metadata service.
+    type: string
+
+outputs:
+  role_data:
+    description: Role data for the Neutron NSX plugin
+    value:
+      service_name: neutron_plugin_nsx
+      config_settings:
+        neutron::plugins::nsx_v3::default_overlay_tz: {get_param: DefaultOverlayTz}
+        neutron::plugins::nsx_v3::default_tier0_router: {get_param: DefaultTier0Router}
+        neutron::plugins::nsx_v3::nsx_api_managers: {get_param: NsxApiManagers}
+        neutron::plugins::nsx_v3::nsx_api_user: {get_param: NsxApiUser}
+        neutron::plugins::nsx_v3::nsx_api_password: {get_param: NsxApiPassword}
+        neutron::plugins::nsx_v3::native_dhcp_metadata: {get_param: NativeDhcpMetadata}
+        neutron::plugins::nsx_v3::dhcp_profile_uuid: {get_param: DhcpProfileUuid}
+        neutron::plugins::nsx_v3::metadata_proxy_uuid: {get_param: MetadataProxyUuid}
+
+      step_config: |
+        include tripleo::profile::base::neutron::plugins::nsx_v3
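
Several of the NSX parameters carry no defaults, so an environment enabling this plugin has to provide them all. A minimal, entirely hypothetical example:

    parameter_defaults:
      NsxApiManagers: 192.0.2.50
      NsxApiUser: admin
      NsxApiPassword: replace-me
      DefaultOverlayTz: 11111111-2222-3333-4444-555555555555
      DefaultTier0Router: 22222222-3333-4444-5555-666666666666
      DhcpProfileUuid: 33333333-4444-5555-6666-777777777777
      MetadataProxyUuid: 44444444-5555-6666-7777-888888888888
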
index e09cd70..953ffeb 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Nuage plugin
@@ -13,16 +13,20 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
   # Config specific parameters, to be provided via parameter_defaults
-  NeutronNuageOSControllerIp:
-    description: IP address of the OpenStack Controller
-    type: string
-
   NeutronNuageNetPartitionName:
     description: Specifies the title that you will see on the VSD
     type: string
@@ -67,6 +71,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -76,8 +82,7 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
-          - neutron::plugins::nuage::nuage_oscontroller_ip: {get_param: NeutronNuageOSControllerIp}
-            neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName}
+          - neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName}
             neutron::plugins::nuage::nuage_vsd_ip: {get_param: NeutronNuageVSDIp}
             neutron::plugins::nuage::nuage_vsd_username: {get_param: NeutronNuageVSDUsername}
             neutron::plugins::nuage::nuage_vsd_password: {get_param: NeutronNuageVSDPassword}
index f948dd0..a158010 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron Plumgrid plugin
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -92,15 +100,15 @@ outputs:
       service_name: neutron_plugin_plumgrid
       config_settings:
         neutron::plugins::plumgrid::connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://neutron:'
-              - {get_param: NeutronPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/ovs_neutron'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: neutron
+            password: {get_param: NeutronPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /ovs_neutron
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
         neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneInternal, host]}
         neutron::plugins::plumgrid::admin_password: {get_param: AdminPassword}
         neutron::plugins::plumgrid::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
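As a rough sketch of what the new make_url intrinsic resolves to here (the mysql+pymysql scheme is an assumption about the usual MysqlInternal protocol in EndpointMap; host and password are placeholders):

# Approximate rendered value of neutron::plugins::plumgrid::connection:
#   mysql+pymysql://neutron:<NeutronPassword>@<mysql-internal-host>/ovs_neutron?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo
# i.e. the same string the removed list_join produced, built without manual quoting.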
index d3c82d8..c124d1e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Neutron SR-IOV nic agent configured with Puppet
@@ -14,6 +14,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -54,6 +62,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
diff --git a/puppet/services/neutron-vpp-agent.yaml b/puppet/services/neutron-vpp-agent.yaml
new file mode 100644 (file)
index 0000000..cb72f67
--- /dev/null
@@ -0,0 +1,56 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Neutron ML2/VPP agent configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: >
+      Mapping of service_name -> network name. Typically set via
+      parameter_defaults in the resource registry. This mapping overrides those
+      in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  NeutronVPPAgentPhysnets:
+    description: >
+      List of <physical_network>:<VPP Interface>
+      Example: "physnet1:GigabitEthernet2/2/0,physnet2:GigabitEthernet2/3/0"
+    type: comma_delimited_list
+    default: ""
+
+resources:
+
+  NeutronBase:
+    type: ./neutron-base.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+
+outputs:
+  role_data:
+    description: Role data for the Neutron ML2/VPP agent service.
+    value:
+      service_name: neutron_vpp_agent
+      config_settings:
+        map_merge:
+          - get_attr: [NeutronBase, role_data, config_settings]
+          - tripleo::profile::base::neutron::agents::vpp::physnet_mapping: {get_param: NeutronVPPAgentPhysnets}
+      step_config: |
+        include ::tripleo::profile::base::neutron::agents::vpp
\ No newline at end of file
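A minimal parameter_defaults sketch for the new ML2/VPP agent service, reusing the example mapping from the NeutronVPPAgentPhysnets description above (interface names are illustrative):

parameter_defaults:
  NeutronVPPAgentPhysnets: "physnet1:GigabitEthernet2/2/0,physnet2:GigabitEthernet2/3/0"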
index f27b53f..835edf0 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -62,6 +70,12 @@ parameters:
     default: 300
     description: Timeout for Nova db sync
     type: number
+  NovaApiPolicies:
+    description: |
+      A hash of policies to configure for Nova API.
+      e.g. { nova-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 conditions:
   nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
@@ -75,6 +89,8 @@ resources:
   #     ServiceNetMap: {get_param: ServiceNetMap}
   #     DefaultPasswords: {get_param: DefaultPasswords}
   #     EndpointMap: {get_param: EndpointMap}
+  #     RoleName: {get_param: RoleName}
+  #     RoleParameters: {get_param: RoleParameters}
   #     EnableInternalTLS: {get_param: EnableInternalTLS}
 
   NovaBase:
@@ -83,6 +99,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -110,8 +128,10 @@ outputs:
                 - 13774
                 - 8775
           nova::keystone::authtoken::project_name: 'service'
+          nova::keystone::authtoken::user_domain_name: 'Default'
+          nova::keystone::authtoken::project_domain_name: 'Default'
           nova::keystone::authtoken::password: {get_param: NovaPassword}
-          nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+          nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
           nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
           nova::api::enabled: true
           nova::api::default_floating_pool: {get_param: NovaDefaultFloatingPool}
@@ -143,6 +163,7 @@ outputs:
           nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
           nova::api::instance_name_template: {get_param: InstanceNameTemplate}
           nova_enable_db_purge: {get_param: NovaEnableDBPurge}
+          nova::policy::policies: {get_param: NovaApiPolicies}
         -
           if:
           - nova_workers_zero
@@ -218,14 +239,14 @@ outputs:
        - name: Run puppet apply to set transport_url in nova.conf
           tags: step5
           when: is_bootstrap_node
-          command: puppet apply --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+          command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
           register: puppet_apply_nova_api_upgrade
           failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
           changed_when: puppet_apply_nova_api_upgrade.rc == 2
         - name: Setup cell_v2 (map cell0)
           tags: step5
           when: is_bootstrap_node
-          command: nova-manage cell_v2 map_cell0
+          shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
         - name: Setup cell_v2 (create default cell)
           tags: step5
           when: is_bootstrap_node
@@ -241,15 +262,15 @@ outputs:
           command: nova-manage db sync
           async: {get_param: NovaDbSyncTimeout}
           poll: 10
-        - name: Setup cell_v2 (migrate hosts)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage cell_v2 map_cell_and_hosts
         - name: Setup cell_v2 (get cell uuid)
           tags: step5
           when: is_bootstrap_node
           shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
           register: nova_api_cell_uuid
+        - name: Setup cell_v2 (migrate hosts)
+          tags: step5
+          when: is_bootstrap_node
+          command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
         - name: Setup cell_v2 (migrate instances)
           tags: step5
           when: is_bootstrap_node
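For reference, a sketch of how the new NovaApiPolicies hash could be fed in through parameter_defaults, expanding the example given in the parameter description (the policy shown is illustrative only):

parameter_defaults:
  NovaApiPolicies:
    nova-context_is_admin:
      key: context_is_admin
      value: 'role:admin'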
index ceacb0b..ea21af8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova base service. Shared for all Nova services.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -52,16 +60,20 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
+  DatabaseSyncTimeout:
+    default: 300
+    description: Default timeout for database sync operations.
+    type: number
   Debug:
     type: string
     default: ''
     description: Set to True to enable debugging on all services.
   EnableConfigPurge:
     type: boolean
-    default: true
+    default: false
     description: >
-        Remove configuration that is not generated by TripleO. Setting
-        to false may result in configuration remnants after updates/upgrades.
+        Remove configuration that is not generated by TripleO. Used to avoid
+        configuration remnants after upgrades.
   NovaIPv6:
     default: false
     description: Enable IPv6 features in Nova
@@ -142,35 +154,45 @@ outputs:
           nova::placement::os_region_name: {get_param: KeystoneRegion}
           nova::placement::os_interface: {get_param: NovaPlacementAPIInterface}
           nova::database_connection:
-            list_join:
-              - ''
-              - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                - '://nova:'
-                - {get_param: NovaPassword}
-                - '@'
-                - {get_param: [EndpointMap, MysqlInternal, host]}
-                - '/nova'
-                - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+            make_url:
+              scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+              username: nova
+              password: {get_param: NovaPassword}
+              host: {get_param: [EndpointMap, MysqlInternal, host]}
+              path: /nova
+              query:
+                read_default_file: /etc/my.cnf.d/tripleo.cnf
+                read_default_group: tripleo
+          nova::cell0_database_connection:
+            make_url:
+              scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+              username: nova
+              password: {get_param: NovaPassword}
+              host: {get_param: [EndpointMap, MysqlInternal, host]}
+              path: /nova_cell0
+              query:
+                read_default_file: /etc/my.cnf.d/tripleo.cnf
+                read_default_group: tripleo
           nova::api_database_connection:
-            list_join:
-              - ''
-              - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                - '://nova_api:'
-                - {get_param: NovaPassword}
-                - '@'
-                - {get_param: [EndpointMap, MysqlInternal, host]}
-                - '/nova_api'
-                - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+            make_url:
+              scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+              username: nova_api
+              password: {get_param: NovaPassword}
+              host: {get_param: [EndpointMap, MysqlInternal, host]}
+              path: /nova_api
+              query:
+                read_default_file: /etc/my.cnf.d/tripleo.cnf
+                read_default_group: tripleo
           nova::placement_database_connection:
-            list_join:
-              - ''
-              - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                - '://nova_placement:'
-                - {get_param: NovaPassword}
-                - '@'
-                - {get_param: [EndpointMap, MysqlInternal, host]}
-                - '/nova_placement'
-                - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+            make_url:
+              scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+              username: nova_placement
+              password: {get_param: NovaPassword}
+              host: {get_param: [EndpointMap, MysqlInternal, host]}
+              path: /nova_placement
+              query:
+                read_default_file: /etc/my.cnf.d/tripleo.cnf
+                read_default_group: tripleo
           nova::debug: {get_param: Debug}
           nova::purge_config: {get_param: EnableConfigPurge}
           nova::network::neutron::neutron_project_name: 'service'
@@ -188,6 +210,8 @@ outputs:
           nova::network::neutron::neutron_auth_type: 'v3password'
           nova::db::database_db_max_retries: -1
           nova::db::database_max_retries: -1
+          nova::db::sync::db_sync_timeout: {get_param: DatabaseSyncTimeout}
+          nova::db::sync_api::db_sync_timeout: {get_param: DatabaseSyncTimeout}
           nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
           nova::use_ipv6: {get_param: NovaIPv6}
           nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge}
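A short sketch of the knobs this nova-base hunk introduces or changes, as they might appear in an environment file; the values simply restate the new defaults, and the rendered cell0 URL is approximate (the scheme comes from EndpointMap):

parameter_defaults:
  DatabaseSyncTimeout: 300
  EnableConfigPurge: false
# nova::cell0_database_connection resolves to roughly:
#   <scheme>://nova:<NovaPassword>@<mysql-internal-host>/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo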
index d208bed..16ccb9e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova Compute service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -52,7 +60,7 @@ parameters:
       For different formats, refer to the nova.conf documentation for
       pci_passthrough_whitelist configuration
     type: json
-    default: {}
+    default: ''
   NovaVcpuPinSet:
     description: >
       A list or range of physical CPU cores to reserve for virtual machine
@@ -79,6 +87,13 @@ parameters:
     type: string
     description: Nova Compute upgrade level
     default: auto
+  MigrationSshKey:
+    type: json
+    description: >
+      SSH key for migration.
+      Expects a dictionary with keys 'public_key' and 'private_key'.
+      Values should match the contents of the SSH public/private key files.
+    default: {}
 
 resources:
   NovaBase:
@@ -87,6 +102,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -101,12 +118,22 @@ outputs:
         map_merge:
           - get_attr: [NovaBase, role_data, config_settings]
           - nova::compute::libvirt::manage_libvirt_services: false
-            nova::compute::pci_passthrough: {get_param: NovaPCIPassthrough}
+            nova::compute::pci_passthrough:
+              str_replace:
+                template: "JSON_PARAM"
+                params:
+                  JSON_PARAM: {get_param: NovaPCIPassthrough}
             nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
             nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
             # we manage migration in nova common puppet profile
             nova::compute::libvirt::migration_support: false
             tripleo::profile::base::nova::manage_migration: true
+            tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
+            tripleo::profile::base::nova::migration_ssh_localaddrs:
+              - "%{hiera('cold_migration_ssh_inbound_addr')}"
+              - "%{hiera('live_migration_ssh_inbound_addr')}"
+            live_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+            cold_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaColdMigrationNetwork]}
             tripleo::profile::base::nova::nova_compute_enabled: true
             nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
             nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
@@ -163,6 +190,9 @@ outputs:
               template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
               params:
                 LEVEL: {get_param: UpgradeLevelNovaCompute}
+        - name: install openstack-nova-migration
+          tags: step3
+          yum: name=openstack-nova-migration state=latest
         - name: Start nova-compute service
           tags: step6
           service: name=openstack-nova-compute state=started
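A hedged sketch of the two compute-side parameters this hunk touches. The PCI whitelist entry uses vendor_id/product_id/physical_network keys as one commonly documented nova.conf pci_passthrough_whitelist form (not something this diff defines), and MigrationSshKey uses the 'public_key'/'private_key' keys named in its description; all values are placeholders.

parameter_defaults:
  NovaPCIPassthrough:
    - vendor_id: "8086"
      product_id: "154d"
      physical_network: "physnet_sriov"
  MigrationSshKey:
    public_key: 'ssh-rsa AAAA... placeholder'
    private_key: |
      -----BEGIN RSA PRIVATE KEY-----
      <placeholder>
      -----END RSA PRIVATE KEY-----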
index 4574cae..30eb127 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova Conductor service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -45,6 +53,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 82f329b..fa1168a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova Consoleauth service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -34,6 +42,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 5eb2170..4f66432 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova Compute service configured with Puppet and using Ironic
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -30,6 +38,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -44,10 +54,14 @@ outputs:
             nova::compute::vnc_enabled: false
             nova::ironic::common::password: {get_param: IronicPassword}
             nova::ironic::common::project_name: 'service'
-            nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+            nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             nova::ironic::common::username: 'ironic'
             nova::ironic::common::api_endpoint: {get_param: [EndpointMap, IronicInternal, uri]}
             nova::network::neutron::dhcp_domain: ''
             nova::scheduler::filter::scheduler_host_manager: 'ironic_host_manager'
       step_config: |
         include tripleo::profile::base::nova::compute::ironic
+      upgrade_tasks:
+        - name: Stop openstack-nova-compute service
+          tags: step1
+          service: name=openstack-nova-compute state=stopped enabled=no
index faf1ae4..4e762b5 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Libvirt service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -32,6 +40,48 @@ parameters:
   MonitoringSubscriptionNovaLibvirt:
     default: 'overcloud-nova-libvirt'
     type: string
+  EnableInternalTLS:
+    type: boolean
+    default: false
+  UseTLSTransportForLiveMigration:
+    type: boolean
+    default: true
+    description: If set to true and if EnableInternalTLS is enabled, it will
+                 set the libvirt URI's transport to tls and configure the
+                 relevant keys for libvirt.
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
+  LibvirtCACert:
+    type: string
+    default: ''
+    description: This specifies the CA certificate to use for TLS in libvirt.
+                 This file will be symlinked to the default CA path in libvirt,
+                 which is /etc/pki/CA/cacert.pem. Note that due to limitations
+                 in GNU TLS, which is the TLS backend for libvirt, the file must
+                 be less than 65K (so we can't use the system's CA bundle).
+                 This parameter should be used if the default (which comes from
+                 the InternalTLSCAFile parameter) is not desired. The current
+                 default reflects TripleO's default CA, which is FreeIPA.
+                 It will only be used if internal TLS is enabled.
+
+conditions:
+
+  use_tls_for_live_migration:
+    and:
+    - equals:
+      - {get_param: EnableInternalTLS}
+      - true
+    - equals:
+      - {get_param: UseTLSTransportForLiveMigration}
+      - true
+
+  libvirt_specific_ca_unset:
+    equals:
+      - {get_param: LibvirtCACert}
+      - ''
 
 resources:
   NovaBase:
@@ -40,6 +90,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -66,10 +118,64 @@ outputs:
             tripleo.nova_libvirt.firewall_rules:
               '200 nova_libvirt':
                 dport:
-                  - 16509
                   - 16514
                   - '49152-49215'
                   - '5900-5999'
 
+          -
+            if:
+              - use_tls_for_live_migration
+              -
+                generate_service_certificates: true
+                tripleo::profile::base::nova::libvirt_tls: true
+                nova::migration::libvirt::live_migration_inbound_addr:
+                  str_replace:
+                    template:
+                      "%{hiera('fqdn_$NETWORK')}"
+                    params:
+                      $NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+                tripleo::certmonger::ca::libvirt::origin_ca_pem:
+                  if:
+                    - libvirt_specific_ca_unset
+                    - get_param: InternalTLSCAFile
+                    - get_param: LibvirtCACert
+                tripleo::certmonger::libvirt_dirs::certificate_dir: '/etc/pki/libvirt'
+                tripleo::certmonger::libvirt_dirs::key_dir: '/etc/pki/libvirt/private'
+                libvirt_certificates_specs:
+                  libvirt-server-cert:
+                    service_certificate: '/etc/pki/libvirt/servercert.pem'
+                    service_key: '/etc/pki/libvirt/private/serverkey.pem'
+                    hostname:
+                      str_replace:
+                        template: "%{hiera('fqdn_NETWORK')}"
+                        params:
+                          NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+                    principal:
+                      str_replace:
+                        template: "libvirt/%{hiera('fqdn_NETWORK')}"
+                        params:
+                          NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+                  libvirt-client-cert:
+                    service_certificate: '/etc/pki/libvirt/clientcert.pem'
+                    service_key: '/etc/pki/libvirt/private/clientkey.pem'
+                    hostname:
+                      str_replace:
+                        template: "%{hiera('fqdn_NETWORK')}"
+                        params:
+                          NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+                    principal:
+                      str_replace:
+                        template: "libvirt/%{hiera('fqdn_NETWORK')}"
+                        params:
+                          NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+              - {}
       step_config: |
         include tripleo::profile::base::nova::libvirt
+      metadata_settings:
+        if:
+          - use_tls_for_live_migration
+          -
+            - service: libvirt
+              network: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+              type: node
+          - null
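A minimal sketch of turning on the new TLS transport for live migration, using only parameters defined in this template. EnableInternalTLS is normally driven by the wider internal-TLS environments, which are outside this diff; the commented LibvirtCACert line just restates the template's fallback behaviour.

parameter_defaults:
  EnableInternalTLS: true
  UseTLSTransportForLiveMigration: true
  # Optional override; when unset, InternalTLSCAFile (/etc/ipa/ca.crt) is used:
  # LibvirtCACert: /etc/pki/custom/libvirt-ca.pem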
index 376f95b..335b2c2 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index b59e2fc..86aa079 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova Placement API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -52,6 +60,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       EnableInternalTLS: {get_param: EnableInternalTLS}
 
   NovaBase:
@@ -60,6 +70,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index e4b6bb4..5da6d43 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova Scheduler service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -45,6 +53,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 42335ad..2db44d6 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Nova Vncproxy service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -34,6 +42,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 909a303..e64a00f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Octavia API service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -34,6 +42,12 @@ parameters:
     default:
       tag: openstack.octavia.api
       path: /var/log/octavia/api.log
+  OctaviaApiPolicies:
+    description: |
+      A hash of policies to configure for Octavia API.
+      e.g. { octavia-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
 
@@ -43,6 +57,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -57,16 +73,17 @@ outputs:
         map_merge:
           - get_attr: [OctaviaBase, role_data, config_settings]
           - octavia::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+            octavia::policy::policies: {get_param: OctaviaApiPolicies}
             octavia::db::database_connection:
-              list_join:
-                - ''
-                - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-                  - '://octavia:'
-                  - {get_param: OctaviaPassword}
-                  - '@'
-                  - {get_param: [EndpointMap, MysqlInternal, host]}
-                  - '/octavia'
-                  - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+              make_url:
+                scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+                username: octavia
+                password: {get_param: OctaviaPassword}
+                host: {get_param: [EndpointMap, MysqlInternal, host]}
+                path: /octavia
+                query:
+                  read_default_file: /etc/my.cnf.d/tripleo.cnf
+                  read_default_group: tripleo
             octavia::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             octavia::keystone::authtoken::project_name: 'service'
             octavia::keystone::authtoken::password: {get_param: OctaviaPassword}
@@ -77,7 +94,6 @@ outputs:
                   - 9876
                   - 13876
             octavia::api::host: {get_param: [ServiceNetMap, OctaviaApiNetwork]}
-            neutron::server::service_providers: ['LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default']
       step_config: |
         include tripleo::profile::base::octavia::api
       service_config_settings:
@@ -96,3 +112,5 @@ outputs:
           octavia::db::mysql::allowed_hosts:
             - '%'
             - "%{hiera('mysql_bind_host')}"
+        neutron_api:
+          neutron::server::service_providers: ['LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default']
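Analogous to the Nova API change, the new OctaviaApiPolicies hash can be seeded from parameter_defaults; a sketch based on the example in the parameter description:

parameter_defaults:
  OctaviaApiPolicies:
    octavia-context_is_admin:
      key: context_is_admin
      value: 'role:admin'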
index b537a2b..19dc5b4 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Octavia base service. Shared for all Octavia services
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -24,10 +32,10 @@ parameters:
     description: Set to True to enable debugging on all services.
   EnableConfigPurge:
     type: boolean
-    default: true
+    default: false
     description: >
-        Remove configuration that is not generated by TripleO. Setting
-        to false may result in configuration remnants after updates/upgrades.
+        Remove configuration that is not generated by TripleO. Used to avoid
+        configuration remnants after upgrades.
   RabbitPassword:
     description: The password for RabbitMQ
     type: string
@@ -56,7 +64,7 @@ outputs:
          octavia::debug: {get_param: Debug}
          octavia::purge_config: {get_param: EnableConfigPurge}
          octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
-         tripleo::profile::base::octavia::rabbit_user: {get_param: RabbitUserName}
-         tripleo::profile::base::octavia::rabbit_password: {get_param: RabbitPassword}
-         tripleo::profile::base::octavia::rabbit_port: {get_param: RabbitClientPort}
+         octavia::rabbit_userid: {get_param: RabbitUserName}
+         octavia::rabbit_password: {get_param: RabbitPassword}
+         octavia::rabbit_port: {get_param: RabbitClientPort}
 
index 51d32f2..853567d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Octavia Health Manager service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -39,6 +47,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 84c3343..6c556fa 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Octavia Housekeeping service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -46,6 +54,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 9212b76..4feae41 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Octavia Worker service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -69,6 +77,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 1e7aa47..af85f4a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenDaylight SDN Controller.
@@ -28,7 +28,7 @@ parameters:
   OpenDaylightFeatures:
     description: List of features to install with ODL
     type: comma_delimited_list
-    default: ["odl-netvirt-openstack","odl-netvirt-ui"]
+    default: ["odl-netvirt-openstack","odl-netvirt-ui","odl-jolokia"]
   OpenDaylightApiVirtualIP:
     type: string
     default: ''
@@ -46,6 +46,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
 
 outputs:
   role_data:
@@ -59,12 +67,36 @@ outputs:
         opendaylight::extra_features: {get_param: OpenDaylightFeatures}
         opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
         opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
-        opendaylight::nb_connection_protocol: {get_param: OpenDayLightConnectionProtocol}
+        opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
         tripleo.opendaylight_api.firewall_rules:
               '137 opendaylight api':
                 dport:
                   - {get_param: OpenDaylightPort}
                   - 6640
                   - 6653
+                  - 2550
       step_config: |
         include tripleo::profile::base::neutron::opendaylight
+      upgrade_tasks:
+        - name: Check if opendaylight is deployed
+          command: systemctl is-enabled opendaylight
+          tags: common
+          ignore_errors: True
+          register: opendaylight_enabled
+        - name: "PreUpgrade step0,validation: Check service opendaylight is running"
+          shell: /usr/bin/systemctl show 'opendaylight' --property ActiveState | grep '\bactive\b'
+          when: opendaylight_enabled.rc == 0
+          tags: step0,validation
+        - name: Stop opendaylight service
+          tags: step1
+          when: opendaylight_enabled.rc == 0
+          service: name=opendaylight state=stopped
+        - name: Removes ODL snapshots, data, journal directories
+          file:
+            state: absent
+            path: /opt/opendaylight/{{item}}
+          tags: step2
+          with_items:
+            - snapshots
+            - data
+            - journal
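Deployments that need to trim or extend the expanded feature set (the new default adds odl-jolokia alongside the netvirt features) can override it; a sketch that simply restates the new default:

parameter_defaults:
  OpenDaylightFeatures:
    - odl-netvirt-openstack
    - odl-netvirt-ui
    - odl-jolokia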
index cfec3c4..0d859be 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenDaylight OVS Configuration.
@@ -47,6 +47,18 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+  OpenVswitchUpgrade:
+    type: ./openvswitch-upgrade.yaml
 
 outputs:
   role_data:
@@ -60,11 +72,7 @@ outputs:
         opendaylight_check_url: {get_param: OpenDaylightCheckURL}
         opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
         neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
-        neutron::plugins::ovs::opendaylight::provider_mappings:
-          str_replace:
-            template: MAPPINGS
-            params:
-              MAPPINGS: {get_param: OpenDaylightProviderMappings}
+        neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
         tripleo.opendaylight_ovs.firewall_rules:
           '118 neutron vxlan networks':
              proto: 'udp'
@@ -73,3 +81,23 @@ outputs:
              proto: 'gre'
       step_config: |
         include tripleo::profile::base::neutron::plugins::ovs::opendaylight
+      upgrade_tasks:
+        yaql:
+          expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
+          data:
+            ovs_upgrade:
+              get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+            opendaylight_upgrade:
+              - name: Check if openvswitch is deployed
+                command: systemctl is-enabled openvswitch
+                tags: common
+                ignore_errors: True
+                register: openvswitch_enabled
+              - name: "PreUpgrade step0,validation: Check service openvswitch is running"
+                shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b'
+                when: openvswitch_enabled.rc == 0
+                tags: step0,validation
+              - name: Stop openvswitch service
+                tags: step1
+                when: openvswitch_enabled.rc == 0
+                service: name=openvswitch state=stopped
diff --git a/puppet/services/openvswitch-upgrade.yaml b/puppet/services/openvswitch-upgrade.yaml
new file mode 100644 (file)
index 0000000..f6e7846
--- /dev/null
@@ -0,0 +1,50 @@
+heat_template_version: pike
+
+description: >
+  Special handling of the Open vSwitch (OVS) package during upgrade.
+
+outputs:
+  role_data:
+    description: Upgrade tasks for special handling of the Open vSwitch (OVS) package during upgrade.
+    value:
+      service_name: openvswitch_upgrade
+      upgrade_tasks:
+        - name: Check openvswitch version.
+          tags: step2
+          register: ovs_version
+          ignore_errors: true
+          shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+        - name: Check openvswitch packaging.
+          tags: step2
+          shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+          register: ovs_packaging_issue
+          ignore_errors: true
+        - block:
+            - name: "Ensure empty directory: emptying."
+              file:
+                state: absent
+                path: /root/OVS_UPGRADE
+            - name: "Ensure empty directory: creating."
+              file:
+                state: directory
+                path: /root/OVS_UPGRADE
+                owner: root
+                group: root
+                mode: 0750
+            - name: Download OVS packages.
+              command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+            - name: Get rpm list for manual upgrade of OVS.
+              shell: ls -1 /root/OVS_UPGRADE/*.rpm
+              register: ovs_list_of_rpms
+            - name: Manual upgrade of OVS
+              shell: |
+                rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+                rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+              args:
+                chdir: /root/OVS_UPGRADE
+              with_items:
+                - "{{ovs_list_of_rpms.stdout_lines}}"
+          tags: step2
+          when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+                or
+                ovs_packaging_issue|succeeded"
index 7f81afd..20c38d8 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OVN databases configured with puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -36,5 +44,11 @@ outputs:
           ovn::northbound::port: {get_param: OVNNorthboundServerPort}
           ovn::southbound::port: {get_param: OVNSouthboundServerPort}
           ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+          tripleo.ovn_dbs.firewall_rules:
+            '121 OVN DB server ports':
+              proto: 'tcp'
+              dport:
+                - {get_param: OVNNorthboundServerPort}
+                - {get_param: OVNSouthboundServerPort}
       step_config: |
         include ::tripleo::profile::base::neutron::ovn_northd
index 5be58c1..1c89011 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Pacemaker service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -87,10 +95,16 @@ parameters:
         \[(?<pid>[^ ]*)\]
         (?<host>[^ ]*)
         (?<message>.*)$/
+
+  EnableLoadBalancer:
+    default: true
+    description: Whether to deploy a LoadBalancer on the Controller
+    type: boolean
+
   PacemakerResources:
     type: comma_delimited_list
     description: List of resources managed by pacemaker
-    default: ['rabbitmq','haproxy']
+    default: ['rabbitmq', 'galera']
 
 outputs:
   role_data:
@@ -135,6 +149,8 @@ outputs:
         - name: Check pacemaker cluster running before upgrade
           tags: step0,validation
           pacemaker_cluster: state=online check_and_fail=true
+          async: 30
+          poll: 4
         - name: Stop pacemaker cluster
           tags: step2
           pacemaker_cluster: state=offline
@@ -143,5 +159,13 @@ outputs:
           pacemaker_cluster: state=online
         - name: Check pacemaker resource
           tags: step4
-          pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=500
+          pacemaker_is_active:
+            resource: "{{ item }}"
+            max_wait: 500
           with_items: {get_param: PacemakerResources}
+        - name: Check pacemaker haproxy resource
+          tags: step4
+          pacemaker_is_active:
+            resource: haproxy
+            max_wait: 500
+          when: {get_param: EnableLoadBalancer}
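A sketch of the two pacemaker knobs this hunk touches; the values restate the new defaults (haproxy is now validated by a separate task that only runs when EnableLoadBalancer is true):

parameter_defaults:
  EnableLoadBalancer: true
  PacemakerResources:
    - rabbitmq
    - galera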
index 7686028..7ecb64d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Ceph RBD mirror service.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -29,6 +37,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index e75ac97..d888d4a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Cinder Backup service with Pacemaker configured with Puppet
@@ -25,6 +25,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -39,6 +47,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       CinderBackupBackend: {get_param: CinderBackupBackend}
       CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
       CephClientUserName: {get_param: CephClientUserName}
index bef47a5..659368a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Cinder Volume service with Pacemaker configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -27,6 +35,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index 93bf596..d8e942d 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   MySQL with Pacemaker service deployment using puppet
@@ -14,6 +14,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -28,6 +36,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index e702d28..5bc28ed 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Redis service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -26,6 +34,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -37,5 +47,6 @@ outputs:
           - get_attr: [RedisBase, role_data, config_settings]
           - redis::service_manage: false
             redis::notify_service: false
+            redis::managed_by_cluster_manager: true
       step_config: |
         include ::tripleo::profile::pacemaker::database::redis
index 598deae..0fb8393 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   HAproxy service with Pacemaker configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -26,6 +34,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index ddc13df..12f6529 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   The manila-share service with Pacemaker configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -27,6 +35,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
index b018df3..7925720 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   RabbitMQ service with Pacemaker configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -26,6 +34,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -39,32 +49,5 @@ outputs:
           - rabbitmq::service_manage: false
       step_config: |
         include ::tripleo::profile::pacemaker::rabbitmq
-      upgrade_tasks:
-        - name: get bootstrap nodeid
-          tags: common
-          command: hiera bootstrap_nodeid
-          register: bootstrap_node
-        - name: set is_bootstrap_node fact
-          tags: common
-          set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
-        - name: get rabbitmq policy
-          tags: common
-          shell: pcs resource show rabbitmq | grep -q -E "Attributes:.*\"ha-mode\":\"all\""
-          register: rabbit_ha_mode
-          when: is_bootstrap_node
-          ignore_errors: true
-        - name: set migrate_rabbit_ha_mode fact
-          tags: common
-          set_fact: migrate_rabbit_ha_mode={{rabbit_ha_mode.rc == 0}}
-          when: is_bootstrap_node
-        - name: Fixup for rabbitmq ha-queues LP#1668600
-          tags: step0,pre-upgrade
-          shell: |
-            nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
-            nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
-            if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
-                echo "ERROR: The nr. of HA queues during the rabbit upgrade is out of range: $nr_queues"
-                exit 1
-            fi
-            pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
-          when: is_bootstrap_node and migrate_rabbit_ha_mode
+      metadata_settings:
+        get_attr: [RabbitMQServiceBase, role_data, metadata_settings]
index daee43e..74aaf59 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Pacemaker remote service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index eed9825..a41e34f 100644 (file)
@@ -1,7 +1,9 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
-  OpenStack Panko API service configured with Puppet
+  OpenStack Panko API service configured with Puppet.
+  Note: this service is deprecated in the Pike release and will
+  be disabled in future releases.
 
 parameters:
   ServiceNetMap:
@@ -13,6 +15,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -24,6 +34,12 @@ parameters:
   EnableInternalTLS:
     type: boolean
     default: false
+  PankoApiPolicies:
+    description: |
+      A hash of policies to configure for Panko API.
+      e.g. { panko-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
   PankoBase:
@@ -32,6 +48,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
   ApacheServiceBase:
     type: ./apache.yaml
@@ -39,6 +57,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
       EnableInternalTLS: {get_param: EnableInternalTLS}
 
 outputs:
@@ -58,6 +78,7 @@ outputs:
                   "%{hiera('fqdn_$NETWORK')}"
                 params:
                   $NETWORK: {get_param: [ServiceNetMap, PankoApiNetwork]}
+            panko::policy::policies: {get_param: PankoApiPolicies}
             panko::api::service_name: 'httpd'
             panko::api::enable_proxy_headers_parsing: true
             tripleo.panko_api.firewall_rules:
@@ -85,21 +106,27 @@ outputs:
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: Check if httpd is deployed
-          command: systemctl is-enabled httpd
-          tags: common
-          ignore_errors: True
-          register: httpd_enabled
-        - name: "PreUpgrade step0,validation: Check if httpd is running"
-          shell: >
-            /usr/bin/systemctl show 'httpd' --property ActiveState |
-            grep '\bactive\b'
-          when: httpd_enabled.rc == 0
-          tags: step0,validation
-        - name: Stop panko-api service (running under httpd)
-          tags: step1
-          service: name=httpd state=stopped
-          when: httpd_enabled.rc == 0
-        - name: Install openstack-panko-api package if it was not installed
-          tags: step3
-          yum: name=openstack-panko-api state=latest
+        yaql:
+          expression: $.data.apache_upgrade + $.data.panko_api_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            panko_api_upgrade:
+              - name: Check if httpd is deployed
+                command: systemctl is-enabled httpd
+                tags: common
+                ignore_errors: True
+                register: httpd_enabled
+              - name: "PreUpgrade step0,validation: Check if httpd is running"
+                shell: >
+                  /usr/bin/systemctl show 'httpd' --property ActiveState |
+                  grep '\bactive\b'
+                when: httpd_enabled.rc == 0
+                tags: step0,validation
+              - name: Stop panko-api service (running under httpd)
+                tags: step1
+                service: name=httpd state=stopped
+                when: httpd_enabled.rc == 0
+              - name: Install openstack-panko-api package if it was not installed
+                tags: step3
+                yum: name=openstack-panko-api state=latest
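
The upgrade_tasks rewrite above concatenates the Apache base tasks with the
Panko-specific tasks through a yaql expression instead of listing them inline.
A minimal sketch of that list-concatenation pattern, with made-up task names:

  outputs:
    example_upgrade_tasks:
      value:
        yaql:
          # $.data.first + $.data.second simply concatenates the two lists in order
          expression: $.data.first + $.data.second
          data:
            first:
              - name: task inherited from the base template
                tags: common
                command: /bin/true
            second:
              - name: service-specific task
                tags: step1
                command: /bin/true
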
index 998e64e..84817bc 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Panko service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -38,20 +46,22 @@ outputs:
       service_name: panko_base
       config_settings:
         panko::db::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://panko:'
-              - {get_param: PankoPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/panko'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: panko
+            password: {get_param: PankoPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /panko
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
         panko::debug: {get_param: Debug}
         panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         panko::keystone::authtoken::project_name: 'service'
+        panko::keystone::authtoken::user_domain_name: 'Default'
+        panko::keystone::authtoken::project_domain_name: 'Default'
         panko::keystone::authtoken::password: {get_param: PankoPassword}
-        panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+        panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         panko::auth::auth_password: {get_param: PankoPassword}
         panko::auth::auth_region: 'regionOne'
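
The database_connection hunks in this change replace the old list_join string
concatenation with the make_url intrinsic available in the pike template
version, which builds scheme://user:password@host/path?query from named parts.
A minimal sketch with illustrative values (the scheme, password and host below
are placeholders, not taken from this change):

  panko::db::database_connection:
    make_url:
      scheme: mysql+pymysql
      username: panko
      password: example-password
      host: 192.0.2.10
      path: /panko
      query:
        read_default_file: /etc/my.cnf.d/tripleo.cnf
        read_default_group: tripleo
  # yields a URL of the form:
  # mysql+pymysql://panko:example-password@192.0.2.10/panko?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo
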
diff --git a/puppet/services/qdr.yaml b/puppet/services/qdr.yaml
new file mode 100644 (file)
index 0000000..0659a94
--- /dev/null
@@ -0,0 +1,68 @@
+heat_template_version: pike
+
+description: >
+  Qpid dispatch router service configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  RabbitUserName:
+    default: guest
+    description: The username for Qdr
+    type: string
+  RabbitPassword:
+    description: The password for Qdr
+    type: string
+    hidden: true
+  RabbitClientPort:
+    description: Listening port for Qdr
+    default: 5672
+    type: number
+  MonitoringSubscriptionQdr:
+    default: 'overcloud-qdr'
+    type: string
+
+outputs:
+  role_data:
+    description: Role data for the Qdr role.
+    value:
+      service_name: rabbitmq
+      monitoring_subscription: {get_param: MonitoringSubscriptionQdr}
+      global_config_settings:
+        messaging_notify_service_name: 'amqp'
+        messaging_rpc_service_name: 'amqp'
+        keystone::messaging::amqp::amqp_pre_settled: 'notify'
+      config_settings:
+        tripleo.rabbitmq.firewall_rules:
+          '109 qdr':
+            dport:
+              - {get_param: RabbitClientPort}
+        qdr::listener_addr: {get_param: [ServiceNetMap, QdrNetwork]}
+        # cannot pass qdr::listener_port directly because it needs to be a string
+        # we do the conversion in the puppet layer
+        tripleo::profile::base::qdr::qdr_listener_port: {get_param: RabbitClientPort}
+        tripleo::profile::base::qdr::qdr_username: {get_param: RabbitUserName}
+        tripleo::profile::base::qdr::qdr_password: {get_param: RabbitPassword}
+
+      step_config: |
+        include ::tripleo::profile::base::qdr
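
Because the Qdr template reports service_name rabbitmq, it is enabled by
pointing the RabbitMQ service entry at this file in the resource registry.
A hedged sketch of such an environment file; the registry key follows the
usual OS::TripleO::Services naming, so check the environments shipped with
the templates before relying on it:

  # qdr-example.yaml (illustrative file name)
  resource_registry:
    # Substitute the Qdr template for the default RabbitMQ implementation.
    OS::TripleO::Services::RabbitMQ: ../puppet/services/qdr.yaml
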
index 2c4ccbc..1a42fda 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   RabbitMQ service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -40,14 +48,20 @@ parameters:
     hidden: true
   RabbitHAQueues:
     description:
-      The number of HA queues to be configured in rabbit. The default is 0 which will
-      be automatically overridden to CEIL(N/2) where N is the number of nodes running
-      rabbitmq.
-    default: 0
+      The number of HA queues to be configured in rabbit. The default is -1 which
+      translates to "ha-mode all". The special value 0 will be automatically
+      overridden to CEIL(N/2) where N is the number of nodes running rabbitmq.
+    default: -1
     type: number
   MonitoringSubscriptionRabbitmq:
     default: 'overcloud-rabbitmq'
     type: string
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
 
 outputs:
   role_data:
@@ -56,51 +70,79 @@ outputs:
       service_name: rabbitmq
       monitoring_subscription: {get_param: MonitoringSubscriptionRabbitmq}
       config_settings:
-        rabbitmq::file_limit: {get_param: RabbitFDLimit}
-        rabbitmq::default_user: {get_param: RabbitUserName}
-        rabbitmq::default_pass: {get_param: RabbitPassword}
-        rabbit_ipv6: {get_param: RabbitIPv6}
-        tripleo.rabbitmq.firewall_rules:
-          '109 rabbitmq':
-            dport:
-              - 4369
-              - 5672
-              - 25672
-        rabbitmq::delete_guest_user: false
-        rabbitmq::wipe_db_on_cookie_change: true
-        rabbitmq::port: '5672'
-        rabbitmq::package_provider: yum
-        rabbitmq::package_source: undef
-        rabbitmq::repos_ensure: false
-        rabbitmq::tcp_keepalive: true
-        rabbitmq_environment:
-          NODE_PORT: ''
-          NODE_IP_ADDRESS: ''
-          RABBITMQ_NODENAME: "rabbit@%{::hostname}"
-          RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
-          'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
-        rabbitmq_kernel_variables:
-          inet_dist_listen_min: '25672'
-          inet_dist_listen_max: '25672'
-        rabbitmq_config_variables:
-          cluster_partition_handling: 'pause_minority'
-          queue_master_locator: '<<"min-masters">>'
-          loopback_users: '[]'
-        rabbitmq::erlang_cookie:
-          yaql:
-            expression: $.data.passwords.where($ != '').first()
-            data:
-              passwords:
-                - {get_param: RabbitCookie}
-                - {get_param: [DefaultPasswords, rabbit_cookie]}
-        # NOTE: bind IP is found in Heat replacing the network name with the
-        # local node IP for the given network; replacement examples
-        # (eg. for internal_api):
-        # internal_api -> IP
-        # internal_api_uri -> [IP]
-        # internal_api_subnet - > IP/CIDR
-        rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
-        rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+        map_merge:
+          -
+            rabbitmq::file_limit: {get_param: RabbitFDLimit}
+            rabbitmq::default_user: {get_param: RabbitUserName}
+            rabbitmq::default_pass: {get_param: RabbitPassword}
+            rabbit_ipv6: {get_param: RabbitIPv6}
+            tripleo.rabbitmq.firewall_rules:
+              '109 rabbitmq':
+                dport:
+                  - 4369
+                  - 5672
+                  - 25672
+            rabbitmq::delete_guest_user: false
+            rabbitmq::wipe_db_on_cookie_change: true
+            rabbitmq::port: '5672'
+            rabbitmq::package_provider: yum
+            rabbitmq::package_source: undef
+            rabbitmq::repos_ensure: false
+            rabbitmq::tcp_keepalive: true
+            rabbitmq_environment:
+              NODE_PORT: ''
+              NODE_IP_ADDRESS: ''
+              RABBITMQ_NODENAME: "rabbit@%{::hostname}"
+              RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+              'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
+            rabbitmq_kernel_variables:
+              inet_dist_listen_min: '25672'
+              inet_dist_listen_max: '25672'
+            rabbitmq_config_variables:
+              cluster_partition_handling: 'pause_minority'
+              queue_master_locator: '<<"min-masters">>'
+              loopback_users: '[]'
+            rabbitmq::erlang_cookie:
+              yaql:
+                expression: $.data.passwords.where($ != '').first()
+                data:
+                  passwords:
+                    - {get_param: RabbitCookie}
+                    - {get_param: [DefaultPasswords, rabbit_cookie]}
+            # NOTE: bind IP is found in Heat replacing the network name with the
+            # local node IP for the given network; replacement examples
+            # (eg. for internal_api):
+            # internal_api -> IP
+            # internal_api_uri -> [IP]
+            # internal_api_subnet - > IP/CIDR
+            rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+            rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+            rabbitmq::ssl: {get_param: EnableInternalTLS}
+            rabbitmq::ssl_port: '5672'
+            rabbitmq::ssl_depth: 1
+            rabbitmq::ssl_only: {get_param: EnableInternalTLS}
+            rabbitmq::ssl_interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+            # TODO(jaosorior): Remove this once we set a proper default in
+            # puppet-tripleo
+            tripleo::profile::base::rabbitmq::enable_internal_tls: {get_param: EnableInternalTLS}
+          -
+            if:
+            - internal_tls_enabled
+            - generate_service_certificates: true
+              tripleo::profile::base::rabbitmq::certificate_specs:
+                service_certificate: '/etc/pki/tls/certs/rabbitmq.crt'
+                service_key: '/etc/pki/tls/private/rabbitmq.key'
+                hostname:
+                  str_replace:
+                    template: "%{hiera('fqdn_NETWORK')}"
+                    params:
+                      NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+                principal:
+                  str_replace:
+                    template: "rabbitmq/%{hiera('fqdn_NETWORK')}"
+                    params:
+                      NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+            - {}
       step_config: |
         include ::tripleo::profile::base::rabbitmq
       upgrade_tasks:
@@ -110,4 +152,11 @@ outputs:
         - name: Start rabbitmq service
           tags: step4
           service: name=rabbitmq-server state=started
-
+      metadata_settings:
+        if:
+          - internal_tls_enabled
+          -
+            - service: rabbitmq
+              network: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+              type: node
+          - null
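
The EnableInternalTLS handling above uses the pike-style conditions block
together with the if intrinsic so that the certificate hieradata is merged
only when TLS is enabled. A minimal sketch of that pattern with illustrative
keys:

  conditions:
    internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}

  outputs:
    example_settings:
      value:
        map_merge:
          - always::present: true
          - if:
            - internal_tls_enabled
            - tls::only::setting: true   # merged when the condition holds
            - {}                         # empty map otherwise
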
diff --git a/puppet/services/releasenotes/notes/mod_ssl-e7fd4db71189242e.yaml b/puppet/services/releasenotes/notes/mod_ssl-e7fd4db71189242e.yaml
new file mode 100644 (file)
index 0000000..eb7b513
--- /dev/null
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - When a service is deployed in WSGI with Apache, make sure the mod_ssl
+    package is installed during the upgrade process; it is now required
+    by default so Apache can start properly.
index 96b3d6e..3df4ce7 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Sahara API service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -38,6 +46,12 @@ parameters:
     default:
       tag: openstack.sahara.api
       path: /var/log/sahara/sahara-api.log
+  SaharaApiPolicies:
+    description: |
+      A hash of policies to configure for Sahara API.
+      e.g. { sahara-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 resources:
   SaharaBase:
@@ -46,6 +60,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
@@ -60,6 +76,7 @@ outputs:
         map_merge:
           - get_attr: [SaharaBase, role_data, config_settings]
           - sahara::port: {get_param: [EndpointMap, SaharaInternal, port]}
+            sahara::policy::policies: {get_param: SaharaApiPolicies}
             sahara::service::api::api_workers: {get_param: SaharaWorkers}
             # NOTE: bind IP is found in Heat replacing the network name with the local node IP
             # for the given network; replacement examples (eg. for internal_api):
index 224989b..1ee6d17 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Sahara base service. Shared for all Sahara services.
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -56,26 +64,28 @@ outputs:
       service_name: sahara_base
       config_settings:
         sahara::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://sahara:'
-              - {get_param: SaharaPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/sahara'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: sahara
+            password: {get_param: SaharaPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /sahara
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
         sahara::rabbit_password: {get_param: RabbitPassword}
         sahara::rabbit_user: {get_param: RabbitUserName}
         sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         sahara::rabbit_port: {get_param: RabbitClientPort}
         sahara::debug: {get_param: Debug}
+        # Remove admin_password when https://review.openstack.org/442619 is merged.
         sahara::admin_password: {get_param: SaharaPassword}
-        sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
-        sahara::identity_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
         sahara::use_neutron: true
         sahara::plugins: {get_param: SaharaPlugins}
         sahara::rpc_backend: rabbit
-        sahara::admin_tenant_name: 'service'
         sahara::db::database_db_max_retries: -1
         sahara::db::database_max_retries: -1
+        sahara::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+        sahara::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+        sahara::keystone::authtoken::password: {get_param: SaharaPassword}
+        sahara::keystone::authtoken::project_name: 'service'
index c0b6b3e..b6c108e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Sahara Engine service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -34,6 +42,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 outputs:
   role_data:
diff --git a/puppet/services/securetty.yaml b/puppet/services/securetty.yaml
new file mode 100644 (file)
index 0000000..84a370f
--- /dev/null
@@ -0,0 +1,44 @@
+heat_template_version: pike
+
+description: >
+  Configure securetty values
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  TtyValues:
+    default: {}
+    description: Configures console values in securetty
+    type: json
+    constraints:
+      - length: { min: 1}
+
+outputs:
+  role_data:
+    description: Console data for the securetty
+    value:
+      service_name: securetty
+      config_settings:
+        tripleo::profile::base::securetty::tty_list: {get_param: TtyValues}
+      step_config: |
+        include ::tripleo::profile::base::securetty
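
TtyValues is a json parameter with a minimum-length constraint, so a
deployment enabling this service has to pass at least one console entry.
A hedged sketch of a parameter_defaults override (the tty names are
illustrative):

  parameter_defaults:
    TtyValues:
      - console
      - tty1
      - tty2
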
index a2286d1..0e7b6d2 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Utility stack to convert an array of services into a set of combined
@@ -26,6 +26,14 @@ parameters:
     description: Mapping of service -> default password. Used to help
                  pass top level passwords managed by Heat into services.
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    description: Role Specific parameters to be provided to service
+    default: {}
+    type: json
 
 resources:
 
@@ -38,6 +46,8 @@ resources:
         ServiceNetMap: {get_param: ServiceNetMap}
         EndpointMap: {get_param: EndpointMap}
         DefaultPasswords: {get_param: DefaultPasswords}
+        RoleName: {get_param: RoleName}
+        RoleParameters: {get_param: RoleParameters}
 
   LoggingConfiguration:
     type: OS::TripleO::LoggingConfiguration
@@ -90,14 +100,11 @@ outputs:
         # fluentd user.
         yaql:
           expression: >
-            set($.data.groups.flatten()).where($)
+            set(($.data.default + $.data.extra + $.data.role_data.where($ != null).select($.get('logging_groups'))).flatten()).where($)
           data:
-            groups:
-              - [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}]
-              - yaql:
-                  expression: list($.data.role_data.where($ != null).select($.get('logging_groups')).where($ != null))
-                  data: {role_data: {get_attr: [ServiceChain, role_data]}}
-              - [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}]
+            default: {get_attr: [LoggingConfiguration, LoggingDefaultGroups]}
+            extra: {get_attr: [LoggingConfiguration, LoggingExtraGroups]}
+            role_data: {get_attr: [ServiceChain, role_data]}
       config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
       global_config_settings:
         map_merge:
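
The rewritten yaql expression above folds the default groups, the extra
groups and the per-service logging_groups into a single de-duplicated set in
one pass. A toy illustration of what it evaluates to (the data values are
made up):

  yaql:
    expression: >
      set(($.data.default + $.data.extra + $.data.role_data.where($ != null).select($.get('logging_groups'))).flatten()).where($)
    data:
      default: [fluentd]
      extra: [adm]
      role_data:
        - {logging_groups: [nova]}
        - {logging_groups: [nova, neutron]}
  # evaluates to a set such as [fluentd, adm, nova, neutron], with duplicates
  # and empty entries dropped
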
index 80c29f9..ffa5d31 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   SNMP client configured with Puppet, to facilitate Ceilometer Hardware
@@ -15,6 +15,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -28,6 +36,14 @@ parameters:
     description: The user password for SNMPd with readonly rights running on all Overcloud nodes
     type: string
     hidden: true
+  SnmpdBindHost:
+    description: An array of bind host addresses on which the SNMP daemon will listen.
+    type: comma_delimited_list
+    default: ['udp:161','udp6:[::1]:161']
+  SnmpdOptions:
+    description: A string containing the command-line options passed to snmpd.
+    type: string
+    default: '-LS0-5d'
 
 outputs:
   role_data:
@@ -37,6 +53,8 @@ outputs:
       config_settings:
         tripleo::profile::base::snmp::snmpd_user: {get_param: SnmpdReadonlyUserName}
         tripleo::profile::base::snmp::snmpd_password: {get_param: SnmpdReadonlyUserPassword}
+        snmp::agentaddress: {get_param: SnmpdBindHost}
+        snmp::snmpd_options: {get_param: SnmpdOptions}
         tripleo.snmp.firewall_rules:
           '127 snmp':
             dport: 161
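
The new SnmpdBindHost and SnmpdOptions parameters can be overridden from an
environment file; a minimal sketch restricting snmpd to a single address
(the address is an example value):

  parameter_defaults:
    SnmpdBindHost:
      - udp:192.0.2.10:161    # listen only on this IPv4 address
    SnmpdOptions: '-LS0-5d'   # keep the default logging options
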
index 41e144a..30058f0 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Configure sshd_config
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -22,6 +30,33 @@ parameters:
     default: ''
     description: Configures Banner text in sshd_config
     type: string
+  MessageOfTheDay:
+    default: ''
+    description: Configures /etc/motd text
+    type: string
+  SshServerOptions:
+    default:
+      HostKey:
+        - '/etc/ssh/ssh_host_rsa_key'
+        - '/etc/ssh/ssh_host_ecdsa_key'
+        - '/etc/ssh/ssh_host_ed25519_key'
+      SyslogFacility: 'AUTHPRIV'
+      AuthorizedKeysFile: '.ssh/authorized_keys'
+      PasswordAuthentication: 'no'
+      ChallengeResponseAuthentication: 'no'
+      GSSAPIAuthentication: 'yes'
+      GSSAPICleanupCredentials: 'no'
+      UsePAM: 'yes'
+      X11Forwarding: 'yes'
+      UsePrivilegeSeparation: 'sandbox'
+      AcceptEnv:
+        - 'LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES'
+        - 'LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT'
+        - 'LC_IDENTIFICATION LC_ALL LANGUAGE'
+        - 'XMODIFIERS'
+      Subsystem: 'sftp  /usr/libexec/openssh/sftp-server'
+    description: Mapping of sshd_config values
+    type: json
 
 outputs:
   role_data:
@@ -29,6 +64,8 @@ outputs:
     value:
       service_name: sshd
       config_settings:
-        BannerText: {get_param: BannerText}
+        tripleo::profile::base::sshd::bannertext: {get_param: BannerText}
+        tripleo::profile::base::sshd::motd: {get_param: MessageOfTheDay}
+        tripleo::profile::base::sshd::options: {get_param: SshServerOptions}
       step_config: |
         include ::tripleo::profile::base::sshd
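
BannerText, MessageOfTheDay and SshServerOptions now map directly onto the
tripleo::profile::base::sshd hieradata, so sshd_config can be tuned from an
environment file. A hedged sketch with illustrative values; note that
overriding SshServerOptions replaces the whole default mapping shown above,
so carry over any defaults that are still needed:

  parameter_defaults:
    BannerText: 'Authorized use only.'
    MessageOfTheDay: 'This node is managed by TripleO.'
    SshServerOptions:
      PasswordAuthentication: 'no'
      X11Forwarding: 'no'
      Subsystem: 'sftp  /usr/libexec/openssh/sftp-server'
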
index 6046d5e..3066aec 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Swift Proxy service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 9b0d2de..f3b7ee4 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Swift Proxy service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -31,9 +39,9 @@ parameters:
     description: Timeout for requests going from swift-proxy to swift a/c/o services.
     type: number
   SwiftWorkers:
-    default: 0
+    default: auto
     description: Number of workers for Swift service.
-    type: number
+    type: string
   KeystoneRegion:
     type: string
     default: 'regionOne'
@@ -63,10 +71,14 @@ parameters:
         Rabbit client subscriber parameter to specify
         an SSL connection to the RabbitMQ host.
     type: string
+  EnableInternalTLS:
+    type: boolean
+    default: false
 
 conditions:
 
   ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
+  use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
 
 resources:
   SwiftBase:
@@ -75,6 +87,16 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+  TLSProxyBase:
+    type: OS::TripleO::Services::TLSProxyBase
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
 
 outputs:
   role_data:
@@ -85,7 +107,7 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [SwiftBase, role_data, config_settings]
-
+          - get_attr: [TLSProxyBase, role_data, config_settings]
           - swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
             swift::proxy::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             swift::proxy::authtoken::password: {get_param: SwiftPassword}
@@ -146,7 +168,22 @@ outputs:
             # internal_api -> IP
             # internal_api_uri -> [IP]
             # internal_api_subnet - > IP/CIDR
-            swift::proxy::proxy_local_net_ip: {get_param: [ServiceNetMap, SwiftProxyNetwork]}
+            tripleo::profile::base::swift::proxy::tls_proxy_bind_ip:
+              get_param: [ServiceNetMap, SwiftProxyNetwork]
+            tripleo::profile::base::swift::proxy::tls_proxy_fqdn:
+              str_replace:
+                template:
+                  "%{hiera('fqdn_$NETWORK')}"
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, SwiftProxyNetwork]}
+            tripleo::profile::base::swift::proxy::tls_proxy_port:
+              get_param: [EndpointMap, SwiftInternal, port]
+            swift::proxy::port: {get_param: [EndpointMap, SwiftInternal, port]}
+            swift::proxy::proxy_local_net_ip:
+              if:
+              - use_tls_proxy
+              - 'localhost'
+              - {get_param: [ServiceNetMap, SwiftProxyNetwork]}
       step_config: |
         include ::tripleo::profile::base::swift::proxy
       service_config_settings:
@@ -169,3 +206,5 @@ outputs:
         - name: Stop swift_proxy service
           tags: step1
           service: name=openstack-swift-proxy state=stopped
+      metadata_settings:
+        get_attr: [TLSProxyBase, role_data, metadata_settings]
index 2e3c818..3808dbc 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Swift Ringbuilder
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -42,6 +50,14 @@ parameters:
     default: true
     description: 'Use a local directory for Swift storage services when building rings'
     type: boolean
+  SwiftRingGetTempurl:
+    default: ''
+    description: A temporary Swift URL to download rings from.
+    type: string
+  SwiftRingPutTempurl:
+    default: ''
+    description: A temporary Swift URL to upload rings to.
+    type: string
 
 conditions:
   swift_use_local_dir:
@@ -59,6 +75,8 @@ outputs:
     value:
       service_name: swift_ringbuilder
       config_settings:
+        tripleo::profile::base::swift::ringbuilder::swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
+        tripleo::profile::base::swift::ringbuilder::swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
         tripleo::profile::base::swift::ringbuilder::build_ring: {get_param: SwiftRingBuild}
         tripleo::profile::base::swift::ringbuilder::replicas: {get_param: SwiftReplicas}
         tripleo::profile::base::swift::ringbuilder::part_power: {get_param: SwiftPartPower}
index 261aade..f1a9b93 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Swift Storage service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -55,6 +63,8 @@ resources:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
 
 conditions:
   swift_mount_check:
index 6f92066..e121feb 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   OpenStack Tacker service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -47,6 +55,12 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
+  TackerPolicies:
+    description: |
+      A hash of policies to configure for Tacker.
+      e.g. { tacker-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
 
 outputs:
   role_data:
@@ -56,15 +70,15 @@ outputs:
       config_settings:
         tacker_password: {get_param: TackerPassword}
         tacker::db::database_connection:
-          list_join:
-            - ''
-            - - {get_param: [EndpointMap, MysqlInternal, protocol]}
-              - '://tacker:'
-              - {get_param: TackerPassword}
-              - '@'
-              - {get_param: [EndpointMap, MysqlInternal, host]}
-              - '/tacker'
-              - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          make_url:
+            scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+            username: tacker
+            password: {get_param: TackerPassword}
+            host: {get_param: [EndpointMap, MysqlInternal, host]}
+            path: /tacker
+            query:
+              read_default_file: /etc/my.cnf.d/tripleo.cnf
+              read_default_group: tripleo
 
         tacker::debug: {get_param: Debug}
         tacker::rpc_backend: rabbit
@@ -75,8 +89,10 @@ outputs:
         tacker::server::bind_host: {get_param: [ServiceNetMap, TackerApiNetwork]}
 
         tacker::keystone::authtoken::project_name: 'service'
-        tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
-        tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+        tacker::keystone::authtoken::user_domain_name: 'Default'
+        tacker::keystone::authtoken::project_domain_name: 'Default'
+        tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+        tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
 
         tacker::db::mysql::password: {get_param: TackerPassword}
         tacker::db::mysql::user: tacker
@@ -85,10 +101,12 @@ outputs:
         tacker::db::mysql::allowed_hosts:
           - '%'
           - {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+        tacker::policy::policies: {get_param: TackerPolicies}
 
       service_config_settings:
         keystone:
           tacker::keystone::auth::tenant: 'service'
+          tacker::keystone::auth::region: {get_param: KeystoneRegion}
           tacker::keystone::auth::password: {get_param: TackerPassword}
           tacker::keystone::auth::public_url: {get_param: [EndpointMap, TackerPublic, uri]}
           tacker::keystone::auth::internal_url: {get_param: [EndpointMap, TackerInternal, uri]}
index b14d7bc..92c3f9e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   NTP service deployment using puppet, this YAML file
@@ -16,6 +16,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 5d0eeae..aece02c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Composable Timezone service
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 67e14d9..9fb590e 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   TripleO Firewall settings
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -37,3 +45,9 @@ outputs:
         tripleo::firewall::purge_firewall_rules: {get_param: PurgeFirewallRules}
       step_config: |
         include ::tripleo::firewall
+      upgrade_tasks:
+        - name: blank ipv6 rule before activating ipv6 firewall.
+          tags: step3
+          shell: cat /etc/sysconfig/ip6tables > /etc/sysconfig/ip6tables.n-o-upgrade; cat</dev/null>/etc/sysconfig/ip6tables
+          args:
+            creates: /etc/sysconfig/ip6tables.n-o-upgrade
index 737be82..2b9b883 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   TripleO Package installation settings
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
index 59866d3..e3e28a2 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Vpp service configured with Puppet
@@ -13,6 +13,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   EndpointMap:
     default: {}
     description: Mapping of service endpoint -> protocol. Typically set
@@ -42,6 +50,16 @@ outputs:
       step_config: |
         include ::tripleo::profile::base::vpp
       upgrade_tasks:
+        - name: Check if vpp is deployed
+          command: systemctl is-enabled vpp
+          tags: common
+          ignore_errors: True
+          register: vpp_enabled
+        - name: "PreUpgrade step0,validation: Check service vpp is running"
+          shell: /usr/bin/systemctl show 'vpp' --property ActiveState | grep '\bactive\b'
+          when: vpp_enabled.rc == 0
+          tags: step0,validation
         - name: Stop vpp service
-          tags: step2
+          tags: step1
+          when: vpp_enabled.rc == 0
           service: name=vpp state=stopped
index a320f69..6bc296a 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 
 description: >
   Openstack Zaqar service. Shared for all Heat services.
@@ -18,6 +18,14 @@ parameters:
   DefaultPasswords:
     default: {}
     type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
   Debug:
     default: ''
     description: Set to True to enable debugging on all services.
@@ -30,7 +38,32 @@ parameters:
     type: string
     default: 'regionOne'
     description: Keystone region for endpoint
+  ZaqarPolicies:
+    description: |
+      A hash of policies to configure for Zaqar.
+      e.g. { zaqar-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+    default: {}
+    type: json
+  ZaqarWorkers:
+    type: string
+    description: Set the number of workers for zaqar::wsgi::apache
+    default: '%{::os_workers}'
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+  zaqar_workers_zero: {equals : [{get_param: ZaqarWorkers}, 0]}
+
+resources:
 
+  ApacheServiceBase:
+    type: ./apache.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
 
 outputs:
   role_data:
@@ -38,15 +71,31 @@ outputs:
     value:
       service_name: zaqar
       config_settings:
-        zaqar::keystone::authtoken::password: {get_param: ZaqarPassword}
-        zaqar::keystone::authtoken::project_name: 'service'
-        zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
-        zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
-        zaqar::debug: {get_param: Debug}
-        zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]}
-        zaqar::transport::wsgi::bind: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
-        zaqar::message_pipeline: 'zaqar.notification.notifier'
-        zaqar::unreliable: true
+        map_merge:
+          - get_attr: [ApacheServiceBase, role_data, config_settings]
+          - zaqar::policy::policies: {get_param: ZaqarPolicies}
+            zaqar::keystone::authtoken::password: {get_param: ZaqarPassword}
+            zaqar::keystone::authtoken::project_name: 'service'
+            zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+            zaqar::debug: {get_param: Debug}
+            zaqar::server::service_name: 'httpd'
+            zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]}
+            zaqar::wsgi::apache::ssl: false
+            zaqar::wsgi::apache::bind_host: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
+            zaqar::message_pipeline: 'zaqar.notification.notifier'
+            zaqar::unreliable: true
+            zaqar::wsgi::apache::servername:
+              str_replace:
+                template:
+                  "%{hiera('fqdn_$NETWORK')}"
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
+          -
+            if:
+            - zaqar_workers_zero
+            - {}
+            - zaqar::wsgi::apache::workers: {get_param: ZaqarWorkers}
       service_config_settings:
         keystone:
           zaqar::keystone::auth::password: {get_param: ZaqarPassword}
@@ -65,22 +114,37 @@ outputs:
       step_config: |
         include ::tripleo::profile::base::zaqar
       upgrade_tasks:
-        - name: Check if zaqar is deployed
-          command: systemctl is-enabled openstack-zaqar
-          tags: common
-          ignore_errors: True
-          register: zaqar_enabled
-        - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running"
-          shell: >
-            /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState |
-            grep '\bactive\b'
-          when: zaqar_enabled.rc == 0
-          tags: step0,validation
-        - name: Stop zaqar service
-          tags: step1
-          when: zaqar_enabled.rc == 0
-          service: name=openstack-zaqar state=stopped
-        - name: Install openstack-zaqar package if it was disabled
-          tags: step3
-          yum: name=openstack-zaqar state=latest
-          when: zaqar_enabled.rc != 0
+        yaql:
+          expression: $.data.apache_upgrade + $.data.zaqar_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            zaqar_upgrade:
+              - name: Check if zaqar is deployed
+                command: systemctl is-enabled openstack-zaqar
+                tags: common
+                ignore_errors: True
+                register: zaqar_enabled
+              - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running"
+                shell: >
+                  /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState |
+                  grep '\bactive\b'
+                when: zaqar_enabled.rc == 0
+                tags: step0,validation
+              - name: Check for zaqar running under apache (post upgrade)
+                tags: step1
+                shell: "httpd -t -D DUMP_VHOSTS | grep -q zaqar_wsgi"
+                register: zaqar_apache
+                ignore_errors: true
+              - name: Stop zaqar service (running under httpd)
+                tags: step1
+                service: name=httpd state=stopped
+                when: zaqar_apache.rc == 0
+              - name: Stop and disable zaqar service (pre-upgrade not under httpd)
+                tags: step1
+                when: zaqar_enabled.rc == 0
+                service: name=openstack-zaqar state=stopped enabled=no
+              - name: Install openstack-zaqar package if it was disabled
+                tags: step3
+                yum: name=openstack-zaqar state=latest
+                when: zaqar_enabled.rc != 0
index 2cfd43f..4f96717 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: ocata
+heat_template_version: pike
 description: 'Upgrade via ansible by applying a step-related tag'
 
 parameters:
diff --git a/releasenotes/notes/Add-Internal-TLS-CA-File-parameter-c24ee13daaa11dfc.yaml b/releasenotes/notes/Add-Internal-TLS-CA-File-parameter-c24ee13daaa11dfc.yaml
new file mode 100644 (file)
index 0000000..8847b22
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - Adds the InternalTLSCAFile parameter, which defines which CA file should be
+    used by the internal services to verify that the peer's certificate is
+    trusted. This is applicable if internal TLS is enabled. Currently, it
+    defaults to using the CA file for FreeIPA, which is the default CA.
diff --git a/releasenotes/notes/Enable-TLS-for-libvirt-0aab48cd8339da0f.yaml b/releasenotes/notes/Enable-TLS-for-libvirt-0aab48cd8339da0f.yaml
new file mode 100644 (file)
index 0000000..e8941b7
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    If TLS in the internal network is enabled, libvirt's transport defaults to
+    using TLS. This can be changed by setting the ``UseTLSTransportForLiveMigration``
+    parameter, which is ``true`` by default.
diff --git a/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml b/releasenotes/notes/Switch-keystone's-default-token-provider-to-fernet-2542fccb5a588852.yaml
new file mode 100644 (file)
index 0000000..50b8167
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - Keystone's default token provider is now fernet instead of UUID
+upgrade:
+  - When upgrading, old tokens will not work anymore due to the provider
+    changing from UUID to fernet.
diff --git a/releasenotes/notes/add-all-hosts-to-hostsentry-20a8ee8a1a210ce2.yaml b/releasenotes/notes/add-all-hosts-to-hostsentry-20a8ee8a1a210ce2.yaml
new file mode 100644 (file)
index 0000000..b0ad9d9
--- /dev/null
@@ -0,0 +1,9 @@
+---
+fixes:
+  - Previously only the VIPs and their associated hostnames were present
+    in the HostsEntry output, due to the hosts_entries output on the
+    hosts-config.yaml nested stack being empty. It was referencing an
+    invalid attribute. See
+    https://bugs.launchpad.net/tripleo/+bug/1683517
+
+
diff --git a/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml b/releasenotes/notes/add-bgpvpn-support-f60c5a9cee0bb393.yaml
new file mode 100644 (file)
index 0000000..2af6aa7
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add support for BGPVPN Neutron service plugin
diff --git a/releasenotes/notes/add-ceilometer-agent-ipmi-2c86726d0373d354.yaml b/releasenotes/notes/add-ceilometer-agent-ipmi-2c86726d0373d354.yaml
new file mode 100644 (file)
index 0000000..d1f7340
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add support to configure Ceilometer Agent IPMI profiles.
diff --git a/releasenotes/notes/add-ipv6-diable-options-9aaee219bb87ac6a.yaml b/releasenotes/notes/add-ipv6-diable-options-9aaee219bb87ac6a.yaml
new file mode 100644 (file)
index 0000000..8b57f58
--- /dev/null
@@ -0,0 +1,7 @@
+---
+security:
+  - |
+    Add an IPv6 disable option so users can disable IPv6 when it is not
+    used, which decreases the risk of IPv6-based attacks.
+    Both net.ipv6.conf.default.disable_ipv6 & net.ipv6.conf.all.disable_ipv6
+    will be explicitly set to the default value (0), which leaves IPv6 enabled.
diff --git a/releasenotes/notes/add-l2gw-agent-1a2f14a6ceefe362.yaml b/releasenotes/notes/add-l2gw-agent-1a2f14a6ceefe362.yaml
new file mode 100644 (file)
index 0000000..7f88e26
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+   - Add support for L2 Gateway Neutron agent
diff --git a/releasenotes/notes/add-l2gw-api-support-2206d3d14f409088.yaml b/releasenotes/notes/add-l2gw-api-support-2206d3d14f409088.yaml
new file mode 100644 (file)
index 0000000..8183532
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add support for L2 Gateway Neutron service plugin
diff --git a/releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml b/releasenotes/notes/add-ldap-backend-0bda702fb0aa24bf.yaml
new file mode 100644 (file)
index 0000000..19452f2
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - Add the capability to configure LDAP backends for keystone domains.
+    This can be done by using the KeystoneLDAPDomainEnable and
+    KeystoneLDAPBackendConfigs parameters.
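A minimal environment-file sketch for this feature. The domain name, LDAP URL, and credentials are illustrative placeholders, and the exact set of supported backend options comes from puppet-keystone rather than from this note.

    parameter_defaults:
      KeystoneLDAPDomainEnable: true
      KeystoneLDAPBackendConfigs:
        exampledomain:                       # placeholder domain name
          url: ldap://ldap.example.com       # placeholder LDAP server
          user: cn=admin,dc=example,dc=com
          password: ExamplePassword
          suffix: dc=example,dc=com
          user_tree_dn: ou=Users,dc=example,dc=com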
diff --git a/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml b/releasenotes/notes/add-opendaylight-ha-e46ef46e29689dde.yaml
new file mode 100644 (file)
index 0000000..882ee4e
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - Adds support for OpenDaylight HA clustering.  Now when specifying
+    three or more ODL roles, ODL will be deployed in a cluster, and
+    use port 2550 for cluster communication.
diff --git a/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml b/releasenotes/notes/add-parameters-for-heat-apis-over-httpd-df83ab04d9f9ebb2.yaml
new file mode 100644 (file)
index 0000000..b3a62ce
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - The relevant parameters have been added to deploy the heat APIs over httpd.
+    This means that the HeatWorkers parameters now affect httpd instead of the
+    heat APIs themselves, and that the apache hieradata will also be deployed
+    on the nodes where the heat APIs run.
diff --git a/releasenotes/notes/add-qdr-99a27dffef42c13e.yaml b/releasenotes/notes/add-qdr-99a27dffef42c13e.yaml
new file mode 100644 (file)
index 0000000..163536d
--- /dev/null
@@ -0,0 +1,8 @@
+---
+features:
+  - Introduce the ability to deploy the qpid-dispatch-router (Qdr) for
+    the oslo.messaging AMQP 1.0 driver backend. The Qdr provides
+    direct messaging (i.e. brokerless) communications for
+    oslo.messaging services. To facilitate simple use for evaluation
+    in an overcloud deployment, the Qdr aliases the RabbitMQ service
+    to provide the messaging backend.
diff --git a/releasenotes/notes/add-support-for-pure-cinder-1a595f1940d5a06f.yaml b/releasenotes/notes/add-support-for-pure-cinder-1a595f1940d5a06f.yaml
new file mode 100644 (file)
index 0000000..da326e4
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Added Pure Storage FlashArray iSCSI and FC backend support for cinder
diff --git a/releasenotes/notes/add_db_sync_timeout-c9b2f401cca0b37d.yaml b/releasenotes/notes/add_db_sync_timeout-c9b2f401cca0b37d.yaml
new file mode 100644 (file)
index 0000000..ecf3593
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Adds DatabaseSyncTimeout parameter to Nova and Neutron templates.
diff --git a/releasenotes/notes/api-policy-4ca739519537f6f4.yaml b/releasenotes/notes/api-policy-4ca739519537f6f4.yaml
new file mode 100644 (file)
index 0000000..54beb30
--- /dev/null
@@ -0,0 +1,13 @@
+---
+features:
+  - |
+    TripleO is now able to configure role-based access API policies with new
+    parameters for each API service.
+    For example, the Nova API service now has a NovaApiPolicies parameter whose
+    value could be
+    { nova-context_is_admin: { key: context_is_admin, value: 'role:admin' } }.
+    This configures the context_is_admin rule in the /etc/nova/policy.json
+    file. Puppet takes care of this configuration, and API services are
+    restarted when the file is touched.
+    We're also adding the augeas resource to the list of Puppet providers that
+    container deployments grab in the catalog to generate configurations, so
+    this feature can be used when deploying TripleO in containers.
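As a sketch, the example value from this note would be supplied through an environment file such as the following; the policy key and rule are taken verbatim from the note above.

    parameter_defaults:
      NovaApiPolicies:
        nova-context_is_admin:
          key: 'context_is_admin'
          value: 'role:admin'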
diff --git a/releasenotes/notes/big-switch-agent-4c743a2112251234.yaml b/releasenotes/notes/big-switch-agent-4c743a2112251234.yaml
new file mode 100644 (file)
index 0000000..49ede20
--- /dev/null
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Updated bigswitch environment file to include the bigswitch agent
+    installation and correct support for the restproxy configuration.
diff --git a/releasenotes/notes/change-rabbitmq-ha-mode-policy-default-6c6cd7f02181f0e0.yaml b/releasenotes/notes/change-rabbitmq-ha-mode-policy-default-6c6cd7f02181f0e0.yaml
new file mode 100644 (file)
index 0000000..d6f74ef
--- /dev/null
@@ -0,0 +1,11 @@
+---
+upgrade:
+  - |
+    We no longer change the rabbitmq ha-mode policy during upgrades.
+    The policy chosen at deploy time will remain the same but can be changed
+    manually.
+fixes:
+  - |
+    Due to https://bugs.launchpad.net/tripleo/+bug/1686337 we switch the
+    default rabbitmq ha-mode back to "all". This is to make the installation
+    more robust in the face of network issues.
diff --git a/releasenotes/notes/configurable-snmpd-options-3954c5858e2c7656.yaml b/releasenotes/notes/configurable-snmpd-options-3954c5858e2c7656.yaml
new file mode 100644 (file)
index 0000000..d69bf4f
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    By default, don't log a message to syslog for each incoming SNMP query;
+    the default log level is now set to '-LS0-5d'. The operator can customize
+    the log level via a parameter.
diff --git a/releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml b/releasenotes/notes/deployed-server-firewall-purge-9d9fe73faf925056.yaml
new file mode 100644 (file)
index 0000000..298a8ec
--- /dev/null
@@ -0,0 +1,6 @@
+---
+fixes:
+  - The initial firewall will now be purged by the deployed-server bootstrap
+    scripts. This is needed to prevent possible issues with bootstrapping the
+    initial Pacemaker cluster. See
+    https://bugs.launchpad.net/tripleo/+bug/1679234
diff --git a/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml b/releasenotes/notes/deprecate-NeutronExternalNetworkBridge-7d42f1a0718da327.yaml
new file mode 100644 (file)
index 0000000..0906729
--- /dev/null
@@ -0,0 +1,10 @@
+---
+upgrade:
+  - The ``NeutronExternalNetworkBridge`` parameter changed its default value
+    from ``br-ex`` to an empty string value. It means that by default Neutron
+    L3 agent will be able to serve multiple external networks. (It was always
+    the case for those who were using templates with the value of the parameter
+    overridden by an empty string value.)
+deprecations:
+  - The ``NeutronExternalNetworkBridge`` parameter is deprecated and will be
+    removed in a future release.
diff --git a/releasenotes/notes/deprecate-ceilometer-expirer-83b193a07631d89d.yaml b/releasenotes/notes/deprecate-ceilometer-expirer-83b193a07631d89d.yaml
new file mode 100644 (file)
index 0000000..9088f96
--- /dev/null
@@ -0,0 +1,11 @@
+---
+upgrade:
+  - With the expirer deprecated and disabled by default, there is an upgrade
+    impact here. If you had the expirer enabled in ocata and you upgrade to
+    pike, the expirer will not be enabled anymore. If you wish to keep using
+    the expirer, ensure you include ceilometer-expirer.yaml
+    in your upgrade deploy command. Also note that with the collector
+    disabled, there is no need for the expirer to be running.
+deprecations:
+  - Deprecate and turn off the expirer service, as with the collector. Without
+    the collector and standard storage, the expirer has no use.
diff --git a/releasenotes/notes/deprecate-collector-a16e5d58ae00806d.yaml b/releasenotes/notes/deprecate-collector-a16e5d58ae00806d.yaml
new file mode 100644 (file)
index 0000000..b9546a9
--- /dev/null
@@ -0,0 +1,14 @@
+---
+upgrade:
+  - With the collector deprecated and disabled by default, there is an upgrade
+    impact here. If you had the collector enabled in ocata and you upgrade to
+    pike, the collector will not be enabled anymore. If you wish to keep using
+    the collector, ensure you include ceilometer-collector.yaml
+    in your upgrade deploy command. We recommend switching to the
+    new pipeline approach with a publisher instead.
+deprecations:
+  - Deprecate and disable the ceilometer collector service by default. Instead,
+    use the publisher directly in the pipeline to push data where appropriate.
+    The collector can be manually enabled by passing to the deploy command the
+    ceilometer-collector.yaml environment file included in the environments
+    directory. By default, the pipeline publisher pushes data automatically
+    to gnocchi.
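A hedged usage sketch for re-enabling the collector; the exact location of ceilometer-collector.yaml under the installed templates tree is an assumption, so check your tripleo-heat-templates checkout.

    # the environments/ subpath below is assumed; adjust to your templates tree
    openstack overcloud deploy --templates \
      -e /usr/share/openstack-tripleo-heat-templates/environments/services/ceilometer-collector.yaml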
diff --git a/releasenotes/notes/deprecate-panko-b2bdce647d2b9a6d.yaml b/releasenotes/notes/deprecate-panko-b2bdce647d2b9a6d.yaml
new file mode 100644 (file)
index 0000000..96f2c55
--- /dev/null
@@ -0,0 +1,5 @@
+---
+deprecations:
+  - The Panko API service is deprecated in Pike. Note that this service
+    will remain enabled by default as there is no replacement yet. This will
+    be disabled in future releases.
diff --git a/releasenotes/notes/disable-ceilo-api-dfe5d0947563bbe0.yaml b/releasenotes/notes/disable-ceilo-api-dfe5d0947563bbe0.yaml
new file mode 100644 (file)
index 0000000..2661f7c
--- /dev/null
@@ -0,0 +1,4 @@
+---
+deprecations:
+  - Deprecate and disable the ceilometer API by default. It can be re-enabled
+    by passing an environment file to the deploy command.
diff --git a/releasenotes/notes/disable-core-dump-for-setuid-programs-e83a2a5da908b9c3.yaml b/releasenotes/notes/disable-core-dump-for-setuid-programs-e83a2a5da908b9c3.yaml
new file mode 100644 (file)
index 0000000..3168a54
--- /dev/null
@@ -0,0 +1,12 @@
+---
+upgrade:
+  - |
+    The fs.suid_dumpable kernel parameter is now explicitly set to 0 to prevent
+    exposing sensitive data through core dumps of processes with elevated
+    permissions. Deployments that set or depend on non-zero values for
+    fs.suid_dumpable may be affected by upgrading.
+security:
+  - |
+    Explicitly disable core dumps for setuid programs by setting
+    fs.suid_dumpable = 0; this decreases the risk of unauthorized access
+    to core dump files generated by setuid programs.
diff --git a/releasenotes/notes/disable-kernel-parameter-for-icmp-redirects-f325f91d71b58b5f.yaml b/releasenotes/notes/disable-kernel-parameter-for-icmp-redirects-f325f91d71b58b5f.yaml
new file mode 100644 (file)
index 0000000..0f226a8
--- /dev/null
@@ -0,0 +1,19 @@
+---
+upgrade:
+  - The net.ipv4.conf.default.send_redirects & net.ipv4.conf.all.send_redirects
+    are now set to 0 to prevent a compromised host from sending invalid ICMP
+    redirects to other router devices.
+  - The net.ipv4.conf.default.accept_redirects,
+    net.ipv6.conf.default.accept_redirects & net.ipv6.conf.all.accept_redirects
+    are now set to 0 to prevent forged ICMP packets from altering the host's
+    routing tables.
+  - The net.ipv4.conf.default.secure_redirects &
+    net.ipv4.conf.all.secure_redirects are now set to 0 to disable acceptance
+    of secure ICMP redirected packets.
+security:
+  - Invalid ICMP redirects may corrupt routing and have users access a system
+    set up by the attacker rather than a valid system.
+  - Routing tables may be altered by bogus ICMP redirect messages, sending
+    packets to incorrect networks.
+  - Secure ICMP redirects are the same as ICMP redirects, except they come from
+    gateways listed on the default gateway list.
diff --git a/releasenotes/notes/disable-manila-cephfs-snapshots-by-default-d5320a05d9b501cf.yaml b/releasenotes/notes/disable-manila-cephfs-snapshots-by-default-d5320a05d9b501cf.yaml
new file mode 100644 (file)
index 0000000..98d70b6
--- /dev/null
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    Disabled cephfs snapshot support (ManilaCephFSNativeCephFSEnableSnapshots
+    parameter) in manila by default.
diff --git a/releasenotes/notes/disable_default_apache_vhost-f41d11fe07605f7f.yaml b/releasenotes/notes/disable_default_apache_vhost-f41d11fe07605f7f.yaml
new file mode 100644 (file)
index 0000000..279e25c
--- /dev/null
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    Disable the default vhost for apache. This is required for hybrid
+    deployments where WSGI-based services run both on the host and in
+    containers, so that their default ports do not conflict.
diff --git a/releasenotes/notes/docker-service-all-roles-5c22a018caeafcf0.yaml b/releasenotes/notes/docker-service-all-roles-5c22a018caeafcf0.yaml
new file mode 100644 (file)
index 0000000..734db08
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    When deploying with environments/docker.yaml, the docker service
+    is now deployed on all predefined roles.
diff --git a/releasenotes/notes/enable-logging-suspicious-packets-d5545586f917d2ca.yaml b/releasenotes/notes/enable-logging-suspicious-packets-d5545586f917d2ca.yaml
new file mode 100644 (file)
index 0000000..bb2543f
--- /dev/null
@@ -0,0 +1,9 @@
+---
+upgrade:
+  - |
+    The net.ipv4.conf.default.log_martians & net.ipv4.conf.all.log_martians are
+    now set to 1 to enable logging of suspicious packets.
+security:
+  - |
+    Logging of suspicious packets allows an administrator to investigate the
+    spoofed packets sent to their system.
diff --git a/releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml b/releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml
new file mode 100644 (file)
index 0000000..83b05bb
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - Added support for an external swift proxy. Users may need to
+    configure endpoints pointing to an already available
+    swift proxy service.
diff --git a/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml b/releasenotes/notes/etcdtoken-4c46bdfac940acda.yaml
new file mode 100644 (file)
index 0000000..da99594
--- /dev/null
@@ -0,0 +1,6 @@
+---
+security:
+  - |
+    Secure EtcdInitialClusterToken by removing the default value
+    and making the parameter hidden.
+    Fixes `bug 1673266 <https://bugs.launchpad.net/tripleo/+bug/1673266>`__.
diff --git a/releasenotes/notes/expose-metric-processing-delay-0c098d7ec0af0728.yaml b/releasenotes/notes/expose-metric-processing-delay-0c098d7ec0af0728.yaml
new file mode 100644 (file)
index 0000000..1fc4f10
--- /dev/null
@@ -0,0 +1,3 @@
+---
+fixes:
+  - Expose metric_processing_delay to tweak gnocchi performance.
diff --git a/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml b/releasenotes/notes/fix-cinder-nfs-share-usage-0968f88eff7ffb99.yaml
new file mode 100644 (file)
index 0000000..682171c
--- /dev/null
@@ -0,0 +1,6 @@
+---
+fixes:
+  - Fixes an issue when using the CinderNfsServers
+    parameter_defaults setting.  It now works using a
+    single share as well as a comma-separated list of
+    shares.
diff --git a/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml b/releasenotes/notes/fix-neutron-dpdk-firewall-436aee39a0d7ed65.yaml
new file mode 100644 (file)
index 0000000..bb18aed
--- /dev/null
@@ -0,0 +1,5 @@
+---
+fixes:
+  - Fixes firewall rules from neutron OVS agent not being
+    inherited correctly and applied in neutron OVS DPDK
+    template.
diff --git a/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml b/releasenotes/notes/fix-odl-provider-mapping-hiera-5b3472184be490e2.yaml
new file mode 100644 (file)
index 0000000..79cea05
--- /dev/null
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Fixes OpenDaylightProviderMappings parsing of a
+    comma-delimited list.
diff --git a/releasenotes/notes/get-occ-config-local-connector-5bbec3f591a9f311.yaml b/releasenotes/notes/get-occ-config-local-connector-5bbec3f591a9f311.yaml
new file mode 100644 (file)
index 0000000..ef8877a
--- /dev/null
@@ -0,0 +1,10 @@
+---
+fixes:
+  - The deployed-server Heat agent configuration script,
+    get-occ-config.sh, is now updated to configure the
+    local data source for os-collect-config instead of
+    configuring /etc/os-collect-config.conf directly. Doing
+    so means that the configuration template for os-apply-config
+    no longer has to be deleted as the file will be rendered
+    correctly with the right data. See
+    https://bugs.launchpad.net/tripleo/+bug/1679705
diff --git a/releasenotes/notes/glance-keystonev3-d35182ba9a3778eb.yaml b/releasenotes/notes/glance-keystonev3-d35182ba9a3778eb.yaml
new file mode 100644 (file)
index 0000000..072e85a
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - Deploy Glance with Keystone v3 endpoints and make
+    sure it doesn't rely on Keystone v2 anymore.
diff --git a/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml b/releasenotes/notes/gnocchi-keystonev3-d288ba40226545c9.yaml
new file mode 100644 (file)
index 0000000..2f2513c
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - Deploy Gnocchi with Keystone v3 endpoints and make
+    sure it doesn't rely on Keystone v2 anymore.
diff --git a/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml b/releasenotes/notes/ha-by-default-55326e699ee8602c.yaml
deleted file mode 100644 (file)
index edcc125..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
----
-deprecations:
-  - The environments/puppet-pacemaker.yaml file is now deprecated and the HA
-    deployment is now the default. In order to get the non-HA deployment use
-    environments/nonha-arch.yaml explicitly.
diff --git a/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml b/releasenotes/notes/install-openstack-selinux-d14b2e26feb6d04e.yaml
new file mode 100644 (file)
index 0000000..d2b2eb9
--- /dev/null
@@ -0,0 +1,6 @@
+---
+fixes:
+  - openstack-selinux is now installed by the deployed-server
+    bootstrap scripts. Previously, it was not installed, so
+    if SELinux was set to enforcing, all OpenStack policy
+    was missing.
diff --git a/releasenotes/notes/ironic-boot-option-3f3036aa5e82ec7e.yaml b/releasenotes/notes/ironic-boot-option-3f3036aa5e82ec7e.yaml
new file mode 100644 (file)
index 0000000..53191bd
--- /dev/null
@@ -0,0 +1,12 @@
+---
+features:
+  - |
+    The new ``IronicDefaultBootOption`` configuration allows changing the
+    default boot option to use for bare metal instances in the overcloud.
+upgrade:
+  - |
+    The default boot option for bare metal instances in the overcloud was changed
+    to "local". This was already the default for whole-disk images, but for
+    partition images it requires ``grub2`` to be installed on them.
+    Use the new ``IronicDefaultBootOption`` configuration to override, or
+    set ``boot_option`` capability on nodes and flavors.
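A minimal environment-file sketch for keeping network boot as the default instead; it assumes ``netboot`` remains an accepted value for this option, matching the existing ``boot_option`` capability values.

    parameter_defaults:
      IronicDefaultBootOption: netboot   # assumption: 'netboot' is the non-local value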
diff --git a/releasenotes/notes/ironic-hardware-types-fe5140549d3bb792.yaml b/releasenotes/notes/ironic-hardware-types-fe5140549d3bb792.yaml
new file mode 100644 (file)
index 0000000..da3da6c
--- /dev/null
@@ -0,0 +1,9 @@
+---
+features:
+  - |
+    Configuring enabled Ironic hardware types is now possible via the new
+    ``IronicEnabledHardwareTypes`` parameter. See this spec for details:
+    http://specs.openstack.org/openstack/ironic-specs/specs/approved/driver-composition-reform.html.
+  - |
+    Bare metal serial console support via ``socat`` utility is enabled for
+    Ironic hardware types supporting it (currently only ``ipmi``).
diff --git a/releasenotes/notes/ironic-neutron-integration-76c4f9e0d10785e4.yaml b/releasenotes/notes/ironic-neutron-integration-76c4f9e0d10785e4.yaml
new file mode 100644 (file)
index 0000000..dd99acc
--- /dev/null
@@ -0,0 +1,9 @@
+---
+features:
+  - |
+    Allow setting the Ironic provisioning network UUID or name via the new
+    ``IronicProvisioningNetwork`` configuration.
+  - |
+    Enable support for "neutron" Ironic networking plugin, enabling advanced
+    integration with Neutron, such as VLAN/VXLAN network support, bonding and
+    security groups.
diff --git a/releasenotes/notes/leave-satellite-repo-enabled-8b60528bd5450c7b.yaml b/releasenotes/notes/leave-satellite-repo-enabled-8b60528bd5450c7b.yaml
new file mode 100644 (file)
index 0000000..c327265
--- /dev/null
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Previously the RHEL registration script disabled the satellite repo after
+    installing the necessary packages from it. This made it awkward to
+    update those packages later, so the repo will no longer be disabled.
diff --git a/releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml b/releasenotes/notes/make-panko-default-8d0e824fc91cef56.yaml
new file mode 100644 (file)
index 0000000..d062426
--- /dev/null
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Since panko is enabled by default, include it as the default dispatcher
+    for ceilometer events.
diff --git a/releasenotes/notes/match-enable_dvr-with-NeutronEnableDVR-fe8aac6c4ce52bce.yaml b/releasenotes/notes/match-enable_dvr-with-NeutronEnableDVR-fe8aac6c4ce52bce.yaml
new file mode 100644 (file)
index 0000000..5440043
--- /dev/null
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    Neutron API controller no longer advertises ``dvr`` extension if the
+    cloud is not configured for DVR. This is achieved by setting ``enable_dvr``
+    to match ``NeutronEnableDVR`` setting.
diff --git a/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml b/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml
new file mode 100644 (file)
index 0000000..45ca9fe
--- /dev/null
@@ -0,0 +1,14 @@
+---
+features:
+  - |
+    Add support for cold migration over ssh.
+
+    This enables nova cold migration.
+
+    This also switches to SSH as the default transport for live-migration.
+    The tripleo-common mistral action that generates passwords supplies the
+    MigrationSshKey parameter that enables this.
+deprecations:
+  - |
+    The TCP transport is no longer used for live-migration and the firewall
+    port has been closed.
diff --git a/releasenotes/notes/nsx-support-1254839718d8df8c.yaml b/releasenotes/notes/nsx-support-1254839718d8df8c.yaml
new file mode 100644 (file)
index 0000000..1d9f5f8
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add support for NSX Neutron plugin
diff --git a/releasenotes/notes/octavia-1687026-c01313aab53f55a4.yaml b/releasenotes/notes/octavia-1687026-c01313aab53f55a4.yaml
new file mode 100644 (file)
index 0000000..2ba01c7
--- /dev/null
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Octavia API and Neutron Server can now be deployed on separate nodes.
+    See https://bugs.launchpad.net/tripleo/+bug/1687026
diff --git a/releasenotes/notes/ovn-fcd4b0168e6745a8.yaml b/releasenotes/notes/ovn-fcd4b0168e6745a8.yaml
new file mode 100644 (file)
index 0000000..f5ccec0
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - Support configuring NeutronBridgeMappings
+  - Set force_config_drive to true as OVN doesn't support the metadata service
+  - Add necessary iptables rules to allow Geneve traffic and ovsdb-server
+    traffic for Northbound and Southbound databases.
diff --git a/releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml b/releasenotes/notes/ovs-2.5-2.6-composable-upgrades-workaround-73f4e56127c910b4.yaml
new file mode 100644 (file)
index 0000000..8c21082
--- /dev/null
@@ -0,0 +1,12 @@
+---
+issues:
+  - During the ovs upgrade from 2.5 to 2.6 we need to work around the classic
+    yum update command by handling the upgrade of the package separately, so as
+    not to lose the IPs and the connectivity on the nodes. The workaround is
+    discussed here: https://bugs.launchpad.net/tripleo/+bug/1669714
+upgrade:
+  - The upgrade from openvswitch 2.5 to 2.6 is handled gracefully and there should
+    be no user impact, in particular no restart of the openvswitch service. For more
+    information please see the related bug above, which also links the relevant code reviews.
+    The workaround (transparent to the user and requiring no input) is to download the OVS
+    package and install it with the --nopostun and --notriggerun options provided by the rpm binary.
diff --git a/releasenotes/notes/pluggable-server-type-per-role-314f38f8e5d4c84e.yaml b/releasenotes/notes/pluggable-server-type-per-role-314f38f8e5d4c84e.yaml
new file mode 100644 (file)
index 0000000..5b58d3d
--- /dev/null
@@ -0,0 +1,8 @@
+---
+features:
+  - The server resource type, OS::TripleO::Server can now be
+    mapped per role instead of globally. This allows users to
+    mix baremetal (OS::Nova::Server) and
+    deployed-server (OS::Heat::DeployedServer) server resources
+    in the same deployment. See
+    https://blueprints.launchpad.net/tripleo/+spec/pluggable-server-type-per-role
diff --git a/releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml b/releasenotes/notes/replace-references-to-old-ctlplane-0df7f2ae8910559c.yaml
new file mode 100644 (file)
index 0000000..09d3be0
--- /dev/null
@@ -0,0 +1,20 @@
+---
+upgrade:
+  - |
+    The default network for the ctlplane changed from 192.0.2.0/24 to
+    192.168.24.0/24. All references to the ctlplane network in the templates
+    have been updated to reflect this change. When upgrading from a previous
+    release, if the default network was used for the ctlplane (192.0.2.0/24),
+    then it is necessary to provide as input, via environment file, the correct
+    setting for all the parameters that previously defaulted to 192.0.2.x and
+    now default to 192.168.24.x; there is an environment file which could be
+    used on upgrade `environments/updates/update-from-192_0_2-subnet.yaml` to
+    cover a simple scenario but it won't be enough for scenarios using an
+    external load balancer, Contrail or Cisco N1KV. The following is a list of
+    parameters to be provided on upgrade:
+    From contrail-net.yaml: EC2MetadataIp, ControlPlaneDefaultRoute
+    From external-loadbalancer-vip-v6.yaml: ControlFixedIPs
+    From external-loadbalancer-vip.yaml: ControlFixedIPs
+    From network-environment.yaml: EC2MetadataIp, ControlPlaneDefaultRoute
+    From neutron-ml2-cisco-n1kv.yaml: N1000vVSMIP, N1000vMgmtGatewayIP
+    From contrail-vrouter.yaml: ContrailVrouterGateway
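As a sketch, an upgrade environment for a deployment that used network-environment.yaml and kept the old ctlplane subnet would carry the two parameters named above; the .1 addresses are illustrative and must match what was actually deployed.

    parameter_defaults:
      EC2MetadataIp: 192.0.2.1             # illustrative; use the deployed value
      ControlPlaneDefaultRoute: 192.0.2.1  # illustrative; use the deployed value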
diff --git a/releasenotes/notes/restrict-access-to-kernel-message-buffer-809160674b92a073.yaml b/releasenotes/notes/restrict-access-to-kernel-message-buffer-809160674b92a073.yaml
new file mode 100644 (file)
index 0000000..c24e892
--- /dev/null
@@ -0,0 +1,11 @@
+---
+upgrade:
+  - |
+    The kernel.dmesg_restrict is now set to 1 to prevent exposure of sensitive
+    kernel address information with unprivileged access. Deployments that set
+    or depend on values other than 1 for kernel.dmesg_restrict may be affected
+    by upgrading.
+security:
+  - |
+    The kernel syslog contains sensitive kernel address information;
+    kernel.dmesg_restrict is set to prevent unprivileged access to it.
diff --git a/releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml b/releasenotes/notes/restrict-mongodb-memory-de7bf6754d7234d9.yaml
new file mode 100644 (file)
index 0000000..86622bc
--- /dev/null
@@ -0,0 +1,3 @@
+---
+fixes:
+  - Add knobs to limit the memory consumed by mongodb with systemd.
diff --git a/releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml b/releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml
new file mode 100644 (file)
index 0000000..dadbfa4
--- /dev/null
@@ -0,0 +1,18 @@
+---
+features:
+  - |
+    Adds tags to roles that allow an operator to specify custom tags to use
+    when trying to find functionality available from a role. Currently a role
+    with both the 'primary' and 'controller' tags is considered to be the
+    primary role. Historically the role named 'Controller' was the 'primary'
+    role, and this primary designation is used to determine items like
+    memcache ip addresses. If no role has both the 'primary' and 'controller'
+    tags, the first role specified in roles_data.yaml is used as the primary
+    role.
+upgrade:
+  - |
+    If using custom roles data, the logic was changed to treat the first
+    role listed in the roles_data.yaml file as the primary role. This can
+    be worked around by adding the 'primary' and 'controller' tags to the
+    custom controller role in your roles_data.yaml to ensure that the defined
+    custom controller role is still considered the primary role.
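A minimal sketch of a custom roles_data.yaml entry carrying the tags, mirroring the default Controller role shown further down in this change; the service list is truncated to a single entry for brevity.

    - name: Controller
      CountDefault: 1
      tags:
        - primary
        - controller
      ServicesDefault:
        - OS::TripleO::Services::Keystone   # ...remaining services omitted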
diff --git a/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml b/releasenotes/notes/sahara_auth_v3-65bd276b39b4e284.yaml
new file mode 100644 (file)
index 0000000..c744e0f
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - Sahara is now deployed with keystone_authtoken parameters and moves
+    forward to Keystone v3.
diff --git a/releasenotes/notes/service-role-name-0b8609d314564885.yaml b/releasenotes/notes/service-role-name-0b8609d314564885.yaml
new file mode 100644 (file)
index 0000000..6c73808
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - Role-specific information is added to the service template to enable
+    role-specific decisions in the service.
diff --git a/releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml b/releasenotes/notes/set-ceilometer-auth-flag-382f68ddb2cbcb6b.yaml
new file mode 100644 (file)
index 0000000..07407f2
--- /dev/null
@@ -0,0 +1,5 @@
+---
+fixes:
+  - We still need the ceilometer user in cases where the ceilometer API is
+    disabled. This ensures other ceilometer services can still authenticate
+    with keystone.
diff --git a/releasenotes/notes/snmp_listen-2364188f73d43b14.yaml b/releasenotes/notes/snmp_listen-2364188f73d43b14.yaml
new file mode 100644 (file)
index 0000000..7cff9ee
--- /dev/null
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Adds a new parameter to the SNMP profile, SnmpdBindHost,
+    so users can change the binding addresses of the SNMP daemon.
+    The parameter is an array and takes the default values that
+    were previously hardcoded in puppet-tripleo.
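A hedged environment-file sketch; the two address strings below are believed to match the values previously hardcoded in puppet-tripleo, but verify them against that module before relying on them.

    parameter_defaults:
      SnmpdBindHost:
        - 'udp:161'
        - 'udp6:[::1]:161'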
diff --git a/releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml b/releasenotes/notes/sriov-pci-passthrough-8f28719b889bdaf7.yaml
new file mode 100644 (file)
index 0000000..20146b0
--- /dev/null
@@ -0,0 +1,4 @@
+---
+fixes:
+  - The ``pci_passthrough`` hiera value should be passed as a string
+    (`bug 1675036 <https://bugs.launchpad.net/tripleo/+bug/1675036>`__).
diff --git a/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml b/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml
new file mode 100644 (file)
index 0000000..8b533b1
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - SSH host key exchange. The ssh host keys are collected from each host,
+    combined, and written to /etc/ssh/ssh_known_hosts.
diff --git a/releasenotes/notes/sshd-service-extensions-0c4d0879942a2052.yaml b/releasenotes/notes/sshd-service-extensions-0c4d0879942a2052.yaml
new file mode 100644 (file)
index 0000000..4cc01df
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added the ability to manage the MOTD banner.
+    Enabled the SSHD composable service by default; puppet-ssh manages the sshd config.
diff --git a/releasenotes/notes/stack-name-input-73f4d4d052f1377e.yaml b/releasenotes/notes/stack-name-input-73f4d4d052f1377e.yaml
new file mode 100644 (file)
index 0000000..2ccbee9
--- /dev/null
@@ -0,0 +1,5 @@
+---
+fixes:
+  - The stack name can now be overridden in the get-occ-config.sh script
+    for deployed-servers by setting the $STACK_NAME variable in the
+    environment.
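A hedged usage sketch, run from the directory containing the script; the stack name is illustrative.

    STACK_NAME=mycloud ./get-occ-config.sh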
diff --git a/releasenotes/notes/swap-prepuppet-and-postpuppet-to-preconfig-and-postconfig-debd5f28bc578d51.yaml b/releasenotes/notes/swap-prepuppet-and-postpuppet-to-preconfig-and-postconfig-debd5f28bc578d51.yaml
new file mode 100644 (file)
index 0000000..875b704
--- /dev/null
@@ -0,0 +1,6 @@
+---
+fixes:
+  - This commit merges both [Pre|Post]Puppet and [Pre|Post]Config
+    resources, giving an agnostic name for the configuration
+    steps. The [Pre|Post]Puppet resource is removed and should not
+    be used anymore.
diff --git a/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml b/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml
new file mode 100644 (file)
index 0000000..70051f6
--- /dev/null
@@ -0,0 +1,7 @@
+---
+fixes:
+  - The token flush cron job has been modified to run hourly instead of once
+    a day. This is because this was causing issues with larger deployments, as
+    the operation would take too long and sometimes even fail because of the
+    transaction being so large. Note that this only affects people using the
+    UUID token provider.
diff --git a/releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml b/releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml
new file mode 100644 (file)
index 0000000..ad1f39c
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Adds a new boolean parameter for RHEL Registration called
+    'UpdateOnRHELRegistration' that, when enabled, will trigger a yum update
+    on the node after the registration process completes.
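A minimal environment-file sketch enabling the new flag.

    parameter_defaults:
      UpdateOnRHELRegistration: true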
diff --git a/releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml b/releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml
new file mode 100644 (file)
index 0000000..29d32cb
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add name and description fields to plan-environment.yaml
diff --git a/releasenotes/notes/upgrade-stack-action-94598796a9d3511f.yaml b/releasenotes/notes/upgrade-stack-action-94598796a9d3511f.yaml
new file mode 100644 (file)
index 0000000..bdff0e6
--- /dev/null
@@ -0,0 +1,9 @@
+---
+upgrade:
+  - |
+    The new StackUpdateType parameter is now set to UPGRADE
+    when a major version upgrade is in progress. This enables application
+    configuration via puppet to distinguish a major version upgrade from a
+    normal stack update (e.g. for minor updates or reconfiguration) by
+    inspecting the stack_update_type hiera value. In future, other values may be
+    added to flag e.g. minor updates vs reconfiguration, but for now only
+    UPGRADE is considered.
diff --git a/releasenotes/notes/vpp-ml2-8e115f7763510531.yaml b/releasenotes/notes/vpp-ml2-8e115f7763510531.yaml
new file mode 100644 (file)
index 0000000..2f8ae14
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Adds support for networking-vpp ML2 mechanism driver and agent.
diff --git a/releasenotes/notes/zaqar-httpd-e7d91bf396da28d0.yaml b/releasenotes/notes/zaqar-httpd-e7d91bf396da28d0.yaml
new file mode 100644 (file)
index 0000000..a2172aa
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Run the Zaqar WSGI service over httpd in Puppet.
index 8da995b..ec158ce 100644 (file)
@@ -52,9 +52,9 @@ copyright = u'2017, TripleO Developers'
 # built documents.
 #
 # The full version, including alpha/beta/rc tags.
-release = '6.0.0.0b3'
+release = '7.0.0.0b1'
 # The short X.Y version.
-version = '6.0.0'
+version = '7.0.0'
 
 # The full version, including alpha/beta/rc tags.
 
index 057aa28..4a9b725 100644 (file)
@@ -1,6 +1,6 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-pbr>=1.8 # Apache-2.0
+pbr!=2.1.0,>=2.0.0 # Apache-2.0
 Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
 six>=1.9.0 # MIT
index 95b25d9..3e0ef75 100644 (file)
 # ServicesDefault: (list) optional default list of services to be deployed
 # on the role, defaults to an empty list. Sets the default for the
 # {{role.name}}Services parameter in overcloud.yaml
-
-- name: Controller # the 'primary' role goes first
+#
+# tags: (list) list of tags used by other parts of the deployment process to
+# find the role for a specific type of functionality. Currently a role
+# with both 'primary' and 'controller' is used as the primary role for the
+# deployment process. If no role has both 'primary' and 'controller', the
+# first role in this file is used as the primary role.
+#
+- name: Controller
   CountDefault: 1
+  tags:
+    - primary
+    - controller
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephMds
     - OS::TripleO::Services::CephMon
     - OS::TripleO::Services::CephExternal
     - OS::TripleO::Services::CinderBackup
     - OS::TripleO::Services::CinderScheduler
     - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::CinderBackendDellPs
+    - OS::TripleO::Services::CinderBackendDellSc
+    - OS::TripleO::Services::CinderBackendNetApp
+    - OS::TripleO::Services::CinderBackendScaleIO
     - OS::TripleO::Services::Congress
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::HeatEngine
     - OS::TripleO::Services::MySQL
     - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronBgpVpnApi
     - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronL2gwApi
     - OS::TripleO::Services::NeutronL3Agent
     - OS::TripleO::Services::NeutronMetadataAgent
     - OS::TripleO::Services::NeutronApi
     - OS::TripleO::Services::NeutronCorePlugin
     - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::NeutronL2gwAgent
     - OS::TripleO::Services::RabbitMQ
     - OS::TripleO::Services::HAproxy
     - OS::TripleO::Services::Keepalived
     - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::ExternalSwiftProxy
     - OS::TripleO::Services::SwiftStorage
     - OS::TripleO::Services::SwiftRingBuilder
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Securetty
     - OS::TripleO::Services::Timezone
-    - OS::TripleO::Services::CeilometerApi
-    - OS::TripleO::Services::CeilometerCollector
-    - OS::TripleO::Services::CeilometerExpirer
     - OS::TripleO::Services::CeilometerAgentCentral
     - OS::TripleO::Services::CeilometerAgentNotification
     - OS::TripleO::Services::Horizon
     - OS::TripleO::Services::OctaviaHousekeeping
     - OS::TripleO::Services::OctaviaWorker
     - OS::TripleO::Services::Vpp
+    - OS::TripleO::Services::NeutronVppAgent
+    - OS::TripleO::Services::Docker
 
 - name: Compute
   CountDefault: 1
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephClient
     - OS::TripleO::Services::CephExternal
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Securetty
     - OS::TripleO::Services::NovaCompute
     - OS::TripleO::Services::NovaLibvirt
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Vpp
+    - OS::TripleO::Services::NeutronVppAgent
     - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Docker
 
 - name: BlockStorage
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::BlockStorageCinderVolume
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Securetty
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::SensuClient
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Docker
 
 - name: ObjectStorage
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::SwiftStorage
     - OS::TripleO::Services::SwiftRingBuilder
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Securetty
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Docker
 
 - name: CephStorage
   ServicesDefault:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::CertmongerUser
     - OS::TripleO::Services::CephOSD
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::Snmp
     - OS::TripleO::Services::Sshd
+    - OS::TripleO::Services::Securetty
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Docker
index 5070ef3..d57c8fc 100644 (file)
@@ -1,6 +1,9 @@
-- name: Undercloud # the 'primary' role goes first
+- name: Undercloud
   CountDefault: 1
   disable_constraints: True
+  tags:
+    - primary
+    - controller
   ServicesDefault:
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::MySQL
     - OS::TripleO::Services::NeutronCorePlugin
     - OS::TripleO::Services::NeutronOvsAgent
     - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::UndercloudAodhApi
+    - OS::TripleO::Services::UndercloudAodhEvaluator
+    - OS::TripleO::Services::UndercloudAodhNotifier
+    - OS::TripleO::Services::UndercloudAodhListener
+    - OS::TripleO::Services::UndercloudGnocchiApi
+    - OS::TripleO::Services::UndercloudGnocchiMetricd
+    - OS::TripleO::Services::UndercloudGnocchiStatsd
+    - OS::TripleO::Services::UndercloudPankoApi
+    - OS::TripleO::Services::UndercloudCeilometerAgentCentral
+    - OS::TripleO::Services::UndercloudCeilometerAgentNotification
index f456b31..b3109a0 100755 (executable)
@@ -13,14 +13,16 @@ write_entries() {
 
     if grep -q "^# HEAT_HOSTS_START" "$file"; then
         temp=$(mktemp)
-        awk -v v="$entries" '/^# HEAT_HOSTS_START/ {
-            print $0
-            print v
-            f=1
-            }f &&!/^# HEAT_HOSTS_END$/{next}/^# HEAT_HOSTS_END$/{f=0}!f' "$file" > "$temp"
-            echo "INFO: Updating hosts file $file, check below for changes"
-            diff "$file" "$temp" || true
-            cat "$temp" > "$file"
+        (
+        sed '/^# HEAT_HOSTS_START/,$d' "$file"
+        echo -ne "\n# HEAT_HOSTS_START - Do not edit manually within this section!\n"
+        echo "$entries"
+        echo -ne "# HEAT_HOSTS_END\n\n"
+        sed '1,/^# HEAT_HOSTS_END/d' "$file"
+        ) > "$temp"
+        echo "INFO: Updating hosts file $file, check below for changes"
+        diff "$file" "$temp" || true
+        cat "$temp" > "$file"
     else
         echo -ne "\n# HEAT_HOSTS_START - Do not edit manually within this section!\n" >> "$file"
         echo "$entries" >> "$file"
index 782bb21..566d844 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -25,5 +25,5 @@ except ImportError:
     pass
 
 setuptools.setup(
-    setup_requires=['pbr>=1.8'],
+    setup_requires=['pbr>=2.0.0'],
     pbr=True)
index 1c8c4ba..69ed96a 100755 (executable)
@@ -32,6 +32,9 @@ def parse_opts(argv):
     parser.add_argument('-r', '--roles-data', metavar='ROLES_DATA',
                         help="""relative path to the roles_data.yaml file.""",
                         default='roles_data.yaml')
+    parser.add_argument('-n', '--network-data', metavar='NETWORK_DATA',
+                        help="""relative path to the network_data.yaml file.""",
+                        default='network_data.yaml')
     parser.add_argument('--safe',
                         action='store_true',
                         help="""Enable safe mode (do not overwrite files).""",
@@ -71,11 +74,15 @@ def _j2_render_to_file(j2_template, j2_data, outfile_name=None,
         out_f.write(r_template)
 
 
-def process_templates(template_path, role_data_path, output_dir, overwrite):
+def process_templates(template_path, role_data_path, output_dir,
+                      network_data_path, overwrite):
 
     with open(role_data_path) as role_data_file:
         role_data = yaml.safe_load(role_data_file)
 
+    with open(network_data_path) as network_data_file:
+        network_data = yaml.safe_load(network_data_file)
+
     j2_excludes_path = os.path.join(template_path, 'j2_excludes.yaml')
     with open(j2_excludes_path) as role_data_file:
         j2_excludes = yaml.safe_load(role_data_file)
@@ -150,7 +157,8 @@ def process_templates(template_path, role_data_path, output_dir, overwrite):
                     print("jinja2 rendering normal template %s" % f)
                     with open(file_path) as j2_template:
                         template_data = j2_template.read()
-                        j2_data = {'roles': role_data}
+                        j2_data = {'roles': role_data,
+                                   'networks': network_data}
                         out_f = os.path.basename(f).replace('.j2.yaml', '.yaml')
                         out_f_path = os.path.join(out_dir, out_f)
                         _j2_render_to_file(template_data, j2_data, out_f_path,
@@ -164,5 +172,7 @@ def process_templates(template_path, role_data_path, output_dir, overwrite):
 opts = parse_opts(sys.argv)
 
 role_data_path = os.path.join(opts.base_path, opts.roles_data)
+network_data_path = os.path.join(opts.base_path, opts.network_data)
 
-process_templates(opts.base_path, role_data_path, opts.output_dir, (not opts.safe))
+process_templates(opts.base_path, role_data_path, opts.output_dir,
+                  network_data_path, (not opts.safe))
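A hedged usage sketch of the updated renderer. The diff above does not show the script's filename; tools/process-templates.py is assumed, and only the -r and -n options visible in this hunk are used.

    # script path is an assumption; adjust to your checkout
    python tools/process-templates.py -r roles_data.yaml -n network_data.yaml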
index b8f07e4..cdda108 100755 (executable)
@@ -157,7 +157,7 @@ def convert(filename):
         print("Error couldn't find run-os-net-config.sh relative to filename")
         exit_usage()
 
-    for r in six.iteritems(tpl.get('resources', {})):
+    for r in (tpl.get('resources', {})).items():
         if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
             r[1].get('properties', {}).get('group') == 'os-apply-config' and
             r[1].get('properties', {}).get('config', {}).get('os_net_config')):
index 32987cb..92d76d2 100755 (executable)
@@ -17,12 +17,22 @@ import traceback
 import yaml
 
 
-required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords']
+required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
+                   'RoleName', 'RoleParameters']
 
 envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
                                 'tls-endpoints-public-ip.yaml',
                                 'tls-everywhere-endpoints-dns.yaml']
 ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
+REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
+                            'config_settings', 'step_config']
+OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
+                            'service_config_settings', 'host_prep_tasks',
+                            'metadata_settings', 'kolla_config']
+REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
+                                          'config_image']
+OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags']
+
 
 def exit_usage():
     print('Usage %s <yaml file or directory>' % sys.argv[0])
@@ -69,6 +79,7 @@ def validate_hci_compute_services_default(env_filename, env_tpl):
                 return 1
     return 0
 
+
 def validate_mysql_connection(settings):
     no_op = lambda *args: False
     error_status = [0]
@@ -109,6 +120,58 @@ def validate_mysql_connection(settings):
     return error_status[0]
 
 
+def validate_docker_service(filename, tpl):
+    if 'outputs' in tpl and 'role_data' in tpl['outputs']:
+        if 'value' not in tpl['outputs']['role_data']:
+            print('ERROR: invalid role_data for filename: %s'
+                  % filename)
+            return 1
+        role_data = tpl['outputs']['role_data']['value']
+
+        for section_name in REQUIRED_DOCKER_SECTIONS:
+            if section_name not in role_data:
+                print('ERROR: %s is required in role_data for %s.'
+                      % (section_name, filename))
+                return 1
+
+        for section_name in role_data.keys():
+            if section_name in REQUIRED_DOCKER_SECTIONS:
+                continue
+            else:
+                if section_name in OPTIONAL_DOCKER_SECTIONS:
+                    continue
+                else:
+                    print('ERROR: %s is extra in role_data for %s.'
+                          % (section_name, filename))
+                    return 1
+
+        if 'puppet_config' in role_data:
+            puppet_config = role_data['puppet_config']
+            for key in puppet_config:
+                if key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
+                    continue
+                else:
+                    if key in OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS:
+                        continue
+                    else:
+                        print('ERROR: %s should not be in puppet_config section.'
+                              % key)
+                        return 1
+            for key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
+                if key not in puppet_config:
+                    print('ERROR: %s is required in puppet_config for %s.'
+                          % (key, filename))
+                    return 1
+
+    if 'parameters' in tpl:
+        for param in required_params:
+            if param not in tpl['parameters']:
+                print('ERROR: parameter %s is required for %s.'
+                      % (param, filename))
+                return 1
+    return 0
+
+
 def validate_service(filename, tpl):
     if 'outputs' in tpl and 'role_data' in tpl['outputs']:
         if 'value' not in tpl['outputs']['role_data']:
@@ -154,10 +217,16 @@ def validate(filename):
                   % filename)
             return 1
 
+        # qdr aliases rabbitmq service to provide alternative messaging backend
         if (filename.startswith('./puppet/services/') and
-                filename != './puppet/services/services.yaml'):
+                filename not in ['./puppet/services/services.yaml',
+                                 './puppet/services/qdr.yaml']):
             retval = validate_service(filename, tpl)
 
+        if (filename.startswith('./docker/services/') and
+                filename != './docker/services/services.yaml'):
+            retval = validate_docker_service(filename, tpl)
+
         if filename.endswith('hyperconverged-ceph.yaml'):
             retval = validate_hci_compute_services_default(filename, tpl)
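The validate_docker_service() check added above requires the sections listed in REQUIRED_DOCKER_SECTIONS and REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS. A skeletal role_data output that would satisfy those checks might look like the following; the service name and image reference are placeholders.

    outputs:
      role_data:
        description: Role data for the example containerized service
        value:
          service_name: example
          config_settings: {}
          step_config: ''
          docker_config: {}
          puppet_config:
            config_volume: example
            step_config: ''
            config_image: 'example/config-image:latest'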
 
diff --git a/tox.ini b/tox.ini
index 3796a54..b92e545 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -4,6 +4,7 @@ skipsdist = True
 
 [testenv]
 usedevelop = True
+install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 
index 0b8b352..ed7fefb 100644 (file)
@@ -67,5 +67,53 @@ function ping_default_gateways() {
   echo "SUCCESS"
 }
 
+# Verify the FQDN from the nova/ironic deployment matches
+# FQDN in the heat templates.
+function fqdn_check() {
+  HOSTNAME=$(hostname)
+  SHORT_NAME=$(hostname -s)
+  FQDN_FROM_HOSTS=$(awk '$3 == "'${SHORT_NAME}'"{print $2}' /etc/hosts)
+  echo -n "Checking hostname vs /etc/hosts entry..."
+  if [[ $HOSTNAME != $FQDN_FROM_HOSTS ]]; then
+    echo "FAILURE"
+    echo -e "System hostname: ${HOSTNAME}\nEntry from /etc/hosts: ${FQDN_FROM_HOSTS}\n"
+    exit 1
+  fi
+  echo "SUCCESS"
+}
+
+# Verify at least one time source is available.
+function ntp_check() {
+  NTP_SERVERS=$(hiera ntp::servers nil |tr -d '[],"')
+  if [[ "$NTP_SERVERS" != "nil" ]];then
+    echo -n "Testing NTP..."
+    NTP_SUCCESS=0
+    for NTP_SERVER in $NTP_SERVERS; do
+      set +e
+      NTPDATE_OUT=$(ntpdate -qud $NTP_SERVER 2>&1)
+      NTPDATE_EXIT=$?
+      set -e
+      if [[ "$NTPDATE_EXIT" == "0" ]];then
+        NTP_SUCCESS=1
+        break
+      else
+        NTPDATE_OUT_FULL="$NTPDATE_OUT_FULL $NTPDATE_OUT"
+      fi
+    done
+    if  [[ "$NTP_SUCCESS" == "0" ]];then
+      echo "FAILURE"
+      echo "$NTPDATE_OUT_FULL"
+      exit 1
+    fi
+    echo "SUCCESS"
+  fi
+}
+
 ping_controller_ips "$ping_test_ips"
 ping_default_gateways
+if [[ $validate_fqdn == "True" ]];then
+  fqdn_check
+fi
+if [[ $validate_ntp == "True" ]];then
+  ntp_check
+fi