+================+=============+=============+=============+=============+=================+
| keystone       | X           | X           | X           | X           | X               |
+----------------+-------------+-------------+-------------+-------------+-----------------+
-| glance         | file        | swift       | file        | file        | swift           |
+| glance         | rbd         | swift       | file        | swift + rbd | swift           |
+----------------+-------------+-------------+-------------+-------------+-----------------+
| cinder         | rbd         | iscsi       |             |             | iscsi           |
+----------------+-------------+-------------+-------------+-------------+-----------------+
+----------------+-------------+-------------+-------------+-------------+-----------------+
| neutron        | ovs         | ovs         | ovs         | ovs         | X               |
+----------------+-------------+-------------+-------------+-------------+-----------------+
+| neutron-bgpvpn |             |             |             | X           |                 |
++----------------+-------------+-------------+-------------+-------------+-----------------+
| rabbitmq       | X           | X           | X           | X           | X               |
+----------------+-------------+-------------+-------------+-------------+-----------------+
| mongodb        | X           | X           |             |             |                 |
+----------------+-------------+-------------+-------------+-------------+-----------------+
| manila         |             |             |             | X           |                 |
+----------------+-------------+-------------+-------------+-------------+-----------------+
+| collectd       | X           |             |             |             |                 |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| fluentd        | X           |             |             |             |                 |
++----------------+-------------+-------------+-------------+-------------+-----------------+
+| sensu-client   | X           |             |             |             |                 |
++----------------+-------------+-------------+-------------+-------------+-----------------+
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive validations that occur on all nodes.
default: ''
  description: A string containing a space-separated list of IP addresses used to ping test each available network interface.
type: string
+ ValidateFqdn:
+ default: false
+ description: Optional validation to ensure the FQDN as set by Nova matches the name set in /etc/hosts.
+ type: boolean
+ ValidateNtp:
+ default: true
+ description: Validation to ensure at least one time source is accessible.
+ type: boolean
resources:
AllNodesValidationsImpl:
inputs:
- name: ping_test_ips
default: {get_param: PingTestIps}
+ - name: validate_fqdn
+ default: {get_param: ValidateFqdn}
+ - name: validate_ntp
+ default: {get_param: ValidateNtp}
config: {get_file: ./validation-scripts/all-nodes.sh}
outputs:
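
For reference, the validation switches introduced above can be toggled per deployment from a custom environment file; the following is only an illustrative sketch (the parameter names come from the template above, the IP addresses are placeholders):

parameter_defaults:
  PingTestIps: '192.168.24.1 10.0.0.1'
  ValidateFqdn: true
  ValidateNtp: false
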
--- /dev/null
+# This is a cross-platform list tracking distribution packages needed by tests;
+# see http://docs.openstack.org/infra/bindep/ for additional information.
-heat_template_version: ocata
+heat_template_version: pike
description: 'Bootstrap Config'
parameters:
# repository for deployment using puppet. It groups configuration by topic,
# describes possible combinations of environments and resource capabilities.
-# root_template: identifies repository's root template
-# root_environment: identifies root_environment, this one is special in terms of
-# order in which the environments are merged before deploying. This one serves as
-# a base and it's parameters/resource_registry gets overridden by other environments
-# if used.
-
# topics:
# High Level grouping by purpose of environments
# Attributes:
# only when that given environment is used. (resource_type of that environment can
# be implemented using multiple templates).
-root_template: overcloud.yaml
-root_environment: overcloud-resource-registry-puppet.yaml
topics:
- title: Base Resources Configuration
description:
description: >
Enable various Neutron plugins and backends
environments:
+ - file: environments/neutron-bgpvpn.yaml
+ title: Neutron BGPVPN Service Plugin
+ description: Enables Neutron BGPVPN Service Plugin
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- file: environments/neutron-ml2-bigswitch.yaml
title: BigSwitch Extensions
description: >
description: Enable FOS in the overcloud
requires:
- overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-nsx.yaml
+ title: Deploy NSX Services
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-l2gw.yaml
+ title: Neutron L2 gateway Service Plugin
+ description: Enables Neutron L2 gateway Service Plugin and Agent
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Nova Extensions
description:
description: >
Enable various Cinder backends
environments:
+ - file: environments/cinder-pure-config.yaml
+ title: Cinder Pure Storage FlashArray backend
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- file: environments/cinder-netapp-config.yaml
title: Cinder NetApp backend
description:
description: Enable monitoring agents
environments:
- file: environments/monitoring-environment.yaml
- title: enable monitoring agents
+ title: Enable monitoring agents
description:
requires:
- overcloud-resource-registry-puppet.yaml
description:
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Performance monitoring
+ description: Enable performance monitoring agents
+ environments:
+ - file: environments/collectd-environment.yaml
+ title: Enable performance monitoring agents
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- title: Security Options
description: Security Hardening Options
description:
requires:
- overcloud-resource-registry-puppet.yaml
+ - title: Keystone CADF auditing
+ description: Enable CADF notifications in Keystone for auditing
+ environments:
+ - file: environments/cadf.yaml
+ title: Keystone CADF auditing
+ - title: SecureTTY Values
+ description: Set values within /etc/securetty
+ environments:
+ - file: environments/securetty.yaml
+ title: SecureTTY Values
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Sshd
- name: Controller
CountDefault: 1
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderVolume
- - OS::TripleO::Services::Core
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
--- /dev/null
+# NOTE: This is an environment specific to containers upgrade
+# CI. We mainly deploy a non-pacemakerized overcloud because, at the time of
+# writing, containerization of the services managed by pacemaker is not
+# complete, so we deploy and upgrade the non-HA services for now.
+
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+
+ # NOTE: This is needed because of upgrades from Ocata to Pike. We
+ # deploy the initial environment with Ocata templates, and
+ # overcloud-resource-registry.yaml there doesn't have this Docker
+ # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
+ # remove this.
+ OS::TripleO::Services::Docker: OS::Heat::None
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Docker
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Sshd
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ # Required for Centos 7.3 and Qemu 2.6.0
+ nova::compute::libvirt::libvirt_cpu_mode: 'none'
+ #NOTE(gfidente): not great but we need this to deploy on ext4
+ #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+ ceph::profile::params::osd_max_object_name_len: 256
+ ceph::profile::params::osd_max_object_namespace_len: 64
+ SwiftCeilometerPipelineEnabled: False
+ Debug: True
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Core Service
OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+ OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
+ OS::TripleO::Services::SensuClient: ../../puppet/services/monitoring/sensu-client.yaml
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::AodhEvaluator
- OS::TripleO::Services::AodhNotifier
- OS::TripleO::Services::AodhListener
- - OS::TripleO::Services::CeilometerApi
- - OS::TripleO::Services::CeilometerCollector
- - OS::TripleO::Services::CeilometerExpirer
- OS::TripleO::Services::CeilometerAgentCentral
+ - OS::TripleO::Services::CeilometerAgentIpmi
- OS::TripleO::Services::CeilometerAgentNotification
- OS::TripleO::Services::GnocchiApi
- OS::TripleO::Services::GnocchiMetricd
- OS::TripleO::Services::Congress
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::SensuClient
+
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
CollectdExtraPlugins:
- rrdtool
+ LoggingServers:
+ - host: 127.0.0.1
+ port: 24224
+ MonitoringRabbitHost: 127.0.0.1
+ MonitoringRabbitPort: 5676
+ MonitoringRabbitPassword: sensu
+ TtyValues:
+ - console
+ - tty1
+ - tty2
+ - tty3
+ - tty4
+ - tty5
+ - tty6
OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::MistralExecutor
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml
OS::TripleO::Services::ManilaShare: ../../puppet/services/pacemaker/manila-share.yaml
OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+ OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
# These enable Pacemaker
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronBgpVpnApi
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
SwiftCeilometerPipelineEnabled: false
+ NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+ BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
-heat_template_version: ocata
+heat_template_version: pike
description: >
  HOT template to create resources deployed by scenario001.
ram: 512
vcpus: 1
-# Disabling this resource now
-# https://bugs.launchpad.net/tripleo/+bug/1646506
-# gnocchi_res_alarm:
-# type: OS::Aodh::GnocchiResourcesAlarm
-# properties:
-# description: Do stuff with gnocchi
-# metric: cpu_util
-# aggregation_method: mean
-# granularity: 60
-# evaluation_periods: 1
-# threshold: 50
-# alarm_actions: []
-# resource_type: instance
-# resource_id: { get_resource: server1 }
-# comparison_operator: gt
+ gnocchi_res_alarm:
+ type: OS::Aodh::GnocchiResourcesAlarm
+ properties:
+ description: Do stuff with gnocchi
+ metric: cpu_util
+ aggregation_method: mean
+ granularity: 60
+ evaluation_periods: 1
+ threshold: 50
+ alarm_actions: []
+ resource_type: instance
+ resource_id: { get_resource: server1 }
+ comparison_operator: gt
asg:
type: OS::Heat::AutoScalingGroup
-heat_template_version: ocata
+heat_template_version: pike
description: >
  HOT template to create resources deployed by scenario002.
-heat_template_version: ocata
+heat_template_version: pike
description: >
  HOT template to create resources deployed by scenario003.
-heat_template_version: ocata
+heat_template_version: pike
description: >
  HOT template to create resources deployed by scenario004.
properties:
name: default
driver_handles_share_servers: false
+ snapshot_support: false
manila_share:
type: OS::Manila::Share
properties:
+ name: pingtest
+ share_type: { get_resource: manila_share_type }
share_protocol: CEPHFS
size: 1
-heat_template_version: ocata
+heat_template_version: pike
description: >
This template resides in tripleo-ci for Mitaka CI jobs only.
-heat_template_version: ocata
+heat_template_version: pike
description: Passwords we manage at the top level
parameter_defaults:
ControlPlaneDefaultRoute: 192.168.122.130
ControlPlaneSubnetCidr: "24"
- EC2MetadataIp: "192.0.2.1"
+ EC2MetadataIp: "192.168.24.1"
In this example, 192.168.122.130 is the external management IP of an
undercloud, thus it is the default route for the configured local_ip value of
-192.0.2.1.
+192.168.24.1.
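
As a rough sketch of how these values are typically consumed, an os-net-config definition for the control plane interface would carry the metadata route via EC2MetadataIp and the default route via ControlPlaneDefaultRoute (the node address and NIC name below are illustrative, not taken from this change):

network_config:
  - type: interface
    name: nic1
    use_dhcp: false
    addresses:
      - ip_netmask: 192.168.24.2/24      # node address on the control plane (illustrative)
    routes:
      - ip_netmask: 169.254.169.254/32   # metadata traffic goes to the EC2MetadataIp
        next_hop: 192.168.24.1
      - default: true                    # default route via ControlPlaneDefaultRoute
        next_hop: 192.168.122.130
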
os-collect-config
-heat_template_version: ocata
+heat_template_version: pike
parameters:
network:
-heat_template_version: ocata
+heat_template_version: pike
description: "
A fake OS::Neutron::Port stack which outputs fixed_ips and subnets based on
openstack-puppet-modules \
os-net-config \
openvswitch \
- python-heat-agent*
+ python-heat-agent* \
+ openstack-selinux
ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
+
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/iptables
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/ip6tables
-heat_template_version: ocata
+heat_template_version: pike
description: 'Deployed Server Bootstrap Config'
openstack-puppet-modules \
os-net-config \
openvswitch \
- python-heat-agent*
+ python-heat-agent* \
+ openstack-selinux
ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
+
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/iptables
+echo '# empty ruleset created by deployed-server bootstrap' > /etc/sysconfig/ip6tables
-heat_template_version: ocata
+heat_template_version: pike
description: 'Deployed Server Bootstrap Config'
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::CephRgw
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
disable_constraints: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
-heat_template_version: ocata
+heat_template_version: pike
parameters:
image:
type: string
InstanceIdDeployment:
type: OS::Heat::StructuredDeployment
properties:
+ name: InstanceIdDeployment
config: {get_resource: InstanceIdConfig}
server: {get_resource: deployed-server}
depends_on: UpgradeInitDeployment
HostsEntryDeployment:
type: OS::Heat::SoftwareDeployment
properties:
+ name: HostsEntryDeployment
config: {get_resource: HostsEntryConfig}
server: {get_resource: deployed-server}
SUBNODES_SSH_KEY=${SUBNODES_SSH_KEY:-"~/.ssh/id_rsa"}
SSH_OPTIONS="-tt -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=Verbose -o PasswordAuthentication=no -o ConnectionAttempts=32"
OVERCLOUD_ROLES=${OVERCLOUD_ROLES:-"Controller Compute BlockStorage ObjectStorage CephStorage"}
+STACK_NAME=${STACK_NAME:-"overcloud"}
# Set the _hosts vars for the default roles based on the old var names that
# were all caps for backwards compatibility.
for role in $OVERCLOUD_ROLES; do
- while ! check_stack overcloud; do
+ while ! check_stack $STACK_NAME; do
sleep $SLEEP_TIME
done
- rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
+ rg_stack=$(openstack stack resource show $STACK_NAME $role -c physical_resource_id -f value)
while ! check_stack $rg_stack; do
sleep $SLEEP_TIME
- rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
+ rg_stack=$(openstack stack resource show $STACK_NAME $role -c physical_resource_id -f value)
done
- stacks=$(openstack stack resource list $rg_stack -c physical_resource_id -f value)
+ stacks=$(openstack stack resource list $rg_stack -c resource_name -c physical_resource_id -f json | jq -r "sort_by(.resource_name) | .[] | .physical_resource_id")
i=0
done
echo "======================"
- echo "$role$i os-collect-config.conf configuration:"
+ echo "$role$i deployed-server.json configuration:"
- config="
-[DEFAULT]
-collectors=request
-command=os-refresh-config
-polling_interval=30
-
-[request]
-metadata_url=$deployed_server_metadata_url"
+ config="{
+ \"os-collect-config\": {
+ \"collectors\": [\"request\", \"local\"],
+ \"request\": {
+ \"metadata_url\": \"$deployed_server_metadata_url\"
+ }
+ }
+}"
echo "$config"
echo "======================"
host=
eval host=\${${role}_hosts_a[i]}
if [ -n "$host" ]; then
- # Delete the os-collect-config.conf template so our file won't get
- # overwritten
- ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo /bin/rm -f /usr/libexec/os-apply-config/templates/etc/os-collect-config.conf
- ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host "echo \"$config\" > os-collect-config.conf"
- ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo cp os-collect-config.conf /etc/os-collect-config.conf
- ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl restart os-collect-config
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host "echo '$config' > deployed-server.json"
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo mkdir -p -m 0700 /var/lib/os-collect-config/local-data/ || true
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo cp deployed-server.json /var/lib/os-collect-config/local-data/deployed-server.json
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl start os-collect-config
+ ssh $SSH_OPTIONS -i $SUBNODES_SSH_KEY $host sudo systemctl enable os-collect-config
fi
let i+=1
# inside of a container.
import json
+import logging
import os
import subprocess
import sys
import tempfile
import multiprocessing
+log = logging.getLogger()
+log.setLevel(logging.DEBUG)
+ch = logging.StreamHandler(sys.stdout)
+ch.setLevel(logging.DEBUG)
+formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
+ch.setFormatter(formatter)
+log.addHandler(ch)
# this is to match what we do in deployed-server
def short_hostname():
def pull_image(name):
- print('Pulling image: %s' % name)
+ log.info('Pulling image: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
- print(cmd_stdout)
- print(cmd_stderr)
+ if cmd_stdout:
+ log.debug(cmd_stdout)
+ if cmd_stderr:
+ log.debug(cmd_stderr)
def rm_container(name):
if os.environ.get('SHOW_DIFF', None):
- print('Diffing container: %s' % name)
+ log.info('Diffing container: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
- print(cmd_stdout)
- print(cmd_stderr)
+ if cmd_stdout:
+ log.debug(cmd_stdout)
+ if cmd_stderr:
+ log.debug(cmd_stderr)
- print('Removing container: %s' % name)
+ log.info('Removing container: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
- print(cmd_stdout)
- print(cmd_stderr)
+ if cmd_stdout:
+ log.debug(cmd_stdout)
+ if cmd_stderr and \
+ cmd_stderr != 'Error response from daemon: ' \
+ 'No such container: {}\n'.format(name):
+ log.debug(cmd_stderr)
process_count = int(os.environ.get('PROCESS_COUNT',
multiprocessing.cpu_count()))
+log.info('Running docker-puppet')
config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
-print('docker-puppet')
-print('CONFIG: %s' % config_file)
+log.debug('CONFIG: %s' % config_file)
with open(config_file) as f:
json_data = json.load(f)
if not manifest or not config_image:
continue
- print('---------')
- print('config_volume %s' % config_volume)
- print('puppet_tags %s' % puppet_tags)
- print('manifest %s' % manifest)
- print('config_image %s' % config_image)
- print('volumes %s' % volumes)
+ log.debug('config_volume %s' % config_volume)
+ log.debug('puppet_tags %s' % puppet_tags)
+ log.debug('manifest %s' % manifest)
+ log.debug('config_image %s' % config_image)
+ log.debug('volumes %s' % volumes)
# We key off of config volume for all configs.
if config_volume in configs:
# Append puppet tags and manifest.
- print("Existing service, appending puppet tags and manifest\n")
+ log.info("Existing service, appending puppet tags and manifest")
if puppet_tags:
configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
puppet_tags)
configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
manifest)
if configs[config_volume][3] != config_image:
- print("WARNING: Config containers do not match even though"
- " shared volumes are the same!\n")
+ log.warn("Config containers do not match even though"
+ " shared volumes are the same!")
else:
- print("Adding new service\n")
+ log.info("Adding new service")
configs[config_volume] = service
-print('Service compilation completed.\n')
+log.info('Service compilation completed.')
def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
- print('---------')
- print('config_volume %s' % config_volume)
- print('puppet_tags %s' % puppet_tags)
- print('manifest %s' % manifest)
- print('config_image %s' % config_image)
- print('volumes %s' % volumes)
- hostname = short_hostname()
- sh_script = '/var/lib/docker-puppet/docker-puppet-%s.sh' % config_volume
+ log.debug('config_volume %s' % config_volume)
+ log.debug('puppet_tags %s' % puppet_tags)
+ log.debug('manifest %s' % manifest)
+ log.debug('config_image %s' % config_image)
+ log.debug('volumes %s' % volumes)
+ sh_script = '/var/lib/docker-puppet/docker-puppet.sh'
with open(sh_script, 'w') as script_file:
os.chmod(script_file.name, 0755)
mkdir -p /etc/puppet
cp -a /tmp/puppet-etc/* /etc/puppet
rm -Rf /etc/puppet/ssl # not in use and causes permission errors
- echo '{"step": %(step)s}' > /etc/puppet/hieradata/docker.json
+ echo "{\\"step\\": $STEP}" > /etc/puppet/hieradata/docker.json
TAGS=""
- if [ -n "%(puppet_tags)s" ]; then
- TAGS='--tags "%(puppet_tags)s"'
+ if [ -n "$PUPPET_TAGS" ]; then
+ TAGS="--tags \"$PUPPET_TAGS\""
fi
- FACTER_hostname=%(hostname)s FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
+ FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
# Disables archiving
- if [ -z "%(no_archive)s" ]; then
- rm -Rf /var/lib/config-data/%(name)s
+ if [ -z "$NO_ARCHIVE" ]; then
+ rm -Rf /var/lib/config-data/${NAME}
# copying etc should be enough for most services
- mkdir -p /var/lib/config-data/%(name)s/etc
- cp -a /etc/* /var/lib/config-data/%(name)s/etc/
+ mkdir -p /var/lib/config-data/${NAME}/etc
+ cp -a /etc/* /var/lib/config-data/${NAME}/etc/
if [ -d /root/ ]; then
- cp -a /root/ /var/lib/config-data/%(name)s/root/
+ cp -a /root/ /var/lib/config-data/${NAME}/root/
fi
if [ -d /var/lib/ironic/tftpboot/ ]; then
- mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
- cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/tftpboot/
+ mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
+ cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/tftpboot/
fi
if [ -d /var/lib/ironic/httpboot/ ]; then
- mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
- cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/httpboot/
+ mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
+ cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/httpboot/
fi
        # apache services may have files placed in /var/www/
if [ -d /var/www/ ]; then
- mkdir -p /var/lib/config-data/%(name)s/var/www
- cp -a /var/www/* /var/lib/config-data/%(name)s/var/www/
+ mkdir -p /var/lib/config-data/${NAME}/var/www
+ cp -a /var/www/* /var/lib/config-data/${NAME}/var/www/
fi
fi
- """ % {'puppet_tags': puppet_tags, 'name': config_volume,
- 'hostname': hostname,
- 'no_archive': os.environ.get('NO_ARCHIVE', ''),
- 'step': os.environ.get('STEP', '6')})
+ """)
with tempfile.NamedTemporaryFile() as tmp_man:
with open(tmp_man.name, 'w') as man_file:
dcmd = ['/usr/bin/docker', 'run',
'--user', 'root',
'--name', 'docker-puppet-%s' % config_volume,
+ '--env', 'PUPPET_TAGS=%s' % puppet_tags,
+ '--env', 'NAME=%s' % config_volume,
+ '--env', 'HOSTNAME=%s' % short_hostname(),
+ '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
+ '--env', 'STEP=%s' % os.environ.get('STEP', '6'),
'--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
'--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
'--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
'--volume', 'tripleo_logs:/var/log/tripleo/',
+ # OpenSSL trusted CA injection
+ '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
+ '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',
+ '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
+ '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
+ # script injection
'--volume', '%s:%s:rw' % (sh_script, sh_script) ]
for volume in volumes:
- dcmd.extend(['--volume', volume])
+ if volume:
+ dcmd.extend(['--volume', volume])
dcmd.extend(['--entrypoint', sh_script])
env = {}
+ # NOTE(flaper87): Always copy the DOCKER_* environment variables as
+ # they contain the access data for the docker daemon.
+ for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
+ env[k] = os.environ.get(k)
+
if os.environ.get('NET_HOST', 'false') == 'true':
- print('NET_HOST enabled')
+ log.debug('NET_HOST enabled')
dcmd.extend(['--net', 'host', '--volume',
'/etc/hosts:/etc/hosts:ro'])
dcmd.append(config_image)
+ log.debug('Running docker command: %s' % ' '.join(dcmd))
subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
cmd_stdout, cmd_stderr = subproc.communicate()
- print(cmd_stdout)
- print(cmd_stderr)
+ if cmd_stdout:
+ log.debug(cmd_stdout)
+ if cmd_stderr:
+ log.debug(cmd_stderr)
if subproc.returncode != 0:
- print('Failed running docker-puppet.py for %s' % config_volume)
- rm_container('docker-puppet-%s' % config_volume)
+ log.error('Failed running docker-puppet.py for %s' % config_volume)
+ else:
+ # only delete successful runs, for debugging
+ rm_container('docker-puppet-%s' % config_volume)
return subproc.returncode
# Holds all the information for each process to consume.
volumes = service[4] if len(service) > 4 else []
if puppet_tags:
- puppet_tags = "file,file_line,concat,%s" % puppet_tags
+ puppet_tags = "file,file_line,concat,augeas,%s" % puppet_tags
else:
- puppet_tags = "file,file_line,concat"
+ puppet_tags = "file,file_line,concat,augeas"
process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
for p in process_map:
- print '--\n%s' % p
+ log.debug('- %s' % p)
# Fire off processes to perform each configuration. Defaults
# to the number of CPUs on the system.
p = multiprocessing.Pool(process_count)
-p.map(mp_puppet_config, process_map)
+returncodes = list(p.map(mp_puppet_config, process_map))
+config_volumes = [pm[0] for pm in process_map]
+success = True
+for returncode, config_volume in zip(returncodes, config_volumes):
+ if returncode != 0:
+ log.error('ERROR configuring %s' % config_volume)
+ success = False
+
+if not success:
+ sys.exit(1)
--- /dev/null
+# certain initialization steps (run in a container) will occur
+# on the role marked as primary controller or the first role listed
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+ {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+ {%- set _ = primary_role.pop() -%}
+ {%- set _ = primary_role.append(role) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
+{% set deploy_steps_max = 6 -%}
+
+heat_template_version: pike
+
+description: >
+ Post-deploy configuration steps via puppet for all roles,
+ as defined in ../roles_data.yaml
+
+parameters:
+ servers:
+ type: json
+ description: Mapping of Role name e.g Controller to a list of servers
+ role_data:
+ type: json
+ description: Mapping of Role name e.g Controller to the per-role data
+ DeployIdentifier:
+ default: ''
+ type: string
+ description: >
+ Setting this to a unique value will re-run any deployment tasks which
+ perform configuration on a Heat stack-update.
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+ # These utility tasks use docker-puppet.py to execute tasks via puppet
+ # We only execute these on the first node in the primary role
+ {{primary_role_name}}DockerPuppetTasks:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ yaql:
+ expression:
+ $.data.default_tasks + dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
+ data:
+ docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]}
+ default_tasks:
+{%- for step in range(1, deploy_steps_max) %}
+ step_{{step}}: {}
+{%- endfor %}
+
+# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
+{% for step in range(1, deploy_steps_max) %}
+
+ {{primary_role_name}}DockerPuppetJsonConfig{{step}}:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json:
+ {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']}
+
+ {{primary_role_name}}DockerPuppetJsonDeployment{{step}}:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ server: {get_param: [servers, {{primary_role_name}}, '0']}
+ config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}}
+
+ {{primary_role_name}}DockerPuppetTasksConfig{{step}}:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: docker-puppet.py}
+ inputs:
+ - name: CONFIG
+ - name: NET_HOST
+ - name: NO_ARCHIVE
+ - name: STEP
+
+ {{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
+ type: OS::Heat::SoftwareDeployment
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step}}
+ - {{dep.name}}ContainersDeployment_Step{{step}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+ properties:
+ name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+ server: {get_param: [servers, {{primary_role_name}}, '0']}
+ config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
+ input_values:
+ CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
+ NET_HOST: 'true'
+ NO_ARCHIVE: 'true'
+ STEP: {{step}}
+
+{% endfor %}
+# END primary_role_name docker-puppet-tasks
+
+{% for role in roles %}
+ # Post deployment steps for all roles
+ # A single config is re-applied with an incrementing step number
+ # {{role.name}} Role steps
+ {{role.name}}ArtifactsConfig:
+ type: ../puppet/deploy-artifacts.yaml
+
+ {{role.name}}ArtifactsDeploy:
+ type: OS::Heat::StructuredDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ArtifactsConfig}
+
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}CreateConfigDir:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: create-config-dir.sh}
+
+ {{role.name}}CreateConfigDirDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}CreateConfigDir}
+
+ {{role.name}}HostPrepAnsible:
+ type: OS::Heat::Value
+ properties:
+ value:
+ str_replace:
+ template: CONFIG
+ params:
+ CONFIG:
+ - hosts: localhost
+ connection: local
+ tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
+
+ {{role.name}}HostPrepConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: ansible
+ options:
+ modulepath: /usr/share/ansible-modules
+ config: {get_attr: [{{role.name}}HostPrepAnsible, value]}
+
+ {{role.name}}HostPrepDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}HostPrepConfig}
+
+ # this creates a JSON config file for our docker-puppet.py script
+ {{role.name}}GenPuppetConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-puppet/docker-puppet.json:
+ {get_param: [role_data, {{role.name}}, puppet_config]}
+
+ {{role.name}}GenPuppetDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}GenPuppetConfig}
+
+ {{role.name}}GenerateConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: {get_file: docker-puppet.py}
+ inputs:
+ - name: NET_HOST
+
+ {{role.name}}GenerateConfigDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment, {{role.name}}HostPrepDeployment]
+ properties:
+ name: {{role.name}}GenerateConfigDeployment
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}GenerateConfig}
+ input_values:
+ NET_HOST: 'true'
+
+ {{role.name}}PuppetStepConfig:
+ type: OS::Heat::Value
+ properties:
+ type: string
+ value:
+ yaql:
+ expression:
+ # select 'step_config' only from services that do not have a docker_config
+ $.data.service_names.zip($.data.step_config, $.data.docker_config).where($[2] = null).where($[1] != null).select($[1]).join("\n")
+ data:
+ service_names: {get_param: [role_data, {{role.name}}, service_names]}
+ step_config: {get_param: [role_data, {{role.name}}, step_config]}
+ docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
+
+ {{role.name}}DockerConfig:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ yaql:
+ expression:
+ # select 'docker_config' only from services that have it
+ $.data.service_names.zip($.data.docker_config).where($[1] != null).select($[1]).reduce($1.mergeWith($2), {})
+ data:
+ service_names: {get_param: [role_data, {{role.name}}, service_names]}
+ docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
+
+ # Here we are dumping all the docker container startup configuration data
+ # so that we can have access to how they are started outside of heat
+ # and docker-cmd. This lets us create command line tools to start and
+ # test these containers.
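+  # As an illustration only (not used by the deployment flow), the file
+  # written below can be inspected on a node to see the per-step container
+  # definitions, e.g.: jq 'keys' /var/lib/docker-container-startup-configs.json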
+ {{role.name}}DockerConfigJsonStartupData:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ /var/lib/docker-container-startup-configs.json:
+ {get_attr: [{{role.name}}DockerConfig, value]}
+
+ {{role.name}}DockerConfigJsonStartupDataDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ config: {get_resource: {{role.name}}DockerConfigJsonStartupData}
+ servers: {get_param: [servers, {{role.name}}]}
+
+ {{role.name}}KollaJsonConfig:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: json-file
+ config:
+ {get_param: [role_data, {{role.name}}, kolla_config]}
+
+ {{role.name}}KollaJsonDeployment:
+ type: OS::Heat::SoftwareDeploymentGroup
+ properties:
+ name: {{role.name}}KollaJsonDeployment
+ config: {get_resource: {{role.name}}KollaJsonConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+
+ # BEGIN BAREMETAL CONFIG STEPS
+
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ {{role.name}}Config:
+ type: OS::TripleO::{{role.name}}Config
+ properties:
+ StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
+
+ {% for step in range(1, deploy_steps_max) %}
+
+ {{role.name}}Deployment_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ {% else %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ - {{dep.name}}ContainersDeployment_Step{{step -1}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+ {% endif %}
+ properties:
+ name: {{role.name}}Deployment_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}Config}
+ input_values:
+ step: {{step}}
+ update_identifier: {get_param: DeployIdentifier}
+
+ {% endfor %}
+ # END BAREMETAL CONFIG STEPS
+
+ # BEGIN CONTAINER CONFIG STEPS
+ {% for step in range(1, deploy_steps_max) %}
+
+ {{role.name}}ContainersConfig_Step{{step}}:
+ type: OS::Heat::StructuredConfig
+ properties:
+ group: docker-cmd
+ config:
+ {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]}
+
+ {{role.name}}ContainersDeployment_Step{{step}}:
+ type: OS::Heat::StructuredDeploymentGroup
+ {% if step == 1 %}
+ depends_on:
+ - {{role.name}}KollaJsonDeployment
+ - {{role.name}}GenPuppetDeployment
+ - {{role.name}}GenerateConfigDeployment
+ {%- for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
+ {%- endfor %}
+ {% else %}
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}ContainersDeployment_Step{{step -1}}
+ - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
+ {% endif %}
+ properties:
+ name: {{role.name}}ContainersDeployment_Step{{step}}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}}
+
+ {% endfor %}
+ # END CONTAINER CONFIG STEPS
+
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ - {{primary_role_name}}DockerPuppetTasksDeployment5
+ {% endfor %}
+ properties:
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
+ # Note, this should come last, so use depends_on to ensure
+ # this is created after any other resources.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}PostConfig
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+
+{% endfor %}
#!/bin/bash
set -eux
-# TODO This would be better in puppet
+# This file contains setup steps that can't be or have not yet been moved to
+# puppet
-# TODO remove this when built image includes docker
-if [ ! -f "/usr/bin/docker" ]; then
- yum -y install docker
-fi
-
-# NOTE(mandre) $docker_namespace_is_registry is not a bash variable but is
-# a place holder for text replacement done via heat
-if [ "$docker_namespace_is_registry" = "True" ]; then
- /usr/bin/systemctl stop docker.service
- # if namespace is used with local registry, trim all namespacing
- trim_var=$docker_registry
- registry_host="${trim_var%%/*}"
- /bin/sed -i -r "s/^[# ]*INSECURE_REGISTRY *=.+$/INSECURE_REGISTRY='--insecure-registry $registry_host'/" /etc/sysconfig/docker
-fi
-
-# enable and start docker
-/usr/bin/systemctl enable docker.service
-/usr/bin/systemctl start docker.service
-
-# Disable libvirtd
+# Disable libvirtd since it conflicts with nova_libvirt container
/usr/bin/systemctl disable libvirtd.service
/usr/bin/systemctl stop libvirtd.service
-heat_template_version: ocata
+heat_template_version: pike
parameters:
DockerNamespace:
--- /dev/null
+# Note the include here is the same as post.j2.yaml but the data used at
+# the time of rendering is different if any roles disable upgrades
+{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% include 'docker-steps.j2' %}
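
For context, disable_upgrade_deployment is a per-role flag in roles_data.yaml; a role defined with it set, as in the hypothetical entry below, is filtered out when this upgrade variant of the template is rendered:

- name: ObjectStorage          # hypothetical role entry for illustration
  disable_upgrade_deployment: True
  ServicesDefault:
    - OS::TripleO::Services::SwiftStorage
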
-# certain initialization steps (run in a container) will occur
-# on the first role listed in the roles file
-{% set primary_role_name = roles[0].name -%}
-
-heat_template_version: ocata
-
-description: >
- Post-deploy configuration steps via puppet for all roles,
- as defined in ../roles_data.yaml
-
-parameters:
- servers:
- type: json
- description: Mapping of Role name e.g Controller to a list of servers
- role_data:
- type: json
- description: Mapping of Role name e.g Controller to the per-role data
- DeployIdentifier:
- default: ''
- type: string
- description: >
- Setting this to a unique value will re-run any deployment tasks which
- perform configuration on a Heat stack-update.
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- # These utility tasks use docker-puppet.py to execute tasks via puppet
- # We only execute these on the first node in the primary role
- {{primary_role_name}}DockerPuppetTasks:
- type: OS::Heat::Value
- properties:
- type: json
- value:
- yaql:
- expression:
- dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
- data:
- docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]}
-
-# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
-{% for step in range(1, 6) %}
-
- {{primary_role_name}}DockerPuppetJsonConfig{{step}}:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json:
- {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']}
-
- {{primary_role_name}}DockerPuppetJsonDeployment{{step}}:
- type: OS::Heat::SoftwareDeployment
- properties:
- server: {get_param: [servers, {{primary_role_name}}, '0']}
- config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}}
-
- {{primary_role_name}}DockerPuppetTasksConfig{{step}}:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: docker-puppet.py}
- inputs:
- - name: CONFIG
- - name: NET_HOST
- - name: NO_ARCHIVE
- - name: STEP
-
- {{primary_role_name}}DockerPuppetTasksDeployment{{step}}:
- type: OS::Heat::SoftwareDeployment
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step{{step}}
- - {{dep.name}}ContainersDeployment_Step{{step}}
- {% endfor %}
- - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
- properties:
- name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
- server: {get_param: [servers, {{primary_role_name}}, '0']}
- config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
- input_values:
- CONFIG: /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json
- NET_HOST: 'true'
- NO_ARCHIVE: 'true'
- STEP: {{step}}
-
-{% endfor %}
-# END primary_role_name docker-puppet-tasks
-
-{% for role in roles %}
- # Post deployment steps for all roles
- # A single config is re-applied with an incrementing step number
- # {{role.name}} Role steps
- {{role.name}}ArtifactsConfig:
- type: ../puppet/deploy-artifacts.yaml
-
- {{role.name}}ArtifactsDeploy:
- type: OS::Heat::StructuredDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ArtifactsConfig}
-
- {{role.name}}PreConfig:
- type: OS::TripleO::Tasks::{{role.name}}PreConfig
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}CreateConfigDir:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: create-config-dir.sh}
-
- {{role.name}}CreateConfigDirDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}CreateConfigDir}
-
- # this creates a JSON config file for our docker-puppet.py script
- {{role.name}}GenPuppetConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- /var/lib/docker-puppet/docker-puppet.json:
- {get_param: [role_data, {{role.name}}, puppet_config]}
-
- {{role.name}}GenPuppetDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}GenPuppetConfig}
-
- {{role.name}}GenerateConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: docker-puppet.py}
-
- {{role.name}}GenerateConfigDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment]
- properties:
- name: {{role.name}}GenerateConfigDeployment
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}GenerateConfig}
-
- {{role.name}}PuppetStepConfig:
- type: OS::Heat::Value
- properties:
- type: string
- value:
- yaql:
- expression:
- # select 'step_config' only from services that do not have a docker_image
- $.data.service_names.zip($.data.step_config, $.data.docker_image).where($[2] = null).where($[1] != null).select($[1]).join("\n")
- data:
- service_names: {get_param: [role_data, {{role.name}}, service_names]}
- step_config: {get_param: [role_data, {{role.name}}, step_config]}
- docker_image: {get_param: [role_data, {{role.name}}, docker_image]}
-
- {{role.name}}DockerConfig:
- type: OS::Heat::Value
- properties:
- type: json
- value:
- yaql:
- expression:
- # select 'docker_config' only from services that have a docker_image
- $.data.service_names.zip($.data.docker_config, $.data.docker_image).where($[2] != null).select($[1]).reduce($1.mergeWith($2), {})
- data:
- service_names: {get_param: [role_data, {{role.name}}, service_names]}
- docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
- docker_image: {get_param: [role_data, {{role.name}}, docker_image]}
-
- # Here we are dumping all the docker container startup configuration data
- # so that we can have access to how they are started outside of heat
- # and docker-cmd. This lets us create command line tools to start and
- # test these containers.
- {{role.name}}DockerConfigJsonStartupData:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- /var/lib/docker-container-startup-configs.json:
- {get_attr: [{{role.name}}DockerConfig, value]}
-
- {{role.name}}DockerConfigJsonStartupDataDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: {{role.name}}DockerConfigJsonStartupData}
- servers: {get_param: [servers, {{role.name}}]}
-
- {{role.name}}KollaJsonConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- {get_param: [role_data, {{role.name}}, kolla_config]}
-
- {{role.name}}KollaJsonDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: {{role.name}}KollaJsonDeployment
- config: {get_resource: {{role.name}}KollaJsonConfig}
- servers: {get_param: [servers, {{role.name}}]}
-
- # BEGIN BAREMETAL CONFIG STEPS
-
- {% if role.name == 'Controller' %}
- ControllerPrePuppet:
- type: OS::TripleO::Tasks::ControllerPrePuppet
- properties:
- servers: {get_param: [servers, Controller]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
- {% endif %}
-
- {{role.name}}Config:
- type: OS::TripleO::{{role.name}}Config
- properties:
- StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
-
- {% for step in range(1, 6) %}
-
- {{role.name}}Deployment_Step{{step}}:
- type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step{{step -1}}
- - {{dep.name}}ContainersDeployment_Step{{step -1}}
- {% endfor %}
- - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
- {% endif %}
- properties:
- name: {{role.name}}Deployment_Step{{step}}
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}Config}
- input_values:
- step: {{step}}
- update_identifier: {get_param: DeployIdentifier}
-
- {% endfor %}
- # END BAREMETAL CONFIG STEPS
-
- # BEGIN CONTAINER CONFIG STEPS
- {% for step in range(1, 6) %}
-
- {{role.name}}ContainersConfig_Step{{step}}:
- type: OS::Heat::StructuredConfig
- properties:
- group: docker-cmd
- config:
- {get_attr: [{{role.name}}DockerConfig, value, step_{{step}}]}
-
- {{role.name}}ContainersDeployment_Step{{step}}:
- type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on:
- - {{role.name}}PreConfig
- - {{role.name}}KollaJsonDeployment
- - {{role.name}}GenPuppetDeployment
- - {{role.name}}GenerateConfigDeployment
- {% else %}
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}ContainersDeployment_Step{{step -1}}
- - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
- - {{dep.name}}Deployment_Step{{step -1}}
- {% endfor %}
- - {{primary_role_name}}DockerPuppetTasksDeployment{{step -1}}
- {% endif %}
- properties:
- name: {{role.name}}ContainersDeployment_Step{{step}}
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}ContainersConfig_Step{{step}}}
-
- {% endfor %}
- # END CONTAINER CONFIG STEPS
-
- {{role.name}}PostConfig:
- type: OS::TripleO::Tasks::{{role.name}}PostConfig
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step5
- - {{primary_role_name}}DockerPuppetTasksDeployment5
- {% endfor %}
- properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- {{role.name}}ExtraConfigPost:
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}PostConfig
- {% endfor %}
- type: OS::TripleO::NodeExtraConfigPost
- properties:
- servers: {get_param: [servers, {{role.name}}]}
-
- {% if role.name == 'Controller' %}
- ControllerPostPuppet:
- depends_on:
- - ControllerExtraConfigPost
- type: OS::TripleO::Tasks::ControllerPostPuppet
- properties:
- servers: {get_param: [servers, Controller]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
- {% endif %}
-
-{% endfor %}
+{% include 'docker-steps.j2' %}
undercloud nova-scheduler also requires openstack-tripleo-common to
provide custom filters.
-To build Kolla images for TripleO adjust your kolla config to build your
+To build Kolla images for TripleO, adjust your kolla config [*]_ to build your
centos base image with puppet using the example below:
.. code-block::
..
+.. [*] See the
+ `override file <https://github.com/openstack/tripleo-common/blob/master/contrib/tripleo_kolla_template_overrides.j2>`_
+   which can be used to build Kolla images that work with TripleO, and an
+   `example build script <https://github.com/dprince/undercloud_containers/blob/master/build_kolla.sh>`_.
Docker settings
---------------
the container itself at the /var/lib/kolla/config_files/config.json
location and drives how kolla's external config mechanisms work.
- * docker_image: The full name of the docker image that will be used.
-
 * docker_config: Data that is passed to the docker-cmd hook to configure
   a container, or a set of containers, at each step. See the available steps
   below and the related docker-cmd hook documentation in the heat-agents
   project.
- * puppet_tags: Puppet resource tag names that are used to generate config
- files with puppet. Only the named config resources are used to generate
- a config file. Any service that specifies tags will have the default
- tags of 'file,concat,file_line' appended to the setting.
- Example: keystone_config
-
- * config_volume: The name of the volume (directory) where config files
- will be generated for this service. Use this as the location to
- bind mount into the running Kolla container for configuration.
-
- * config_image: The name of the docker image that will be used for
- generating configuration files. This is often the same value as
- 'docker_image' above but some containers share a common set of
- config files which are generated in a common base container.
+ * puppet_config: This section is a nested set of key-value pairs
+   that drive the creation of config files using puppet (a minimal
+   sketch follows this list). Required parameters include:
+
+ * puppet_tags: Puppet resource tag names that are used to generate config
+ files with puppet. Only the named config resources are used to generate
+ a config file. Any service that specifies tags will have the default
+ tags of 'file,concat,file_line,augeas' appended to the setting.
+ Example: keystone_config
+
+ * config_volume: The name of the volume (directory) where config files
+ will be generated for this service. Use this as the location to
+ bind mount into the running Kolla container for configuration.
+
+ * config_image: The name of the docker image that will be used for
+ generating configuration files. This is often the same container
+ that the runtime service uses. Some services share a common set of
+ config files which are generated in a common base container.
+
+ * step_config: This setting controls the manifest that is used to
+   create docker config files via puppet. The puppet tags above are
+ used along with this manifest to generate a config directory for
+ this container.
* docker_puppet_tasks: This section provides data to drive the
docker-puppet.py tool directly. The task is executed only once
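
Taken together, the sections above form the role_data output of a
containerized service template. The following is a minimal, illustrative
sketch of how they fit together; the service name "foo", its image name
and its puppet tag are placeholders rather than a real TripleO service,
and in the real templates that follow, step_config and config_image are
normally pulled from the corresponding puppet service template instead of
being written inline:

.. code-block:: yaml

    # Hypothetical example only: "foo" stands in for a real service name.
    puppet_config:
      config_volume: foo
      puppet_tags: foo_config
      step_config: 'include ::tripleo::profile::base::foo'
      config_image: tripleoupstream/centos-binary-foo:latest
    kolla_config:
      /var/lib/kolla/config_files/foo.json:
        command: /usr/bin/foo-server
    docker_config:
      step_4:
        foo:
          image: tripleoupstream/centos-binary-foo:latest
          net: host
          restart: always
          volumes:
            - /var/lib/kolla/config_files/foo.json:/var/lib/kolla/config_files/config.json:ro
            - /var/lib/config-data/foo/etc/foo/:/etc/foo/:ro
          environment:
            - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
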
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized aodh service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhApiImage:
+ description: image
+ default: 'centos-binary-aodh-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ AodhApiPuppetBase:
+ type: ../../puppet/services/aodh-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the aodh API role.
+ value:
+ service_name: {get_attr: [AodhApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [AodhApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_api_paste_ini,aodh_config
+ step_config: *step_config
+ config_image: &aodh_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/aodh
+ owner: aodh:aodh
+ recurse: true
+ docker_config:
+ # db sync runs before permissions set by kolla_config
+ step_3:
+ aodh_init_log:
+ start_order: 0
+ image: *aodh_image
+ user: root
+ volumes:
+ - /var/log/containers/aodh:/var/log/aodh
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
+ aodh_db_sync:
+ start_order: 1
+ image: *aodh_image
+ net: host
+ privileged: false
+ detach: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/log/containers/aodh:/var/log/aodh
+ command: /usr/bin/aodh-dbsync
+ step_4:
+ aodh_api:
+ image: *aodh_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/lib/config-data/aodh/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/aodh/var/www/:/var/www/:ro
+ - /var/log/containers/aodh:/var/log/aodh
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/aodh
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable aodh service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
+ metadata_settings:
+ get_attr: [AodhApiPuppetBase, role_data, metadata_settings]
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Aodh Evaluator service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhEvaluatorImage:
+ description: image
+ default: 'centos-binary-aodh-evaluator:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ AodhEvaluatorBase:
+ type: ../../puppet/services/aodh-evaluator.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+    description: Role data for the Aodh Evaluator role.
+ value:
+ service_name: {get_attr: [AodhEvaluatorBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhEvaluatorBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhEvaluatorBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhEvaluatorBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_evaluator_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-evaluator.json:
+ command: /usr/bin/aodh-evaluator
+ permissions:
+ - path: /var/log/aodh
+ owner: aodh:aodh
+ recurse: true
+ docker_config:
+ step_4:
+ aodh_evaluator:
+ image: *aodh_evaluator_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/log/containers/aodh:/var/log/aodh
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/aodh
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-evaluator service
+ tags: step2
+ service: name=openstack-aodh-evaluator.service state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Aodh Listener service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhListenerImage:
+ description: image
+ default: 'centos-binary-aodh-listener:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ AodhListenerBase:
+ type: ../../puppet/services/aodh-listener.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+    description: Role data for the Aodh Listener role.
+ value:
+ service_name: {get_attr: [AodhListenerBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhListenerBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhListenerBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhListenerBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_listener_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-listener.json:
+ command: /usr/bin/aodh-listener
+ permissions:
+ - path: /var/log/aodh
+ owner: aodh:aodh
+ recurse: true
+ docker_config:
+ step_4:
+ aodh_listener:
+ image: *aodh_listener_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/aodh-listener.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/log/containers/aodh:/var/log/aodh
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/aodh
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-listener service
+ tags: step2
+ service: name=openstack-aodh-listener.service state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Aodh Notifier service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerAodhNotifierImage:
+ description: image
+ default: 'centos-binary-aodh-notifier:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ AodhNotifierBase:
+ type: ../../puppet/services/aodh-notifier.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+    description: Role data for the Aodh Notifier role.
+ value:
+ service_name: {get_attr: [AodhNotifierBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [AodhNotifierBase, role_data, config_settings]
+ step_config: &step_config
+ get_attr: [AodhNotifierBase, role_data, step_config]
+ service_config_settings: {get_attr: [AodhNotifierBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: aodh
+ puppet_tags: aodh_config
+ step_config: *step_config
+ config_image: &aodh_notifier_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/aodh-notifier.json:
+ command: /usr/bin/aodh-notifier
+ permissions:
+ - path: /var/log/aodh
+ owner: aodh:aodh
+ recurse: true
+ docker_config:
+ step_4:
+ aodh_notifier:
+ image: *aodh_notifier_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/aodh-notifier.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/log/containers/aodh:/var/log/aodh
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/aodh
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable openstack-aodh-notifier service
+ tags: step2
+ service: name=openstack-aodh-notifier.service state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Ceilometer Agent Central service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCeilometerCentralImage:
+ description: image
+ default: 'centos-binary-ceilometer-central:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CeilometerAgentCentralBase:
+ type: ../../puppet/services/ceilometer-agent-central.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Agent Central role.
+ value:
+ service_name: {get_attr: [CeilometerAgentCentralBase, role_data, service_name]}
+ config_settings: {get_attr: [CeilometerAgentCentralBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CeilometerAgentCentralBase, role_data, step_config]
+ service_config_settings: {get_attr: [CeilometerAgentCentralBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ceilometer
+ puppet_tags: ceilometer_config
+ step_config: *step_config
+ config_image: &ceilometer_agent_central_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerCentralImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ceilometer-agent-central.json:
+ command: /usr/bin/ceilometer-polling --polling-namespaces central
+ docker_config:
+ step_3:
+ ceilometer_init_log:
+ start_order: 0
+ image: *ceilometer_agent_central_image
+ user: root
+ command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+ volumes:
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ step_4:
+ ceilometer_agent_central:
+ image: *ceilometer_agent_central_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ceilometer-agent-central.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_5:
+ ceilometer_gnocchi_upgrade:
+ start_order: 1
+ image: *ceilometer_agent_central_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
+ upgrade_tasks:
+ - name: Stop and disable ceilometer agent central service
+ tags: step2
+ service: name=openstack-ceilometer-agent-central state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Ceilometer Agent Compute service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCeilometerComputeImage:
+ description: image
+ default: 'centos-binary-ceilometer-compute:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CeilometerAgentComputeBase:
+ type: ../../puppet/services/ceilometer-agent-compute.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Agent Compute role.
+ value:
+ service_name: {get_attr: [CeilometerAgentComputeBase, role_data, service_name]}
+ config_settings: {get_attr: [CeilometerAgentComputeBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CeilometerAgentComputeBase, role_data, step_config]
+ service_config_settings: {get_attr: [CeilometerAgentComputeBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ceilometer
+ puppet_tags: ceilometer_config
+ step_config: *step_config
+ config_image: &ceilometer_agent_compute_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerComputeImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ceilometer-agent-compute.json:
+ command: /usr/bin/ceilometer-polling --polling-namespaces compute
+ docker_config:
+ step_4:
+ ceilometer_agent-compute:
+ image: *ceilometer_agent_compute_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable ceilometer-agent-compute service
+ tags: step2
+ service: name=openstack-ceilometer-agent-compute state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Ceilometer Agent Notification service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCeilometerNotificationImage:
+ description: image
+ default: 'centos-binary-ceilometer-notification:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CeilometerAgentNotificationBase:
+ type: ../../puppet/services/ceilometer-agent-notification.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Agent Notification role.
+ value:
+ service_name: {get_attr: [CeilometerAgentNotificationBase, role_data, service_name]}
+ config_settings: {get_attr: [CeilometerAgentNotificationBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CeilometerAgentNotificationBase, role_data, step_config]
+ service_config_settings: {get_attr: [CeilometerAgentNotificationBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ceilometer
+ puppet_tags: ceilometer_config
+ step_config: *step_config
+ config_image: &ceilometer_agent_notification_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerNotificationImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ceilometer-agent-notification.json:
+ command: /usr/bin/ceilometer-agent-notification
+ docker_config:
+ step_3:
+ ceilometer_init_log:
+ start_order: 0
+ image: *ceilometer_agent_notification_image
+ user: root
+ command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+ volumes:
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ step_4:
+ ceilometer_agent-notification:
+ image: *ceilometer_agent_notification_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ceilometer-agent-notification.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_5:
+ ceilometer_gnocchi_upgrade:
+ start_order: 1
+ image: *ceilometer_agent_notification_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
+ upgrade_tasks:
+ - name: Stop and disable ceilometer agent notification service
+ tags: step2
+ service: name=openstack-ceilometer-notification state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Contains a static list of common things necessary for containers
+
+outputs:
+ volumes:
+ description: Common volumes for the containers.
+ value:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ # OpenSSL trusted CAs
+ - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
+ - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
+ - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
+ - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
+ # Syslog socket
+ - /dev/log:/dev/log
-heat_template_version: ocata
+heat_template_version: pike
description: >
MongoDB service deployment using puppet and docker
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- "\n"
- - "['Mongodb_database', 'Mongodb_user', 'Mongodb_replset'].each |String $val| { noop_resource($val) }"
- {get_attr: [MongodbPuppetBase, role_data, step_config]}
- upgrade_tasks: {get_attr: [MongodbPuppetBase, role_data, upgrade_tasks]}
# BEGIN DOCKER SETTINGS #
- docker_image: &mongodb_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
puppet_config:
config_volume: mongodb
puppet_tags: file # set this even though file is the default
step_config: *step_config
- config_image: *mongodb_image
+ config_image: &mongodb_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
kolla_config:
/var/lib/kolla/config_files/mongodb.json:
command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run
- config_files:
- - dest: /etc/mongod.conf
- source: /var/lib/kolla/config_files/src/etc/mongod.conf
- owner: mongodb
- perm: '0600'
- - dest: /etc/mongos.conf
- source: /var/lib/kolla/config_files/src/etc/mongos.conf
- owner: mongodb
- perm: '0600'
+ permissions:
+ - path: /var/lib/mongodb
+ owner: mongodb:mongodb
+ recurse: true
+ - path: /var/log/mongodb
+ owner: mongodb:mongodb
+ recurse: true
docker_config:
step_2:
mongodb:
privileged: false
volumes: &mongodb_volumes
- /var/lib/kolla/config_files/mongodb.json:/var/lib/kolla/config_files/config.json
- - /var/lib/config-data/mongodb/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/mongodb/etc/:/etc/:ro
- /etc/localtime:/etc/localtime:ro
- - logs:/var/log/kolla
- - mongodb:/var/lib/mongodb/
+ - /var/log/containers/mongodb:/var/log/mongodb
+ - /var/lib/mongodb:/var/lib/mongodb
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
docker_puppet_tasks:
config_volume: 'mongodb_init_tasks'
puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset'
step_config: 'include ::tripleo::profile::base::database::mongodb'
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+ config_image: *mongodb_image
volumes:
- - "mongodb:/var/lib/mongodb/"
- - "logs:/var/log/kolla:ro"
+ - /var/lib/mongodb:/var/lib/mongodb
+ - /var/log/containers/mongodb:/var/log/mongodb
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/mongodb
+ - /var/lib/mongodb
+ upgrade_tasks:
+ - name: Stop and disable mongodb service
+ tags: step2
+ service: name=mongod state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
MySQL service deployment using puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
MysqlRootPassword:
type: string
hidden: true
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- "\n"
- - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }"
- {get_attr: [MysqlPuppetBase, role_data, step_config]}
- upgrade_tasks: {get_attr: [MysqlPuppetBase, role_data, upgrade_tasks]}
# BEGIN DOCKER SETTINGS #
- docker_image: &mysql_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
puppet_config:
config_volume: mysql
puppet_tags: file # set this even though file is the default
step_config: *step_config
- config_image: *mysql_image
+ config_image: &mysql_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
kolla_config:
/var/lib/kolla/config_files/mysql.json:
command: /usr/bin/mysqld_safe
- config_files:
- - dest: /etc/mysql/my.cnf
- source: /var/lib/kolla/config_files/src/etc/my.cnf
- owner: mysql
- perm: '0644'
- - dest: /etc/my.cnf.d/galera.cnf
- source: /var/lib/kolla/config_files/src/etc/my.cnf.d/galera.cnf
- owner: mysql
- perm: '0644'
+ permissions:
+ - path: /var/lib/mysql
+ owner: mysql:mysql
+ recurse: true
docker_config:
+ # Kolla_bootstrap runs before permissions set by kolla_config
step_2:
- mysql_bootstrap:
+ mysql_init_logs:
start_order: 0
+ image: *mysql_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/mysql:/var/log/mariadb
+ command: ['/bin/bash', '-c', 'chown -R mysql:mysql /var/log/mariadb']
+ mysql_bootstrap:
+ start_order: 1
detach: false
image: *mysql_image
net: host
+ # Kolla bootstraps aren't idempotent, explicitly checking if bootstrap was done
+ command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
volumes: &mysql_volumes
- /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json
- - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/config-data/mysql/etc/:/etc/:ro
- /etc/localtime:/etc/localtime:ro
- /etc/hosts:/etc/hosts:ro
- - mariadb:/var/lib/mysql/
+ - /var/lib/mysql:/var/lib/mysql
+ - /var/log/containers/mysql:/var/log/mariadb
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- KOLLA_BOOTSTRAP=True
# NOTE(mandre) skip wsrep cluster status check
- KOLLA_KUBERNETES=True
- -
+ -
list_join:
- '='
- - 'DB_ROOT_PASSWORD'
- {get_param: MysqlRootPassword}
- {get_param: [DefaultPasswords, mysql_root_password]}
mysql:
- start_order: 1
+ start_order: 2
image: *mysql_image
restart: always
net: host
config_volume: 'mysql_init_tasks'
puppet_tags: 'mysql_database,mysql_grant,mysql_user'
step_config: 'include ::tripleo::profile::base::database::mysql'
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+ config_image: *mysql_image
volumes:
- - "mariadb:/var/lib/mysql/:ro"
- - "/var/lib/config-data/mysql/root:/root:ro" #provides .my.cnf
+ - /var/lib/mysql:/var/lib/mysql/:ro
+ - /var/log/containers/mysql:/var/log/mariadb
+ - /var/lib/config-data/mysql/root:/root:ro #provides .my.cnf
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/mysql
+ - /var/lib/mysql
+ upgrade_tasks:
+ - name: Stop and disable mysql service
+ tags: step2
+ service: name=mariadb state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Redis services
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerRedisImage:
+ description: image
+ default: 'centos-binary-redis:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ RedisBase:
+ type: ../../../puppet/services/database/redis.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+    description: Role data for the Redis role.
+ value:
+ service_name: {get_attr: [RedisBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - {get_attr: [RedisBase, role_data, config_settings]}
+ - redis::daemonize: false
+ step_config: &step_config
+ get_attr: [RedisBase, role_data, step_config]
+ service_config_settings: {get_attr: [RedisBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: 'redis'
+ # NOTE: we need the exec tag to copy /etc/redis.conf.puppet to
+ # /etc/redis.conf
+ # https://github.com/arioch/puppet-redis/commit/1c004143223e660cbd433422ff8194508aab9763
+ puppet_tags: 'exec'
+ step_config: *step_config
+ config_image: &redis_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRedisImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/redis.json:
+ command: /usr/bin/redis-server /etc/redis.conf
+ permissions:
+ - path: /var/run/redis
+ owner: redis:redis
+ recurse: true
+ docker_config:
+ step_1:
+ redis:
+ image: *redis_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/redis.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/redis/etc/:/etc/:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log/kolla
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/run/redis
+ file:
+ path: /var/run/redis
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable redis service
+ tags: step2
+ service: name=redis state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized etcd services
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerEtcdImage:
+ description: image
+ default: 'centos-binary-etcd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EtcdInitialClusterToken:
+ description: Initial cluster token for the etcd cluster during bootstrap.
+ type: string
+ hidden: true
+
+resources:
+
+ EtcdPuppetBase:
+ type: ../../puppet/services/etcd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EtcdInitialClusterToken: {get_param: EtcdInitialClusterToken}
+
+outputs:
+ role_data:
+ description: Role data for the etcd role.
+ value:
+ service_name: {get_attr: [EtcdPuppetBase, role_data, service_name]}
+ step_config: &step_config
+ list_join:
+ - "\n"
+ - - "['Etcd_key'].each |String $val| { noop_resource($val) }"
+ - get_attr: [EtcdPuppetBase, role_data, step_config]
+ config_settings:
+ map_merge:
+ - {get_attr: [EtcdPuppetBase, role_data, config_settings]}
+ - etcd::manage_service: false
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: etcd
+ step_config: *step_config
+ config_image: &etcd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerEtcdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/etcd.json:
+ command: /usr/bin/etcd --config-file /etc/etcd/etcd.yml
+ permissions:
+ - path: /var/lib/etcd
+ owner: etcd:etcd
+ recurse: true
+ docker_config:
+ step_2:
+ etcd:
+ image: *etcd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/etcd:/var/lib/etcd
+ - /etc/localtime:/etc/localtime:ro
+ - /var/lib/kolla/config_files/etcd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/etcd/etc/etcd/etcd.yml:/etc/etcd/etcd.yml:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ docker_puppet_tasks:
+ # Etcd keys initialization occurs only on single node
+ step_2:
+ config_volume: 'etcd_init_tasks'
+ puppet_tags: 'etcd_key'
+ step_config: 'include ::tripleo::profile::base::etcd'
+ config_image: *etcd_image
+ volumes:
+ - /var/lib/config-data/etcd/etc/:/etc
+ - /var/lib/etcd:/var/lib/etcd:ro
+ host_prep_tasks:
+ - name: create /var/lib/etcd
+ file:
+ path: /var/lib/etcd
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable etcd service
+ tags: step2
+ service: name=etcd state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Glance service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
GlanceApiPuppetBase:
type: ../../puppet/services/glance-api.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [GlanceApiPuppetBase, role_data, step_config]
service_config_settings: {get_attr: [GlanceApiPuppetBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS #
- docker_image: &glance_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
puppet_config:
config_volume: glance_api
puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
step_config: *step_config
- config_image: *glance_image
+ config_image: &glance_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
kolla_config:
- /var/lib/kolla/config_files/glance-api.json:
- command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
- config_files:
- - dest: /etc/glance/glance-api.conf
- owner: glance
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/glance/glance-api.conf
- - dest: /etc/glance/glance-swift.conf
- owner: glance
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/glance/glance-swift.conf
+ /var/lib/kolla/config_files/glance-api.json:
+ command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
+ /var/lib/kolla/config_files/glance_api_tls_proxy.json:
+ command: /usr/sbin/httpd -DFOREGROUND
docker_config:
+ # Kolla_bootstrap/db_sync runs before permissions set by kolla_config
step_3:
+ glance_init_logs:
+ start_order: 0
+ image: *glance_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/glance:/var/log/glance
+ command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
glance_api_db_sync:
+ start_order: 1
image: *glance_image
net: host
privileged: false
detach: false
volumes: &glance_volumes
- - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /var/lib/config-data/glance_api/:/var/lib/kolla/config_files/src:ro
- - /run:/run
- - /dev:/dev
- - /etc/hosts:/etc/hosts:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/glance_api/etc/glance/:/etc/glance/:ro
+ - /var/log/containers/glance:/var/log/glance
environment:
- KOLLA_BOOTSTRAP=True
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
step_4:
- glance_api:
- image: *glance_image
- net: host
- privileged: false
- restart: always
- volumes: *glance_volumes
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ map_merge:
+ - glance_api:
+ start_order: 2
+ image: *glance_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: *glance_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - if:
+ - internal_tls_enabled
+ - glance_api_tls_proxy:
+ start_order: 2
+ image: *glance_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/glance_api_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/glance_api/etc/httpd/:/etc/httpd/:ro
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - {}
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/glance
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable glance_api service
+ tags: step2
+ service: name=openstack-glance-api state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized gnocchi service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiApiImage:
+ description: image
+ default: 'centos-binary-gnocchi-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ GnocchiApiPuppetBase:
+ type: ../../puppet/services/gnocchi-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the gnocchi API role.
+ value:
+ service_name: {get_attr: [GnocchiApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [GnocchiApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [GnocchiApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_api_paste_ini,gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/gnocchi
+ owner: gnocchi:gnocchi
+ recurse: true
+ docker_config:
+ # db sync runs before permissions set by kolla_config
+ step_3:
+ gnocchi_init_log:
+ start_order: 0
+ image: *gnocchi_image
+ user: root
+ volumes:
+ - /var/log/containers/gnocchi:/var/log/gnocchi
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
+ gnocchi_db_sync:
+ start_order: 1
+ image: *gnocchi_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - /var/log/containers/gnocchi:/var/log/gnocchi
+ command: ["/usr/bin/gnocchi-upgrade", "--skip-storage"]
+ step_4:
+ gnocchi_api:
+ image: *gnocchi_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/gnocchi-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - /var/lib/config-data/gnocchi/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/gnocchi/var/www/:/var/www/:ro
+ - /var/log/containers/gnocchi:/var/log/gnocchi
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/gnocchi
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable httpd service
+ tags: step2
+ service: name=httpd state=stopped enabled=no
+ metadata_settings:
+ get_attr: [GnocchiApiPuppetBase, role_data, metadata_settings]
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Gnocchi Metricd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiMetricdImage:
+ description: image
+ default: 'centos-binary-gnocchi-metricd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ GnocchiMetricdBase:
+ type: ../../puppet/services/gnocchi-metricd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+    description: Role data for the Gnocchi Metricd role.
+ value:
+ service_name: {get_attr: [GnocchiMetricdBase, role_data, service_name]}
+ config_settings: {get_attr: [GnocchiMetricdBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [GnocchiMetricdBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiMetricdBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_metricd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-metricd.json:
+ command: /usr/bin/gnocchi-metricd
+ permissions:
+ - path: /var/log/gnocchi
+ owner: gnocchi:gnocchi
+ recurse: true
+ docker_config:
+ step_4:
+ gnocchi_metricd:
+ image: *gnocchi_metricd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/gnocchi-metricd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - /var/log/containers/gnocchi:/var/log/gnocchi
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/gnocchi
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable openstack-gnocchi-metricd service
+ tags: step2
+ service: name=openstack-gnocchi-metricd.service state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Gnocchi Statsd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerGnocchiStatsdImage:
+ description: image
+ default: 'centos-binary-gnocchi-statsd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ GnocchiStatsdBase:
+ type: ../../puppet/services/gnocchi-statsd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+    description: Role data for the Gnocchi Statsd role.
+ value:
+ service_name: {get_attr: [GnocchiStatsdBase, role_data, service_name]}
+ config_settings: {get_attr: [GnocchiStatsdBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [GnocchiStatsdBase, role_data, step_config]
+ service_config_settings: {get_attr: [GnocchiStatsdBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: gnocchi
+ puppet_tags: gnocchi_config
+ step_config: *step_config
+ config_image: &gnocchi_statsd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/gnocchi-statsd.json:
+ command: /usr/bin/gnocchi-statsd
+ permissions:
+ - path: /var/log/gnocchi
+ owner: gnocchi:gnocchi
+ recurse: true
+ docker_config:
+ step_4:
+ gnocchi_statsd:
+ image: *gnocchi_statsd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/gnocchi-statsd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - /var/log/containers/gnocchi:/var/log/gnocchi
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/gnocchi
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable openstack-gnocchi-statsd service
+ tags: step2
+ service: name=openstack-gnocchi-statsd.service state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Heat API CFN service
description: image
default: 'centos-binary-heat-api-cfn:latest'
type: string
- # we configure all heat services in the same heat engine container
- DockerHeatEngineImage:
+ # puppet needs the heat-wsgi-api-cfn binary from centos-binary-heat-api-cfn
+ DockerHeatConfigImage:
description: image
- default: 'centos-binary-heat-engine:latest'
+ default: 'centos-binary-heat-api-cfn:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
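+# the internal_tls_enabled condition is used below to bind mount the httpd
+# certificate and key directories read-only; an empty string placeholder is
+# used when internal TLS is disabled.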
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
HeatBase:
type: ../../puppet/services/heat-api-cfn.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [HeatBase, role_data, step_config]
service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &heat_api_cfn_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ]
puppet_config:
- config_volume: heat
+ config_volume: heat_api_cfn
puppet_tags: heat_config,file,concat,file_line
step_config: *step_config
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/heat_api_cfn.json:
- command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
- config_files:
- - dest: /etc/heat/heat.conf
- owner: heat
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ /var/lib/kolla/config_files/heat_api_cfn.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/heat
+ owner: heat:heat
+ recurse: true
docker_config:
step_4:
heat_api_cfn:
- image: *heat_api_cfn_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnImage} ]
net: host
privileged: false
restart: always
+ # NOTE(mandre) kolla image changes the user to 'heat', we need it
+ # to be root to run httpd
+ user: root
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat_api_cfn/etc/heat/:/etc/heat/:ro
+ - /var/lib/config-data/heat_api_cfn/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/heat_api_cfn/var/www/:/var/www/:ro
+ - /var/log/containers/heat:/var/log/heat
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/heat
+ state: directory
+ upgrade_tasks:
+        - name: Stop and disable heat_api_cfn service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Heat API service
description: image
default: 'centos-binary-heat-api:latest'
type: string
- # we configure all heat services in the same heat engine container
- DockerHeatEngineImage:
+ # puppet needs the heat-wsgi-api binary from centos-binary-heat-api
+ DockerHeatConfigImage:
description: image
- default: 'centos-binary-heat-engine:latest'
+ default: 'centos-binary-heat-api:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
HeatBase:
type: ../../puppet/services/heat-api.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [HeatBase, role_data, step_config]
service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &heat_api_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ]
puppet_config:
- config_volume: heat
+ config_volume: heat_api
puppet_tags: heat_config,file,concat,file_line
step_config: *step_config
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/heat_api.json:
- command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
- config_files:
- - dest: /etc/heat/heat.conf
- owner: heat
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ /var/lib/kolla/config_files/heat_api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/heat
+ owner: heat:heat
+ recurse: true
docker_config:
step_4:
heat_api:
- image: *heat_api_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiImage} ]
net: host
privileged: false
restart: always
+ # NOTE(mandre) kolla image changes the user to 'heat', we need it
+ # to be root to run httpd
+ user: root
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat_api/etc/heat/:/etc/heat/:ro
+ - /var/lib/config-data/heat_api/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/heat_api/var/www/:/var/www/:ro
+ - /var/log/containers/heat:/var/log/heat
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/heat
+ state: directory
+ upgrade_tasks:
+        - name: Stop and disable heat_api service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Heat Engine service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
HeatBase:
type: ../../puppet/services/heat-engine.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [HeatBase, role_data, step_config]
service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &heat_engine_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
puppet_config:
config_volume: heat
puppet_tags: heat_config,file,concat,file_line
step_config: *step_config
- config_image: *heat_engine_image
+ config_image: &heat_engine_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
kolla_config:
- /var/lib/kolla/config_files/heat_engine.json:
- command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
- config_files:
- - dest: /etc/heat/heat.conf
- owner: heat
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/heat/heat.conf
+ /var/lib/kolla/config_files/heat_engine.json:
+ command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ permissions:
+ - path: /var/log/heat
+ owner: heat:heat
+ recurse: true
docker_config:
+ # db sync runs before permissions set by kolla_config
step_3:
+ heat_init_log:
+ start_order: 0
+ image: *heat_engine_image
+ user: root
+ volumes:
+ - /var/log/containers/heat:/var/log/heat
+ command: ['/bin/bash', '-c', 'chown -R heat:heat /var/log/heat']
heat_engine_db_sync:
+ start_order: 1
image: *heat_engine_image
net: host
privileged: false
detach: false
volumes:
- - /var/lib/config-data/heat/etc/heat:/etc/heat:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
+ - /var/log/containers/heat:/var/log/heat
command: ['heat-manage', 'db_sync']
step_4:
heat_engine:
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/heat/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
+ - /var/log/containers/heat:/var/log/heat
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/heat
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable heat_engine service
+ tags: step2
+ service: name=openstack-heat-engine state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Ironic API service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
IronicApiBase:
type: ../../puppet/services/ironic-api.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [IronicApiBase, role_data, step_config]
service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &ironic_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
puppet_config:
config_volume: ironic
puppet_tags: ironic_config
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/ironic_api.json:
- command: /usr/bin/ironic-api
- config_files:
- - dest: /etc/ironic/ironic.conf
- owner: ironic
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
+ /var/lib/kolla/config_files/ironic_api.json:
+ command: /usr/bin/ironic-api
+ permissions:
+ - path: /var/log/ironic
+ owner: ironic:ironic
+ recurse: true
docker_config:
+ # db sync runs before permissions set by kolla_config
step_3:
+ ironic_init_logs:
+ start_order: 0
+ image: &ironic_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/ironic:/var/log/ironic
+ command: ['/bin/bash', '-c', 'chown -R ironic:ironic /var/log/ironic']
ironic_db_sync:
+ start_order: 1
image: *ironic_image
net: host
privileged: false
detach: false
volumes:
- - /var/lib/config-data/ironic/etc/:/etc/:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ironic/etc/:/etc/:ro
+ - /var/log/containers/ironic:/var/log/ironic
command: ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf']
step_4:
ironic_api:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/:/etc/:ro
+ - /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/ironic
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable ironic_api service
+ tags: step2
+ service: name=openstack-ironic-api state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Ironic Conductor service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
IronicConductorBase:
type: ../../puppet/services/ironic-conductor.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [IronicConductorBase, role_data, config_settings]
# to avoid hard linking errors we store these on the same
# volume/device as the ironic master_path
+ # https://github.com/docker/docker/issues/7457
- ironic::drivers::pxe::tftp_root: /var/lib/ironic/tftpboot
- ironic::drivers::pxe::tftp_master_path: /var/lib/ironic/tftpboot/master_images
- ironic::pxe::tftp_root: /var/lib/ironic/tftpboot
get_attr: [IronicConductorBase, role_data, step_config]
service_config_settings: {get_attr: [IronicConductorBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &ironic_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ]
puppet_config:
config_volume: ironic
puppet_tags: ironic_config
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/ironic_conductor.json:
- command: /usr/bin/ironic-conductor
- config_files:
- - dest: /etc/ironic/ironic.conf
- owner: ironic
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
- permissions:
- - path: /var/lib/ironic/httpboot
- owner: ironic:ironic
- recurse: true
- - path: /var/lib/ironic/tftpboot
- owner: ironic:ironic
- recurse: true
+ /var/lib/kolla/config_files/ironic_conductor.json:
+ command: /usr/bin/ironic-conductor
+ permissions:
+ - path: /var/lib/ironic
+ owner: ironic:ironic
+ recurse: true
+ - path: /var/log/ironic
+ owner: ironic:ironic
+ recurse: true
docker_config:
step_4:
- ironic-init-dirs:
- image: *ironic_image
- user: root
- command: ['/bin/bash', '-c', 'mkdir /var/lib/ironic/httpboot && mkdir /var/lib/ironic/tftpboot']
- volumes:
- - ironic:/var/lib/ironic
ironic_conductor:
start_order: 80
- image: *ironic_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicConductorImage} ]
net: host
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /sys:/sys
- - /dev:/dev
- - /run:/run #shared?
- - ironic:/var/lib/ironic
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+ - /lib/modules:/lib/modules:ro
+ - /sys:/sys
+ - /dev:/dev
+ - /run:/run #shared?
+ - /var/lib/ironic:/var/lib/ironic
+ - /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
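+      # the host_prep_tasks below migrate any pre-existing /httpboot and
+      # /tftpboot content from the baremetal services into /var/lib/ironic,
+      # which is bind-mounted into the conductor and PXE containers, and make
+      # sure both directories exist on fresh deployments as well.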
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/ironic
+ - /var/lib/ironic
+ - name: stat /httpboot
+ stat: path=/httpboot
+ register: stat_httpboot
+ - name: stat /tftpboot
+ stat: path=/tftpboot
+ register: stat_tftpboot
+ - name: stat /var/lib/ironic/httpboot
+ stat: path=/var/lib/ironic/httpboot
+ register: stat_ironic_httpboot
+ - name: stat /var/lib/ironic/tftpboot
+ stat: path=/var/lib/ironic/tftpboot
+ register: stat_ironic_tftpboot
+      # cannot use the 'copy' module: with 'remote_src' it does not support recursive copies
+ - name: migrate /httpboot to containerized (if applicable)
+ command: /bin/cp -R /httpboot /var/lib/ironic/httpboot
+ when: stat_httpboot.stat.exists and not stat_ironic_httpboot.stat.exists
+ - name: migrate /tftpboot to containerized (if applicable)
+ command: /bin/cp -R /tftpboot /var/lib/ironic/tftpboot
+ when: stat_tftpboot.stat.exists and not stat_ironic_tftpboot.stat.exists
+ # Even if there was nothing to copy from original locations,
+ # we need to create the dirs before starting the containers
+ - name: ensure ironic pxe directories exist
+ file:
+ path: /var/lib/ironic/{{ item }}
+ state: directory
+ with_items:
+ - httpboot
+ - tftpboot
+ upgrade_tasks:
+ - name: Stop and disable ironic_conductor service
+ tags: step2
+ service: name=openstack-ironic-conductor state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Ironic PXE service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
outputs:
role_data:
step_config: &step_config ''
service_config_settings: {}
# BEGIN DOCKER SETTINGS
- docker_image: &ironic_pxe_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ]
puppet_config:
config_volume: ironic
puppet_tags: ironic_config
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/ironic_pxe_http.json:
- command: /usr/sbin/httpd -DFOREGROUND
- config_files:
- - dest: /etc/ironic/ironic.conf
- owner: ironic
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
- - dest: /etc/httpd/conf.d/10-ipxe_vhost.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-ipxe_vhost.conf
- - dest: /etc/httpd/conf/httpd.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
- - dest: /etc/httpd/conf/ports.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
- /var/lib/kolla/config_files/ironic_pxe_tftp.json:
- command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot
- config_files:
- - dest: /etc/ironic/ironic.conf
- owner: ironic
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/ironic/ironic.conf
- - dest: /var/lib/ironic/tftpboot/chain.c32
- owner: ironic
- perm: '0744'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/chain.c32
- - dest: /var/lib/ironic/tftpboot/pxelinux.0
- owner: ironic
- perm: '0744'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/pxelinux.0
- - dest: /var/lib/ironic/tftpboot/ipxe.efi
- owner: ironic
- perm: '0744'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/ipxe.efi
- - dest: /var/lib/ironic/tftpboot/undionly.kpxe
- owner: ironic
- perm: '0744'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/undionly.kpxe
- - dest: /var/lib/ironic/tftpboot/map-file
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/var/lib/ironic/tftpboot/map-file
+ /var/lib/kolla/config_files/ironic_pxe_http.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ /var/lib/kolla/config_files/ironic_pxe_tftp.json:
+ command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot
+ permissions:
+ - path: /var/log/ironic
+ owner: ironic:ironic
+ recurse: true
docker_config:
step_4:
ironic_pxe_tftp:
start_order: 90
- image: *ironic_pxe_image
+ image: &ironic_pxe_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIronicPxeImage} ]
net: host
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /dev/log:/dev/log
- - ironic:/var/lib/ironic/
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+              # TODO(mandre) check how docker handles mounting inside a bind-mounted tree
+ # This directory may contain migrated data from BM
+ - /var/lib/ironic:/var/lib/ironic/
+ # These files were generated by puppet inside the config container
+ # TODO(mandre) check the mount permission (ro/rw)
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/chain.c32:/var/lib/ironic/tftpboot/chain.c32
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/pxelinux.0:/var/lib/ironic/tftpboot/pxelinux.0
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/ipxe.efi:/var/lib/ironic/tftpboot/ipxe.efi
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/undionly.kpxe:/var/lib/ironic/tftpboot/undionly.kpxe
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/map-file:/var/lib/ironic/tftpboot/map-file
+ - /dev/log:/dev/log
+ - /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
ironic_pxe_http:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/ironic/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - ironic:/var/lib/ironic/
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+ - /var/lib/config-data/ironic/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/ironic/var/www/:/var/www/:ro
+ - /var/lib/ironic:/var/lib/ironic/
+ - /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/lib/ironic
+ - /var/log/containers/ironic
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Keystone service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
AdminPassword:
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
hidden: true
+ KeystoneTokenProvider:
+ description: The keystone token format
+ type: string
+ default: 'fernet'
+ constraints:
+ - allowed_values: ['uuid', 'fernet']
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
KeystoneBase:
type: ../../puppet/services/keystone.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
outputs:
role_data:
- {get_attr: [KeystoneBase, role_data, step_config]}
service_config_settings: {get_attr: [KeystoneBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &keystone_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
puppet_config:
config_volume: keystone
puppet_tags: keystone_config
step_config: *step_config
- config_image: *keystone_image
+ config_image: &keystone_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
kolla_config:
- /var/lib/kolla/config_files/keystone.json:
- command: /usr/sbin/httpd -DFOREGROUND
- config_files:
- - dest: /etc/keystone/keystone.conf
- owner: keystone
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/keystone/keystone.conf
- - dest: /etc/keystone/credential-keys/0
- owner: keystone
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/0
- - dest: /etc/keystone/credential-keys/1
- owner: keystone
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/keystone/credential-keys/1
- - dest: /etc/httpd/conf.d/10-keystone_wsgi_admin.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_admin.conf
- - dest: /etc/httpd/conf.d/10-keystone_wsgi_main.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-keystone_wsgi_main.conf
- - dest: /etc/httpd/conf/httpd.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
- - dest: /etc/httpd/conf/ports.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
- - dest: /var/www/cgi-bin/keystone/keystone-admin
- owner: keystone
- perm: '0644'
- source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-admin
- - dest: /var/www/cgi-bin/keystone/keystone-public
- owner: keystone
- perm: '0644'
- source: /var/lib/kolla/config_files/src/var/www/cgi-bin/keystone/keystone-public
+ /var/lib/kolla/config_files/keystone.json:
+ command: /usr/sbin/httpd -DFOREGROUND
docker_config:
+ # Kolla_bootstrap/db sync runs before permissions set by kolla_config
step_3:
- keystone-init-log:
+ keystone_init_log:
start_order: 0
image: *keystone_image
user: root
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/keystone && chown keystone:keystone /var/log/keystone']
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
volumes:
- - logs:/var/log
+ - /var/log/containers/keystone:/var/log/keystone
keystone_db_sync:
start_order: 1
image: *keystone_image
privileged: false
detach: false
volumes: &keystone_volumes
- - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/keystone/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/keystone/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - logs:/var/log
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/keystone/var/www/:/var/www/:ro
+ - /var/lib/config-data/keystone/etc/keystone/:/etc/keystone/:ro
+ - /var/lib/config-data/keystone/etc/httpd/:/etc/httpd/:ro
+ - /var/log/containers/keystone:/var/log/keystone
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
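+          # KOLLA_BOOTSTRAP below makes the kolla keystone image run its
+          # database bootstrap (db_sync) instead of starting the service.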
environment:
- KOLLA_BOOTSTRAP=True
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
config_volume: 'keystone_init_tasks'
puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
step_config: 'include ::tripleo::profile::base::keystone'
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
+ config_image: *keystone_image
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/keystone
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable keystone service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
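+      # metadata_settings from the base keystone template are passed through;
+      # they are consumed by TLS-everywhere deployments to request the httpd
+      # certificates for this service.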
+ metadata_settings:
+ get_attr: [KeystoneBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Memcached services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
MemcachedBase:
type: ../../puppet/services/memcached.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [MemcachedBase, role_data, step_config]
service_config_settings: {get_attr: [MemcachedBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &memcached_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
puppet_config:
config_volume: 'memcached'
puppet_tags: 'file'
step_config: *step_config
- config_image: *memcached_image
+ config_image: &memcached_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
kolla_config: {}
docker_config:
step_1:
+ memcached_init_logs:
+ start_order: 0
+ image: *memcached_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
+ - /var/log/memcached.log:/var/log/memcached.log
+ command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; chown ${USER} /var/log/memcached.log']
memcached:
+ start_order: 1
image: *memcached_image
net: host
privileged: false
restart: always
volumes:
- - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
+ # TODO(bogdando) capture memcached syslog logs from a container
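+            # the puppet-generated sysconfig file provides PORT, USER, CACHESIZE,
+            # MAXCONN and OPTIONS, so the containerized memcached starts with the
+            # same options as the baremetal service did.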
command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS']
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable memcached service
+ tags: step2
+ service: name=memcached state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Mistral API service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
MistralApiBase:
type: ../../puppet/services/mistral-api.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [MistralApiBase, role_data, step_config]
service_config_settings: {get_attr: [MistralApiBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &mistral_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
puppet_config:
config_volume: mistral
puppet_tags: mistral_config
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/mistral_api.json:
- command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api
- config_files:
- - dest: /etc/mistral/mistral.conf
- owner: mistral
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ /var/lib/kolla/config_files/mistral_api.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api
+ permissions:
+ - path: /var/log/mistral
+ owner: mistral:mistral
+ recurse: true
docker_config:
+ # db sync runs before permissions set by kolla_config
step_3:
+ mistral_init_logs:
+ start_order: 0
+ image: &mistral_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/mistral:/var/log/mistral
+ command: ['/bin/bash', '-c', 'chown -R mistral:mistral /var/log/mistral']
mistral_db_sync:
start_order: 1
image: *mistral_image
privileged: false
detach: false
volumes:
- - /var/lib/config-data/mistral/etc/:/etc/:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
+ - /var/log/containers/mistral:/var/log/mistral
command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head']
mistral_db_populate:
start_order: 2
privileged: false
detach: false
volumes:
- - /var/lib/config-data/mistral/etc/:/etc/:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
+ - /var/log/containers/mistral:/var/log/mistral
# NOTE: dprince this requires that we install openstack-tripleo-common into
# the Mistral API image so that we get tripleo* actions
command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate']
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+ - /var/log/containers/mistral:/var/log/mistral
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/mistral
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable mistral_api service
+ tags: step2
+ service: name=openstack-mistral-api state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Mistral Engine service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
MistralBase:
type: ../../puppet/services/mistral-engine.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [MistralBase, role_data, step_config]
service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &mistral_engine_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ]
puppet_config:
config_volume: mistral
puppet_tags: mistral_config
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/mistral_engine.json:
- command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine
- config_files:
- - dest: /etc/mistral/mistral.conf
- owner: mistral
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ /var/lib/kolla/config_files/mistral_engine.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine
+ permissions:
+ - path: /var/log/mistral
+ owner: mistral:mistral
+ recurse: true
docker_config:
step_4:
mistral_engine:
- image: *mistral_engine_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralEngineImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /run:/run
+ - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+ - /var/log/containers/mistral:/var/log/mistral
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/mistral
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable mistral_engine service
+ tags: step2
+ service: name=openstack-mistral-engine state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Mistral Executor service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
MistralBase:
type: ../../puppet/services/mistral-executor.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [MistralBase, role_data, step_config]
service_config_settings: {get_attr: [MistralBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &mistral_executor_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ]
puppet_config:
config_volume: mistral
puppet_tags: mistral_config
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerMistralConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/mistral_executor.json:
- command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor
- config_files:
- - dest: /etc/mistral/mistral.conf
- owner: mistral
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/mistral/mistral.conf
+ /var/lib/kolla/config_files/mistral_executor.json:
+ command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor
+ permissions:
+ - path: /var/log/mistral
+ owner: mistral:mistral
+ recurse: true
docker_config:
step_4:
mistral_executor:
- image: *mistral_executor_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMistralExecutorImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/mistral/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- # FIXME: this is required in order for Nova cells
- # initialization workflows on the Undercloud. Need to
- # exclude this on the overcloud for security reasons.
- - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+ - /run:/run
+                # FIXME: this is required for the Nova cells initialization
+                # workflows on the Undercloud to work. It needs to be excluded
+                # on the overcloud for security reasons.
+ - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
+ - /var/log/containers/mistral:/var/log/mistral
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/mistral
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable mistral_executor service
+ tags: step2
+ service: name=openstack-mistral-executor state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Neutron API service
# we configure all neutron services in the same neutron
DockerNeutronConfigImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent:latest'
+ default: 'centos-binary-neutron-server:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NeutronBase:
type: ../../puppet/services/neutron-api.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [NeutronBase, role_data, step_config]
service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &neutron_api_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
puppet_config:
config_volume: neutron
puppet_tags: neutron_config,neutron_api_config
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/neutron_api.json:
- command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
- config_files:
- - dest: /etc/neutron/neutron.conf
- owner: neutron
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
- - dest: /etc/neutron/plugin.ini
- owner: neutron
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
+ /var/lib/kolla/config_files/neutron_api.json:
+ command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
+ /var/lib/kolla/config_files/neutron_server_tls_proxy.json:
+ command: /usr/sbin/httpd -DFOREGROUND
docker_config:
+ # db sync runs before permissions set by kolla_config
step_3:
+ neutron_init_logs:
+ start_order: 0
+ image: &neutron_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/neutron:/var/log/neutron
+ command: ['/bin/bash', '-c', 'chown -R neutron:neutron /var/log/neutron']
neutron_db_sync:
+ start_order: 1
image: *neutron_api_image
net: host
privileged: false
# and run as neutron user
user: root
volumes:
- - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
- - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
+ - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
+ - /var/log/containers/neutron:/var/log/neutron
command: ['neutron-db-manage', 'upgrade', 'heads']
step_4:
- neutron_api:
- image: *neutron_api_image
- net: host
- privileged: false
- restart: always
- volumes:
- - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
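+        # map_merge combines the always-present neutron_api container with the
+        # optional neutron_server_tls_proxy httpd container; when internal TLS
+        # is disabled the second branch contributes an empty mapping.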
+ map_merge:
+ - neutron_api:
+ image: *neutron_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /var/log/containers/neutron:/var/log/neutron
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - if:
+ - internal_tls_enabled
+ - neutron_server_tls_proxy:
+ image: *neutron_api_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron_server_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/httpd/:/etc/httpd/:ro
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - {}
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable neutron_api service
+ tags: step2
+ service: name=neutron-server state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Neutron DHCP service
description: namespace
default: 'tripleoupstream'
type: string
- DockerNeutronApiImage:
+ DockerNeutronDHCPImage:
description: image
default: 'centos-binary-neutron-dhcp-agent:latest'
type: string
# we configure all neutron services in the same neutron
DockerNeutronConfigImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent:latest'
+ default: 'centos-binary-neutron-server:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NeutronBase:
type: ../../puppet/services/neutron-dhcp.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [NeutronBase, role_data, step_config]
service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &neutron_dhcp_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
puppet_config:
config_volume: neutron
puppet_tags: neutron_config,neutron_dhcp_agent_config
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/neutron_dhcp.json:
- command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
- config_files:
- - dest: /etc/neutron/neutron.conf
- owner: neutron
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
- - dest: /etc/neutron/dhcp_agent.ini
- owner: neutron
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/neutron/dhcp_agent.ini
+ /var/lib/kolla/config_files/neutron_dhcp.json:
+ command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
docker_config:
step_4:
neutron_dhcp:
- image: *neutron_dhcp_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronDHCPImage} ]
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron/:/var/lib/kolla/config_files/src:ro
- - /etc/localtime:/etc/localtime:ro
- - /etc/hosts:/etc/hosts:ro
- - /lib/modules:/lib/modules:ro
- - /run/:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run/:/run
+ - /var/log/containers/neutron:/var/log/neutron
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable neutron_dhcp service
+ tags: step2
+ service: name=neutron-dhcp-agent state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Neutron L3 agent
# we configure all neutron services in the same neutron
DockerNeutronConfigImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent:latest'
+ default: 'centos-binary-neutron-server:latest'
type: string
ServiceNetMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NeutronL3Base:
type: ../../puppet/services/neutron-l3.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
value:
service_name: {get_attr: [NeutronL3Base, role_data, service_name]}
config_settings: {get_attr: [NeutronL3Base, role_data, config_settings]}
- step_config: {get_attr: [NeutronL3Base, role_data, step_config]}
- docker_image: &neutron_l3_agent_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ]
- puppet_tags: neutron_config,neutron_l3_agent_config
- config_volume: neutron
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ step_config: &step_config
+ get_attr: [NeutronL3Base, role_data, step_config]
+ puppet_config:
+ puppet_tags: neutron_config,neutron_l3_agent_config
+ config_volume: neutron
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
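+      # all neutron services share the 'neutron' config_volume, so their
+      # puppet-generated configuration lands in the same
+      # /var/lib/config-data/neutron tree that the containers bind mount.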
kolla_config:
/var/lib/kolla/config_files/neutron-l3-agent.json:
- command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
- config_files:
- - dest: /etc/neutron/neutron.conf
- owner: neutron
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
- - dest: /etc/neutron/l3_agent.ini
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/neutron/l3_agent.ini
+ command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
docker_config:
step_4:
neutronl3agent:
- image: *neutron_l3_agent_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronL3AgentImage} ]
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/log/containers/neutron:/var/log/neutron
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Neutron Metadata agent
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNeutronMetadataImage:
+ description: image
+ default: 'centos-binary-neutron-metadata-agent:latest'
+ type: string
+ # we configure all neutron services in the same neutron
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-server:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ NeutronMetadataBase:
+ type: ../../puppet/services/neutron-metadata.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for Neutron Metadata agent
+ value:
+ service_name: {get_attr: [NeutronMetadataBase, role_data, service_name]}
+ config_settings: {get_attr: [NeutronMetadataBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NeutronMetadataBase, role_data, step_config]
+ puppet_config:
+ puppet_tags: neutron_config,neutron_metadata_agent_config
+ config_volume: neutron
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/neutron-metadata-agent.json:
+ command: /usr/bin/neutron-metadata-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-metadata-agent
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
+ docker_config:
+ step_4:
+ neutron_metadata_agent:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronMetadataImage} ]
+ net: host
+ pid: host
+ privileged: true
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron-metadata-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/log/containers/neutron:/var/log/neutron
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable neutron_metadata service
+ tags: step2
+ service: name=neutron-metadata-agent state=stopped enabled=no
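+      # upgrade_tasks are ansible tasks selected by their step tag during a major
+      # upgrade; stopping the baremetal agent lets the containerized one take over.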
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron openvswitch service
description: image
default: 'centos-binary-neutron-openvswitch-agent:latest'
type: string
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-server:latest'
+ type: string
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NeutronOvsAgentBase:
type: ../../puppet/services/neutron-ovs-agent.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]}
step_config: &step_config
get_attr: [NeutronOvsAgentBase, role_data, step_config]
- docker_image: &neutron_ovs_agent_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
puppet_config:
config_volume: neutron
puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
step_config: *step_config
- config_image: *neutron_ovs_agent_image
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/neutron-openvswitch-agent.json:
- command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
- config_files:
- - dest: /etc/neutron/neutron.conf
- owner: neutron
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/neutron/neutron.conf
- - dest: /etc/neutron/plugins/ml2/openvswitch_agent.ini
- owner: neutron
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/openvswitch_agent.ini
- - dest: /etc/neutron/plugins/ml2/ml2_conf.ini
- owner: neutron
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/neutron/plugins/ml2/ml2_conf.ini
+ command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
docker_config:
step_4:
neutronovsagent:
- image: *neutron_ovs_agent_image
+ image: &neutron_ovs_agent_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron:/var/lib/kolla/config_files/src:ro
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/log/containers/neutron:/var/log/neutron
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable neutron_ovs_agent service
+ tags: step2
+ service: name=neutron-openvswitch-agent state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Neutron ML2 Plugin configured with Puppet
type: string
DockerNeutronConfigImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent:latest'
+ default: 'centos-binary-neutron-server:latest'
type: string
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [NeutronBase, role_data, step_config]
service_config_settings: {get_attr: [NeutronBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &docker_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
puppet_config:
config_volume: 'neutron'
puppet_tags: ''
step_config: *step_config
- config_image: *docker_image
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config: {}
docker_config: {}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova API service
description: image
default: 'centos-binary-nova-api:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaApiBase:
type: ../../puppet/services/nova-api.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [NovaApiBase, role_data, config_settings]
- apache::default_vhost: false
step_config: &step_config
- get_attr: [NovaApiBase, role_data, step_config]
+ list_join:
+ - "\n"
+ - - "['Nova_cell_v2'].each |String $val| { noop_resource($val) }"
+ - {get_attr: [NovaApiBase, role_data, step_config]}
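+          # noop_resource turns the Nova_cell_v2 resources into no-ops during the
+          # puppet configuration run; cell_v2 setup is driven by the docker_config
+          # steps below instead.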
service_config_settings: {get_attr: [NovaApiBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &nova_api_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ]
puppet_config:
config_volume: nova
puppet_tags: nova_config
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/nova_api.json:
- command: /usr/bin/nova-api
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ /var/lib/kolla/config_files/nova_api.json:
+ command: /usr/bin/nova-api
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
+        # db sync runs before the permissions are set by kolla_config
step_3:
+ nova_init_logs:
+ start_order: 0
+ image: &nova_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/nova:/var/log/nova
+ command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
nova_api_db_sync:
start_order: 1
image: *nova_api_image
net: host
detach: false
volumes: &nova_api_volumes
- - /var/lib/config-data/nova/etc/:/etc/:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /var/log/containers/nova:/var/log/nova
command: ['/usr/bin/nova-manage', 'api_db', 'sync']
# FIXME: we probably want to wait on the 'cell_v2 update' in order for this
# to be capable of upgrading a baremetal setup. This is to ensure the name
user: nova
privileged: true
restart: always
- volumes:
- - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ volumes: *nova_api_volumes
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_5:
nova_api_discover_hosts:
- start_order: 3
+ start_order: 1
image: *nova_api_image
net: host
detach: false
- '/usr/bin/nova-manage'
- 'cell_v2'
- 'discover_hosts'
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova_api service
+ tags: step2
+ service: name=openstack-nova-api state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Compute service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
NovaComputeBase:
type: ../../puppet/services/nova-compute.yaml
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Nova Compute service.
value:
service_name: {get_attr: [NovaComputeBase, role_data, service_name]}
- config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - get_attr: [NovaComputeBase, role_data, config_settings]
+ # FIXME: we need to disable migration for now as the
+ # hieradata is common for all services, and this means nova
+ # and nova_placement puppet runs also try to configure
+ # libvirt, and they fail. We can remove this override when
+ # we have hieradata separation between containers.
+ - tripleo::profile::base::nova::manage_migration: false
step_config: &step_config
get_attr: [NovaComputeBase, role_data, step_config]
- docker_image: &nova_compute_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
puppet_config:
config_volume: nova_libvirt
puppet_tags: nova_config,nova_paste_api_ini
step_config: *step_config
- config_image: *nova_compute_image
+ config_image: &nova_compute_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
kolla_config:
/var/lib/kolla/config_files/nova-compute.json:
- command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
- - dest: /etc/nova/rootwrap.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
+ command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
+ - path: /var/lib/nova
+ owner: nova:nova
+ recurse: true
docker_config:
# FIXME: run discover hosts here
step_4:
user: root
restart: always
volumes:
- - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
- - /dev:/dev
- - /etc/iscsi:/etc/iscsi
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
- - /var/lib/nova:/var/lib/nova
- - libvirtd:/var/lib/libvirt
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt/etc/nova/:/etc/nova/:ro
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/lib/nova:/var/lib/nova
+ - /var/lib/libvirt:/var/lib/libvirt
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/nova
+ - /var/lib/nova
+ - /var/lib/libvirt
+ upgrade_tasks:
+ - name: Stop and disable nova-compute service
+ tags: step2
+ service: name=openstack-nova-compute state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Conductor service
description: image
default: 'centos-binary-nova-conductor:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaConductorBase:
type: ../../puppet/services/nova-conductor.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [NovaConductorBase, role_data, step_config]
service_config_settings: {get_attr: [NovaConductorBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &nova_conductor_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
puppet_config:
config_volume: nova
puppet_tags: nova_config
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/nova_conductor.json:
- command: /usr/bin/nova-conductor
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ /var/lib/kolla/config_files/nova_conductor.json:
+ command: /usr/bin/nova-conductor
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
step_4:
nova_conductor:
- image: *nova_conductor_image
+ image: &nova_conductor_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova_conductor service
+ tags: step2
+ service: name=openstack-nova-conductor state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Ironic Compute service
description: image
default: 'centos-binary-nova-compute-ironic:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
NovaIronicBase:
type: ../../puppet/services/nova-ironic.yaml
properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
config_settings: {get_attr: [NovaIronicBase, role_data, config_settings]}
step_config: &step_config
get_attr: [NovaIronicBase, role_data, step_config]
- docker_image: &nova_ironic_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
puppet_config:
config_volume: nova
puppet_tags: nova_config,nova_paste_api_ini
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_ironic.json:
- command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
- - dest: /etc/nova/rootwrap.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/rootwrap.conf
+ command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
+ - path: /var/lib/nova
+ owner: nova:nova
+ recurse: true
docker_config:
step_5:
novacompute:
- image: *nova_ironic_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
net: host
privileged: true
user: root
restart: always
volumes:
- - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova:/var/lib/kolla/config_files/src:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - /dev:/dev
- - /etc/iscsi:/etc/iscsi
- - nova_compute:/var/lib/nova/
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /run:/run
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/nova/:/var/lib/nova
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/nova
+ - /var/lib/nova
+ upgrade_tasks:
+ - name: Stop and disable nova-compute service
+ tags: step2
+ service: name=openstack-nova-compute state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Libvirt Service
type: string
# we configure libvirt via the nova-compute container due to coupling
# in the puppet modules
- DockerNovaComputeImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-compute:latest'
type: string
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaLibvirtBase:
type: ../../puppet/services/nova-libvirt.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Libvirt service.
value:
service_name: {get_attr: [NovaLibvirtBase, role_data, service_name]}
- config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - get_attr: [NovaLibvirtBase, role_data, config_settings]
+ # FIXME: we need to disable migration for now as the
+ # hieradata is common for all services, and this means nova
+ # and nova_placement puppet runs also try to configure
+ # libvirt, and they fail. We can remove this override when
+ # we have hieradata separation between containers.
+ - tripleo::profile::base::nova::manage_migration: false
step_config: &step_config
get_attr: [NovaLibvirtBase, role_data, step_config]
- docker_image: &libvirt_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
puppet_config:
config_volume: nova_libvirt
puppet_tags: nova_config
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova-libvirt.json:
- command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
- config_files:
- - dest: /etc/libvirt/libvirtd.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/libvirt/libvirtd.conf
+ command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
step_3:
nova_libvirt:
- image: *libvirt_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
net: host
pid: host
privileged: true
restart: always
volumes:
- - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova_libvirt:/var/lib/kolla/config_files/src:ro
- - /dev:/dev
- - /etc/localtime:/etc/localtime:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
- - /sys/fs/cgroup:/sys/fs/cgroup
- - /var/lib/nova:/var/lib/nova
- # Needed to use host's virtlogd
- - /var/run/libvirt:/var/run/libvirt
- - libvirtd:/var/lib/libvirt
- - nova_libvirt_qemu:/etc/libvirt/qemu
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt/etc/libvirt/:/etc/libvirt/:ro
+ - /lib/modules:/lib/modules:ro
+ - /dev:/dev
+ - /run:/run
+ - /sys/fs/cgroup:/sys/fs/cgroup
+ - /var/lib/nova:/var/lib/nova
+ # Needed to use host's virtlogd
+ - /var/run/libvirt:/var/run/libvirt
+ - /var/lib/libvirt:/var/lib/libvirt
+ - /etc/libvirt/qemu:/etc/libvirt/qemu
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create libvirt persistent data directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /etc/libvirt/qemu
+ - /var/lib/libvirt
+ - /var/log/containers/nova
+ upgrade_tasks:
+ - name: Stop and disable libvirtd service
+ tags: step2
+ service: name=libvirtd state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Metadata service
DefaultPasswords:
default: {}
type: json
-
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [NovaMetadataBase, role_data, step_config]
service_config_settings: {get_attr: [NovaMetadataBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: ''
puppet_config:
config_volume: ''
puppet_tags: ''
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Placement API service
type: string
DockerNovaPlacementImage:
description: image
- default: 'centos-binary-nova-placement-api'
+ default: 'centos-binary-nova-placement-api:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaPlacementBase:
type: ../../puppet/services/nova-placement.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [NovaPlacementBase, role_data, step_config]
service_config_settings: {get_attr: [NovaPlacementBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &nova_placement_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
puppet_config:
config_volume: nova_placement
puppet_tags: nova_config
step_config: *step_config
- config_image:
+ config_image: &nova_placement_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
kolla_config:
- /var/lib/kolla/config_files/nova_placement.json:
- command: /usr/sbin/httpd -DFOREGROUND
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
- - dest: /etc/httpd/conf.d/10-placement_wsgi.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf.d/10-placement_wsgi.conf
- - dest: /etc/httpd/conf/httpd.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/httpd.conf
- - dest: /etc/httpd/conf/ports.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/httpd/conf/ports.conf
- - dest: /var/www/cgi-bin/nova/nova-placement-api
- owner: nova
- perm: '0644'
- source: /var/lib/kolla/config_files/src/var/www/cgi-bin/nova/nova-placement-api
+ /var/lib/kolla/config_files/nova_placement.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
# start this early so it is up before computes start reporting
step_3:
user: root
restart: always
volumes:
- - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova_placement/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/nova_placement/etc/httpd/conf.modules.d:/etc/httpd/conf.modules.d:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_placement/etc/nova/:/etc/nova/:ro
+ - /var/lib/config-data/nova_placement/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/nova_placement/var/www/:/var/www/:ro
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova_placement service (running under httpd)
+ tags: step2
+ service: name=httpd state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Scheduler service
description: image
default: 'centos-binary-nova-scheduler:latest'
type: string
- DockerNovaBaseImage:
+ DockerNovaConfigImage:
description: image
default: 'centos-binary-nova-base:latest'
type: string
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
NovaSchedulerBase:
type: ../../puppet/services/nova-scheduler.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [NovaSchedulerBase, role_data, step_config]
service_config_settings: {get_attr: [NovaSchedulerBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &nova_scheduler_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ]
puppet_config:
- config_volume: nova
- puppet_tags: nova_config
- step_config: *step_config
- config_image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaBaseImage} ]
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
- /var/lib/kolla/config_files/nova_scheduler.json:
- command: /usr/bin/nova-scheduler
- config_files:
- - dest: /etc/nova/nova.conf
- owner: nova
- perm: '0600'
- source: /var/lib/kolla/config_files/src/etc/nova/nova.conf
+ /var/lib/kolla/config_files/nova_scheduler.json:
+ command: /usr/bin/nova-scheduler
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
step_4:
nova_scheduler:
- image: *nova_scheduler_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaSchedulerImage} ]
net: host
privileged: false
restart: always
volumes:
- - /run:/run
- - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /run:/run
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova_scheduler service
+ tags: step2
+ service: name=openstack-nova-scheduler state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Panko service configured with docker.
+  Note: this service is deprecated in the Pike release and
+  will be disabled in future releases.
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerPankoApiImage:
+ description: image
+ default: 'centos-binary-panko-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ PankoApiPuppetBase:
+ type: ../../puppet/services/panko-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Panko API role.
+ value:
+ service_name: {get_attr: [PankoApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [PankoApiPuppetBase, role_data, config_settings]
+ - apache::default_vhost: false
+ step_config: &step_config
+ get_attr: [PankoApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [PankoApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: panko
+ puppet_tags: panko_api_paste_ini,panko_config
+ step_config: *step_config
+ config_image: &panko_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/panko-api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/panko
+ owner: panko:panko
+ recurse: true
+ docker_config:
+ step_3:
+ panko-init-log:
+ start_order: 0
+ image: *panko_image
+ user: root
+ volumes:
+ - /var/log/containers/panko:/var/log/panko
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
+ panko_db_sync:
+ start_order: 1
+ image: *panko_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/panko/etc/panko:/etc/panko:ro
+ - /var/log/containers/panko:/var/log/panko
+ command: /usr/bin/panko-dbsync
+ step_4:
+ panko_api:
+ start_order: 2
+ image: *panko_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/panko-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/panko/etc/panko/:/etc/panko/:ro
+ - /var/lib/config-data/panko/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/panko/var/www/:/var/www/:ro
+ - /var/log/containers/panko:/var/log/panko
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
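+              # The two conditional entries above bind mount the httpd TLS
+              # certificate and key only when EnableInternalTLS is set; otherwise
+              # they resolve to empty strings.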
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/panko
+ state: directory
+ metadata_settings:
+ get_attr: [PankoApiPuppetBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Rabbitmq service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
RabbitCookie:
type: string
default: ''
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
RabbitmqBase:
type: ../../puppet/services/rabbitmq.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Rabbitmq API role.
value:
service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
- config_settings: {get_attr: [RabbitmqBase, role_data, config_settings]}
+ # RabbitMQ plugins initialization occurs on every node
+ config_settings:
+ map_merge:
+ - {get_attr: [RabbitmqBase, role_data, config_settings]}
+ - rabbitmq::admin_enable: false
step_config: &step_config
- get_attr: [RabbitmqBase, role_data, step_config]
+ list_join:
+ - "\n"
+ - - "['Rabbitmq_policy', 'Rabbitmq_user'].each |String $val| { noop_resource($val) }"
+ - get_attr: [RabbitmqBase, role_data, step_config]
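+          # Rabbitmq_policy and Rabbitmq_user are nooped here so users and policies
+          # are not created on every node; they are applied once via the
+          # docker_puppet_tasks below.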
service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &rabbitmq_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
puppet_config:
config_volume: rabbitmq
- puppet_tags: file
step_config: *step_config
- config_image: *rabbitmq_image
+ config_image: &rabbitmq_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
kolla_config:
/var/lib/kolla/config_files/rabbitmq.json:
command: /usr/lib/rabbitmq/bin/rabbitmq-server
- config_files:
- - dest: /etc/rabbitmq/rabbitmq.config
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq.config
- - dest: /etc/rabbitmq/enabled_plugins
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/rabbitmq/enabled_plugins
- - dest: /etc/rabbitmq/rabbitmq-env.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmq-env.conf
- - dest: /etc/rabbitmq/rabbitmqadmin.conf
- owner: root
- perm: '0644'
- source: /var/lib/kolla/config_files/src/etc/rabbitmq/rabbitmqadmin.conf
+ permissions:
+ - path: /var/lib/rabbitmq
+ owner: rabbitmq:rabbitmq
+ recurse: true
docker_config:
+ # Kolla_bootstrap runs before permissions set by kolla_config
step_1:
- rabbitmq_bootstrap:
+ rabbitmq_init_logs:
start_order: 0
image: *rabbitmq_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/rabbitmq:/var/log/rabbitmq
+ command: ['/bin/bash', '-c', 'chown -R rabbitmq:rabbitmq /var/log/rabbitmq']
+ rabbitmq_bootstrap:
+ start_order: 1
+ image: *rabbitmq_image
net: host
privileged: false
volumes:
- - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - rabbitmq:/var/lib/rabbitmq/
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
+ - /var/log/containers/rabbitmq:/var/log/rabbitmq
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- KOLLA_BOOTSTRAP=True
- -
+ -
list_join:
- '='
- - 'RABBITMQ_CLUSTER_COOKIE'
- {get_param: RabbitCookie}
- {get_param: [DefaultPasswords, rabbit_cookie]}
rabbitmq:
- start_order: 1
+ start_order: 2
image: *rabbitmq_image
net: host
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/rabbitmq/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - rabbitmq:/var/lib/rabbitmq/
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
+ - /var/log/containers/rabbitmq:/var/log/rabbitmq
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ docker_puppet_tasks:
+        # RabbitMQ users and policies initialization occurs only on a single node
+ step_1:
+ config_volume: 'rabbit_init_tasks'
+ puppet_tags: 'rabbitmq_policy,rabbitmq_user'
+ step_config: 'include ::tripleo::profile::base::rabbitmq'
+ config_image: *rabbitmq_image
+ volumes:
+ - /var/lib/config-data/rabbitmq/etc/:/etc/
+ - /var/lib/rabbitmq:/var/lib/rabbitmq:ro
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/rabbitmq
+ - /var/lib/rabbitmq
+ upgrade_tasks:
+ - name: Stop and disable rabbitmq service
+ tags: step2
+ service: name=rabbitmq-server state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
Utility stack to convert an array of services into a set of combined
description: Mapping of service -> default password. Used to help
pass top level passwords managed by Heat into services.
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ServiceChain:
type: OS::Heat::ResourceChain
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
{get_attr: [PuppetServices, role_data, global_config_settings]}
step_config:
{get_attr: [ServiceChain, role_data, step_config]}
- docker_image: {get_attr: [ServiceChain, role_data, docker_image]}
puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
kolla_config:
map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
{get_attr: [ServiceChain, role_data, docker_config]}
docker_puppet_tasks:
{get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
+ host_prep_tasks:
+ yaql:
+ # Note we use distinct() here to filter any identical tasks
+ expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ upgrade_tasks:
+ yaql:
+          # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
+ upgrade_batch_tasks:
+ yaql:
+          # Note we use distinct() here to filter any identical tasks, e.g. yum update for all services
+ expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
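+          # Illustrative example of the expression above: given role_data entries
+          # [{upgrade_batch_tasks: [A, B]}, null, {upgrade_batch_tasks: [B, C]}],
+          # nulls are dropped, the lists are flattened and deduplicated, yielding
+          # [A, B, C].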
+ service_metadata_settings:
+ get_attr: [PuppetServices, role_data, service_metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized swift proxy service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
SwiftProxyBase:
type: ../../puppet/services/swift-proxy.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [SwiftProxyBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftProxyBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &swift_proxy_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
puppet_config:
config_volume: swift
puppet_tags: swift_proxy_config
step_config: *step_config
- config_image: *swift_proxy_image
+ config_image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
kolla_config:
/var/lib/kolla/config_files/swift_proxy.json:
command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
+ permissions:
+ - path: /var/log/swift
+ owner: swift:swift
+ recurse: true
+ /var/lib/kolla/config_files/swift_proxy_tls_proxy.json:
+ command: /usr/sbin/httpd -DFOREGROUND
docker_config:
step_4:
- swift_proxy:
- image: *swift_proxy_image
- net: host
- user: swift
- restart: always
- # I'm mounting /etc/swift as rw. Are the rings written to at all during runtime?
- volumes:
- - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ map_merge:
+ - swift_proxy:
+ image: *swift_proxy_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
+                  # FIXME: I'm mounting /etc/swift as rw. Are the rings written to
+                  # at all during runtime?
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - if:
+ - internal_tls_enabled
+ - swift_proxy_tls_proxy:
+ image: *swift_proxy_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_proxy_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/httpd/:/etc/httpd/:ro
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - {}
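+          # map_merge adds the swift_proxy_tls_proxy container to this step only
+          # when internal TLS is enabled; the empty map is the no-op branch merged
+          # otherwise.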
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/swift
+ - /srv/node
+ upgrade_tasks:
+ - name: Stop and disable swift_proxy service
+ tags: step2
+ service: name=openstack-swift-proxy state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Ringbuilder
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [SwiftRingbuilderBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &docker_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
puppet_config:
config_volume: 'swift'
puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
step_config: *step_config
- config_image: *docker_image
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
kolla_config: {}
docker_config: {}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Swift Storage services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
SwiftStorageBase:
type: ../../puppet/services/swift-storage.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [SwiftStorageBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &swift_proxy_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
puppet_config:
config_volume: swift
puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
step_config: *step_config
- config_image: *swift_proxy_image
+ config_image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
kolla_config:
/var/lib/kolla/config_files/swift_account_auditor.json:
command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf
command: /usr/bin/swift-object-updater /etc/swift/object-server.conf
/var/lib/kolla/config_files/swift_object_server.json:
command: /usr/bin/swift-object-server /etc/swift/object-server.conf
+ permissions:
+ - path: /var/log/swift
+ owner: swift:swift
+ recurse: true
docker_config:
step_3:
# The puppet config sets this up but we don't have a way to mount the named
# volume during the configuration stage. We just need to create this
# directory and make sure it's owned by swift.
swift_setup_srv:
- image:
+ image: &swift_account_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
user: root
- command: ['/bin/bash', '-c', 'mkdir /srv/node && chown swift:swift /srv/node']
+ command: ['chown', '-R', 'swift:', '/srv/node']
volumes:
- - swift-srv:/srv
+ - /srv/node:/srv/node
step_4:
swift_account_auditor:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: &kolla_env
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
swift_account_reaper:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_account_replicator:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_account_server:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftAccountImage} ]
+ image: *swift_account_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_container_auditor:
- image:
+ image: &swift_container_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_container_replicator:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ image: *swift_container_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_container_updater:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ image: *swift_container_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_container_server:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftContainerImage} ]
+ image: *swift_container_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_auditor:
- image:
+ image: &swift_object_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_expirer:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ image: *swift_proxy_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_replicator:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ image: *swift_object_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_updater:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ image: *swift_object_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_server:
- image:
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftObjectImage} ]
+ image: *swift_object_image
net: host
user: swift
restart: always
volumes:
- - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/:/var/lib/kolla/config_files/src:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /run:/run
- - swift-srv:/srv
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
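+ # Note: host_prep_tasks are run via ansible on the bare-metal host so the
+ # bind-mounted directories below exist before the containers start;
+ # upgrade_tasks stop the pre-existing non-containerized swift services.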
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/swift
+ - /srv/node
+ upgrade_tasks:
+ - name: Stop and disable swift storage services
+ tags: step2
+ service: name={{ item }} state=stopped enabled=no
+ with_items:
+ - openstack-swift-account-auditor
+ - openstack-swift-account-reaper
+ - openstack-swift-account-replicator
+ - openstack-swift-account
+ - openstack-swift-container-auditor
+ - openstack-swift-container-replicator
+ - openstack-swift-container-updater
+ - openstack-swift-container
+ - openstack-swift-object-auditor
+ - openstack-swift-object-replicator
+ - openstack-swift-object-updater
+ - openstack-swift-object
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Zaqar services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
ZaqarBase:
type: ../../puppet/services/zaqar.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
get_attr: [ZaqarBase, role_data, step_config]
service_config_settings: {get_attr: [ZaqarBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
- docker_image: &zaqar_image
- list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
puppet_config:
config_volume: zaqar
puppet_tags: zaqar_config
step_config: *step_config
- config_image: *zaqar_image
+ config_image: &zaqar_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
kolla_config:
/var/lib/kolla/config_files/zaqar.json:
- command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf
- config_files:
- - dest: /etc/zaqar/zaqar.conf
- owner: zaqar
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
+ command: /usr/sbin/httpd -DFOREGROUND
/var/lib/kolla/config_files/zaqar_websocket.json:
command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf
- config_files:
- - dest: /etc/zaqar/zaqar.conf
- owner: zaqar
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/zaqar/zaqar.conf
- - dest: /etc/zaqar/1.conf
- owner: zaqar
- perm: '0640'
- source: /var/lib/kolla/config_files/src/etc/zaqar/1.conf
+ permissions:
+ - path: /var/log/zaqar
+ owner: zaqar:zaqar
+ recurse: true
docker_config:
step_4:
zaqar:
net: host
privileged: false
restart: always
+ # NOTE(mandre) the kolla image changes the user to 'zaqar'; we need it
+ # to be root to run httpd
+ user: root
volumes:
- - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
+ - /var/lib/config-data/zaqar/var/www/:/var/www/:ro
+ - /var/lib/config-data/zaqar/etc/httpd/:/etc/httpd/:ro
+ - /var/log/containers/zaqar:/var/log/zaqar
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
zaqar_websocket:
privileged: false
restart: always
volumes:
- - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/zaqar/:/var/lib/kolla/config_files/src:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
+ - /var/lib/config-data/zaqar/var/www/:/var/www/:ro
+ - /var/lib/config-data/zaqar/etc/httpd/:/etc/httpd/:ro
+ - /var/log/containers/zaqar:/var/log/zaqar
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/zaqar
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable zaqar service
+ tags: step2
+ service: name=httpd state=stopped enabled=no
--- /dev/null
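+# Enable the CADF (Cloud Auditing Data Federation) notification format for
+# Keystone, which is commonly consumed by audit tooling.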
+parameter_defaults:
+ KeystoneNotificationFormat: cadf
# A Heat environment file which can be used to enable a
# a Cinder NetApp backend, configured via puppet
resource_registry:
- OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+ OS::TripleO::Services::CinderBackendNetApp: ../puppet/services/cinder-backend-netapp.yaml
parameter_defaults:
CinderEnableNetappBackend: true
--- /dev/null
+# A Heat environment file which can be used to enable a
+# Cinder Pure Storage FlashArray iSCSI backend, configured via puppet
+resource_registry:
+ OS::TripleO::Services::CinderBackendPure: ../puppet/services/cinder-backend-pure.yaml
+
+parameter_defaults:
+ CinderEnablePureBackend: true
+ CinderPureBackendName: 'tripleo_pure'
+ CinderPureStorageProtocol: 'iSCSI'
+ CinderPureSanIp: ''
+ CinderPureAPIToken: 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
+ CinderPureUseChap: false
+ CinderPureMultipathXfer: true
# parameter_defaults:
#
-## You can specify additional plugins to load using the
-## CollectdExtraPlugins key:
+## Collectd server configuration
+# CollectdServer: collectd0.example.com
+#
+################
+#### Other config parameters, the values shown here are the defaults
+################
+#
+# CollectdServerPort: 25826
+# CollectdSecurityLevel: None
+#
+################
+#### If CollectdSecurityLevel is set to Encrypt or Sign
+#### the following parameters are also needed
+###############
+#
+# CollectdUsername: user
+# CollectdPassword: password
+#
+## CollectdDefaultPlugins: these are the default plugins used by collectd
+#
+# CollectdDefaultPlugins:
+# - disk
+# - interface
+# - load
+# - memory
+# - processes
+# - tcpconns
+#
+## Extra plugins can be enabled via the CollectdExtraPlugins parameter.
+## All the available plugins are:
#
# CollectdExtraPlugins:
# - disk
parameter_defaults:
ControlPlaneSubnetCidr: '24'
- ControlPlaneDefaultRoute: 192.0.2.254
+ ControlPlaneDefaultRoute: 192.168.24.254
InternalApiNetCidr: 10.0.0.0/24
InternalApiAllocationPools: [{'start': '10.0.0.10', 'end': '10.0.0.200'}]
InternalApiDefaultRoute: 10.0.0.1
ManagementInterfaceDefaultRoute: 10.1.0.1
ExternalNetCidr: 10.2.0.0/24
ExternalAllocationPools: [{'start': '10.2.0.10', 'end': '10.2.0.200'}]
- EC2MetadataIp: 192.0.2.1 # Generally the IP of the Undercloud
+ EC2MetadataIp: 192.168.24.1 # Generally the IP of the Undercloud
DnsServers: ["8.8.8.8","8.8.4.4"]
VrouterPhysicalInterface: eth1
VrouterGateway: 10.0.0.1
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces
CountDefault: 1
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
- name: BlockStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- name: ContrailController
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailConfig
- OS::TripleO::Services::ContrailControl
- OS::TripleO::Services::ContrailDatabase
- name: ContrailAnalytics
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailAnalytics
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- name: ContrailAnalyticsDatabase
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailAnalyticsDatabase
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- name: ContrailTsn
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::ContrailTsn
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
--- /dev/null
+resource_registry:
+ OS::TripleO::Server: ../deployed-server/deployed-server.yaml
+ OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
+ OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
+
+{% for role in roles %}
+ # Default nic config mappings
+ OS::TripleO::{{role.name}}::Net::SoftwareConfig: ../net-config-static.yaml
+{% endfor %}
+
+ OS::TripleO::ControllerDeployedServer::Net::SoftwareConfig: ../net-config-static-bridge.yaml
+++ /dev/null
-resource_registry:
- OS::TripleO::Server: ../deployed-server/deployed-server.yaml
- OS::TripleO::DeployedServer::ControlPlanePort: OS::Neutron::Port
- OS::TripleO::DeployedServer::Bootstrap: OS::Heat::None
resource_registry:
- OS::TripleO::Tasks::ControllerDeployedServerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerDeployedServerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPreConfig: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPostConfig: ../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerDeployedServerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
--- /dev/null
+# This environment contains the services that can work with TLS-everywhere.
+resource_registry:
+ # This can be used when you don't want to run puppet on the host,
+ # e.g atomic, but it has been replaced with OS::TripleO::Services::Docker
+ # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+ OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+ # The compute node still needs extra initialization steps
+ OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+
+ # NOTE: add roles to be docker enabled as we support them.
+ OS::TripleO::Services::AodhApi: ../docker/services/aodh-api.yaml
+ OS::TripleO::Services::AodhEvaluator: ../docker/services/aodh-evaluator.yaml
+ OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
+ OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
+ OS::TripleO::Services::GlanceApi: ../docker/services/glance-api.yaml
+ OS::TripleO::Services::GnocchiApi: ../docker/services/gnocchi-api.yaml
+ OS::TripleO::Services::GnocchiMetricd: ../docker/services/gnocchi-metricd.yaml
+ OS::TripleO::Services::GnocchiStatsd: ../docker/services/gnocchi-statsd.yaml
+ OS::TripleO::Services::HeatApi: ../docker/services/heat-api.yaml
+ OS::TripleO::Services::HeatApiCfn: ../docker/services/heat-api-cfn.yaml
+ OS::TripleO::Services::HeatEngine: ../docker/services/heat-engine.yaml
+ OS::TripleO::Services::Keystone: ../docker/services/keystone.yaml
+ OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
+ OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
+ OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
+ OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
+ OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
+ OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
+ OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
+ OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
+ OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml
+ OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
+
+ OS::TripleO::PostDeploySteps: ../docker/post.yaml
+ OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
+
+ OS::TripleO::Services: ../docker/services/services.yaml
+
+parameter_defaults:
+ # Defaults to 'tripleoupstream'. Specify a local docker registry
+ # Example: 192.168.24.1:8787/tripleoupstream
+ DockerNamespace: tripleoupstream
+ DockerNamespaceIsRegistry: false
+
+ ComputeServices:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::Docker
resource_registry:
- OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+ # This can be used when you don't want to run puppet on the host,
+ # e.g. Atomic, but it has been replaced with OS::TripleO::Services::Docker
+ # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+ OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+ # The compute node still needs extra initialization steps
+ OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
#NOTE (dprince) add roles to be docker enabled as we support them
OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
- # FIXME: these need to go into a environments/services-docker dir?
- OS::TripleO::Services::NovaIronic: ../docker/services/nova-ironic.yaml
- OS::TripleO::Services::IronicApi: ../docker/services/ironic-api.yaml
- OS::TripleO::Services::IronicConductor: ../docker/services/ironic-conductor.yaml
- OS::TripleO::Services::IronicPxe: ../docker/services/ironic-pxe.yaml
OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
+ OS::TripleO::Services::NeutronMetadataAgent: ../docker/services/neutron-metadata.yaml
OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
- OS::TripleO::Services::MistralApi: ../docker/services/mistral-api.yaml
- OS::TripleO::Services::MistralEngine: ../docker/services/mistral-engine.yaml
- OS::TripleO::Services::MistralExecutor: ../docker/services/mistral-executor.yaml
- OS::TripleO::Services::Zaqar: ../docker/services/zaqar.yaml
OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
+ OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml
OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml
+ OS::TripleO::Services::GnocchiApi: ../docker/services/gnocchi-api.yaml
+ OS::TripleO::Services::GnocchiMetricd: ../docker/services/gnocchi-metricd.yaml
+ OS::TripleO::Services::GnocchiStatsd: ../docker/services/gnocchi-statsd.yaml
+ OS::TripleO::Services::AodhApi: ../docker/services/aodh-api.yaml
+ OS::TripleO::Services::AodhEvaluator: ../docker/services/aodh-evaluator.yaml
+ OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml
+ OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
+ OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
+ OS::TripleO::Services::CeilometerAgentCentral: ../docker/services/ceilometer-agent-central.yaml
+ OS::TripleO::Services::CeilometerAgentCompute: ../docker/services/ceilometer-agent-compute.yaml
+ OS::TripleO::Services::CeilometerAgentNotification: ../docker/services/ceilometer-agent-notification.yaml
OS::TripleO::PostDeploySteps: ../docker/post.yaml
+ OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
+
OS::TripleO::Services: ../docker/services/services.yaml
parameter_defaults:
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::Docker
+ - OS::TripleO::Services::CeilometerAgentCompute
# a TLS for in the internal network via certmonger
parameter_defaults:
EnableInternalTLS: true
+ RabbitClientUseSSL: true
# Required for novajoin to enroll the overcloud nodes
ServerMetadata:
ipa_enroll: True
resource_registry:
+ OS::TripleO::Services::CertmongerUser: ../puppet/services/certmonger-user.yaml
+
OS::TripleO::Services::HAProxyInternalTLS: ../puppet/services/haproxy-internal-tls-certmonger.yaml
- OS::TripleO::Services::ApacheTLS: ../puppet/services/apache-internal-tls-certmonger.yaml
- OS::TripleO::Services::MySQLTLS: ../puppet/services/database/mysql-internal-tls-certmonger.yaml
+
# We use apache as a TLS proxy
OS::TripleO::Services::TLSProxyBase: ../puppet/services/apache.yaml
resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip_v6.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool_v6.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool_v6.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool_v6.yaml
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ ControlFixedIPs: [{'ip_address':'192.168.24.251'}]
PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
# to control your VIPs (currently one per network)
# NOTE: we will eventually move to one VIP per service
#
- ControlFixedIPs: [{'ip_address':'192.0.2.251'}]
+ ControlFixedIPs: [{'ip_address':'192.168.24.251'}]
PublicVirtualFixedIPs: [{'ip_address':'10.0.0.251'}]
InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.251'}]
StorageVirtualFixedIPs: [{'ip_address':'172.16.1.251'}]
--- /dev/null
+# This template allows the IPs to be preselected for each VIP. Note that
+# this template should be included after other templates which affect the
+# network such as network-isolation.yaml.
+
+resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip_v6.yaml
+
+parameter_defaults:
+ # Set the IP addresses of the VIPs here.
+ # NOTE: we will eventually move to one VIP per service
+ #
+ ControlFixedIPs: [{'ip_address':'192.168.24.240'}]
+ PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
+ StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:4000:0000:0000:0000:0005'}]
+ RedisVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0006'}]
--- /dev/null
+# This template allows the IPs to be preselected for each VIP. Note that
+# this template should be included after other templates which affect the
+# network such as network-isolation.yaml.
+
+resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+parameter_defaults:
+ # Set the IP addresses of the VIPs here.
+ # NOTE: we will eventually move to one VIP per service
+ #
+ ControlFixedIPs: [{'ip_address':'192.168.24.240'}]
+ PublicVirtualFixedIPs: [{'ip_address':'10.0.0.240'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.240'}]
+ StorageVirtualFixedIPs: [{'ip_address':'172.16.1.240'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'172.16.3.240'}]
+ RedisVirtualFixedIPs: [{'ip_address':'172.16.2.241'}]
parameter_defaults:
ComputeServices:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
## (note the use of port 24284 for ssl connections)
#
# LoggingServers:
-# - host: 192.0.2.11
+# - host: 192.168.24.11
# port: 24284
# LoggingUsesSSL: true
# LoggingSharedKey: secret
+++ /dev/null
-resource_registry:
- OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
+++ /dev/null
-resource_registry:
- # aodh data migration
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
-
- # no-op the rest
- OS::TripleO::PostDeploySteps: OS::Heat::None
+++ /dev/null
-resource_registry:
-
- # This initiates the upgrades for ceilometer api to run under apache wsgi
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
-
- # no-op the rest
- OS::TripleO::PostDeploySteps: OS::Heat::None
--- /dev/null
+resource_registry:
+ # FIXME(shardy) do we need to break major_upgrade_steps.yaml apart to
+ # enable docker-specific logic, or is just overriding PostUpgradeSteps
+ # enough (as we want to share the ansible tasks, steps etc)?
+ OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
+parameter_defaults:
+ EnableConfigPurge: false
+ StackUpdateType: UPGRADE
+ UpgradeLevelNovaCompute: auto
+ UpgradeInitCommonCommand: |
+ #!/bin/bash
+ # Ocata to Pike, put any needed host-level workarounds here
resource_registry:
OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
parameter_defaults:
+ EnableConfigPurge: true
+ StackUpdateType: UPGRADE
UpgradeLevelNovaCompute: auto
UpgradeInitCommonCommand: |
#!/bin/bash
# Newton to Ocata, we need to remove old hiera hook data and
# install ansible heat agents and ansible-pacemaker
set -eu
- yum install -y openstack-heat-agents
yum install -y python-heat-agent-*
yum install -y ansible-pacemaker
rm -f /usr/libexec/os-apply-config/templates/etc/puppet/hiera.yaml
--- /dev/null
+# Use this to reset any mappings only used for upgrades after the
+# update of all nodes is completed
+resource_registry:
+ OS::TripleO::PostDeploySteps: ../docker/post.yaml
+parameter_defaults:
+ EnableConfigPurge: false
+ StackUpdateType: ''
+ UpgradeLevelNovaCompute: ''
+ UpgradeInitCommonCommand: ''
+ UpgradeInitCommand: ''
resource_registry:
OS::TripleO::PostDeploySteps: ../puppet/post.yaml
parameter_defaults:
+ EnableConfigPurge: false
+ StackUpdateType: ''
UpgradeLevelNovaCompute: ''
UpgradeInitCommonCommand: ''
+ UpgradeInitCommand: ''
+++ /dev/null
-parameter_defaults:
- UpgradeLevelNovaCompute: ''
-
-resource_registry:
- OS::TripleO::Services::SaharaApi: ../puppet/services/sahara-api.yaml
- OS::TripleO::Services::SaharaEngine: ../puppet/services/sahara-engine.yaml
+++ /dev/null
-parameter_defaults:
- UpgradeLevelNovaCompute: mitaka
-
-resource_registry:
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
- OS::TripleO::PostDeploySteps: OS::Heat::None
+++ /dev/null
-parameter_defaults:
- UpgradeLevelNovaCompute: mitaka
-
-resource_registry:
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml
- OS::TripleO::PostDeploySteps: OS::Heat::None
+++ /dev/null
-parameter_defaults:
- KeepSaharaServicesOnUpgrade: false
-resource_registry:
- OS::TripleO::Services::SaharaApi: OS::Heat::None
- OS::TripleO::Services::SaharaEngine: OS::Heat::None
-
ManilaCephFSNativeCephFSConfPath: '/etc/ceph/ceph.conf'
ManilaCephFSNativeCephFSAuthId: 'manila'
ManilaCephFSNativeCephFSClusterName: 'ceph'
- ManilaCephFSNativeCephFSEnableSnapshots: true
+ ManilaCephFSNativeCephFSEnableSnapshots: false
# CIDR subnet mask length for provisioning network
ControlPlaneSubnetCidr: '24'
# Gateway router for the provisioning network (or Undercloud IP)
- ControlPlaneDefaultRoute: 192.0.2.254
- EC2MetadataIp: 192.0.2.1 # Generally the IP of the Undercloud
+ ControlPlaneDefaultRoute: 192.168.24.254
+ EC2MetadataIp: 192.168.24.1 # Generally the IP of the Undercloud
# Customize the IP subnets to match the local environment
InternalApiNetCidr: 172.17.0.0/24
StorageNetCidr: 172.18.0.0/24
--- /dev/null
+# A Heat environment file that can be used to deploy Neutron BGPVPN service
+#
+# There are currently four backend service providers for Neutron BGPVPN.
+# The default option is a dummy driver that simply enables the API.
+# To enable another backend, replace the content of BgpvpnServiceProvider with one of:
+#
+# - Bagpipe: BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default
+# - OpenContrail: BGPVPN:OpenContrail:networking_bgpvpn.neutron.services.service_drivers.opencontrail.opencontrail.OpenContrailBGPVPNDriver:default
+# - OpenDaylight: BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default
+# - Nuage: BGPVPN:Nuage:nuage_neutron.bgpvpn.services.service_drivers.driver.NuageBGPVPNDriver:default
+resource_registry:
+ OS::TripleO::Services::NeutronBgpVpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
+
+parameter_defaults:
+ NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+ BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
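+
+ # For example, to switch from the dummy driver to the BaGPipe backend listed
+ # above, one could (untested sketch) override this with:
+ # BgpvpnServiceProvider: 'BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default'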
--- /dev/null
+# A Heat environment file that can be used to deploy Neutron L2 Gateway service
+#
+# There are currently only two service providers for Neutron L2 Gateway.
+# The default option is a dummy driver that simply enables the API.
+# To enable another backend, replace the content of L2gwServiceProvider with one of:
+#
+# - L2 gateway agent: L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.rpc_l2gw.L2gwRpcDriver:default
+# - OpenDaylight: L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default
+resource_registry:
+ OS::TripleO::Services::NeutronL2gwApi: ../puppet/services/neutron-l2gw-api.yaml
+ OS::TripleO::Services::NeutronL2gwAgent: ../puppet/services/neutron-l2gw-agent.yaml
+
+parameter_defaults:
+ NeutronServicePlugins: "networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin"
+ L2gwServiceProvider: ['L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default']
+
+ # Optional
+ # L2gwServiceDefaultInterfaceName: "FortyGigE1/0/1"
+ # L2gwServiceDefaultDeviceName: "Switch1"
+ # L2gwServiceQuotaL2Gateway: 10
+ # L2gwServicePeriodicMonitoringInterval: 5
+ # L2gwAgentOvsdbHosts: ["ovsdb1:127.0.0.1:6632"]
+ # L2gwAgentEnableManager: False
+ # L2gwAgentManagerTableListeningPort: "6633"
+ # L2gwAgentPeriodicInterval: 20
+ # L2gwAgentMaxConnectionRetries: 10
+ # L2gwAgentSocketTimeout: 30
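+
+ # For example, to switch from the dummy driver to the L2 gateway agent driver
+ # listed above, one could (untested sketch) set:
+ # L2gwServiceProvider: ['L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.rpc_l2gw.L2gwRpcDriver:default']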
resource_registry:
OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-bigswitch.yaml
OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/compute/neutron-ml2-bigswitch.yaml
+ OS::TripleO::NeutronBigswitchAgent: ../puppet/services/neutron-bigswitch-agent.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
parameter_defaults:
# Required to fill in:
NeutronBigswitchRestproxyServers:
NeutronBigswitchRestproxyServerAuth:
- NeutronMechanismDrivers: bsn_ml2
+ NeutronMechanismDrivers: openvswitch,bsn_ml2
+ NeutronServicePlugins: bsn_l3,bsn_service_plugin
+ KeystoneNotificationDriver: messaging
# Optional:
# NeutronBigswitchRestproxyAutoSyncOnFailure:
# NeutronBigswitchAgentEnabled:
# NeutronBigswitchLLDPEnabled:
+ ControllerExtraConfig:
+ neutron::agents::l3::enabled: false
+ neutron::agents::dhcp::enable_force_metadata: true
+ neutron::agents::dhcp::enable_isolated_metadata: true
+ neutron::agents::dhcp::enable_metadata_network: false
+ neutron::server::l3_ha: false
OS::TripleO::ComputeExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/neutron-ml2-cisco-n1kv.yaml
parameter_defaults:
- N1000vVSMIP: '192.0.2.50'
- N1000vMgmtGatewayIP: '192.0.2.1'
+ N1000vVSMIP: '192.168.24.50'
+ N1000vMgmtGatewayIP: '192.168.24.1'
N1000vVSMDomainID: '100'
N1000vVSMHostMgmtIntf: 'br-ex'
# a Cisco Neutron plugin.
resource_registry:
OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
parameter_defaults:
NetworkUCSMIp: '127.0.0.1'
--- /dev/null
+# Environment file used to enable networking-vpp ML2 mechanism driver
+
+resource_registry:
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronVppAgent: ../puppet/services/neutron-vpp-agent.yaml
+ OS::TripleO::Services::Etcd: ../puppet/services/etcd.yaml
+ OS::TripleO::Services::Vpp: ../puppet/services/vpp.yaml
+
+parameter_defaults:
+ # Comma-delimited list of <physical_network>:<VPP Interface>.
+ # Example: "datacentre:GigabitEthernet2/2/0"
+ #NeutronVPPAgentPhysnets: ""
+
+ NeutronMechanismDrivers: vpp
+ NeutronNetworkType: vlan
+ NeutronServicePlugins: router
+ NeutronTypeDrivers: vlan,flat
+ ExtraConfig:
+ # Use Linux Bridge driver for DHCP and L3 agent.
+ neutron::agents::dhcp::interface_driver: "neutron.agent.linux.interface.BridgeInterfaceDriver"
+ neutron::agents::l3::interface_driver: "neutron.agent.linux.interface.BridgeInterfaceDriver"
--- /dev/null
+# A Heat environment that can be used to deploy NSX Services
+# extensions, configured via puppet
+resource_registry:
+ # NSX doesn't require dhcp, l3, metadata, and ovs agents
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ # Override the Neutron core plugin to use NSX
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginNSX
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+
+parameter_defaults:
+ NeutronCorePlugin: vmware_nsx.plugin.NsxV3Plugin
OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-nuage.yaml
parameter_defaults:
- NeutronNuageOSControllerIp: '0.0.0.0'
NeutronNuageNetPartitionName: 'default_name'
NeutronNuageVSDIp: '0.0.0.0:0'
NeutronNuageVSDUsername: 'username'
OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+ OS::TripleO::Services::NeutronCorePlugin: ../puppet/services/neutron-plugin-ml2-odl.yaml
OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
--- /dev/null
+# A Heat environment file which can be used to configure access policies for
+# Nova API resources. It is provided as an example and covers only Nova.
+# While editing policy.json files in this way is supported, modifying the
+# policy can have unexpected side effects and is not encouraged.
+
+parameter_defaults:
+ # The target is "compute:get_all", the "list all instances" API of the Compute service.
+ # The rule is an empty string meaning "always". This policy allows anybody to list instances.
+ NovaApiPolicies: { nova-context_is_admin: { key: 'compute:get_all', value: '' } }
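+ # As a further, hypothetical example, the same API could be restricted to
+ # admins by using the standard 'rule:admin_api' rule instead of '':
+ # NovaApiPolicies: { nova-context_is_admin: { key: 'compute:get_all', value: 'rule:admin_api' } }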
# An environment which enables configuration of an
# Overcloud controller with Pacemaker.
resource_registry:
- OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
# custom pacemaker services
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::Securetty: ../puppet/services/securetty.yaml
+
+parameter_defaults:
+ TtyValues:
+ - console
+ - tty1
+ - tty2
+ - tty3
+ - tty4
+ - tty5
+ - tty6
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::Etcd: ../../docker/services/etcd.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::IronicApi: ../../docker/services/ironic-api.yaml
+ OS::TripleO::Services::IronicConductor: ../../docker/services/ironic-conductor.yaml
+ OS::TripleO::Services::IronicPxe: ../../docker/services/ironic-pxe.yaml
+ OS::TripleO::Services::NovaIronic: ../../docker/services/nova-ironic.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+ OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+ OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::UndercloudAodhApi: ../../docker/services/aodh-api.yaml
+ OS::TripleO::Services::UndercloudAodhEvaluator: ../../docker/services/aodh-evaluator.yaml
+ OS::TripleO::Services::UndercloudAodhNotifier: ../../docker/services/aodh-notifier.yaml
+ OS::TripleO::Services::UndercloudAodhListener: ../../docker/services/aodh-listener.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::UndercloudCeilometerAgentCentral: ../../docker/services/ceilometer-agent-central.yaml
+ OS::TripleO::Services::UndercloudCeilometerAgentNotification: ../../docker/services/ceilometer-agent-notification.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::UndercloudGnocchiApi: ../../docker/services/gnocchi-api.yaml
+ OS::TripleO::Services::UndercloudGnocchiMetricd: ../../docker/services/gnocchi-metricd.yaml
+ OS::TripleO::Services::UndercloudGnocchiStatsd: ../../docker/services/gnocchi-statsd.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::UndercloudPankoApi: ../../docker/services/panko-api.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::CeilometerApi: ../../puppet/services/ceilometer-api.yaml
+
+parameter_defaults:
+ CeilometerApiEndpoint: true
+
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::CeilometerCollector: ../../puppet/services/ceilometer-collector.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::CeilometerExpirer: ../../puppet/services/ceilometer-expirer.yaml
+++ /dev/null
-resource_registry:
- OS::TripleO::Services::CeilometerApi: OS::Heat::None
--- /dev/null
+# This is an example template showing how to configure keystone domain-specific
+# LDAP backends. It configures a domain called tripleoldap with the attributes
+# specified below.
+parameter_defaults:
+ KeystoneLDAPDomainEnable: true
+ KeystoneLDAPBackendConfigs:
+ tripleoldap:
+ url: ldap://192.168.24.251
+ user: cn=openstack,ou=Users,dc=tripleo,dc=example,dc=com
+ password: Secrete
+ suffix: dc=tripleo,dc=example,dc=com
+ user_tree_dn: ou=Users,dc=tripleo,dc=example,dc=com
+ user_filter: "(memberOf=cn=OSuser,ou=Groups,dc=tripleo,dc=example,dc=com)"
+ user_objectclass: person
+ user_id_attribute: cn
+ user_allow_create: false
+ user_allow_update: false
+ user_allow_delete: false
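+
+# Additional domains can be configured by adding more entries under
+# KeystoneLDAPBackendConfigs, one per domain name; a hypothetical sketch:
+# KeystoneLDAPBackendConfigs:
+#   tripleoldap: { url: ldap://192.168.24.251, ... }
+#   exampledomain:
+#     url: ldap://ldap2.example.com
+#     suffix: dc=example,dc=com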
+++ /dev/null
-resource_registry:
- OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::Qdr: ../../puppet/services/qdr.yaml
-resource_registry:
- OS::TripleO::Services::Sshd: ../puppet/services/sshd.yaml
-
parameter_defaults:
BannerText: |
******************************************************************
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
+ MessageOfTheDay: |
+ ALERT! You are entering into a secured area!
+ This service is restricted to authorized users only.
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::ExternalSwiftProxy: ../puppet/services/external-swift-proxy.yaml
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+
+parameter_defaults:
+ ExternalPublicUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+ ExternalInternalUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+ ExternalAdminUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+ ExternalSwiftUserTenant: 'service'
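+ # The %(tenant_id)s portion of the URLs above is substituted with the project
+ # ID by Keystone when the endpoint is resolved from the service catalog.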
+
NeutronBridgeMappings: ctlplane:br-ctlplane
NeutronAgentExtensions: []
NeutronFlatNetworks: '*'
+ NeutronDnsDomain: ''
NovaSchedulerAvailableFilters: 'tripleo_common.filters.list.tripleo_filters'
NovaSchedulerDefaultFilters: ['RetryFilter', 'TripleOCapabilitiesFilter', 'ComputeCapabilitiesFilter', 'AvailabilityZoneFilter', 'RamFilter', 'DiskFilter', 'ComputeFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
NeutronDhcpAgentsPerNetwork: 2
--- /dev/null
+parameter_defaults:
+ ControlPlaneDefaultRoute: 192.0.2.1
+ EC2MetadataIp: 192.0.2.1
-heat_template_version: ocata
+heat_template_version: pike
description: >
Example extra config for cluster config
-heat_template_version: ocata
+heat_template_version: pike
description: >
Example extra config for cluster config
-heat_template_version: ocata
+heat_template_version: pike
description: Template file to add a swap partition to a node.
-heat_template_version: ocata
+heat_template_version: pike
description: Template file to add a swap file to a node.
-heat_template_version: ocata
+heat_template_version: pike
description: 'Generates the relevant service principals for a server'
parameters:
# Filter null values and values that don't contain
# 'metadata_settings', get the values from that key and get the
# unique ones.
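+ # (coalesce() keeps the expression valid when the incoming data is null)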
- expression: list($.data.where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
+ expression: list(coalesce($.data, []).where($ != null).where($.containsKey('metadata_settings')).metadata_settings.flatten().distinct())
data: {get_param: RoleData}
# Generates entries for nova metadata with the following format:
properties:
value:
yaql:
- expression: let(fqdns => $.data.fqdns) -> dict($.data.metadata.where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
+ expression: let(fqdns => $.data.fqdns) -> dict(coalesce($.data.metadata, []).where($ != null and $.type = 'vip').select([concat('managed_service_', $.service, $.network), concat($.service, '/', $fqdns.get($.network))]))
data:
metadata: {get_attr: [IncomingMetadataSettings, value]}
fqdns:
properties:
value:
yaql:
- expression: dict($.data.where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
+ expression: dict(coalesce($.data, []).where($ != null and $.type = 'node').select([$.service, $.network.replace('_', '')]).groupBy($[0], $[1]))
data: {get_attr: [IncomingMetadataSettings, value]}
outputs:
-heat_template_version: ocata
+heat_template_version: pike
description: 'Extra Post Deployment Config'
parameters:
servers:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Example extra config for post-deployment
-heat_template_version: ocata
+heat_template_version: pike
description: >
Example extra config for post-deployment, this re-runs every update
-heat_template_version: ocata
+heat_template_version: pike
description: >
Post-deployment for the TripleO undercloud
auth_url:
if:
- ssl_disabled
- - list_join:
- - ''
- - - 'http://'
- - {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
- - ':5000/v2.0'
- - list_join:
- - ''
- - - 'https://'
- - {get_param: [DeployedServerPortMap, 'public_virtual_ip', fixed_ips, 0, ip_address]}
- - ':13000/v2.0'
+ - make_url:
+ scheme: http
+ host: {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
+ port: 5000
+ path: /v2.0
+ - make_url:
+ scheme: https
+ host: {get_param: [DeployedServerPortMap, 'public_virtual_ip', fixed_ips, 0, ip_address]}
+ port: 13000
+ path: /v2.0
-heat_template_version: ocata
+heat_template_version: pike
description: >
RHEL Registration and unregistration software deployments.
type: string
rhel_reg_http_proxy_password:
type: string
+ UpdateOnRHELRegistration:
+ type: boolean
+ default: false
+ description: |
+ When enabled, the system will perform a yum update after performing the
+ RHEL Registration process.
resources:
input_values:
REG_METHOD: {get_param: rhel_reg_method}
+ YumUpdateConfigurationAfterRHELRegistration:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -x
+ num_updates=$(yum list -q updates | wc -l)
+ if [ "$num_updates" -eq "0" ]; then
+ echo "No packages require updating"
+ exit 0
+ fi
+ full_command="yum -q -y update"
+ echo "Running: $full_command"
+ result=$($full_command)
+ return_code=$?
+ echo "$result"
+ echo "yum return code: $return_code"
+ exit $return_code
+
+ UpdateDeploymentAfterRHELRegistration:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: RHELRegistrationDeployment
+ conditions:
+ update_requested: {get_param: UpdateOnRHELRegistration}
+ properties:
+ name: UpdateDeploymentAfterRHELRegistration
+ config: {get_resource: YumUpdateConfigurationAfterRHELRegistration}
+ server: {get_param: server}
+ actions: ['CREATE'] # Only do this on CREATE
+
outputs:
deploy_stdout:
description: Deployment reference, used to trigger puppet apply on changes
exit 0
fi
-retryCount=0
+retry_max_count=10
opts=
config_opts=
attach_opts=
fi
function retry() {
- if [[ $retryCount < 3 ]]; then
- $@
- if ! [[ $? == 0 ]]; then
- retryCount=$(echo $retryCount + 1 | bc)
- echo "WARN: Failed to connect when running '$@', retrying..."
- retry $@
- else
- retryCount=0
+ # Inhibit -e since we want to retry without exiting..
+ set +e
+ # Retry delay (seconds)
+ retry_delay=2.0
+ retry_count=0
+ mycli="$@"
+ while [ $retry_count -lt ${retry_max_count} ]
+ do
+ echo "INFO: Sleeping ${retry_delay} ..."
+ sleep ${retry_delay}
+ echo "INFO: Executing '${mycli}' ..."
+ ${mycli}
+ if [ $? -eq 0 ]; then
+ echo "INFO: Ran '${mycli}' successfully, not retrying..."
+ break
+ else
+ echo "WARN: Failed to connect when running '${mycli}', retrying (attempt #$retry_count )..."
+ retry_count=$(echo $retry_count + 1 | bc)
+ fi
+ done
+
+ if [ $retry_count -ge ${retry_max_count} ]; then
+ echo "ERROR: Failed to connect after ${retry_max_count} attempts when running '${mycli}'"
+ exit 1
fi
- else
- echo "ERROR: Failed to connect after 3 attempts when running '$@'"
- exit 1
- fi
+ # Re-enable -e when exiting retry()
+ set -e
}
function detect_satellite_version {
ping_api=$REG_SAT_URL/katello/api/ping
- if curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
+ if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $ping_api | grep "200 OK"; then
echo Satellite 6 detected at $REG_SAT_URL
satellite_version=6
- elif curl --retry 3 --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 detected at $REG_SAT_URL
satellite_version=5
else
detect_satellite_version
if [ "$satellite_version" = "6" ]; then
repos="$repos --enable ${satellite_repo}"
- curl --retry 3 --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
retry subscription-manager register $opts
retry subscription-manager $repos
retry yum install -y katello-agent || true # needed for errata reporting to satellite6
katello-package-upload
- retry subscription-manager repos --disable ${satellite_repo}
else
pushd /usr/share/rhn/
- curl --retry 3 --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
-heat_template_version: ocata
+heat_template_version: pike
description: >
Do some configuration, then reboot - sometimes needed for early-boot
-heat_template_version: ocata
+heat_template_version: pike
description: >
Do some configuration, then reboot - sometimes needed for early-boot
+++ /dev/null
-#!/bin/bash
-#
-# This delivers the aodh data migration script to be invoked as part of the tripleo
-# major upgrade workflow to migrate all the alarm data from mongodb to mysql.
-# This needs to run post controller node upgrades so new aodh mysql db configured and
-# running.
-#
-set -eu
-
-#Get existing mongodb connection
-MONGO_DB_CONNECTION="$(crudini --get /etc/ceilometer/ceilometer.conf database connection)"
-
-# Get the aodh database string from hiera data
-MYSQL_DB_CONNECTION="$(crudini --get /etc/aodh/aodh.conf database connection)"
-
-#Run migration
-/usr/bin/aodh-data-migration --nosql-conn $MONGO_DB_CONNECTION --sql-conn $MYSQL_DB_CONNECTION
-
-
+++ /dev/null
-heat_template_version: ocata
-
-description: >
- Software-config for ceilometer configuration under httpd during upgrades
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
- CeilometerWsgiMitakaNewtonPreUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: puppet
- config:
- get_file: mitaka_to_newton_ceilometer_wsgi_upgrade.pp
-
- CeilometerWsgiMitakaNewtonUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - "#!/bin/bash\n\nset -e\n\n"
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - "disable_standalone_ceilometer_api\n\n"
-
- CeilometerWsgiMitakaNewtonPostUpgradeConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: |
- #!/bin/bash
- set -e
- /usr/bin/systemctl reload httpd
-
- CeilometerWsgiMitakaNewtonPreUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPreUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonUpgradeConfigDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonPreUpgradeDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonUpgradeConfig}
-
- CeilometerWsgiMitakaNewtonPostUpgradeDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: CeilometerWsgiMitakaNewtonUpgradeConfigDeployment
- properties:
- name: CeilometerWsgiMitakaNewtonPostUpgradeDeployment
- servers: {get_param: [servers, Controller]}
- config: {get_resource: CeilometerWsgiMitakaNewtonPostUpgradeConfig}
+++ /dev/null
-#!/bin/bash
-
-set -eu
-
-check_cluster()
-{
- if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
- fi
-}
-
-check_pcsd()
-{
- if pcs status 2>&1 | grep -E 'Offline'; then
- echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
- exit 1
- fi
-}
-
-mysql_need_update()
-{
- # Shall we upgrade mysql data directory during the stack upgrade?
- if [ "$mariadb_do_major_upgrade" = "auto" ]; then
- ret=$(is_mysql_upgrade_needed)
- if [ $ret = "1" ]; then
- DO_MYSQL_UPGRADE=1
- else
- DO_MYSQL_UPGRADE=0
- fi
- echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
- elif [ "$mariadb_do_major_upgrade" = "no" ]; then
- DO_MYSQL_UPGRADE=0
- else
- DO_MYSQL_UPGRADE=1
- fi
-}
-
-check_disk_for_mysql_dump()
-{
- # Where to backup current database if mysql need to be upgraded
- MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
- MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
- # Spare disk ratio for extra safety
- MYSQL_BACKUP_SIZE_RATIO=1.2
-
- mysql_need_update
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-
- if [ -d "$MYSQL_BACKUP_DIR" ]; then
- echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
- exit 1
- fi
- mkdir "$MYSQL_BACKUP_DIR"
- if [ $? -ne 0 ]; then
- echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
- exit 1
- fi
-
- # the /root/.my.cnf is needed because we set the mysql root
- # password from liberty onwards
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- # While not ideal, this step allows us to calculate exactly how much space the dump
- # will need. Our main goal here is avoiding any chance of corruption due to disk space
- # exhaustion
- backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
- database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
- free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
- # we need at least space for a new mysql database + dump of the existing one,
- # times a small factor for additional safety room
- # note: bash doesn't do floating point math or floats in if statements,
- # so use python to apply the ratio and cast it back to integer
- required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
- if [ $required_space -ge $free_space ]; then
- echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
- exit 1
- fi
- fi
- fi
-}
-
-check_python_rpm()
-{
- # If for some reason rpm-python are missing we want to error out early enough
- if ! rpm -q rpm-python &> /dev/null; then
- echo_error "ERROR: upgrade cannot start without rpm-python installed"
- exit 1
- fi
-}
-
-check_clean_cluster()
-{
- if pcs status | grep -q Stopped:; then
- echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running."
- exit 1
- fi
-}
-
-check_galera_root_password()
-{
- # BZ: 1357112
- if [ ! -e /root/.my.cnf ]; then
- echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
- exit 1
- fi
-}
+++ /dev/null
-#!/bin/bash
-
-set -eu
-
-check_cluster
-check_pcsd
-if [[ -n $(is_bootstrap_node) ]]; then
- check_clean_cluster
-fi
-check_python_rpm
-check_galera_root_password
-check_disk_for_mysql_dump
-
-# We want to disable fencing during the cluster --stop as it might fence
-# nodes where a service fails to stop, which could be fatal during an upgrade
-# procedure. So we remember the stonith state. If it was enabled we reenable it
-# at the end of this script
-if [[ -n $(is_bootstrap_node) ]]; then
- STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
- # We create this empty file if stonith was set to true so we can reenable stonith in step2
- rm -f /var/tmp/stonith-true
- if [ $STONITH_STATE == "true" ]; then
- touch /var/tmp/stonith-true
- fi
- pcs property set stonith-enabled=false
-fi
-
-# Migrate to HA NG and fix up rabbitmq queues
-# We fix up the rabbitmq ha queues after the migration because it will
-# restart the rabbitmq resource. Doing it after the migration means no other
-# services will be restart as there are no other constraints
-if [[ -n $(is_bootstrap_node) ]]; then
- migrate_full_to_ng_ha
- rabbitmq_newton_ocata_upgrade
-fi
-
+++ /dev/null
-#!/bin/bash
-
-set -eu
-
-cluster_sync_timeout=1800
-
-# After migrating the cluster to HA-NG the services not under pacemaker's control
-# are still up and running. We need to stop them explicitely otherwise during the yum
-# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
-# is going to take a long time because rabbit is down. By having the service stopped
-# systemctl try-restart is a noop
-
-for service in $(services_to_migrate); do
- manage_systemd_service stop "${service%%-clone}"
- # So the reason for not reusing check_resource_systemd is that
- # I have observed systemctl is-active returning unknown with at least
- # one service that was stopped (See LP 1627254)
- timeout=600
- tstart=$(date +%s)
- tend=$(( $tstart + $timeout ))
- check_interval=3
- while (( $(date +%s) < $tend )); do
- if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
- echo "$service still active, sleeping $check_interval seconds."
- sleep $check_interval
- else
- # we do not care if it is inactive, unknown or failed as long as it is
- # not running
- break
- fi
-
- done
-done
-
-# In case the mysql package is updated, the database on disk must be
-# upgraded as well. This typically needs to happen during major
-# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
-#
-# Because in-place upgrades are not supported across 2+ major versions
-# (e.g. 5.5 -> 10.1), we rely on logical upgrades via dump/restore cycle
-# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
-#
-# The default is to determine automatically if upgrade is needed based
-# on mysql package versionning, but this can be overriden manually
-# to support specific upgrade scenario
-
-# Calling this function will set the DO_MYSQL_UPGRADE variable which is used
-# later
-mysql_need_update
-
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
- mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
- cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
- fi
-
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- pcs resource disable openstack-cinder-volume
- check_resource openstack-cinder-volume stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
-
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
- fi
-done
-
-# The reason we do an sql dump *and* we move the old dir out of
-# the way is because it gives us an extra level of safety in case
-# something goes wrong during the upgrade. Once the restore is
-# successful we go ahead and remove it. If the directory exists
-# we bail out as it means the upgrade process had issues in the last
-# run.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
- echo_error "ERROR: mysql backup dir already exist"
- exit 1
- fi
- mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
-special_case_ovs_upgrade_if_needed
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
-# We need to ensure at least those two configuration settings, otherwise
-# mariadb 10.1+ won't activate galera replication.
-# wsrep_cluster_address must only be set though, its value does not
-# matter because it's overriden by the galera resource agent.
-cat >> /etc/my.cnf.d/galera.cnf <<EOF
-[mysqld]
-wsrep_on = ON
-wsrep_cluster_address = gcomm://localhost
-EOF
-
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- # Scripts run via heat have no HOME variable set and this confuses
- # mysqladmin
- export HOME=/root
-
- mkdir /var/lib/mysql || /bin/true
- chown mysql:mysql /var/lib/mysql
- chmod 0755 /var/lib/mysql
- restorecon -R /var/lib/mysql/
- mysql_install_db --datadir=/var/lib/mysql --user=mysql
- chown -R mysql:mysql /var/lib/mysql/
-
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- mysqld_safe --wsrep-new-cluster &
- # We have a populated /root/.my.cnf with root/password here so
- # we need to temporarily rename it because the newly created
- # db is empty and no root password is set
- mv /root/.my.cnf /root/.my.cnf.temporary
- timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
- mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
- mv /root/.my.cnf.temporary /root/.my.cnf
- mysqladmin -u root shutdown
- # The import was successful so we may remove the folder
- rm -r "$MYSQL_BACKUP_DIR"
- fi
-fi
-
-# If we reached here without error we can safely blow away the origin
-# mysql dir from every controller
-
-# TODO: What if the upgrade fails on the bootstrap node, but not on
-# this controller. Data may be lost.
-if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
- rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
-fi
-
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [[ -n $(is_bootstrap_node) ]]; then
- if [ -f /var/tmp/stonith-true ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
- fi
- rm -f /var/tmp/stonith-true
-fi
-
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
-# LP: 1615035, required only for M/N upgrade.
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
-# LP: 1627450, required only for M/N upgrade
-crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
-
-crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
-
+++ /dev/null
-#!/bin/bash
-
-set -eu
-
-cluster_form_timeout=600
-cluster_settle_timeout=1800
-galera_sync_timeout=600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- pcs cluster start --all
-
- tstart=$(date +%s)
- while pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_form_timeout )) ; then
- echo_error "ERROR: timed out forming the cluster"
- exit 1
- fi
- done
-
- if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
- echo_error "ERROR: timed out waiting for cluster to finish transition"
- exit 1
- fi
-
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
- pcs resource enable $vip
- check_resource_pacemaker $vip started 60
- done
-fi
-
-start_or_enable_service galera
-check_resource galera started 600
-start_or_enable_service redis
-check_resource redis started 600
-# We need mongod which is now a systemd service up and running before calling
-# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes
-# so ceilometer-dbsync will fail a couple of times before that. As it retries indefinitely
-# we should be good.
-# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 am using systemctl directly atm
-systemctl start mongod
-check_resource mongod started 600
-
-if [[ -n $(is_bootstrap_node) ]]; then
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo_error "ERROR galera sync timed out"
- exit 1
- fi
- done
-
- # Run all the db syncs
- # TODO: check if this can be triggered in puppet and removed from here
- ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
- cinder-manage db sync
- glance-manage db_sync
- heat-manage --config-file /etc/heat/heat.conf db_sync
- keystone-manage db_sync
- neutron-db-manage upgrade heads
- nova-manage db sync
- nova-manage api_db sync
- nova-manage db online_data_migrations
- sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
-fi
+++ /dev/null
-#!/bin/bash
-
-set -eu
-
-start_or_enable_service rabbitmq
-check_resource rabbitmq started 600
-start_or_enable_service redis
-check_resource redis started 600
-start_or_enable_service openstack-cinder-volume
-check_resource openstack-cinder-volume started 600
-
-# start httpd so keystone is available for gnocchi
-# upgrade to run.
-systemctl start httpd
-
-# Swift isn't controled by pacemaker
-systemctl_swift start
+++ /dev/null
-#!/bin/bash
-
-set -eu
-
-if [[ -n $(is_bootstrap_node) ]]; then
- # run gnocchi upgrade
- gnocchi-upgrade
-fi
+++ /dev/null
-#!/bin/bash
-
-set -eu
-
-# We need to start the systemd services we explicitely stopped at step _1.sh
-# FIXME: Should we let puppet during the convergence step do the service enabling or
-# should we add it here?
-services=$(services_to_migrate)
-if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
- services=${services%%openstack-sahara*}
-fi
-for service in $services; do
- manage_systemd_service start "${service%%-clone}"
- check_resource_systemd "${service%%-clone}" started 600
-done
+++ /dev/null
-heat_template_version: ocata
-description: 'Upgrade for Pacemaker deployments'
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-
- UpgradeLevelNovaCompute:
- type: string
- description: Nova Compute upgrade level
- default: ''
- MySqlMajorUpgrade:
- type: string
- description: Can be auto,yes,no and influences if the major upgrade should do or detect an automatic mysql upgrade
- constraints:
- - allowed_values: ['auto', 'yes', 'no']
- default: 'auto'
- KeepSaharaServicesOnUpgrade:
- type: boolean
- default: true
- description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton
-
-
-resources:
- # TODO(jistr): for Mitaka->Newton upgrades and further we can use
- # map_merge with input_values instead of feeding params into scripts
- # via str_replace on bash snippets
-
- ControllerPacemakerUpgradeConfig_Step1:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_1.sh
-
- ControllerPacemakerUpgradeDeployment_Step1:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step2:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- upgrade_level_nova_compute='UPGRADE_LEVEL_NOVA_COMPUTE'
- params:
- UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
- - str_replace:
- template: |
- #!/bin/bash
- mariadb_do_major_upgrade='MYSQL_MAJOR_UPGRADE'
- params:
- MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_check.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_2.sh
-
- ControllerPacemakerUpgradeDeployment_Step2:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step1
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step3:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_3.sh
-
- ControllerPacemakerUpgradeDeployment_Step3:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step2
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step4:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_4.sh
-
- ControllerPacemakerUpgradeDeployment_Step4:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step3
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step4}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step5:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_5.sh
-
- ControllerPacemakerUpgradeDeployment_Step5:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step4
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step5}
- input_values: {get_param: input_values}
-
- ControllerPacemakerUpgradeConfig_Step6:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config:
- list_join:
- - ''
- - - str_replace:
- template: |
- #!/bin/bash
- keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE'
- params:
- KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
- - get_file: pacemaker_common_functions.sh
- - get_file: major_upgrade_pacemaker_migrations.sh
- - get_file: major_upgrade_controller_pacemaker_6.sh
-
- ControllerPacemakerUpgradeDeployment_Step6:
- type: OS::Heat::SoftwareDeploymentGroup
- depends_on: ControllerPacemakerUpgradeDeployment_Step5
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: ControllerPacemakerUpgradeConfig_Step6}
- input_values: {get_param: input_values}
+++ /dev/null
-#!/bin/bash
-
-# Special pieces of upgrade migration logic go into this
-# file. E.g. Pacemaker cluster transitions for existing deployments,
-# matching changes to overcloud_controller_pacemaker.pp (Puppet
-# handles deployment, this file handles migrations).
-#
-# This file shouldn't execute any action on its own, all logic should
-# be wrapped into bash functions. Upgrade scripts will source this
-# file and call the functions defined in this file where appropriate.
-#
-# The migration functions should be idempotent. If the migration has
-# been already applied, it should be possible to call the function
-# again without damaging the deployment or failing the upgrade.
-
-# If the major version of mysql is going to change after the major
-# upgrade, the database must be upgraded on disk to avoid failures
-# due to internal incompatibilities between major mysql versions
-# https://bugs.launchpad.net/tripleo/+bug/1587449
-# This function detects whether a database upgrade is required
-# after a mysql package upgrade. It returns 0 when no major upgrade
-# has to take place, 1 otherwise.
-function is_mysql_upgrade_needed {
- # The name of the package which provides mysql might differ
- # after the upgrade. Consider the generic package name, which
- # should capture the major version change (e.g. 5.5 -> 10.1)
- local name="mariadb"
- local output
- local ret
- set +e
- output=$(yum -q check-update $name)
- ret=$?
- set -e
- if [ $ret -ne 100 ]; then
- # no updates so we exit
- echo "0"
- return
- fi
-
- local currentepoch=$(rpm -q --qf "%{epoch}" $name)
- local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2)
- local currentrelease=$(rpm -q --qf "%{release}" $name)
- local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name)
- local newepoch=$(echo "$newoutput" | awk '{ print $1 }')
- local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2)
- local newrelease=$(echo "$newoutput" | awk '{ print $3 }')
-
- # With this we trigger the dump restore/path if we change either epoch or
- # version in the package If only the release tag changes we do not do it
- # FIXME: we could refine this by trying to parse the mariadb version
- # into X.Y.Z and trigger the update only if X and/or Y change.
- output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc")
- if [ "$output" != "-1" ]; then
- echo "0"
- return
- fi
- echo "1"
-}
-
-# This function returns the list of services to be migrated away from pacemaker
-# and to systemd. The reason to have these services in a separate function is because
-# this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
-# and in the function to migrate the cluster from full HA to HA NG
-function services_to_migrate {
- # The following PCMK resources the ones the we are going to delete
- PCMK_RESOURCE_TODELETE="
- httpd-clone
- memcached-clone
- mongod-clone
- neutron-dhcp-agent-clone
- neutron-l3-agent-clone
- neutron-metadata-agent-clone
- neutron-netns-cleanup-clone
- neutron-openvswitch-agent-clone
- neutron-ovs-cleanup-clone
- neutron-server-clone
- openstack-aodh-evaluator-clone
- openstack-aodh-listener-clone
- openstack-aodh-notifier-clone
- openstack-ceilometer-central-clone
- openstack-ceilometer-collector-clone
- openstack-ceilometer-notification-clone
- openstack-cinder-api-clone
- openstack-cinder-scheduler-clone
- openstack-glance-api-clone
- openstack-gnocchi-metricd-clone
- openstack-gnocchi-statsd-clone
- openstack-heat-api-cfn-clone
- openstack-heat-api-clone
- openstack-heat-api-cloudwatch-clone
- openstack-heat-engine-clone
- openstack-nova-api-clone
- openstack-nova-conductor-clone
- openstack-nova-consoleauth-clone
- openstack-nova-novncproxy-clone
- openstack-nova-scheduler-clone
- openstack-sahara-api-clone
- openstack-sahara-engine-clone
- "
- echo $PCMK_RESOURCE_TODELETE
-}
-
-# This function will migrate a mitaka system where all the resources are managed
-# via pacemaker to a newton setup where only a few services will be managed by pacemaker
-# On a high-level it will operate as follows:
-# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
-# during the conversion
-# 2. Remove all the colocation constraints and then the ordering constraints, except the
-# ones related to haproxy/VIPs which exist in Newton as well
-# 3. Take the cluster out of maintenance-mode
-# 4. Remove all the resources that won't be managed by pacemaker in newton. The
-# outcome will be
-# that they are stopped and removed from pacemakers control
-# 5. Do a resource cleanup to make sure the cluster is in a clean state
-function migrate_full_to_ng_ha {
- if [[ -n $(pcmk_running) ]]; then
- pcs property set maintenance-mode=true
-
- # First we go through all the colocation constraints (except the ones
- # we want to keep, i.e. the haproxy/ip ones) and we remove those
- COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $COL_CONSTRAINTS; do
- log_debug "Deleting colocation constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
-
- # Now we kill all the ordering constraints (except the haproxy/ip ones)
- ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
- for constraint in $ORD_CONSTRAINTS; do
- log_debug "Deleting ordering constraint $constraint from CIB"
- pcs constraint remove "$constraint"
- done
- # At this stage all the pacemaker resources are removed from the CIB.
- # Once we remove the maintenance-mode those systemd resources will keep
- # on running. They shall be systemd enabled via the puppet converge
- # step later on
- pcs property set maintenance-mode=false
-
- # At this stage there are no constraints whatsoever except the haproxy/ip ones
- # which we want to keep. We now disable and then delete each resource
- # that will move to systemd.
- # We want the systemd resources be stopped before doing "yum update",
- # that way "systemctl try-restart <service>" is no-op because the
- # service was down already
- PCS_STATUS_OUTPUT="$(pcs status)"
- for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
- if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
- log_debug "Deleting $resource from the CIB"
- if ! pcs resource disable "$resource" --wait=600; then
- echo_error "ERROR: resource $resource failed to be disabled"
- exit 1
- fi
- pcs resource delete --force "$resource"
- else
- log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
- fi
- done
-
- # We need to do a pcs resource cleanup here + crm_resource --wait to
- # make sure the cluster is in a clean state before we stop everything,
- # upgrade and restart everything
- pcs resource cleanup
- # We are making sure here that the cluster is stable before proceeding
- if ! timeout -k 10 600 crm_resource --wait; then
- echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
- exit 1
- fi
- fi
-}
-
-function disable_standalone_ceilometer_api {
- if [[ -n $(is_bootstrap_node) ]]; then
- if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then
- # Disable pacemaker resources for ceilometer-api
- manage_pacemaker_service disable openstack-ceilometer-api
- check_resource_pacemaker openstack-ceilometer-api stopped 600
- pcs resource delete openstack-ceilometer-api --wait=600
- fi
- fi
-}
-
-
-# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton
-# In newton we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
-# In ocata we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
-# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
-# Note that changing an attribute like this makes the rabbitmq resource restart
-function rabbitmq_newton_ocata_upgrade {
- if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
- # Number of controller is obtained by counting how many hostnames we
- # have in controller_node_names hiera key
- nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
- nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
- if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
- echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
- exit 1
- fi
- pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
- fi
-}
+++ /dev/null
-heat_template_version: ocata
-
-description: >
- Software-config for performing aodh data migration
-
-parameters:
- servers:
- type: json
- input_values:
- type: json
- description: input values for the software deployments
-resources:
-
- AodhMysqlMigrationScriptConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: aodh_data_migration.sh}
-
- AodhMysqlMigrationScriptDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, Controller]}
- config: {get_resource: AodhMysqlMigrationScriptConfig}
- input_values: {get_param: input_values}
+++ /dev/null
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This puppet manifest is to be used only during a Mitaka->Newton upgrade
-# It configures ceilometer to be run under httpd but it makes sure to not
-# restart any services. This snippet needs to be called before init as a
-# pre upgrade migration.
-
-Service <|
- tag == 'ceilometer-service'
-|> {
- hasrestart => true,
- restart => '/bin/true',
- start => '/bin/true',
- stop => '/bin/true',
-}
-
-if $::hostname == downcase(hiera('bootstrap_nodeid')) {
- $pacemaker_master = true
- $sync_db = true
-} else {
- $pacemaker_master = false
- $sync_db = false
-}
-
-include ::tripleo::packages
-
-
-if str2bool(hiera('mongodb::server::ipv6', false)) {
- $mongo_node_ips_with_port_prefixed = prefix(hiera('mongodb_node_ips'), '[')
- $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
-} else {
- $mongo_node_ips_with_port = suffix(hiera('mongodb_node_ips'), ':27017')
-}
-$mongodb_replset = hiera('mongodb::server::replset')
-$mongo_node_string = join($mongo_node_ips_with_port, ',')
-$database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-
-$rabbit_hosts = hiera('rabbitmq_node_ips', undef)
-$rabbit_port = hiera('ceilometer::rabbit_port', 5672)
-$rabbit_endpoints = suffix(any2array(normalize_ip_for_uri($rabbit_hosts)), ":${rabbit_port}")
-
-class { '::ceilometer' :
- rabbit_hosts => $rabbit_endpoints,
-}
-
-class {'::ceilometer::db':
- database_connection => $database_connection,
-}
-
-if $sync_db {
- include ::ceilometer::db::sync
-}
-
-include ::ceilometer::config
-
-class { '::ceilometer::api':
- enabled => true,
- service_name => 'httpd',
- keystone_password => hiera('ceilometer::keystone::auth::password'),
- identity_uri => hiera('ceilometer::keystone::authtoken::auth_url'),
- auth_uri => hiera('ceilometer::keystone::authtoken::auth_uri'),
- keystone_tenant => hiera('ceilometer::keystone::authtoken::project_name'),
-}
-
-class { '::apache' :
- service_enable => false,
- service_manage => true,
- service_restart => '/bin/true',
- purge_configs => false,
- purge_vhost_dir => false,
-}
-
-# To ensure existing ports are not overridden
-class { '::aodh::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::gnocchi::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-
-class { '::keystone::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
-class { '::ceilometer::wsgi::apache':
- servername => $::hostname,
- ssl => false,
-}
}
# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+# Update condition and add --notriggerun for +bug/1669714
function special_case_ovs_upgrade_if_needed {
- if [[ -n $(rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart") ]]; then
- echo "Manual upgrade of openvswitch - restart in postun detected"
+ if rpm -qa | grep "^openvswitch-2.5.0-14" || rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep "systemctl.*try-restart" ; then
+ echo "Manual upgrade of openvswitch - ovs-2.5.0-14 or restart in postun detected"
rm -rf OVS_UPGRADE
mkdir OVS_UPGRADE && pushd OVS_UPGRADE
echo "Attempting to downloading latest openvswitch with yumdownloader"
if rpm -U --test $pkg 2>&1 | grep "already installed" ; then
echo "Looks like newer version of $pkg is already installed, skipping"
else
- echo "Updating $pkg with nopostun option"
- rpm -U --replacepkgs --nopostun $pkg
+ echo "Updating $pkg with --nopostun --notriggerun"
+ rpm -U --replacepkgs --nopostun --notriggerun $pkg
fi
done
popd
}
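As wired up later in this series (the role upgrade script injects the function with declare -f and yum_update.sh calls it just before the package update), the function is meant to be sourced and then invoked once per node before yum runs. A minimal calling sketch:

    # Sketch only: run the OVS special case first, then the general package update.
    special_case_ovs_upgrade_if_needed
    yum -y update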
+# This code is meant to fix https://bugs.launchpad.net/tripleo/+bug/1686357 on
+# existing setups via the minor update workflow, and it is meant to be idempotent.
+# We need to run this before the yum update because the fix must be applied even
+# when there are no packages to update on the system (in which case the script exits early).
+# This code must be called with set +eu (due to the ocf scripts being sourced)
+function fixup_wrong_ipv6_vip {
+    # This XPath query identifies all the VIPs in pacemaker with a /64 netmask. Those are IPv6-only resources that have the wrong netmask.
+ # This gives the address of the resource in the CIB, one address per line. For example:
+ # /cib/configuration/resources/primitive[@id='ip-2001.db8.ca2.4..10']/instance_attributes[@id='ip-2001.db8.ca2.4..10-instance_attributes']\
+ # /nvpair[@id='ip-2001.db8.ca2.4..10-instance_attributes-cidr_netmask']
+ vip_xpath_query="//resources/primitive[@type='IPaddr2']/instance_attributes/nvpair[@name='cidr_netmask' and @value='64']"
+ vip_xpath_xml_addresses=$(cibadmin --query --xpath "$vip_xpath_query" -e 2>/dev/null)
+ # The following extracts the @id value of the resource
+ vip_resources_to_fix=$(echo -e "$vip_xpath_xml_addresses" | sed -n "s/.*primitive\[@id='\([^']*\)'.*/\1/p")
+    # Running this in a subshell so that the sourced files cannot affect the running script
+ (
+ OCF_PATH="/usr/lib/ocf/lib/heartbeat"
+ if [ -n "$vip_resources_to_fix" -a -f $OCF_PATH/ocf-shellfuncs -a -f $OCF_PATH/findif.sh ]; then
+ source $OCF_PATH/ocf-shellfuncs
+ source $OCF_PATH/findif.sh
+ for resource in $vip_resources_to_fix; do
+ echo "Updating IPv6 VIP $resource with a /128 and a correct addrlabel"
+ # The following will give us something like:
+ # <nvpair id="ip-2001.db8.ca2.4..10-instance_attributes-ip" name="ip" value="2001:db8:ca2:4::10"/>
+ ip_cib_nvpair=$(cibadmin --query --xpath "//resources/primitive[@type='IPaddr2' and @id='$resource']/instance_attributes/nvpair[@name='ip']")
+ # Let's filter out the value of the nvpair to get the ip address
+ ip_address=$(echo $ip_cib_nvpair | xmllint --xpath 'string(//nvpair/@value)' -)
+ OCF_RESKEY_cidr_netmask="64"
+ OCF_RESKEY_ip="$ip_address"
+ # Unfortunately due to https://bugzilla.redhat.com/show_bug.cgi?id=1445628
+          # we need to find out the appropriate nic given the ip address.
+ nic=$(findif $ip_address | awk '{ print $1 }')
+ ret=$?
+ if [ -z "$nic" -o $ret -ne 0 ]; then
+ echo "NIC autodetection failed for VIP $ip_address, not updating VIPs"
+ # Only exits the subshell
+ exit 1
+ fi
+ ocf_run -info pcs resource update --wait "$resource" ip="$ip_address" cidr_netmask=128 nic="$nic" lvs_ipv6_addrlabel=true lvs_ipv6_addrlabel_value=99
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ echo "pcs resource update for VIP $resource failed, not updating VIPs"
+ # Only exits the subshell
+ exit 1
+ fi
+ done
+ fi
+ )
+}
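The sourced OCF helpers are not -e/-u clean, so callers have to relax those shell options around the call and check the return code themselves. A minimal calling sketch, matching the wiring added to yum_update.sh further down:

    set +eu                 # the OCF shell functions do not tolerate -e/-u
    fixup_wrong_ipv6_vip
    ret=$?
    set -eu
    if [ $ret -ne 0 ]; then
        echo "Fixing up IPv6 VIPs failed, stopping here"
        exit 1
    fi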
-heat_template_version: ocata
+heat_template_version: pike
description: 'Post-Puppet Config for Pacemaker deployments'
parameters:
resources:
- ControllerPostPuppetMaintenanceModeConfig:
+{%- for role in roles -%}
+{% if "controller" in role.tags %}
+ {{role.name}}PostPuppetMaintenanceModeConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
pcs property set maintenance-mode=false
fi
- ControllerPostPuppetMaintenanceModeDeployment:
+ {{role.name}}PostPuppetMaintenanceModeDeployment:
type: OS::Heat::SoftwareDeployments
properties:
- servers: {get_param: servers}
- config: {get_resource: ControllerPostPuppetMaintenanceModeConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}PostPuppetMaintenanceModeConfig}
input_values: {get_param: input_values}
- ControllerPostPuppetRestart:
- type: OS::TripleO::Tasks::ControllerPostPuppetRestart
- depends_on: ControllerPostPuppetMaintenanceModeDeployment
+ {{role.name}}PostPuppetRestart:
+ type: OS::TripleO::Tasks::{{role.name}}PostPuppetRestart
+ depends_on: {{role.name}}PostPuppetMaintenanceModeDeployment
properties:
- servers: {get_param: servers}
+ servers: {get_param: [servers, {{role.name}}]}
input_values: {get_param: input_values}
+{%- endif -%}
+{% endfor %}
+
-heat_template_version: ocata
+heat_template_version: pike
description: 'Post-Puppet restart config for Pacemaker deployments'
parameters:
ControllerPostPuppetRestartDeployment:
type: OS::Heat::SoftwareDeployments
properties:
- servers: {get_param: servers}
+ servers: {get_param: servers}
config: {get_resource: ControllerPostPuppetRestartConfig}
input_values: {get_param: input_values}
-heat_template_version: ocata
+heat_template_version: pike
description: 'Pre-Puppet Config for Pacemaker deployments'
parameters:
ControllerPrePuppetMaintenanceModeDeployment:
type: OS::Heat::SoftwareDeployments
properties:
- servers: {get_param: servers}
+ servers: {get_param: servers}
config: {get_resource: ControllerPrePuppetMaintenanceModeConfig}
input_values: {get_param: input_values}
export FACTER_deploy_config_name="${role}Deployment_Step${step}"
if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
set +e
- puppet apply --detailed-exitcodes "${manifest}"
+ puppet apply --detailed-exitcodes \
+ --modulepath \
+ /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+ "${manifest}"
rc=$?
echo "puppet apply exited with exit code $rc"
else
--- /dev/null
+heat_template_version: pike
+
+description: >
+  This is a template which will fetch the ssh host public keys.
+
+parameters:
+ server:
+ description: ID of the node to apply this config to
+ type: string
+
+resources:
+ SshHostPubKeyConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ outputs:
+ - name: rsa
+ - name: ecdsa
+ - name: ed25519
+ config: |
+ #!/bin/sh -x
+ test -e '/etc/ssh/ssh_host_rsa_key.pub' && cat /etc/ssh/ssh_host_rsa_key.pub > $heat_outputs_path.rsa
+ test -e '/etc/ssh/ssh_host_ecdsa_key.pub' && cat /etc/ssh/ssh_host_ecdsa_key.pub > $heat_outputs_path.ecdsa
+ test -e '/etc/ssh/ssh_host_ed25519_key.pub' && cat /etc/ssh/ssh_host_ed25519_key.pub > $heat_outputs_path.ed25519
+
+ SshHostPubKeyDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: SshHostPubKeyConfig}
+ server: {get_param: server}
+
+
+outputs:
+ ecdsa:
+ description: Host ssh public key (ecdsa)
+ value: {get_attr: [SshHostPubKeyDeployment, ecdsa]}
+ rsa:
+ description: Host ssh public key (rsa)
+ value: {get_attr: [SshHostPubKeyDeployment, rsa]}
+ ed25519:
+ description: Host ssh public key (ed25519)
+ value: {get_attr: [SshHostPubKeyDeployment, ed25519]}
--- /dev/null
+heat_template_version: pike
+description: 'SSH Known Hosts Config'
+
+parameters:
+ known_hosts:
+ type: string
+
+resources:
+
+ SSHKnownHostsConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: known_hosts
+ default: {get_param: known_hosts}
+ config: |
+ #!/bin/bash
+ set -eux
+ set -o pipefail
+
+ echo "Creating ssh known hosts file"
+
+ if [ ! -z "${known_hosts}" ]; then
+ echo "${known_hosts}"
+ echo -ne "${known_hosts}" > /etc/ssh/ssh_known_hosts
+ chmod 0644 /etc/ssh/ssh_known_hosts
+ else
+ rm -f /etc/ssh/ssh_known_hosts
+ echo "No ssh known hosts"
+ fi
+
+outputs:
+ OS::stack_id:
+ description: The SSHKnownHostsConfig resource.
+ value: {get_resource: SSHKnownHostsConfig}
\ No newline at end of file
+++ /dev/null
-heat_template_version: ocata
-
-parameters:
- servers:
- type: json
- SwiftRingGetTempurl:
- default: ''
- description: A temporary Swift URL to download rings from.
- type: string
-
-resources:
- SwiftRingDeployConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: swift_ring_get_tempurl
- config: |
- #!/bin/sh
- pushd /
- curl --insecure --silent "${swift_ring_get_tempurl}" | tar xz || true
- popd
-
- SwiftRingDeploy:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: SwiftRingDeploy
- config: {get_resource: SwiftRingDeployConfig}
- servers: {get_param: servers}
- input_values:
- swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
+++ /dev/null
-heat_template_version: ocata
-
-parameters:
- servers:
- type: json
- SwiftRingPutTempurl:
- default: ''
- description: A temporary Swift URL to upload rings to.
- type: string
-
-resources:
- SwiftRingUpdateConfig:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- inputs:
- - name: swift_ring_put_tempurl
- config: |
- #!/bin/sh
- TMP_DATA=$(mktemp -d)
- function cleanup {
- rm -Rf "$TMP_DATA"
- }
- trap cleanup EXIT
- # sanity check in case rings are not consistent within cluster
- swift-recon --md5 | grep -q "doesn't match" && exit 1
- pushd ${TMP_DATA}
- tar -cvzf swift-rings.tar.gz /etc/swift/*.builder /etc/swift/*.ring.gz /etc/swift/backups/*
- resp=`curl --insecure --silent -X PUT "${swift_ring_put_tempurl}" --write-out "%{http_code}" --data-binary @swift-rings.tar.gz`
- popd
- if [ "$resp" != "201" ]; then
- exit 1
- fi
-
- SwiftRingUpdate:
- type: OS::Heat::SoftwareDeployments
- properties:
- name: SwiftRingUpdate
- config: {get_resource: SwiftRingUpdateConfig}
- servers: {get_param: servers}
- input_values:
- swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
$(declare -f log_debug)
$(declare -f manage_systemd_service)
$(declare -f systemctl_swift)
+$(declare -f special_case_ovs_upgrade_if_needed)
# pin nova messaging +-1 for the nova-compute service
if [[ -n \$NOVA_COMPUTE ]]; then
crudini --set /etc/nova/nova.conf upgrade_levels compute auto
fi
-$(declare -f special_case_ovs_upgrade_if_needed)
special_case_ovs_upgrade_if_needed
-yum -y install python-zaqarclient # needed for os-collect-config
if [[ -n \$SWIFT_STORAGE ]]; then
systemctl_swift stop
fi
+
yum -y update
+
if [[ -n \$SWIFT_STORAGE ]]; then
systemctl_swift start
fi
# Due to bug#1640177 we need to restart compute agent
if [[ -n \$NOVA_COMPUTE ]]; then
- echo "Restarting openstack ceilometer agent compute"
+ log_debug "Restarting openstack ceilometer agent compute"
systemctl restart openstack-ceilometer-compute
+ yum install -y openstack-nova-migration
fi
# Apply puppet manifest to converge just right after the ${ROLE} upgrade
$(declare -f run_puppet)
for step in 1 2 3 4 5 6; do
+ log_debug "Running puppet step \$step for ${ROLE}"
if ! run_puppet /root/${ROLE}_puppet_config.pp ${ROLE} \${step}; then
- echo "Puppet failure at step \${step}"
+ log_debug "Puppet failure at step \${step}"
exit 1
fi
+ log_debug "Completed puppet step \$step"
done
+
+log_debug "TripleO upgrade run completed."
+
ENDOFCAT
# ensure the permissions are OK
fi
touch "$timestamp_file"
-command_arguments=${command_arguments:-}
-
-list_updates=$(yum list updates)
-
-if [[ "$list_updates" == "" ]]; then
- echo "No packages require updating"
- exit 0
-fi
-
pacemaker_status=""
-if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then
+# We include word boundaries in order to not match pacemaker_remote
+if hiera -c /etc/puppet/hiera.yaml service_names | grep -q '\bpacemaker\b'; then
pacemaker_status=$(systemctl is-active pacemaker)
fi
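The word boundaries are what keep a node that only runs pacemaker_remote from being treated as a full pacemaker node. An illustrative check of the pattern, using hypothetical input strings against the same grep expression:

    printf 'pacemaker_remote\n' | grep -q '\bpacemaker\b' || echo "pacemaker_remote: no match"
    printf 'pacemaker\n' | grep -q '\bpacemaker\b' && echo "pacemaker: match"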
-# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
-# and https://bugs.launchpad.net/tripleo/+bug/1634851
+# (NB: when backporting this s/pacemaker_short_bootstrap_node_name/bootstrap_nodeid)
+# This runs before the yum_update so we are guaranteed to run it even in the absence
+# of packages to update (the check for -z "$update_identifier" guarantees that this
+# is run only on overcloud stack update -i)
if [[ "$pacemaker_status" == "active" && \
- "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]] ; then
- if pcs resource show rabbitmq | grep -E "start.*timeout=100"; then
- pcs resource update rabbitmq op start timeout=200s
- fi
- if pcs resource show rabbitmq | grep -E "stop.*timeout=90"; then
- pcs resource update rabbitmq op stop timeout=200s
- fi
- if pcs resource show redis | grep -E "start.*timeout=120"; then
- pcs resource update redis op start timeout=200s
- fi
- if pcs resource show redis | grep -E "stop.*timeout=120"; then
- pcs resource update redis op stop timeout=200s
+ "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name)" == "$(facter hostname)" ]] ; then \
+ # OCF scripts don't cope with -eu
+ echo "Verifying if we need to fix up any IPv6 VIPs"
+ set +eu
+ fixup_wrong_ipv6_vip
+ ret=$?
+ set -eu
+ if [ $ret -ne 0 ]; then
+ echo "Fixing up IPv6 VIPs failed. Stopping here. (See https://bugs.launchpad.net/tripleo/+bug/1686357 for more info)"
+ exit 1
fi
fi
-# Special-case OVS for https://bugs.launchpad.net/tripleo/+bug/1635205
+command_arguments=${command_arguments:-}
+
+# yum check-update exits 100 if updates are available
+set +e
+check_update=$(yum check-update 2>&1)
+check_update_exit=$?
+set -e
+
+if [[ "$check_update_exit" == "1" ]]; then
+ echo "Failed to check for package updates"
+ echo "$check_update"
+ exit 1
+elif [[ "$check_update_exit" != "100" ]]; then
+ echo "No packages require updating"
+ exit 0
+fi
+
+# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
special_case_ovs_upgrade_if_needed
if [[ "$pacemaker_status" == "active" ]] ; then
echo "$result"
echo "yum return code: $return_code"
-# Writes any changes caused by alterations to os-net-config and bounces the
-# interfaces *before* restarting the cluster.
-os-net-config -c /etc/os-net-config/config.json -v --detailed-exit-codes
-RETVAL=$?
-if [[ $RETVAL == 2 ]]; then
- echo "os-net-config: interface configuration files updated successfully"
-elif [[ $RETVAL != 0 ]]; then
- echo "ERROR: os-net-config configuration failed"
- exit $RETVAL
-fi
-
if [[ "$pacemaker_status" == "active" ]] ; then
echo "Starting cluster node"
pcs cluster start
fi
done
- tstart=$(date +%s)
- while ! clustercheck; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > galera_sync_timeout )) ; then
- echo "ERROR galera sync timed out"
- exit 1
- fi
- done
+ RETVAL=$( pcs resource show galera-master | grep wsrep_cluster_address | grep -q `crm_node -n` ; echo $? )
+
+ if [[ $RETVAL -eq 0 && -e /etc/sysconfig/clustercheck ]]; then
+ tstart=$(date +%s)
+ while ! clustercheck; do
+ sleep 5
+ tnow=$(date +%s)
+ if (( tnow-tstart > galera_sync_timeout )) ; then
+ echo "ERROR galera sync timed out"
+ exit 1
+ fi
+ done
+ fi
echo "Waiting for pacemaker cluster to settle"
if ! timeout -k 10 $cluster_settle_timeout crm_resource --wait; then
pcs status
fi
-echo "Finished yum_update.sh on server $deploy_server_id at `date`"
+
+echo "Finished yum_update.sh on server $deploy_server_id at `date` with return code: $return_code"
exit $return_code
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software-config for performing package updates using yum
-heat_template_version: ocata
+heat_template_version: pike
description: 'No-op yum update task'
resources:
-heat_template_version: ocata
+heat_template_version: pike
parameters:
ContrailRepo:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configure os-net-config mappings for specific nodes
-heat_template_version: ocata
+heat_template_version: pike
description: >
This is a default no-op template which provides empty user-data
-heat_template_version: ocata
+heat_template_version: pike
description: >
This is first boot configuration for development purposes. It allows
-heat_template_version: ocata
+heat_template_version: pike
# NOTE: You don't need to pass the parameter explicitly from the
# parent template, it can be specified via the parameter_defaults
-heat_template_version: ocata
+heat_template_version: pike
parameters:
# Can be overridden via parameter_defaults in the environment
-heat_template_version: ocata
+heat_template_version: pike
description: >
Uses cloud-init to enable root logins and set the root password.
-heat_template_version: ocata
+heat_template_version: pike
description: 'All Hosts Config'
parameters:
The content that should be appended to your /etc/hosts if you want to get
hostname-based access to the deployed nodes (useful for testing without
setting up a DNS).
- value: {get_attr: [hostsConfigImpl, config, hosts]}
+ value: {get_param: hosts}
OS::stack_id:
description: The ID of the hostsConfigImpl resource.
value: {get_resource: hostsConfigImpl}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge.
parameters:
ControlPlaneDefaultRoute: # Override this via parameter_defaults
description: The default route of the control plane network.
type: string
- default: 192.0.2.1
+ default: 192.168.24.1
EC2MetadataIp: # Override this via parameter_defaults
description: The IP address of the EC2 metadata server.
type: string
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to no-op for os-net-config. Using this will allow you
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the ceph storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the cinder storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role with IPv6
on the External network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the swift storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the ceph storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the cinder storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the
compute role with external bridge for DVR.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the controller role with IPv6 on the External
network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the swift storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the ceph storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the cinder storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the swift storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the ceph storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the cinder storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role. No external IP is configured.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the swift storage role.
parameters:
def generate_endpoint_map_template(config):
return collections.OrderedDict([
- ('heat_template_version', 'ocata'),
+ ('heat_template_version', 'pike'),
('description', 'A map of OpenStack endpoints. Since the endpoints '
'are URLs, we need to have brackets around IPv6 IP addresses. The '
'inputs to these parameters come from net_ip_uri_map, which will '
net_param: KeystonePublicApi
uri_suffixes:
'': /v2.0
- EC2: /v2.0/ec2tokens
V3: /v3
names:
EC2: KeystoneEC2
### This file is automatically generated from endpoint_data.yaml
### by the script build_endpoint_map.py
-heat_template_version: ocata
+heat_template_version: pike
description: A map of OpenStack endpoints. Since the endpoints are URLs,
we need to have brackets around IPv6 IP addresses. The inputs to these
parameters come from net_ip_uri_map, which will include these brackets
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, KeystoneAdmin, port]
- KeystoneEC2:
- host:
- str_replace:
- template:
- get_param: [EndpointMap, KeystoneInternal, host]
- params:
- CLOUDNAME:
- get_param:
- - CloudEndpoints
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- IP_ADDRESS:
- get_param:
- - NetIpMap
- - str_replace:
- params:
- NETWORK:
- get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- template: NETWORK_uri
- host_nobrackets:
- str_replace:
- template:
- get_param: [EndpointMap, KeystoneInternal, host]
- params:
- CLOUDNAME:
- get_param:
- - CloudEndpoints
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- IP_ADDRESS:
- get_param:
- - NetIpMap
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- port:
- get_param: [EndpointMap, KeystoneInternal, port]
- protocol:
- get_param: [EndpointMap, KeystoneInternal, protocol]
- uri:
- list_join:
- - ''
- - - get_param: [EndpointMap, KeystoneInternal, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, KeystoneInternal, host]
- params:
- CLOUDNAME:
- get_param:
- - CloudEndpoints
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- IP_ADDRESS:
- get_param:
- - NetIpMap
- - str_replace:
- params:
- NETWORK:
- get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- template: NETWORK_uri
- - ':'
- - get_param: [EndpointMap, KeystoneInternal, port]
- - /v2.0/ec2tokens
- uri_no_suffix:
- list_join:
- - ''
- - - get_param: [EndpointMap, KeystoneInternal, protocol]
- - ://
- - str_replace:
- template:
- get_param: [EndpointMap, KeystoneInternal, host]
- params:
- CLOUDNAME:
- get_param:
- - CloudEndpoints
- - get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- IP_ADDRESS:
- get_param:
- - NetIpMap
- - str_replace:
- params:
- NETWORK:
- get_param: [ServiceNetMap, KeystonePublicApiNetwork]
- template: NETWORK_uri
- - ':'
- - get_param: [EndpointMap, KeystoneInternal, port]
KeystoneInternal:
host:
str_replace:
-heat_template_version: ocata
+heat_template_version: pike
description: >
External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
-heat_template_version: ocata
+heat_template_version: pike
description: >
External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Internal API network. Used for most APIs, Database, RPC.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Internal API network. Used for most APIs, Database, RPC.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Management network. System administration, SSH, DNS, NTP, etc. This network
-heat_template_version: ocata
+heat_template_version: pike
description: >
Management network. System administration, SSH, DNS, NTP, etc. This network
--- /dev/null
+heat_template_version: pike
+
+description: Create networks to split out Overcloud traffic
+
+resources:
+
+ {%- for network in networks %}
+ {%- if network.name != 'InternalApi' %}
+ {{network.name}}Network:
+ {%- else %}
+ InternalNetwork:
+ {%- endif %}
+ type: OS::TripleO::Network::{{network.name}}
+ {%- endfor %}
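+ # Illustrative rendering only: assuming network_data.yaml defines a network
+ # named "External", the loop above would emit a resource such as
+ #   ExternalNetwork:
+ #     type: OS::TripleO::Network::External
+ # while InternalApi keeps its legacy "InternalNetwork" resource name.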
+
+ NetworkExtraConfig:
+ type: OS::TripleO::Network::ExtraConfig
+++ /dev/null
-heat_template_version: ocata
-
-description: Create networks to split out Overcloud traffic
-
-resources:
-
- ExternalNetwork:
- type: OS::TripleO::Network::External
-
- InternalNetwork:
- type: OS::TripleO::Network::InternalApi
-
- StorageMgmtNetwork:
- type: OS::TripleO::Network::StorageMgmt
-
- StorageNetwork:
- type: OS::TripleO::Network::Storage
-
- TenantNetwork:
- type: OS::TripleO::Network::Tenant
-
- ManagementNetwork:
- type: OS::TripleO::Network::Management
-
- NetworkExtraConfig:
- type: OS::TripleO::Network::ExtraConfig
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port for a VIP on the undercloud ctlplane network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the external network. The IP address will be chosen
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the external network. The IP address will be chosen
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a service mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a service mapped list of IPv6 IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the internal_api network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the internal_api network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the management network. The IP address will be chosen
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the management network. The IP address will be chosen
-heat_template_version: ocata
+heat_template_version: pike
parameters:
ControlPlaneIpList:
-heat_template_version: ocata
+heat_template_version: pike
parameters:
ControlPlaneIp:
-heat_template_version: ocata
+heat_template_version: pike
parameters:
# Set these via parameter defaults to configure external VIPs
-heat_template_version: ocata
+heat_template_version: pike
parameters:
# Set these via parameter defaults to configure external VIPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns the control plane port (provisioning network) as the ip_address.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the storage network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the storage_mgmt API network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the storage_mgmt API network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the storage network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the tenant network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the tenant network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port for a VIP on the isolated network NetworkName.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port for a VIP on the isolated network NetworkName.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Mapping of service_name_network -> network name
HeatApiCfnNetwork: internal_api
HeatApiCloudwatchNetwork: internal_api
NovaApiNetwork: internal_api
+ NovaColdMigrationNetwork: ctlplane
NovaPlacementNetwork: internal_api
NovaMetadataNetwork: internal_api
NovaVncProxyNetwork: internal_api
HorizonNetwork: internal_api
MemcachedNetwork: internal_api
RabbitmqNetwork: internal_api
+ QdrNetwork: internal_api
RedisNetwork: internal_api
MysqlNetwork: internal_api
CephClusterNetwork: storage_mgmt
-heat_template_version: ocata
+heat_template_version: pike
description: >
Storage network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Storage management network. Storage replication, etc.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Storage management network. Storage replication, etc.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Storage network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Tenant network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Tenant IPv6 network.
--- /dev/null
+# List of networks, used for j2 templating of enabled networks
+#
+# Supported values:
+#
+# name: Name of the network (mandatory)
+# name_lower: lowercase version of name used for filenames
+# (optional, defaults to name.lower())
+# vlan: vlan for the network (optional)
+# gateway: gateway for the network (optional)
+# enabled: Is the network enabled (optional, defaults to true)
+# vip: Enable creation of a virtual IP on this network
+# [TODO] (dsneddon@redhat.com) - Enable dynamic creation of VIP ports, to support
+# VIPs on non-default networks. See https://bugs.launchpad.net/tripleo/+bug/1667104
+#
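+# A hypothetical entry illustrating the optional fields described above (the
+# values are examples only, not defaults):
+#
+# - name: StorageNFS
+#   name_lower: storage_nfs
+#   vlan: 70
+#   gateway: 172.17.0.1
+#   enabled: true
+#   vip: true
+#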
+- name: External
+ vip: true
+- name: InternalApi
+ name_lower: internal_api
+ vip: true
+- name: Storage
+ vip: true
+- name: StorageMgmt
+ name_lower: storage_mgmt
+ vip: true
+- name: Tenant
+ vip: false # Tenant network does not use VIPs
+- name: Management
+ # Management network is disabled by default
+ enabled: false
+ vip: false # Management network does not use VIPs
OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
+ OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
+ OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
# Tasks (for internal TripleO usage)
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
- OS::TripleO::Tasks::SwiftRingDeploy: extraconfig/tasks/swift-ring-deploy.yaml
- OS::TripleO::Tasks::SwiftRingUpdate: extraconfig/tasks/swift-ring-update.yaml
-
{% for role in roles %}
OS::TripleO::{{role.name}}::PreNetworkConfig: OS::Heat::None
OS::TripleO::{{role.name}}PostDeploySteps: puppet/post.yaml
OS::TripleO::Tasks::{{role.name}}PostConfig: OS::Heat::None
OS::TripleO::{{role.name}}ExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
# Port assignments for the {{role.name}} role
+ {%- if role.name != 'ObjectStorage' %}
+ {%- for network in networks %}
+ OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: network/ports/noop.yaml
+ {%- endfor %}
+ {%- else %}
# Note we have to special-case ObjectStorage for backwards compatibility
- {% if role.name != 'ObjectStorage' %}
- OS::TripleO::{{role.name}}::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::TenantPort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::ManagementPort: network/ports/noop.yaml
- {% else %}
- OS::TripleO::SwiftStorage::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::TenantPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::ManagementPort: network/ports/noop.yaml
- {% endif %}
+ {%- for network in networks %}
+ OS::TripleO::SwiftStorage::Ports::{{network.name}}Port: network/ports/noop.yaml
+ {%- endfor %}
+ {%- endif %}
OS::TripleO::{{role.name}}::Net::SoftwareConfig: net-config-noop.yaml
{% endfor %}
OS::TripleO::ServiceServerMetadataHook: OS::Heat::None
OS::TripleO::Server: OS::Nova::Server
+{% for role in roles %}
+ OS::TripleO::{{role.name}}Server: OS::TripleO::Server
+{% endfor %}
# This creates the "heat-admin" user for all OS images by default
# To disable, replace with firstboot/userdata_default.yaml
OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
-{% for role in roles %}
- OS::TripleO::Tasks::{{role.name}}PrePuppet: OS::Heat::None
- OS::TripleO::Tasks::{{role.name}}PostPuppet: OS::Heat::None
-{% endfor %}
-
# "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
# phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
# configuration with knowledge of all nodes in the cluster is required vs single
# TripleO overcloud networks
OS::TripleO::Network: network/networks.yaml
- OS::TripleO::Network::External: OS::Heat::None
- OS::TripleO::Network::InternalApi: OS::Heat::None
- OS::TripleO::Network::StorageMgmt: OS::Heat::None
- OS::TripleO::Network::Storage: OS::Heat::None
- OS::TripleO::Network::Tenant: OS::Heat::None
- OS::TripleO::Network::Management: OS::Heat::None
+ {%- for network in networks %}
+ OS::TripleO::Network::{{network.name}}: OS::Heat::None
+ {%- endfor %}
OS::TripleO::Network::ExtraConfig: OS::Heat::None
OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
# Port assignments for the VIPs
- OS::TripleO::Network::Ports::ExternalVipPort: network/ports/noop.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
+ {%- for network in networks if network.vip|default(false) %}
+ OS::TripleO::Network::Ports::{{network.name}}VipPort: network/ports/noop.yaml
+ {%- endfor %}
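+ # Illustrative rendering only: for a network flagged "vip: true" in
+ # network_data.yaml (e.g. External) the loop above emits
+ #   OS::TripleO::Network::Ports::ExternalVipPort: network/ports/noop.yaml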
+
OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
OS::TripleO::Network::Ports::ControlPlaneVipPort: OS::Neutron::Port
# services
OS::TripleO::Services: puppet/services/services.yaml
OS::TripleO::Services::Apache: puppet/services/apache.yaml
- OS::TripleO::Services::ApacheTLS: OS::Heat::None
OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
OS::TripleO::Services::CephMds: OS::Heat::None
OS::TripleO::Services::CephMon: OS::Heat::None
OS::TripleO::Services::HeatEngine: puppet/services/heat-engine.yaml
OS::TripleO::Services::Kernel: puppet/services/kernel.yaml
OS::TripleO::Services::MySQL: puppet/services/database/mysql.yaml
- OS::TripleO::Services::MySQLTLS: OS::Heat::None
+ OS::TripleO::Services::NeutronBgpVpnApi: OS::Heat::None
OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
+ OS::TripleO::Services::NeutronL2gwApi: OS::Heat::None
OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
+ OS::TripleO::Services::NeutronL2gwAgent: OS::Heat::None
OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
# FIXME(shardy) the duplicate NeutronServer line can be removed when we've updated
# the multinode job ControllerServices after this patch merges
OS::TripleO::Services::NeutronCorePluginML2OVN: puppet/services/neutron-plugin-ml2-ovn.yaml
OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml
OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml
+ OS::TripleO::Services::NeutronCorePluginNSX: puppet/services/neutron-plugin-nsx.yaml
OS::TripleO::Services::OVNDBs: OS::Heat::None
OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml
OS::TripleO::Services::PacemakerRemote: OS::Heat::None
OS::TripleO::Services::NeutronSriovAgent: OS::Heat::None
OS::TripleO::Services::RabbitMQ: puppet/services/rabbitmq.yaml
+ OS::TripleO::Services::Qdr: OS::Heat::None
OS::TripleO::Services::HAproxy: puppet/services/haproxy.yaml
OS::TripleO::Services::HAProxyPublicTLS: OS::Heat::None
OS::TripleO::Services::HAProxyInternalTLS: OS::Heat::None
OS::TripleO::Services::Memcached: puppet/services/memcached.yaml
OS::TripleO::Services::SaharaApi: OS::Heat::None
OS::TripleO::Services::SaharaEngine: OS::Heat::None
- OS::TripleO::Services::Sshd: OS::Heat::None
+ OS::TripleO::Services::Securetty: OS::Heat::None
+ OS::TripleO::Services::Sshd: puppet/services/sshd.yaml
OS::TripleO::Services::Redis: puppet/services/database/redis.yaml
OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
OS::TripleO::Services::NovaLibvirt: puppet/services/nova-libvirt.yaml
OS::TripleO::Services::Ntp: puppet/services/time/ntp.yaml
OS::TripleO::Services::SwiftProxy: puppet/services/swift-proxy.yaml
+ OS::TripleO::Services::ExternalSwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: puppet/services/swift-storage.yaml
OS::TripleO::Services::SwiftRingBuilder: puppet/services/swift-ringbuilder.yaml
OS::TripleO::Services::Snmp: puppet/services/snmp.yaml
OS::TripleO::Services::Tacker: OS::Heat::None
OS::TripleO::Services::Timezone: puppet/services/time/timezone.yaml
OS::TripleO::Services::CeilometerApi: puppet/services/ceilometer-api.yaml
- OS::TripleO::Services::CeilometerCollector: puppet/services/ceilometer-collector.yaml
- OS::TripleO::Services::CeilometerExpirer: puppet/services/ceilometer-expirer.yaml
+ OS::TripleO::Services::CeilometerCollector: puppet/services/disabled/ceilometer-collector.yaml
+ OS::TripleO::Services::CeilometerExpirer: puppet/services/disabled/ceilometer-expirer.yaml
OS::TripleO::Services::CeilometerAgentCentral: puppet/services/ceilometer-agent-central.yaml
OS::TripleO::Services::CeilometerAgentNotification: puppet/services/ceilometer-agent-notification.yaml
OS::TripleO::Services::ComputeCeilometerAgent: puppet/services/ceilometer-agent-compute.yaml
+ OS::TripleO::Services::CeilometerAgentIpmi: puppet/services/ceilometer-agent-ipmi.yaml
OS::TripleO::Services::Horizon: puppet/services/horizon.yaml
+ # Undercloud Telemetry services
+ OS::TripleO::Services::UndercloudCeilometerAgentCentral: OS::Heat::None
+ OS::TripleO::Services::UndercloudCeilometerAgentNotification: OS::Heat::None
+
#Gnocchi services
OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
OS::TripleO::Services::GnocchiMetricd: puppet/services/gnocchi-metricd.yaml
OS::TripleO::Services::GnocchiStatsd: puppet/services/gnocchi-statsd.yaml
+ OS::TripleO::Services::UndercloudGnocchiApi: OS::Heat::None
+ OS::TripleO::Services::UndercloudGnocchiMetricd: OS::Heat::None
+ OS::TripleO::Services::UndercloudGnocchiStatsd: OS::Heat::None
# Services that are disabled by default (use relevant environment files):
OS::TripleO::Services::FluentdClient: OS::Heat::None
OS::TripleO::Services::Collectd: OS::Heat::None
OS::TripleO::Services::AodhEvaluator: puppet/services/aodh-evaluator.yaml
OS::TripleO::Services::AodhNotifier: puppet/services/aodh-notifier.yaml
OS::TripleO::Services::AodhListener: puppet/services/aodh-listener.yaml
+ OS::TripleO::Services::UndercloudAodhApi: OS::Heat::None
+ OS::TripleO::Services::UndercloudAodhEvaluator: OS::Heat::None
+ OS::TripleO::Services::UndercloudAodhNotifier: OS::Heat::None
+ OS::TripleO::Services::UndercloudAodhListener: OS::Heat::None
OS::TripleO::Services::PankoApi: puppet/services/panko-api.yaml
+ OS::TripleO::Services::UndercloudPankoApi: OS::Heat::None
OS::TripleO::Services::MistralEngine: OS::Heat::None
OS::TripleO::Services::MistralApi: OS::Heat::None
OS::TripleO::Services::MistralExecutor: OS::Heat::None
OS::TripleO::Services::Zaqar: OS::Heat::None
OS::TripleO::Services::NeutronML2FujitsuCfab: OS::Heat::None
OS::TripleO::Services::NeutronML2FujitsuFossw: OS::Heat::None
+ OS::TripleO::Services::CinderBackendDellPs: OS::Heat::None
+ OS::TripleO::Services::CinderBackendDellSc: OS::Heat::None
+ OS::TripleO::Services::CinderBackendNetApp: OS::Heat::None
+ OS::TripleO::Services::CinderBackendScaleIO: OS::Heat::None
OS::TripleO::Services::CinderHPELeftHandISCSI: OS::Heat::None
OS::TripleO::Services::Etcd: OS::Heat::None
OS::TripleO::Services::Ec2Api: OS::Heat::None
OS::TripleO::Services::OctaviaWorker: OS::Heat::None
OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml
OS::TripleO::Services::Vpp: OS::Heat::None
+ OS::TripleO::Services::NeutronVppAgent: OS::Heat::None
+ OS::TripleO::Services::Docker: OS::Heat::None
+ OS::TripleO::Services::CertmongerUser: OS::Heat::None
parameter_defaults:
EnablePackageInstall: false
-{% set primary_role_name = roles[0].name -%}
-heat_template_version: ocata
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+ {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+ {%- set _ = primary_role.pop() -%}
+ {%- set _ = primary_role.append(role) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
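+# Illustrative only: a role is picked as primary when its roles_data.yaml entry
+# carries both tags, for example:
+#   - name: Controller
+#     tags:
+#       - primary
+#       - controller
+# otherwise the first listed role is used, as before.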
+heat_template_version: pike
description: >
Deploy an OpenStack environment, consisting of several node types (roles),
type: string
ControlFixedIPs:
default: []
- description: Should be used for arbitrary ips.
+ description: >
+ Control the IP allocation for the ControlVirtualIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
type: json
InternalApiVirtualFixedIPs:
default: []
description: What interface to add to the HypervisorNeutronPhysicalBridge.
type: string
+ NodeCreateBatchSize:
+ default: 30
+ description: Maximum batch size for creating nodes
+ type: number
+
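+ # Illustrative usage, not part of this template: the batch size could be
+ # lowered through an environment file, e.g.
+ #   parameter_defaults:
+ #     NodeCreateBatchSize: 10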
# Jinja loop for Role in role_data.yaml
{% for role in roles %}
# Parameters generated for {{role.name}} Role
type: json
description: Optional scheduler hints to pass to nova
default: {}
+
+ {{role.name}}Parameters:
+ type: json
+ description: Optional role-specific parameters to be provided to the service
+ default: {}
{% endfor %}
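+ # Illustrative usage, not part of this template: role-specific overrides can
+ # then be passed per role through an environment file, e.g.
+ #   parameter_defaults:
+ #     ComputeParameters:
+ #       SomeServiceParameter: some-value   # hypothetical service parameter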
# Identifiers to trigger tasks on nodes
type: json
value: {get_attr: [EndpointMap, endpoint_map]}
+ SshKnownHostsConfig:
+ type: OS::TripleO::Ssh::KnownHostsConfig
+ properties:
+ known_hosts:
+ list_join:
+ - ''
+ {% for role in roles %}
+ - {get_attr: [{{role.name}}, known_hosts_entry]}
+ {% endfor %}
+
# Jinja loop for Role in roles_data.yaml
{% for role in roles %}
# Resources generated for {{role.name}} Role
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
+ RoleName: {{role.name}}
+ RoleParameters: {get_param: {{role.name}}Parameters}
# Filter any null/None service_names which may be present due to mapping
# of services to OS::Heat::None
config: {get_attr: [hostsConfig, config_id]}
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ {{role.name}}SshKnownHostsDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ name: {{role.name}}SshKnownHostsDeployment
+ config: {get_resource: SshKnownHostsConfig}
+ servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+
{{role.name}}AllNodesDeployment:
type: OS::Heat::StructuredDeployments
depends_on:
{{role.name}}:
type: OS::Heat::ResourceGroup
depends_on: Networks
+ update_policy:
+ batch_create:
+ max_batch_size: {get_param: NodeCreateBatchSize}
properties:
count: {get_param: {{role.name}}Count}
removal_policies: {get_param: {{role.name}}RemovalPolicies}
-
{% for role in roles %}
- list_join:
- - "\n"
+ - ""
- {get_attr: [{{role.name}}, hosts_entry]}
{% endfor %}
PingTestIps:
list_join:
- ' '
- - - {get_attr: [{{primary_role_name}}, resource.0.external_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.internal_api_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.storage_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.storage_mgmt_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.tenant_ip_address]}
- - {get_attr: [{{primary_role_name}}, resource.0.management_ip_address]}
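+ # Informational note on the yaql below: each *_ip_address attribute of the
+ # role's ResourceGroup is a list with one entry per node; coalesce($.data, [])
+ # substitutes an empty list when the attribute is absent, and .first(null)
+ # then yields the first node's IP or null, so a disabled network does not
+ # break the space-separated join.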
+ - - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, external_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, internal_api_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, storage_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, storage_mgmt_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, tenant_ip_address]}
+ - yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, management_ip_address]}
UpdateWorkflow:
type: OS::TripleO::Tasks::UpdateWorkflow
AllNodesDeploySteps:
type: OS::TripleO::PostDeploySteps
depends_on:
+ - AllNodesExtraConfig
{% for role in roles %}
- {{role.name}}AllNodesDeployment
{% endfor %}
value:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+{% endfor %}
+ RoleNetIpMap:
+ description: Mapping of each network to a list of IPs for each role
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]}
{% endfor %}
-version: 1.0\r
-\r
-template: overcloud.yaml\r
-environments:\r
-- path: overcloud-resource-registry-puppet.yaml\r
+version: 1.0
+
+name: overcloud
+description: >
+ Default Deployment plan
+template: overcloud.yaml
+environments:
+ - path: overcloud-resource-registry-puppet.yaml
-heat_template_version: ocata
+heat_template_version: pike
description: 'All Nodes Config for Puppet'
parameters:
StackAction:
type: string
description: >
- Heat action on performed top-level stack.
+ Heat action performed on the top-level stack. Note StackUpdateType is
+ set to UPGRADE when a major-version upgrade is in progress.
constraints:
- allowed_values: ['CREATE', 'UPDATE']
+ StackUpdateType:
+ type: string
+ description: >
+ Type of update, to differentiate between UPGRADE and UPDATE cases
+ when StackAction is UPDATE (both are the same stack action).
+ constraints:
+ - allowed_values: ['', 'UPGRADE']
+ default: ''
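+ # Illustrative only: an upgrade environment would be expected to set
+ #   parameter_defaults:
+ #     StackUpdateType: UPGRADE
+ # while regular stack updates leave the empty default in place.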
# NOTE(jaosorior): This is being set as IPA as it's the first
# CA we'll actually be testing out. But we can change this if
# people request it.
deploy_identifier: {get_param: DeployIdentifier}
update_identifier: {get_param: UpdateIdentifier}
stack_action: {get_param: StackAction}
+ stack_update_type: {get_param: StackUpdateType}
vip_data:
map_merge:
# Dynamically generate per-service VIP data based on enabled_services
-heat_template_version: ocata
+heat_template_version: pike
description: 'OpenStack cinder storage configured by Puppet'
parameters:
BlockStorageImage:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+ Maximum amount of time to possibly delay configuration collection
+ polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
UpgradeInitCommand:
type: string
description: |
resources:
BlockStorage:
- type: OS::TripleO::Server
+ type: OS::TripleO::BlockStorageServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image:
{get_param: BlockStorageImage}
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: BlockStorageDeployment
+ properties:
+ server: {get_resource: BlockStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [BlockStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
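+ # Illustrative result (placeholder values): the rendered entry is one
+ # known_hosts line listing every address and name of the node followed by its
+ # ECDSA host key, e.g.
+ #   172.16.1.5,node-0.example.com,node-0,...,node-0-ctlplane ecdsa-sha2-nistp256 AAAA...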
nova_server_resource:
description: Heat resource handle for the block storage server
value:
-heat_template_version: ocata
+heat_template_version: pike
description: 'OpenStack ceph storage node configured by Puppet'
parameters:
OvercloudCephStorageFlavor:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+ Maximum amount of time to possibly delay configuration collection
+ polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
UpgradeInitCommand:
type: string
description: |
resources:
CephStorage:
- type: OS::TripleO::Server
+ type: OS::TripleO::CephStorageServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: CephStorageImage}
image_update_policy: {get_param: ImageUpdatePolicy}
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: CephStorageDeployment
+ properties:
+ server: {get_resource: CephStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [CephStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the ceph storage server
value:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack hypervisor node configured via Puppet.
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+ Maximum amount of time to possibly delay configuration collection
+ polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
UpgradeInitCommand:
type: string
description: |
resources:
NovaCompute:
- type: OS::TripleO::Server
+ type: OS::TripleO::ComputeServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: NovaImage}
image_update_policy:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: NovaComputeDeployment
+ properties:
+ server: {get_resource: NovaCompute}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [NovaCompute, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
- {get_resource: NovaCompute}
+ {get_resource: NovaCompute}
\ No newline at end of file
-heat_template_version: ocata
+heat_template_version: pike
description: >
A software config which runs puppet on the {{role}} role
- ''
- list_join:
- ','
- - ['file,concat,file_line', {get_param: PuppetTags}]
+ - ['file,concat,file_line,augeas', {get_param: PuppetTags}]
outputs:
- name: result
inputs:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack controller node configured by Puppet.
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+ Maximum amount of time to possibly delay configuration collection
+ polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
UpgradeInitCommand:
type: string
description: |
resources:
Controller:
- type: OS::TripleO::Server
+ type: OS::TripleO::ControllerServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: controllerImage}
image_update_policy: {get_param: ImageUpdatePolicy}
- all_nodes # provided by allNodesConfig
- vip_data # provided by allNodesConfig
- '"%{::osfamily}"'
- - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
- neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
- neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
- cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: ControllerDeployment
+ properties:
+ server: {get_resource: Controller}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [Controller, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to install deployment artifacts (tarball's and/or
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for all MidoNet nodes
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Network Cisco configuration
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Big Switch agents on compute node
mapped_data:
neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ # NOTE(aschultz): required for the puppet module but we don't
+ # actually want them defined on the compute nodes so we're
+ # relying on the puppet module's handling of <SERVICE DEFAULT>
+ # to just not set these but still accept that they were defined.
+ # This should be fixed in puppet-neutron and removed here,
+ # but for backportability, we need to define something.
+ neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
+ neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
+
NeutronBigswitchDeployment:
type: OS::Heat::StructuredDeployment
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Nuage configuration on the Compute
+++ /dev/null
-heat_template_version: ocata
-
-description: Configure hieradata for Cinder Netapp configuration
-
-parameters:
- server:
- description: ID of the controller node to apply this config to
- type: string
-
- # Config specific parameters, to be provided via parameter_defaults
- CinderEnableNetappBackend:
- type: boolean
- default: true
- CinderNetappBackendName:
- type: string
- default: 'tripleo_netapp'
- CinderNetappLogin:
- type: string
- CinderNetappPassword:
- type: string
- hidden: true
- CinderNetappServerHostname:
- type: string
- CinderNetappServerPort:
- type: string
- default: '80'
- CinderNetappSizeMultiplier:
- type: string
- default: '1.2'
- CinderNetappStorageFamily:
- type: string
- default: 'ontap_cluster'
- CinderNetappStorageProtocol:
- type: string
- default: 'nfs'
- CinderNetappTransportType:
- type: string
- default: 'http'
- CinderNetappVfiler:
- type: string
- default: ''
- CinderNetappVolumeList:
- type: string
- default: ''
- CinderNetappVserver:
- type: string
- default: ''
- CinderNetappPartnerBackendName:
- type: string
- default: ''
- CinderNetappNfsShares:
- type: string
- default: ''
- CinderNetappNfsSharesConfig:
- type: string
- default: '/etc/cinder/shares.conf'
- CinderNetappNfsMountOptions:
- type: string
- default: ''
- CinderNetappCopyOffloadToolPath:
- type: string
- default: ''
- CinderNetappControllerIps:
- type: string
- default: ''
- CinderNetappSaPassword:
- type: string
- default: ''
- hidden: true
- CinderNetappStoragePools:
- type: string
- default: ''
- CinderNetappHostType:
- type: string
- default: ''
- CinderNetappWebservicePath:
- type: string
- default: '/devmgr/v2'
- # DEPRECATED options for compatibility with older versions
- CinderNetappEseriesHostType:
- type: string
- default: 'linux_dm_mp'
-
-parameter_groups:
-- label: deprecated
- description: Do not use deprecated params, they will be removed.
- parameters:
- - CinderNetappEseriesHostType
-
-resources:
- CinderNetappConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: hiera
- config:
- datafiles:
- cinder_netapp_data:
- mapped_data:
- tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_input: EnableNetappBackend}
- cinder::backend::netapp::title: {get_input: NetappBackendName}
- cinder::backend::netapp::netapp_login: {get_input: NetappLogin}
- cinder::backend::netapp::netapp_password: {get_input: NetappPassword}
- cinder::backend::netapp::netapp_server_hostname: {get_input: NetappServerHostname}
- cinder::backend::netapp::netapp_server_port: {get_input: NetappServerPort}
- cinder::backend::netapp::netapp_size_multiplier: {get_input: NetappSizeMultiplier}
- cinder::backend::netapp::netapp_storage_family: {get_input: NetappStorageFamily}
- cinder::backend::netapp::netapp_storage_protocol: {get_input: NetappStorageProtocol}
- cinder::backend::netapp::netapp_transport_type: {get_input: NetappTransportType}
- cinder::backend::netapp::netapp_vfiler: {get_input: NetappVfiler}
- cinder::backend::netapp::netapp_volume_list: {get_input: NetappVolumeList}
- cinder::backend::netapp::netapp_vserver: {get_input: NetappVserver}
- cinder::backend::netapp::netapp_partner_backend_name: {get_input: NetappPartnerBackendName}
- cinder::backend::netapp::nfs_shares: {get_input: NetappNfsShares}
- cinder::backend::netapp::nfs_shares_config: {get_input: NetappNfsSharesConfig}
- cinder::backend::netapp::nfs_mount_options: {get_input: NetappNfsMountOptions}
- cinder::backend::netapp::netapp_copyoffload_tool_path: {get_input: NetappCopyOffloadToolPath}
- cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps}
- cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword}
- cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools}
- cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType}
- cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath}
-
- CinderNetappDeployment:
- type: OS::Heat::StructuredDeployment
- properties:
- name: CinderNetappDeployment
- config: {get_resource: CinderNetappConfig}
- server: {get_param: server}
- input_values:
- EnableNetappBackend: {get_param: CinderEnableNetappBackend}
- NetappBackendName: {get_param: CinderNetappBackendName}
- NetappLogin: {get_param: CinderNetappLogin}
- NetappPassword: {get_param: CinderNetappPassword}
- NetappServerHostname: {get_param: CinderNetappServerHostname}
- NetappServerPort: {get_param: CinderNetappServerPort}
- NetappSizeMultiplier: {get_param: CinderNetappSizeMultiplier}
- NetappStorageFamily: {get_param: CinderNetappStorageFamily}
- NetappStorageProtocol: {get_param: CinderNetappStorageProtocol}
- NetappTransportType: {get_param: CinderNetappTransportType}
- NetappVfiler: {get_param: CinderNetappVfiler}
- NetappVolumeList: {get_param: CinderNetappVolumeList}
- NetappVserver: {get_param: CinderNetappVserver}
- NetappPartnerBackendName: {get_param: CinderNetappPartnerBackendName}
- NetappNfsShares: {get_param: CinderNetappNfsShares}
- NetappNfsSharesConfig: {get_param: CinderNetappNfsSharesConfig}
- NetappNfsMountOptions: {get_param: CinderNetappNfsMountOptions}
- NetappCopyOffloadToolPath: {get_param: CinderNetappCopyOffloadToolPath}
- NetappControllerIps: {get_param: CinderNetappControllerIps}
- NetappSaPassword: {get_param: CinderNetappSaPassword}
- NetappStoragePools: {get_param: CinderNetappStoragePools}
- NetappHostType: {get_param: CinderNetappHostType}
- NetappWebservicePath: {get_param: CinderNetappWebservicePath}
-
-outputs:
- deploy_stdout:
- description: Deployment reference, used to trigger puppet apply on changes
- value: {get_attr: [CinderNetappDeployment, deploy_stdout]}
-heat_template_version: ocata
+heat_template_version: pike
description: 'Extra Pre-Deployment Config, multiple'
parameters:
server:
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Neutron Big Switch configuration
server:
description: ID of the controller node to apply this config to
type: string
+ NeutronBigswitchAgentEnabled:
+ description: The state of the neutron-bsn-agent service.
+ type: boolean
+ default: true
+ NeutronBigswitchLLDPEnabled:
+ description: The state of the neutron-bsn-lldp service.
+ type: boolean
+ default: false
NeutronBigswitchRestproxyServers:
description: 'Big Switch controllers ("IP:port,IP:port")'
type: string
datafiles:
neutron_bigswitch_data:
mapped_data:
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
config: {get_resource: NeutronBigswitchConfig}
server: {get_param: server}
input_values:
+ neutron_enable_bigswitch_agent: {get_param: NeutronBigswitchAgentEnabled}
+ neutron_enable_bigswitch_lldp: {get_param: NeutronBigswitchLLDPEnabled}
restproxy_servers: {get_param: NeutronBigswitchRestproxyServers}
restproxy_server_auth: {get_param: NeutronBigswitchRestproxyServerAuth }
restproxy_auto_sync_on_failure: {get_param: NeutronBigswitchRestproxyAutoSyncOnFailure}
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Cisco N1KV configuration
# Config specific parameters, to be provided via parameter_defaults
N1000vVSMIP:
type: string
- default: '192.0.2.50'
+ default: '192.168.24.50'
N1000vVSMDomainID:
type: number
default: 100
default: '255.255.255.0'
N1000vMgmtGatewayIP:
type: string
- default: '192.0.2.1'
+ default: '192.168.24.1'
N1000vPacemakerControl:
type: boolean
default: true
-heat_template_version: ocata
+heat_template_version: pike
description: 'Noop Extra Pre-Deployment Config'
parameters:
server:
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata overrides for specific nodes
-heat_template_version: ocata
+heat_template_version: pike
description: >
This is a template which will inject the trusted anchor.
-heat_template_version: ocata
+heat_template_version: pike
description: Enroll nodes to FreeIPA
-heat_template_version: ocata
+heat_template_version: pike
description: >
This is a template which will build the TLS Certificates necessary
{% set batch_upgrade_steps_max = 3 -%}
{% set upgrade_steps_max = 6 -%}
{% set deliver_script = {'deliver': False} -%}
-heat_template_version: ocata
+heat_template_version: pike
description: 'Upgrade steps for all roles'
parameters:
type: string
hidden: true
-conditions:
- # Conditions to disable any steps where the task list is empty
-{%- for role in roles %}
- {{role.name}}UpgradeBatchConfigEnabled:
- not:
- equals:
- - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
- - []
- {{role.name}}UpgradeConfigEnabled:
- not:
- equals:
- - {get_param: [role_data, {{role.name}}, upgrade_tasks]}
- - []
-{%- endfor %}
-
resources:
{% for role in roles if role.disable_upgrade_deployment|default(false) %}
- " crudini --set /etc/nova/nova.conf placement project_domain_name Default\n\n"
- " crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n"
- " crudini --set /etc/nova/nova.conf placement project_name service\n\n"
- - " systemctl restart openstack-nova-compute\n\n"
- - "fi\n\n"
+ - " crudini --set /etc/nova/nova.conf placement os_interface internal\n\n"
- str_replace:
template: |
crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD'
- crudini --set /etc/nova/nova.conf placement region_name 'REGION_NAME'
+ crudini --set /etc/nova/nova.conf placement os_region_name 'REGION_NAME'
crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL'
- ROLE='ROLE_NAME'
params:
SERVICE_PASSWORD: { get_param: NovaPassword }
REGION_NAME: { get_param: KeystoneRegion }
AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
+ - " systemctl restart openstack-nova-compute\n\n"
+ - "fi\n\n"
+ - str_replace:
+ template: |
+ ROLE='ROLE_NAME'
+ params:
ROLE_NAME: {{role.name}}
- get_file: ../extraconfig/tasks/pacemaker_common_functions.sh
- get_file: ../extraconfig/tasks/run_puppet.sh
{%- for role in roles %}
{{role.name}}UpgradeBatchConfig_Step{{step}}:
type: OS::TripleO::UpgradeConfig
- {%- if step > 0 %}
- condition: {{role.name}}UpgradeBatchConfigEnabled
- {% if role.name in enabled_roles %}
+ {%- if step > 0 %}
depends_on:
- - {{role.name}}UpgradeBatch_Step{{step -1}}
- {%- endif %}
- {% else %}
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}UpgradeBatch_Step{{step -1}}
+ {%- endfor %}
+ {% else %}
{% for role in roles if role.disable_upgrade_deployment|default(false) %}
{% if deliver_script.update({'deliver': True}) %} {% endif %}
{% endfor %}
{% if deliver_script.deliver %}
depends_on:
- {% endif %}
{% for dep in roles if dep.disable_upgrade_deployment|default(false) %}
- {{dep.name}}DeliverUpgradeScriptDeployment
{% endfor %}
- {% endif %}
+ {% endif %}
+ {% endif %}
properties:
UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
step: {{step}}
{%- for role in enabled_roles %}
{{role.name}}UpgradeBatch_Step{{step}}:
type: OS::Heat::SoftwareDeploymentGroup
- condition: {{role.name}}UpgradeBatchConfigEnabled
{%- if step > 0 %}
depends_on:
- - {{role.name}}UpgradeBatch_Step{{step -1}}
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}UpgradeBatch_Step{{step -1}}
+ {%- endfor %}
{% else %}
+ {% for role in roles if role.disable_upgrade_deployment|default(false) %}
+ {% if deliver_script.update({'deliver': True}) %} {% endif %}
+ {% endfor %}
+ {% if deliver_script.deliver %}
depends_on:
- - {{role.name}}UpgradeBatchConfig_Step{{step}}
- {%- endif %}
+ {% for dep in roles if dep.disable_upgrade_deployment|default(false) %}
+ - {{dep.name}}DeliverUpgradeScriptDeployment
+ {% endfor %}
+ {% endif %}
+ {% endif %}
update_policy:
batch_create:
max_batch_size: {{role.upgrade_batch_size|default(1)}}
rolling_update:
max_batch_size: {{role.upgrade_batch_size|default(1)}}
properties:
- name: {{role.name}}UpgradeBatch_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}UpgradeBatchConfig_Step{{step}}}
input_values:
{%- for role in roles %}
{{role.name}}UpgradeConfig_Step{{step}}:
type: OS::TripleO::UpgradeConfig
- # The UpgradeConfig resources could actually be created without
- # serialization, but the event output is easier to follow if we
- # do, and there should be minimal performance hit (creating the
- # config is cheap compared to the time to apply the deployment).
- {%- if step > 0 %}
- condition: {{role.name}}UpgradeConfigEnabled
- {% if role.name in enabled_roles %}
+ # The UpgradeConfig resources could actually be created without
+ # serialization, but the event output is easier to follow if we
+ # do, and there should be minimal performance hit (creating the
+ # config is cheap compared to the time to apply the deployment).
depends_on:
- - {{role.name}}Upgrade_Step{{step -1}}
- {% endif %}
- {%- endif %}
+ {%- for role_inside in enabled_roles %}
+ {%- if step > 0 %}
+ - {{role_inside.name}}Upgrade_Step{{step -1}}
+ {%- else %}
+ - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+ {%- endif %}
+ {%- endfor %}
properties:
UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
step: {{step}}
{%- for role in enabled_roles %}
{{role.name}}Upgrade_Step{{step}}:
type: OS::Heat::SoftwareDeploymentGroup
- {%- if step > 0 %}
- condition: {{role.name}}UpgradeConfigEnabled
depends_on:
- - {{role.name}}Upgrade_Step{{step -1}}
- {%- endif %}
+ {%- for role_inside in enabled_roles %}
+ {%- if step > 0 %}
+ - {{role_inside.name}}Upgrade_Step{{step -1}}
+ {%- else %}
+ - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+ {%- endif %}
+ {%- endfor %}
properties:
- name: {{role.name}}Upgrade_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}UpgradeConfig_Step{{step}}}
input_values:
-heat_template_version: ocata
+heat_template_version: pike
description: 'OpenStack swift storage node configured by Puppet'
parameters:
OvercloudSwiftStorageFlavor:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+      Maximum amount of time to possibly delay configuration collection
+      polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
UpgradeInitCommand:
type: string
description: |
resources:
SwiftStorage:
- type: OS::Nova::Server
+ type: OS::TripleO::ObjectStorageServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: SwiftStorageImage}
flavor: {get_param: OvercloudSwiftStorageFlavor}
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: SwiftStorageHieraDeploy
+ properties:
+ server: {get_resource: SwiftStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [SwiftStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the swift storage server
value:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Post-upgrade configuration steps via puppet for all roles
-heat_template_version: ocata
+heat_template_version: pike
description: >
Post-deploy configuration steps via puppet for all roles,
properties:
StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
- {{role.name}}PrePuppet:
- type: OS::TripleO::Tasks::{{role.name}}PrePuppet
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- {% if role.name in ['Controller', 'ObjectStorage'] %}
- {{role.name}}SwiftRingDeploy:
- type: OS::TripleO::Tasks::SwiftRingDeploy
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- {% endif %}
-
# Step through a series of configuration steps
{% for step in range(1, 6) %}
{{role.name}}Deployment_Step{{step}}:
- {{dep.name}}Deployment_Step5
{% endfor %}
properties:
- servers: {get_param: servers}
+ servers: {get_param: servers}
input_values:
update_identifier: {get_param: DeployIdentifier}
properties:
servers: {get_param: [servers, {{role.name}}]}
- {{role.name}}PostPuppet:
- depends_on:
- - {{role.name}}ExtraConfigPost
- type: OS::TripleO::Tasks::{{role.name}}PostPuppet
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- {% if role.name in ['Controller', 'ObjectStorage'] %}
- {{role.name}}SwiftRingUpdate:
- type: OS::TripleO::Tasks::SwiftRingUpdate
- depends_on:
- {% for dep in roles %}
- - {{dep.name}}Deployment_Step5
- {% endfor %}
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- {% endif %}
{% endfor %}
-heat_template_version: ocata
+heat_template_version: pike
description: 'OpenStack {{role}} node configured by Puppet'
parameters:
Overcloud{{role}}Flavor:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+      Maximum amount of time to possibly delay configuration collection
+      polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
LoggingSources:
type: json
default: []
resources:
{{role}}:
- type: OS::TripleO::Server
+ type: OS::TripleO::{{role.name}}Server
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: {{role}}Image}
image_update_policy: {get_param: ImageUpdatePolicy}
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
properties:
+ name: UpdateDeployment
config: {get_resource: UpdateConfig}
server: {get_resource: {{role}}}
input_values:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: {{role}}Deployment
+ properties:
+ server: {get_resource: {{role}}}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [{{role}}, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for {{role}} server
value:
Operators will use the parameter_defaults section of any Heat
environment to set per service parameters.
+Apart from service-specific inputs, there are a few default parameters for all
+the services (an example override is shown after this list):
+
+ * ServiceNetMap: Mapping of service_name -> network name. Default mappings
+ for service to network names are defined in
+ ../network/service_net_map.j2.yaml, which may be overridden via
+ ServiceNetMap values added to a user environment file via
+ parameter_defaults.
+
+ * EndpointMap: Mapping of service endpoint -> protocol. Contains a mapping of
+ endpoint data generated for all services, based on the data included in
+ ../network/endpoints/endpoint_data.yaml.
+
+ * DefaultPasswords: Mapping of service -> default password. Used to pass some
+ passwords from the parent templates, this is a legacy interface and should
+ not be used by new services.
+
+ * RoleName: Name of the role on which this service is deployed. A service can
+ be deployed in multiple roles. This is an internal parameter (should not be
+ set via environment file), which is fetched from the name attribute of the
+ roles_data.yaml template.
+
+ * RoleParameters: Parameters specific to the role on which the service is
+   applied. Using the format "<RoleName>Parameters" in the parameter_defaults
+   of a user environment file, parameters can be provided for a specific role.
+   For example, to provide a parameter specific to the "Compute" role, use the
+   following format::
+
+ parameter_defaults:
+ ComputeParameters:
+ Param1: value
+
+
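+For example, the default network for a service can be overridden from a user
+environment file, as mentioned for ServiceNetMap above. The snippet below is a
+minimal sketch; the CeilometerApiNetwork key and the internal_api network are
+used purely for illustration::
+
+  parameter_defaults:
+    ServiceNetMap:
+      CeilometerApiNetwork: internal_api
+
+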
Config Settings
---------------
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ AodhApiPolicies:
+ description: |
+ A hash of policies to configure for Aodh API.
+ e.g. { aodh-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
AodhBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ApacheServiceBase:
type: ./apache.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
aodh::wsgi::apache::wsgi_process_display_name: 'aodh_wsgi'
aodh::api::service_name: 'httpd'
aodh::api::enable_proxy_headers_parsing: true
+ aodh::policy::policies: {get_param: AodhApiPolicies}
tripleo.aodh_api.firewall_rules:
'128 aodh-api':
dport:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Stop aodh_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ yaql:
+ expression: $.data.apache_upgrade + $.data.aodh_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ aodh_api_upgrade:
+ - name: Stop aodh_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_settings:
aodh_redis_password: {get_param: RedisPassword}
aodh::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://aodh:'
- - {get_param: AodhPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/aodh'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: aodh
+ password: {get_param: AodhPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /aodh
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
aodh::debug: {get_param: Debug}
aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::rabbit_userid: {get_param: RabbitUserName}
aodh::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
aodh::rabbit_port: {get_param: RabbitClientPort}
aodh::keystone::authtoken::project_name: 'service'
+ aodh::keystone::authtoken::user_domain_name: 'Default'
+ aodh::keystone::authtoken::project_domain_name: 'Default'
aodh::keystone::authtoken::password: {get_param: AodhPassword}
- aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::auth::auth_password: {get_param: AodhPassword}
- aodh::auth::auth_region: 'regionOne'
+ aodh::auth::auth_region: {get_param: KeystoneRegion}
aodh::auth::auth_tenant_name: 'service'
service_config_settings:
keystone:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh Evaluator service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh Listener service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh Notifier service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
+++ /dev/null
-heat_template_version: ocata
-
-description: >
- Apache service TLS configurations.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- # The following parameters are not needed by the template but are
- # required to pass the pep8 tests
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-resources:
-
- ApacheNetworks:
- type: OS::Heat::Value
- properties:
- value:
- # NOTE(jaosorior) Get unique network names to create
- # certificates for those. We skip the tenant network since
- # we don't need a certificate for that, and the external
- # network will be handled in another template.
- yaql:
- expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
- data:
- map:
- get_param: ServiceNetMap
-
-outputs:
- role_data:
- description: Role data for the Apache role.
- value:
- service_name: apache_internal_tls_certmonger
- config_settings:
- generate_service_certificates: true
- apache_certificates_specs:
- map_merge:
- repeat:
- template:
- httpd-NETWORK:
- service_certificate: '/etc/pki/tls/certs/httpd-NETWORK.crt'
- service_key: '/etc/pki/tls/private/httpd-NETWORK.key'
- hostname: "%{hiera('fqdn_NETWORK')}"
- principal: "HTTP/%{hiera('fqdn_NETWORK')}"
- for_each:
- NETWORK: {get_attr: [ApacheNetworks, value]}
- metadata_settings:
- repeat:
- template:
- - service: HTTP
- network: $NETWORK
- type: node
- for_each:
- $NETWORK: {get_attr: [ApacheNetworks, value]}
- upgrade_tasks:
- - name: Check if httpd is deployed
- command: systemctl is-enabled httpd
- tags: common
- ignore_errors: True
- register: httpd_enabled
- - name: "PreUpgrade step0,validation: Check service httpd is running"
- shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
- when: httpd_enabled.rc == 0
- tags: step0,validation
-heat_template_version: ocata
+heat_template_version: pike
description: >
Apache service configured with Puppet. Note this is typically included
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
- ApacheTLS:
- type: OS::TripleO::Services::ApacheTLS
+ ApacheNetworks:
+ type: OS::Heat::Value
properties:
- ServiceNetMap: {get_param: ServiceNetMap}
+ value:
+ # NOTE(jaosorior) Get unique network names to create
+ # certificates for those. We skip the tenant network since
+      # we don't need a certificate for that, and the external network
+      # is handled by HAProxy, so it isn't used for apache either.
+ yaql:
+ expression: list($.data.map.items().map($1[1])).distinct().where($ != external and $ != tenant)
+ data:
+ map:
+ get_param: ServiceNetMap
outputs:
role_data:
service_name: apache
config_settings:
map_merge:
- - get_attr: [ApacheTLS, role_data, config_settings]
-
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
apache::ip: {get_param: [ServiceNetMap, ApacheNetwork]}
+ apache::default_vhost: false
apache::server_signature: 'Off'
apache::server_tokens: 'Prod'
apache_remote_proxy_ips_network:
apache::mod::prefork::serverlimit: { get_param: ApacheServerLimit }
apache::mod::remoteip::proxy_ips:
- "%{hiera('apache_remote_proxy_ips_network')}"
+ - if:
+ - internal_tls_enabled
+ -
+ generate_service_certificates: true
+ apache::mod::ssl::ssl_ca: {get_param: InternalTLSCAFile}
+ tripleo::certmonger::apache_dirs::certificate_dir: '/etc/pki/tls/certs/httpd'
+ tripleo::certmonger::apache_dirs::key_dir: '/etc/pki/tls/private/httpd'
+ apache_certificates_specs:
+ map_merge:
+ repeat:
+ template:
+ httpd-NETWORK:
+ service_certificate: '/etc/pki/tls/certs/httpd/httpd-NETWORK.crt'
+ service_key: '/etc/pki/tls/private/httpd/httpd-NETWORK.key'
+ hostname: "%{hiera('fqdn_NETWORK')}"
+ principal: "HTTP/%{hiera('fqdn_NETWORK')}"
+ for_each:
+ NETWORK: {get_attr: [ApacheNetworks, value]}
+ - {}
metadata_settings:
- get_attr: [ApacheTLS, role_data, metadata_settings]
+ if:
+ - internal_tls_enabled
+ -
+ repeat:
+ template:
+ - service: HTTP
+ network: $NETWORK
+ type: node
+ for_each:
+ $NETWORK: {get_attr: [ApacheNetworks, value]}
+ - null
upgrade_tasks:
- name: Check if httpd is deployed
command: systemctl is-enabled httpd
shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
when: httpd_enabled.rc == 0
tags: step0,validation
+ - name: Ensure mod_ssl package is installed
+ tags: step3
+ yum: name=mod_ssl state=latest
-heat_template_version: ocata
+heat_template_version: pike
description: >
AuditD configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Barbican API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ BarbicanPolicies:
+ description: |
+ A hash of policies to configure for Barbican.
+ e.g. { barbican-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [ApacheServiceBase, role_data, config_settings]
- barbican::keystone::authtoken::password: {get_param: BarbicanPassword}
- barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ barbican::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
barbican::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
barbican::keystone::authtoken::project_name: 'service'
+ barbican::policy::policies: {get_param: BarbicanPolicies}
barbican::api::host_href: {get_param: [EndpointMap, BarbicanPublic, uri]}
barbican::api::db_auto_create: false
barbican::api::enabled_certificate_plugins: ['simple_certificate']
params:
$NETWORK: {get_param: [ServiceNetMap, BarbicanApiNetwork]}
barbican::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://barbican:'
- - {get_param: BarbicanPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/barbican'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: barbican
+ password: {get_param: BarbicanPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /barbican
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
tripleo.barbican_api.firewall_rules:
'117 barbican':
dport:
nova::compute::barbican_endpoint:
get_param: [EndpointMap, BarbicanInternal, uri]
nova::compute::barbican_auth_endpoint:
- get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+ get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]
cinder_api:
cinder::api::keymgr_api_class: >
castellan.key_manager.barbican_key_manager.BarbicanKeyManager
cinder::api::keymgr_encryption_api_url:
get_param: [EndpointMap, BarbicanInternal, uri]
cinder::api::keymgr_encryption_auth_url:
- get_param: [EndpointMap, KeystoneV3Internal, uri_no_suffix]
+ get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Check if barbican_api is deployed
- command: systemctl is-enabled openstack-barbican-api
- tags: common
- ignore_errors: True
- register: barbican_api_enabled
- - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
- shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
- when: barbican_api_enabled.rc == 0
- tags: step0,validation
- - name: Install openstack-barbican-api package if it was disabled
- tags: step3
- yum: name=openstack-barbican-api state=latest
- when: barbican_api_enabled.rc != 0
+ yaql:
+ expression: $.data.apache_upgrade + $.data.barbican_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ barbican_api_upgrade:
+ - name: Check if barbican_api is deployed
+ command: systemctl is-enabled openstack-barbican-api
+ tags: common
+ ignore_errors: True
+ register: barbican_api_enabled
+ - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
+ shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
+ when: barbican_api_enabled.rc == 0
+ tags: step0,validation
+ - name: Install openstack-barbican-api package if it was disabled
+ tags: step3
+ yum: name=openstack-barbican-api state=latest
+ when: barbican_api_enabled.rc != 0
-heat_template_version: ocata
+heat_template_version: pike
description: >
HAproxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Central Agent service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
- ceilometer_redis_password: {get_param: RedisPassword}
central_namespace: true
+ service_config_settings:
+ get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::agent::polling
upgrade_tasks:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Compute Agent service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
- ceilometer::agent::compute::instance_discovery_method: {get_param: InstanceDiscoveryMethod}
compute_namespace: true
+ service_config_settings:
+ get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::agent::polling
upgrade_tasks:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Ceilometer Ipmi Agent service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RedisPassword:
+ description: The password for the redis service account.
+ type: string
+ hidden: true
+ MonitoringSubscriptionCeilometerIpmi:
+ default: 'overcloud-ceilometer-agent-ipmi'
+ type: string
+ CeilometerAgentIpmiLoggingSource:
+ type: json
+ default:
+ tag: openstack.ceilometer.agent.ipmi
+ path: /var/log/ceilometer/ipmi.log
+
+resources:
+ CeilometerServiceBase:
+ type: ./ceilometer-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Agent Ipmi role.
+ value:
+ service_name: ceilometer_agent_ipmi
+ monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerIpmi}
+ logging_source: {get_param: CeilometerAgentIpmiLoggingSource}
+ logging_groups:
+ - ceilometer
+ config_settings:
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer_redis_password: {get_param: RedisPassword}
+ ipmi_namespace: true
+ step_config: |
+ include ::tripleo::profile::base::ceilometer::agent::polling
+ upgrade_tasks:
+ - name: Check if ceilometer-agent-ipmi is deployed
+ command: systemctl is-enabled openstack-ceilometer-ipmi
+ tags: common
+ ignore_errors: True
+ register: ceilometer_ipmi_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-ceilometer-ipmi is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-ceilometer-ipmi' --property ActiveState |
+ grep '\bactive\b'
+ when: ceilometer_ipmi_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop openstack-ceilometer-ipmi service
+ tags: step1
+ when: ceilometer_ipmi_enabled.rc == 0
+ service: name=openstack-ceilometer-ipmi state=stopped
+ - name: Install openstack-ceilometer-ipmi package if it was disabled
+ tags: step3
+ yum: name=openstack-ceilometer-ipmi state=latest
+ when: ceilometer_ipmi_enabled.rc != 0
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Notification Agent service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- ceilometer
config_settings:
get_attr: [CeilometerServiceBase, role_data, config_settings]
+ service_config_settings:
+ get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::agent::notification
upgrade_tasks:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ CeilometerApiPolicies:
+ description: |
+ A hash of policies to configure for Ceilometer API.
+ e.g. { ceilometer-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
CeilometerServiceBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ApacheServiceBase:
type: ./apache.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
+ ceilometer::policy::policies: {get_param: CeilometerApiPolicies}
ceilometer::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
ceilometer::wsgi::apache::ssl: {get_param: EnableInternalTLS}
ceilometer::wsgi::apache::servername:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Stop ceilometer_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ yaql:
+ expression: $.data.apache_upgrade + $.data.ceilometer_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ ceilometer_api_upgrade:
+ - name: Stop ceilometer_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- CeilometerBackend:
- default: 'mongodb'
- description: The ceilometer backend type.
- type: string
CeilometerMeteringSecret:
description: Secret shared by the ceilometer services.
type: string
description: The password for the ceilometer service account.
type: string
hidden: true
- CeilometerMeterDispatcher:
- default: ['gnocchi']
- description: Comma-seperated list of Dispatcher to process meter data
- type: comma_delimited_list
- constraints:
- - allowed_values: ['gnocchi', 'database']
- CeilometerEventDispatcher:
- default: ['gnocchi']
- description: Comma-separated list of Dispatchers to process events data
- type: comma_delimited_list
- constraints:
- - allowed_values: ['panko', 'gnocchi', 'database']
CeilometerWorkers:
default: 0
description: Number of workers for Ceilometer service.
type: number
+ ManageEventPipeline:
+ default: false
+ description: Whether to manage event_pipeline.yaml.
+ type: boolean
EventPipelinePublishers:
- default: ['notifier://?topic=alarm.all']
- description: A list of publishers to put in event_pipeline.yaml.
+ default: ['gnocchi://']
+ description: >
+ A list of publishers to put in event_pipeline.yaml. When the
+      collector is used, override this with the notifier:// publisher.
+      Set ManageEventPipeline to true for the override to take effect.
+ type: comma_delimited_list
+ ManagePipeline:
+ default: false
+ description: Whether to manage pipeline.yaml.
+ type: boolean
+ PipelinePublishers:
+ default: ['gnocchi://']
+ description: >
+ A list of publishers to put in pipeline.yaml. When the
+      collector is used, override this with the notifier:// publisher.
+      Set ManagePipeline to true for the override to take effect.
type: comma_delimited_list
Debug:
default: ''
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ CeilometerApiEndpoint:
+ default: false
+    description: Whether to create or skip the API endpoint. Set this to
+                 false if you choose to disable the Ceilometer API service.
+ type: boolean
+ SnmpdReadonlyUserName:
+ default: ro_snmp_user
+ description: The user name for SNMPd with readonly rights running on all Overcloud nodes
+ type: string
+ SnmpdReadonlyUserPassword:
+ description: The user password for SNMPd with readonly rights running on all Overcloud nodes
+ type: string
+ hidden: true
outputs:
role_data:
service_name: ceilometer_base
config_settings:
ceilometer::debug: {get_param: Debug}
- ceilometer::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - - '://ceilometer:'
- - {get_param: CeilometerPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ceilometer'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
- ceilometer_backend: {get_param: CeilometerBackend}
- # we include db_sync class in puppet-tripleo
- ceilometer::db::sync_db: false
ceilometer::keystone::authtoken::project_name: 'service'
+ ceilometer::keystone::authtoken::user_domain_name: 'Default'
+ ceilometer::keystone::authtoken::project_domain_name: 'Default'
ceilometer::keystone::authtoken::password: {get_param: CeilometerPassword}
- ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ceilometer::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
+ ceilometer::agent::notification::manage_event_pipeline: {get_param: ManageEventPipeline}
ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
+ ceilometer::agent::notification::manage_pipeline: {get_param: ManagePipeline}
+ ceilometer::agent::notification::pipeline_publishers: {get_param: PipelinePublishers}
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
+ ceilometer::agent::auth::auth_user_domain_name: 'Default'
+ ceilometer::agent::auth::auth_project_domain_name: 'Default'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
- ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
- ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]}
ceilometer::dispatcher::gnocchi::filter_project: 'service'
ceilometer::dispatcher::gnocchi::archive_policy: 'low'
ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
ceilometer::rabbit_port: {get_param: RabbitClientPort}
ceilometer::rabbit_heartbeat_timeout_threshold: 60
- ceilometer::db::database_db_max_retries: -1
- ceilometer::db::database_max_retries: -1
ceilometer::telemetry_secret: {get_param: CeilometerMeteringSecret}
+ ceilometer::snmpd_readonly_username: {get_param: SnmpdReadonlyUserName}
+ ceilometer::snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
service_config_settings:
keystone:
+ ceilometer_auth_enabled: true
ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
ceilometer::keystone::auth::tenant: 'service'
+ ceilometer::keystone::auth::configure_endpoint: {get_param: CeilometerApiEndpoint}
mysql:
ceilometer::db::mysql::password: {get_param: CeilometerPassword}
ceilometer::db::mysql::user: ceilometer
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Collector service configured with Puppet
+ This service is deprecated and will be removed in future releases.
parameters:
ServiceNetMap:
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ CeilometerBackend:
+ default: 'mongodb'
+ description: The ceilometer backend type.
+ type: string
+ CeilometerPassword:
+ description: The password for the ceilometer service account.
+ type: string
+ hidden: true
MonitoringSubscriptionCeilometerCollector:
default: 'overcloud-ceilometer-collector'
type: string
default:
tag: openstack.ceilometer.collector
path: /var/log/ceilometer/collector.log
-
+ CeilometerMeterDispatcher:
+ default: ['gnocchi']
+    description: Comma-separated list of Dispatchers to process meter data.
+                 Note that the database option is deprecated and will not be
+                 supported in the future.
+ type: comma_delimited_list
+ constraints:
+ - allowed_values: ['gnocchi', 'database']
+ CeilometerEventDispatcher:
+ default: ['panko', 'gnocchi']
+    description: Comma-separated list of Dispatchers to process events data.
+                 Note that the database option is deprecated and will not be
+                 supported in the future.
+ type: comma_delimited_list
+ constraints:
+ - allowed_values: ['panko', 'gnocchi', 'database']
+ CeilometerEventTTL:
+ default: '86400'
+ description: Number of seconds that events are kept in the database for
+ (<= 0 means forever)
+ type: string
+ CeilometerMeteringTTL:
+ default: '86400'
+ description: Number of seconds that samples are kept in the database for
+ (<= 0 means forever)
+ type: string
resources:
CeilometerServiceBase:
type: ./ceilometer-base.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
MongoDbBase:
type: ./database/mongodb-base.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [MongoDbBase, role_data, config_settings]
- get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::db::database_connection:
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: ceilometer
+ password: {get_param: CeilometerPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ceilometer
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
+ ceilometer_backend: {get_param: CeilometerBackend}
+ ceilometer::event_time_to_live: {get_param: CeilometerEventTTL}
+ ceilometer::metering_time_to_live: {get_param: CeilometerMeteringTTL}
+ # we include db_sync class in puppet-tripleo
+ ceilometer::db::sync_db: false
+ ceilometer::db::database_db_max_retries: -1
+ ceilometer::db::database_max_retries: -1
+ ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
+ ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
service_config_settings:
get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Expirer service configured with Puppet
+  Note: this service is deprecated and will be removed in
+ future releases.
parameters:
ServiceNetMap:
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph base service. Shared by all Ceph services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph Client service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph External service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph MDS service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph Monitor service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph OSD service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph RadosGW service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
- ceph::rgw::keystone::auth::roles: [ 'admin', 'member', '_member_' ]
+ ceph::rgw::keystone::auth::roles: [ 'admin', 'Member', '_member_' ]
ceph::rgw::keystone::auth::tenant: service
ceph::rgw::keystone::auth::user: swift
ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Requests certificates using certmonger through Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the certmonger-user service
+ value:
+ service_name: certmonger_user
+ step_config: |
+ include ::tripleo::profile::base::certmonger_user
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ CinderApiPolicies:
+ description: |
+ A hash of policies to configure for Cinder API.
+ e.g. { cinder-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
conditions:
cinder_workers_zero: {equals : [{get_param: CinderWorkers}, 0]}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
CinderBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [CinderBase, role_data, config_settings]
- get_attr: [ApacheServiceBase, role_data, config_settings]
- - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ - cinder::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
cinder::keystone::authtoken::password: {get_param: CinderPassword}
cinder::keystone::authtoken::project_name: 'service'
+ cinder::keystone::authtoken::user_domain_name: 'Default'
+ cinder::keystone::authtoken::project_domain_name: 'Default'
+ cinder::policy::policies: {get_param: CinderApiPolicies}
cinder::api::enable_proxy_headers_parsing: true
cinder::api::nova_catalog_info: 'compute:nova:internalURL'
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Check if cinder_api is deployed
- command: systemctl is-enabled openstack-cinder-api
- tags: common
- ignore_errors: True
- register: cinder_api_enabled
- - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
- shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
- when: cinder_api_enabled.rc == 0
- tags: step0,validation
- - name: check for cinder running under apache (post upgrade)
- tags: step1
- shell: "apachectl -t -D DUMP_VHOSTS | grep -q cinder"
- register: cinder_apache
- ignore_errors: true
- - name: Stop cinder_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
- when: "cinder_apache.rc == 0"
- - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
- tags: step1
- when: cinder_api_enabled.rc == 0
- service: name=openstack-cinder-api state=stopped enabled=no
+ yaql:
+ expression: $.data.apache_upgrade + $.data.cinder_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ cinder_api_upgrade:
+ - name: Check if cinder_api is deployed
+ command: systemctl is-enabled openstack-cinder-api
+ tags: common
+ ignore_errors: True
+ register: cinder_api_enabled
+ - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
+ when: cinder_api_enabled.rc == 0
+ tags: step0,validation
+ - name: check for cinder running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder"
+ register: cinder_apache
+ ignore_errors: true
+ - name: Stop cinder_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: cinder_apache.rc == 0
+ - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
+ tags: step1
+ when: cinder_api_enabled.rc == 0
+ service: name=openstack-cinder-api state=stopped enabled=no
# See the License for the specific language governing permissions and
# limitations under the License.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Cinder Dell EMC PS Series backend
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Cinder Dell EMC Storage Center backend
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
--- /dev/null
+heat_template_version: pike
+
+description: OpenStack Cinder NetApp backend
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ CinderEnableNetappBackend:
+ type: boolean
+ default: true
+ CinderNetappBackendName:
+ type: string
+ default: 'tripleo_netapp'
+ CinderNetappLogin:
+ type: string
+ CinderNetappPassword:
+ type: string
+ hidden: true
+ CinderNetappServerHostname:
+ type: string
+ CinderNetappServerPort:
+ type: string
+ default: '80'
+ CinderNetappSizeMultiplier:
+ type: string
+ default: '1.2'
+ CinderNetappStorageFamily:
+ type: string
+ default: 'ontap_cluster'
+ CinderNetappStorageProtocol:
+ type: string
+ default: 'nfs'
+ CinderNetappTransportType:
+ type: string
+ default: 'http'
+ CinderNetappVfiler:
+ type: string
+ default: ''
+ CinderNetappVolumeList:
+ type: string
+ default: ''
+ CinderNetappVserver:
+ type: string
+ default: ''
+ CinderNetappPartnerBackendName:
+ type: string
+ default: ''
+ CinderNetappNfsShares:
+ type: string
+ default: ''
+ CinderNetappNfsSharesConfig:
+ type: string
+ default: '/etc/cinder/shares.conf'
+ CinderNetappNfsMountOptions:
+ type: string
+ default: ''
+ CinderNetappCopyOffloadToolPath:
+ type: string
+ default: ''
+ CinderNetappControllerIps:
+ type: string
+ default: ''
+ CinderNetappSaPassword:
+ type: string
+ default: ''
+ hidden: true
+ CinderNetappStoragePools:
+ type: string
+ default: ''
+ CinderNetappHostType:
+ type: string
+ default: ''
+ CinderNetappWebservicePath:
+ type: string
+ default: '/devmgr/v2'
+ # DEPRECATED options for compatibility with older versions
+ CinderNetappEseriesHostType:
+ type: string
+ default: 'linux_dm_mp'
+
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params, they will be removed.
+ parameters:
+ - CinderNetappEseriesHostType
+
+outputs:
+ role_data:
+ description: Role data for the Cinder NetApp backend.
+ value:
+ service_name: cinder_backend_netapp
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_netapp_backend: {get_param: CinderEnableNetappBackend}
+ cinder::backend::netapp::title: {get_param: CinderNetappBackendName}
+ cinder::backend::netapp::netapp_login: {get_param: CinderNetappLogin}
+ cinder::backend::netapp::netapp_password: {get_param: CinderNetappPassword}
+ cinder::backend::netapp::netapp_server_hostname: {get_param: CinderNetappServerHostname}
+ cinder::backend::netapp::netapp_server_port: {get_param: CinderNetappServerPort}
+ cinder::backend::netapp::netapp_size_multiplier: {get_param: CinderNetappSizeMultiplier}
+ cinder::backend::netapp::netapp_storage_family: {get_param: CinderNetappStorageFamily}
+ cinder::backend::netapp::netapp_storage_protocol: {get_param: CinderNetappStorageProtocol}
+ cinder::backend::netapp::netapp_transport_type: {get_param: CinderNetappTransportType}
+ cinder::backend::netapp::netapp_vfiler: {get_param: CinderNetappVfiler}
+ cinder::backend::netapp::netapp_volume_list: {get_param: CinderNetappVolumeList}
+ cinder::backend::netapp::netapp_vserver: {get_param: CinderNetappVserver}
+ cinder::backend::netapp::netapp_partner_backend_name: {get_param: CinderNetappPartnerBackendName}
+ cinder::backend::netapp::nfs_shares: {get_param: CinderNetappNfsShares}
+ cinder::backend::netapp::nfs_shares_config: {get_param: CinderNetappNfsSharesConfig}
+ cinder::backend::netapp::nfs_mount_options: {get_param: CinderNetappNfsMountOptions}
+ cinder::backend::netapp::netapp_copyoffload_tool_path: {get_param: CinderNetappCopyOffloadToolPath}
+ cinder::backend::netapp::netapp_controller_ips: {get_param: CinderNetappControllerIps}
+ cinder::backend::netapp::netapp_sa_password: {get_param: CinderNetappSaPassword}
+ cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
+ cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
+ cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
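The backend above is typically enabled through an environment file; a rough sketch with made-up credentials and hostname (CinderNetappLogin, CinderNetappPassword and CinderNetappServerHostname have no defaults, the rest repeat the template defaults):

  parameter_defaults:
    CinderEnableNetappBackend: true
    CinderNetappLogin: admin                         # hypothetical value
    CinderNetappPassword: secret                     # hypothetical value
    CinderNetappServerHostname: netapp.example.com   # hypothetical value
    CinderNetappStorageFamily: ontap_cluster
    CinderNetappStorageProtocol: nfs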
--- /dev/null
+# Copyright (c) 2017 Pure Storage Inc, or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+heat_template_version: pike
+
+description: >
+ OpenStack Cinder Pure Storage FlashArray backend
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ CinderEnablePureBackend:
+ type: boolean
+ default: true
+ CinderPureBackendName:
+ type: string
+ default: 'tripleo_pure'
+ CinderPureStorageProtocol:
+ type: string
+ default: 'iSCSI'
+ CinderPureSanIp:
+ type: string
+ CinderPureAPIToken:
+ type: string
+ CinderPureUseChap:
+ type: boolean
+ default: false
+ CinderPureMultipathXfer:
+ type: boolean
+ default: true
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Pure Storage FlashArray backend.
+ value:
+ service_name: cinder_backend_pure
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_pure_backend: {get_param: CinderEnablePureBackend}
+ cinder::backend::pure::volume_backend_name: {get_param: CinderPureBackendName}
+ cinder::backend::pure::pure_storage_protocol: {get_param: CinderPureStorageProtocol}
+ cinder::backend::pure::san_ip: {get_param: CinderPureSanIp}
+ cinder::backend::pure::pure_api_token: {get_param: CinderPureAPIToken}
+ cinder::backend::pure::use_chap_auth: {get_param: CinderPureUseChap}
+ cinder::backend::pure::use_multipath_for_image_xfer: {get_param: CinderPureMultipathXfer}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Cinder Dell EMC ScaleIO backend
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
cinder::backend::scaleio::sio_round_volume_capacity: {get_param: CinderScaleIORoundVolumeCapacity}
cinder::backend::scaleio::sio_unmap_volume_before_deletion: {get_param: CinderScaleIOUnmapVolumeBeforeDeletion}
cinder::backend::scaleio::sio_max_over_subscription_ratio: {get_param: CinderScaleIOMaxOverSubscriptionRatio}
- cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOThinProvision}
+ cinder::backend::scaleio::sio_thin_provision: {get_param: CinderScaleIOSanThinProvision}
step_config: |
include ::tripleo::profile::base::cinder::volume
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Backup service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder base service. Shared by all Cinder services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: cinder_base
config_settings:
cinder::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://cinder:'
- - {get_param: CinderPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/cinder'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: cinder
+ password: {get_param: CinderPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /cinder
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
cinder::debug: {get_param: Debug}
cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
cinder::rabbit_userid: {get_param: RabbitUserName}
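For reference, the make_url function introduced above builds the same connection string that the removed list_join produced; assuming the MysqlInternal protocol resolves to mysql+pymysql and using placeholder host and password values, it evaluates to roughly:

  mysql+pymysql://cinder:PASSWORD@192.0.2.10/cinder?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo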
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configure Cinder HPELeftHandISCSIDriver
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Scheduler service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Volume service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
tripleo::profile::base::cinder::volume::cinder_enable_nfs_backend: {get_param: CinderEnableNfsBackend}
tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
- tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers:
- str_replace:
- template: SERVERS
- params:
- SERVERS: {get_param: CinderNfsServers}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Congress service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ CongressPolicies:
+ description: |
+ A hash of policies to configure for Congress.
+ e.g. { congress-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
outputs:
role_data:
config_settings:
congress_password: {get_param: CongressPassword}
congress::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://congress:'
- - {get_param: CongressPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/congress'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: congress
+ password: {get_param: CongressPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /congress
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
congress::debug: {get_param: Debug}
congress::rpc_backend: rabbit
congress::rabbit_userid: {get_param: RabbitUserName}
congress::rabbit_port: {get_param: RabbitClientPort}
congress::server::bind_host: {get_param: [ServiceNetMap, CongressApiNetwork]}
+ congress::keystone::authtoken::password: {get_param: CongressPassword}
congress::keystone::authtoken::project_name: 'service'
- congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ congress::keystone::authtoken::user_domain_name: 'Default'
+ congress::keystone::authtoken::project_domain_name: 'Default'
+ congress::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ congress::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
congress::db::mysql::password: {get_param: CongressPassword}
congress::db::mysql::user: congress
congress::db::mysql::allowed_hosts:
- '%'
- {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ congress::policy::policies: {get_param: CongressPolicies}
service_config_settings:
keystone:
congress::keystone::auth::tenant: 'service'
+ congress::keystone::auth::region: {get_param: KeystoneRegion}
congress::keystone::auth::password: {get_param: CongressPassword}
congress::keystone::auth::public_url: {get_param: [EndpointMap, CongressPublic, uri]}
congress::keystone::auth::internal_url: {get_param: [EndpointMap, CongressInternal, uri]}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configuration details for MongoDB service using composable roles
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
MongoDb service deployment using puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ MongodbMemoryLimit:
+ default: '20G'
+ description: Limit the amount of memory mongodb uses with systemd.
+ type: string
MongoDbLoggingSource:
type: json
description: Fluentd logging configuration for mongodb.
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [MongoDbBase, role_data, config_settings]
- tripleo::profile::base::database::mongodb::mongodb_replset: {get_attr: [MongoDbBase, aux_parameters, rplset_name]}
+ tripleo::profile::base::database::mongodb::memory_limit: {get_param: MongodbMemoryLimit}
mongodb::server::service_manage: True
tripleo.mongodb.firewall_rules:
'101 mongodb_config':
-heat_template_version: ocata
+heat_template_version: pike
description: >
Mysql client settings
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
outputs:
role_data:
config_settings:
tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS}
+ tripleo::profile::base::database::mysql::client::ssl_ca: {get_param: InternalTLSCAFile}
step_config: |
include ::tripleo::profile::base::database::mysql::client
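A minimal environment sketch exercising the new client-side TLS settings (the CA path shown simply repeats the parameter default):

  parameter_defaults:
    EnableInternalTLS: true
    InternalTLSCAFile: /etc/ipa/ca.crt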
+++ /dev/null
-heat_template_version: ocata
-
-description: >
- MySQL configurations for using TLS via certmonger.
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- # The following parameters are not needed by the template but are
- # required to pass the pep8 tests
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
-
-outputs:
- role_data:
- description: MySQL configurations for using TLS via certmonger.
- value:
- service_name: mysql_internal_tls_certmonger
- config_settings:
- generate_service_certificates: true
- tripleo::profile::base::database::mysql::certificate_specs:
- service_certificate: '/etc/pki/tls/certs/mysql.crt'
- service_key: '/etc/pki/tls/private/mysql.key'
- hostname:
- str_replace:
- template: "%{hiera('cloud_name_NETWORK')}"
- params:
- NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- principal:
- str_replace:
- template: "mysql/%{hiera('cloud_name_NETWORK')}"
- params:
- NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- metadata_settings:
- - service: mysql
- network: {get_param: [ServiceNetMap, MysqlNetwork]}
- type: vip
-heat_template_version: ocata
+heat_template_version: pike
description: >
MySQL service deployment using puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: Configures MySQL max_connections config setting
type: number
default: 4096
+ MysqlIncreaseFileLimit:
+ description: Flag to increase MySQL open-files-limit to 16384
+ type: boolean
+ default: true
MysqlRootPassword:
type: string
hidden: true
description: The password for the nova db account
type: string
hidden: true
+ EnableInternalTLS:
+ type: boolean
+ default: false
-resources:
+conditions:
- MySQLTLS:
- type: OS::TripleO::Services::MySQLTLS
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
outputs:
role_data:
service_name: mysql
config_settings:
map_merge:
- - get_attr: [MySQLTLS, role_data, config_settings]
-
# The Galera package should work in cluster and
# non-cluster modes based on the config file.
$NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
tripleo::profile::base::database::mysql::client_bind_address:
{get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::generate_dropin_file_limit:
+ {get_param: MysqlIncreaseFileLimit}
+ - generate_service_certificates: true
+ tripleo::profile::base::database::mysql::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/mysql.crt'
+ service_key: '/etc/pki/tls/private/mysql.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ principal:
+ str_replace:
+ template: "mysql/%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
step_config: |
include ::tripleo::profile::base::database::mysql
metadata_settings:
- get_attr: [MySQLTLS, role_data, metadata_settings]
+ if:
+ - internal_tls_enabled
+ -
+ - service: mysql
+ network: {get_param: [ServiceNetMap, MysqlNetwork]}
+ type: vip
+ - null
upgrade_tasks:
- name: Check for galera root password
tags: step0
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Redis service configured with Puppet
description: The password for Redis
type: string
hidden: true
+ RedisFDLimit:
+ description: Configure Redis FD limit
+ type: string
+ default: 10240
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
redis::sentinel::master_name: "%{hiera('bootstrap_nodeid')}"
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
+ redis::sentinel::sentinel_bind: {get_param: [ServiceNetMap, RedisNetwork]}
+ redis::ulimit: {get_param: RedisFDLimit}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Redis service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Ceilometer Collector service, disabled since pike
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the disabled Ceilometer Collector role.
+ value:
+ service_name: ceilometer_collector
+ upgrade_tasks:
+ - name: Stop and disable ceilometer_collector service on upgrade
+ tags: step1
+ service: name=openstack-ceilometer-collector state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Ceilometer Expirer service, disabled since pike
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the disabled Ceilometer Expirer role.
+ value:
+ service_name: ceilometer_expirer
+ upgrade_tasks:
+ - name: Stop and disable ceilometer_expirer service on upgrade
+ tags: step1
+ service: name=openstack-ceilometer-expirer state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Glance Registry service, disabled since ocata
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Configures docker on the host
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: tripleoupstream
+ type: string
+ DockerNamespaceIsRegistry:
+ type: boolean
+ default: false
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the docker service
+ value:
+ service_name: docker
+ config_settings:
+ tripleo::profile::base::docker::docker_namespace: {get_param: DockerNamespace}
+ tripleo::profile::base::docker::insecure_registry: {get_param: DockerNamespaceIsRegistry}
+ step_config: |
+ include ::tripleo::profile::base::docker
+ upgrade_tasks:
+ - name: Install docker packages on upgrade if missing
+ tags: step3
+ yum: name=docker state=latest
+
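As an illustration only (the registry address below is made up), the docker service added above can be pointed at a local insecure registry with:

  parameter_defaults:
    DockerNamespace: 192.0.2.1:8787/tripleoupstream   # hypothetical registry/namespace
    DockerNamespaceIsRegistry: true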
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack EC2-API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ Ec2ApiExternalNetwork:
+ type: string
+ default: ''
+ description: Name of the external network, which is used to connect VPCs to
+ the Internet and to allocate Elastic IPs
+ NovaDefaultFloatingPool:
+ default: 'public'
+ description: Default pool for floating IP addresses
+ type: string
MonitoringSubscriptionEc2Api:
default: 'overcloud-ec2-api'
type: string
default: 'false'
description: Set to true to enable package installation via Puppet
type: boolean
+ Ec2ApiPolicies:
+ description: |
+ A hash of policies to configure for EC2-API.
+ e.g. { ec2api-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
conditions:
nova_workers_zero: {equals : [{get_param: Ec2ApiWorkers}, 0]}
+ external_network_unset: {equals : [{get_param: Ec2ApiExternalNetwork}, '']}
outputs:
role_data:
ec2api::keystone::authtoken::password: {get_param: Ec2ApiPassword}
ec2api::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
ec2api::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ec2api::policy::policies: {get_param: Ec2ApiPolicies}
ec2api::api::enabled: true
ec2api::package_manage: {get_param: EnablePackageInstall}
ec2api::api::ec2api_listen:
params:
$NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]}
ec2api::db::database_connection:
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: ec2_api
+ password: {get_param: Ec2ApiPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ec2_api
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
+ ec2api::api::keystone_ec2_tokens_url:
list_join:
- ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://ec2_api:'
- - {get_param: Ec2ApiPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ec2_api'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+ - '/ec2tokens'
-
if:
- nova_workers_zero
- {}
- ec2api::api::ec2api_workers: {get_param: Ec2ApiWorkers}
ec2api::metadata::metadata_workers: {get_param: Ec2ApiWorkers}
+ -
+ if:
+ - external_network_unset
+ - ec2api::api::external_network: {get_param: NovaDefaultFloatingPool}
+ - ec2api::api::external_network: {get_param: Ec2ApiExternalNetwork}
step_config: |
include tripleo::profile::base::nova::ec2api
service_config_settings:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Etcd service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
EtcdInitialClusterToken:
- default: 'etcd-tripleo'
description: Initial cluster token for the etcd cluster during bootstrap.
type: string
+ hidden: true
MonitoringSubscriptionEtcd:
default: 'overcloud-etcd'
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
outputs:
role_data:
service_name: etcd
monitoring_subscription: {get_param: MonitoringSubscriptionEtcd}
config_settings:
- etcd::etcd_name:
- str_replace:
- template:
- "%{hiera('fqdn_$NETWORK')}"
- params:
- $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
- # NOTE: bind IP is found in Heat replacing the network name with the local node IP
- # for the given network; replacement examples (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- tripleo::profile::base::etcd::bind_ip: {get_param: [ServiceNetMap, EtcdNetwork]}
- tripleo::profile::base::etcd::client_port: '2379'
- tripleo::profile::base::etcd::peer_port: '2380'
- etcd::initial_cluster_token: {get_param: EtcdInitialClusterToken}
- etcd::manage_package: false
- tripleo.etcd.firewall_rules:
- '141 etcd':
- dport:
- - 2379
- - 2380
+ map_merge:
+ -
+ etcd::etcd_name:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (e.g. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet -> IP/CIDR
+ tripleo::profile::base::etcd::bind_ip: {get_param: [ServiceNetMap, EtcdNetwork]}
+ tripleo::profile::base::etcd::client_port: '2379'
+ tripleo::profile::base::etcd::peer_port: '2380'
+ etcd::initial_cluster_token: {get_param: EtcdInitialClusterToken}
+ etcd::manage_package: false
+ tripleo.etcd.firewall_rules:
+ '141 etcd':
+ dport:
+ - 2379
+ - 2380
+ -
+ if:
+ - internal_tls_enabled
+ - generate_service_certificates: true
+ tripleo::profile::base::etcd::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/etcd.crt'
+ service_key: '/etc/pki/tls/private/etcd.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+ principal:
+ str_replace:
+ template: "etcd/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+ - {}
step_config: |
include ::tripleo::profile::base::etcd
upgrade_tasks:
- name: Stop etcd service
tags: step2
service: name=etcd state=stopped
+ metadata_settings:
+ if:
+ - internal_tls_enabled
+ -
+ - service: etcd
+ network: {get_param: [ServiceNetMap, EtcdNetwork]}
+ type: node
+ - null
--- /dev/null
+heat_template_version: pike
+
+description: >
+ External Swift Proxy endpoint configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ExternalPublicUrl:
+ description: Public endpoint url for the external swift proxy
+ type: string
+ ExternalInternalUrl:
+ description: Internal endpoint url for the external swift proxy
+ type: string
+ ExternalAdminUrl:
+ description: Admin endpoint url for the external swift proxy
+ type: string
+ ExternalSwiftUserTenant:
+ description: Tenant where the swift user will be set as admin
+ type: string
+ default: 'service'
+ SwiftPassword:
+ description: The password for the swift service account, used by the swift proxy services.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+
+resources:
+
+outputs:
+ role_data:
+ description: Role data for External Swift proxy.
+ value:
+ service_name: external_swift_proxy
+ config_settings:
+
+ step_config:
+
+ service_config_settings:
+ keystone:
+ swift::keystone::auth::public_url: {get_param: ExternalPublicUrl}
+ swift::keystone::auth::internal_url: {get_param: ExternalInternalUrl}
+ swift::keystone::auth::admin_url: {get_param: ExternalAdminUrl}
+ swift::keystone::auth::public_url_s3: ''
+ swift::keystone::auth::internal_url_s3: ''
+ swift::keystone::auth::admin_url_s3: ''
+ swift::keystone::auth::password: {get_param: SwiftPassword}
+ swift::keystone::auth::region: {get_param: KeystoneRegion}
+ swift::keystone::auth::tenant: {get_param: ExternalSwiftUserTenant}
+ swift::keystone::auth::configure_s3_endpoint: false
+ swift::keystone::auth::operator_roles:
+ - admin
+ - swiftoperator
+ - ResellerAdmin
+
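The three URL parameters above have no defaults, so an environment enabling this service has to supply them; a sketch with made-up endpoints:

  parameter_defaults:
    ExternalPublicUrl: https://swift.example.com:8080/v1/AUTH_%(tenant_id)s   # hypothetical
    ExternalInternalUrl: http://192.0.2.20:8080/v1/AUTH_%(tenant_id)s         # hypothetical
    ExternalAdminUrl: http://192.0.2.20:8080                                  # hypothetical
    SwiftPassword: secret                                                     # hypothetical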
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Glance API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ CephClientUserName:
+ default: openstack
+ type: string
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ GlanceNotifierStrategy:
+ description: Strategy to use for Glance notification queue
+ type: string
+ default: noop
+ GlanceLogFile:
+ description: The filepath of the file to use for logging messages from Glance.
+ type: string
+ default: ''
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd']
+ GlanceNfsEnabled:
+ default: false
+ description: >
+ When using GlanceBackend 'file', mount NFS share for image storage.
+ type: boolean
+ GlanceNfsShare:
+ default: ''
+ description: >
+ NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceNfsOptions:
+ default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+ description: >
+ NFS mount options for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceRbdPoolName:
+ default: images
+ type: string
+ RabbitPassword:
+ description: The password for RabbitMQ
+ type: string
+ hidden: true
+ RabbitUserName:
+ default: guest
+ description: The username for RabbitMQ
+ type: string
+ RabbitClientPort:
+ default: 5672
+ description: Set rabbit subscriber port, change this if using SSL
+ type: number
+ RabbitClientUseSSL:
+ default: false
+ description: >
+ Rabbit client subscriber parameter to specify
+ an SSL connection to the RabbitMQ host.
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ GlanceApiPolicies:
+ description: |
+ A hash of policies to configure for Glance API.
+ e.g. { glance-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
conditions:
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
+ glance_workers_unset: {equals : [{get_param: GlanceWorkers}, '']}
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
- GlanceBase:
- type: ./glance-base.yaml
- properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- EndpointMap: {get_param: EndpointMap}
-
outputs:
role_data:
description: Role data for the Glance API role.
- glance
config_settings:
map_merge:
- - get_attr: [GlanceBase, role_data, config_settings]
- get_attr: [TLSProxyBase, role_data, config_settings]
- glance::api::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://glance:'
- - {get_param: GlancePassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/glance'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: glance
+ password: {get_param: GlancePassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /glance
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]}
- glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
glance::api::enable_v1_api: false
glance::api::enable_v2_api: true
glance::api::authtoken::password: {get_param: GlancePassword}
glance::api::enable_proxy_headers_parsing: true
glance::api::debug: {get_param: Debug}
- glance::api::workers: {get_param: GlanceWorkers}
+ glance::policy::policies: {get_param: GlanceApiPolicies}
tripleo.glance_api.firewall_rules:
'112 glance_api':
dport:
- 9292
- 13292
glance::api::authtoken::project_name: 'service'
+ glance::api::authtoken::user_domain_name: 'Default'
+ glance::api::authtoken::project_domain_name: 'Default'
glance::api::pipeline: 'keystone'
glance::api::show_image_direct_url: true
# NOTE: bind IP is found in Heat replacing the network name with the
- use_tls_proxy
- 'localhost'
- {get_param: [ServiceNetMap, GlanceApiNetwork]}
+ glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
+ glance_log_file: {get_param: GlanceLogFile}
+ glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneV3Internal, uri] }
+ glance::backend::swift::swift_store_user: service:glance
+ glance::backend::swift::swift_store_key: {get_param: GlancePassword}
+ glance::backend::swift::swift_store_create_container_on_put: true
+ glance::backend::swift::swift_store_auth_version: 3
+ glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
+ glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
+ glance_backend: {get_param: GlanceBackend}
+ glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
+ glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
+ glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
+ glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ glance::notify::rabbitmq::notification_driver: messagingv2
+ tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
+ tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
+ tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
+ -
+ if:
+ - glance_workers_unset
+ - {}
+ - glance::api::workers: {get_param: GlanceWorkers}
+ service_config_settings:
+ keystone:
+ glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
+ glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
+ glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
+ glance::keystone::auth::password: {get_param: GlancePassword }
+ glance::keystone::auth::region: {get_param: KeystoneRegion}
+ glance::keystone::auth::tenant: 'service'
+ mysql:
+ glance::db::mysql::password: {get_param: GlancePassword}
+ glance::db::mysql::user: glance
+ glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ glance::db::mysql::dbname: glance
+ glance::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
step_config: |
include ::tripleo::profile::base::glance::api
- service_config_settings:
- get_attr: [GlanceBase, role_data, service_config_settings]
upgrade_tasks:
- name: Check if glance_api is deployed
command: systemctl is-enabled openstack-glance-api
+++ /dev/null
-heat_template_version: ocata
-
-description: >
- OpenStack Glance Common settings with Puppet
-
-parameters:
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- CephClientUserName:
- default: openstack
- type: string
- Debug:
- default: ''
- description: Set to True to enable debugging on all services.
- type: string
- GlanceNotifierStrategy:
- description: Strategy to use for Glance notification queue
- type: string
- default: noop
- GlanceLogFile:
- description: The filepath of the file to use for logging messages from Glance.
- type: string
- default: ''
- GlancePassword:
- description: The password for the glance service and db account, used by the glance services.
- type: string
- hidden: true
- GlanceBackend:
- default: swift
- description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
- type: string
- constraints:
- - allowed_values: ['swift', 'file', 'rbd']
- GlanceNfsEnabled:
- default: false
- description: >
- When using GlanceBackend 'file', mount NFS share for image storage.
- type: boolean
- GlanceNfsShare:
- default: ''
- description: >
- NFS share to mount for image storage (when GlanceNfsEnabled is true)
- type: string
- GlanceNfsOptions:
- default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
- description: >
- NFS mount options for image storage (when GlanceNfsEnabled is true)
- type: string
- GlanceRbdPoolName:
- default: images
- type: string
- RabbitPassword:
- description: The password for RabbitMQ
- type: string
- hidden: true
- RabbitUserName:
- default: guest
- description: The username for RabbitMQ
- type: string
- RabbitClientPort:
- default: 5672
- description: Set rabbit subscriber port, change this if using SSL
- type: number
- RabbitClientUseSSL:
- default: false
- description: >
- Rabbit client subscriber parameter to specify
- an SSL connection to the RabbitMQ host.
- type: string
- KeystoneRegion:
- type: string
- default: 'regionOne'
- description: Keystone region for endpoint
-
-outputs:
- role_data:
- description: Role data for the Glance common role.
- value:
- service_name: glance_base
- config_settings:
- glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
- glance_log_file: {get_param: GlanceLogFile}
- glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
- glance::backend::swift::swift_store_user: service:glance
- glance::backend::swift::swift_store_key: {get_param: GlancePassword}
- glance::backend::swift::swift_store_create_container_on_put: true
- glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
- glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
- glance_backend: {get_param: GlanceBackend}
- glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
- glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
- glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
- glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- glance::notify::rabbitmq::notification_driver: messagingv2
- tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
- tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
- tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
- service_config_settings:
- keystone:
- glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
- glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
- glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
- glance::keystone::auth::password: {get_param: GlancePassword }
- glance::keystone::auth::region: {get_param: KeystoneRegion}
- glance::keystone::auth::tenant: 'service'
- mysql:
- glance::db::mysql::password: {get_param: GlancePassword}
- glance::db::mysql::user: glance
- glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
- glance::db::mysql::dbname: glance
- glance::db::mysql::allowed_hosts:
- - '%'
- - "%{hiera('mysql_bind_host')}"
-heat_template_version: ocata
+heat_template_version: pike
description: >
Gnocchi service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ GnocchiApiPolicies:
+ description: |
+ A hash of policies to configure for Gnocchi API.
+ e.g. { gnocchi-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ApacheServiceBase:
type: ./apache.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
gnocchi::api::enabled: true
gnocchi::api::enable_proxy_headers_parsing: true
gnocchi::api::service_name: 'httpd'
- gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ gnocchi::policy::policies: {get_param: GnocchiApiPolicies}
+ gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
gnocchi::keystone::authtoken::project_name: 'service'
+ gnocchi::keystone::authtoken::user_domain_name: 'Default'
+ gnocchi::keystone::authtoken::project_domain_name: 'Default'
gnocchi::wsgi::apache::ssl: {get_param: EnableInternalTLS}
gnocchi::wsgi::apache::servername:
str_replace:
# internal_api_subnet - > IP/CIDR
gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi'
-
- gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]}
step_config: |
include ::tripleo::profile::base::gnocchi::api
service_config_settings:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Stop gnocchi_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ yaql:
+ expression: $.data.apache_upgrade + $.data.gnocchi_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ gnocchi_api_upgrade:
+ - name: Stop gnocchi_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
-heat_template_version: ocata
+heat_template_version: pike
description: >
Gnocchi service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 'mysql'
description: The short name of the Gnocchi indexer backend to use.
type: string
+ MetricProcessingDelay:
+ default: 60
+ description: Delay between processing metrics.
+ type: number
GnocchiPassword:
description: The password for the gnocchi service and db account.
type: string
CephClientUserName:
default: openstack
type: string
- KeystoneRegion:
- type: string
- default: 'regionOne'
- description: Keystone region for endpoint
RedisPassword:
description: The password for the redis service account.
type: string
gnocchi_redis_password: {get_param: RedisPassword}
gnocchi::debug: {get_param: Debug}
gnocchi::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://gnocchi:'
- - {get_param: GnocchiPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/gnocchi'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: gnocchi
+ password: {get_param: GnocchiPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /gnocchi
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
gnocchi::db::sync::extra_opts: '--skip-storage'
+ gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
gnocchi::storage::swift::swift_user: 'service:gnocchi'
- gnocchi::storage::swift::swift_auth_version: 2
+ gnocchi::storage::swift::swift_auth_version: 3
gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
+ gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneV3Internal, uri]}
gnocchi::storage::ceph::ceph_pool: {get_param: GnocchiRbdPoolName}
gnocchi::storage::ceph::ceph_username: {get_param: CephClientUserName}
gnocchi::storage::ceph::ceph_keyring:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Gnocchi service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Gnocchi service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
HAProxy deployment with TLS enabled, powered by certmonger
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
HAProxy deployment with TLS enabled, powered by certmonger
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
HAproxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
MonitoringSubscriptionHaproxy:
default: 'overcloud-haproxy'
type: string
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
HAProxyInternalTLS:
type: OS::TripleO::Services::HAProxyInternalTLS
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
tripleo::haproxy::haproxy_stats_user: {get_param: HAProxyStatsUser}
tripleo::haproxy::haproxy_stats_password: {get_param: HAProxyStatsPassword}
tripleo::haproxy::redis_password: {get_param: RedisPassword}
+ tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
tripleo::profile::base::haproxy::certificates_specs:
map_merge:
- get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
when: haproxy_enabled.rc == 0
service: name=haproxy state=started
metadata_settings:
- yaql:
- expression: '[].concat(coalesce($.data.internal, []), coalesce($.data.public, []))'
- data:
- public: {get_attr: [HAProxyPublicTLS, role_data, metadata_settings]}
- internal: {get_attr: [HAProxyInternalTLS, role_data, metadata_settings]}
+ list_concat:
+ - {get_attr: [HAProxyPublicTLS, role_data, metadata_settings]}
+ - {get_attr: [HAProxyInternalTLS, role_data, metadata_settings]}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Heat CloudFormation API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default:
tag: openstack.heat.api.cfn
path: /var/log/heat/heat-api-cfn.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+  heat_workers_zero: {equals: [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api_cfn::workers: {get_param: HeatWorkers}
- tripleo.heat_api_cfn.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api_cfn.firewall_rules:
'125 heat_cfn':
dport:
- 8000
- 13800
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ heat::wsgi::apache_api_cfn::ssl: {get_param: EnableInternalTLS}
+ heat::api_cfn::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
      # internal_api_subnet -> IP/CIDR
- heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
+ heat::wsgi::apache_api_cfn::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiCfnNetwork]}
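+        # Assumed intent of the conditional below: only pin the httpd worker count
+        # when HeatWorkers is non-zero; a value of 0 leaves the puppet module default.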
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api_cfn::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api_cfn
service_config_settings:
shell: /usr/bin/systemctl show 'openstack-heat-api-cfn' --property ActiveState | grep '\bactive\b'
when: heat_api_cfn_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api_cfn service
+ - name: check for heat_api_cfn running under apache (post upgrade)
tags: step1
- when: heat_api_cfn_enabled.rc == 0
- service: name=openstack-heat-api-cfn state=stopped
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cfn_wsgi"
+ register: heat_api_cfn_apache
+ ignore_errors: true
+ - name: Stop heat_api_cfn service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_cfn_apache.rc == 0
+ - name: Stop and disable heat_api_cfn service (pre-upgrade not under httpd)
+ tags: step1
+        when: heat_api_cfn_enabled.rc == 0
+ service: name=openstack-heat-api-cfn state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Heat CloudWatch API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default:
tag: openstack.heat.api.cloudwatch
path: /var/log/heat/heat-api-cloudwatch.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+  heat_workers_zero: {equals: [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api_cloudwatch::workers: {get_param: HeatWorkers}
- tripleo.heat_api_cloudwatch.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api_cloudwatch.firewall_rules:
'125 heat_cloudwatch':
dport:
- 8003
- 13003
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api_cloudwatch::bind_host:
+ get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+ heat::wsgi::apache_api_cloudwatch::ssl: {get_param: EnableInternalTLS}
+ heat::api_cloudwatch::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
      # internal_api_subnet -> IP/CIDR
- heat::api_cloudwatch::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api_cloudwatch::bind_host:
+ get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]
+ heat::wsgi::apache_api_cloudwatch::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiCloudwatchNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api_cloudwatch::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api_cloudwatch
upgrade_tasks:
shell: /usr/bin/systemctl show 'openstack-heat-api-cloudwatch' --property ActiveState | grep '\bactive\b'
when: heat_api_cloudwatch_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api_cloudwatch service
+ - name: check for heat_api_cloudwatch running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_cloudwatch_wsgi"
+ register: heat_api_cloudwatch_apache
+ ignore_errors: true
+ - name: Stop heat_api_cloudwatch service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_cloudwatch_apache.rc == 0
+ - name: Stop and disable heat_api_cloudwatch service (pre-upgrade not under httpd)
tags: step1
when: heat_api_cloudwatch_enabled.rc == 0
- service: name=openstack-heat-api-cloudwatch state=stopped
+ service: name=openstack-heat-api-cloudwatch state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Heat API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default:
tag: openstack.heat.api
path: /var/log/heat/heat-api.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ HeatApiPolicies:
+ description: |
+ A hash of policies to configure for Heat API.
+ e.g. { heat-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
+
+conditions:
+  heat_workers_zero: {equals: [{get_param: HeatWorkers}, 0]}
resources:
+
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
HeatBase:
type: ./heat-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
config_settings:
map_merge:
- get_attr: [HeatBase, role_data, config_settings]
- - heat::api::workers: {get_param: HeatWorkers}
- tripleo.heat_api.firewall_rules:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - tripleo.heat_api.firewall_rules:
'125 heat_api':
dport:
- 8004
- 13004
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
+ heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+ heat::policy::policies: {get_param: HeatApiPolicies}
+ heat::api::service_name: 'httpd'
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
      # internal_api_subnet -> IP/CIDR
- heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ heat::wsgi::apache_api::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, HeatApiNetwork]}
+ -
+ if:
+ - heat_workers_zero
+ - {}
+ - heat::wsgi::apache_api::workers: {get_param: HeatWorkers}
step_config: |
include ::tripleo::profile::base::heat::api
service_config_settings:
shell: /usr/bin/systemctl show 'openstack-heat-api' --property ActiveState | grep '\bactive\b'
when: heat_api_enabled.rc == 0
tags: step0,validation
- - name: Stop heat_api service
+ - name: check for heat_api running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q heat_api_wsgi"
+ register: heat_api_apache
+ ignore_errors: true
+ - name: Stop heat_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: heat_api_apache.rc == 0
+ - name: Stop and disable heat_api service (pre-upgrade not under httpd)
tags: step1
when: heat_api_enabled.rc == 0
- service: name=openstack-heat-api state=stopped
+ service: name=openstack-heat-api state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Heat base service. Shared for all Heat services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
value: 'role:admin'
heat::rabbit_heartbeat_timeout_threshold: 60
heat::keystone::authtoken::project_name: 'service'
- heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ heat::keystone::authtoken::user_domain_name: 'Default'
+ heat::keystone::authtoken::project_domain_name: 'Default'
+ heat::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
heat::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
heat::keystone::authtoken::password: {get_param: HeatPassword}
heat::keystone::domain::domain_name: 'heat_stack'
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Heat Engine service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
heat::engine::max_nested_stack_depth: 6
heat::engine::max_resources_per_stack: {get_param: HeatMaxResourcesPerStack}
heat::engine::heat_metadata_server_url:
- list_join:
- - ''
- - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
- - '://'
- - {get_param: [EndpointMap, HeatCfnPublic, host]}
- - ':'
- - {get_param: [EndpointMap, HeatCfnPublic, port]}
+ make_url:
+ scheme: {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+ host: {get_param: [EndpointMap, HeatCfnPublic, host]}
+ port: {get_param: [EndpointMap, HeatCfnPublic, port]}
heat::engine::heat_waitcondition_server_url:
- list_join:
- - ''
- - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
- - '://'
- - {get_param: [EndpointMap, HeatCfnPublic, host]}
- - ':'
- - {get_param: [EndpointMap, HeatCfnPublic, port]}
- - '/v1/waitcondition'
+ make_url:
+ scheme: {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+ host: {get_param: [EndpointMap, HeatCfnPublic, host]}
+ port: {get_param: [EndpointMap, HeatCfnPublic, port]}
+ path: /v1/waitcondition
heat::engine::convergence_engine: {get_param: HeatConvergenceEngine}
tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge}
heat::database_connection:
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: heat
+ password: {get_param: HeatPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /heat
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
+ heat::keystone_ec2_uri:
list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://heat:'
- - {get_param: HeatPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/heat'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
- heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
+ - ''
+ - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+ - '/ec2tokens'
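+      # Hedged note: this appends '/ec2tokens' to the Keystone v3 internal URI,
+      # yielding e.g. http://<keystone-host>:5000/v3/ec2tokens (scheme, host and
+      # port come from EndpointMap, so the example values are placeholders).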
heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
heat::engine::auth_encryption_key:
yaql:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Horizon service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
type: string
hidden: true
default: ''
+ HorizonSecureCookies:
+ description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon
+ type: boolean
+ default: true
MemcachedIPv6:
default: false
description: Enable IPv6 features in Memcached.
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
options: ['FollowSymLinks','MultiViews']
horizon::bind_address: {get_param: [ServiceNetMap, HorizonNetwork]}
- horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ horizon::keystone_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
horizon::password_validator: {get_param: [HorizonPasswordValidator]}
horizon::password_validator_help: {get_param: [HorizonPasswordValidatorHelp]}
horizon::secret_key:
passwords:
- {get_param: HorizonSecret}
- {get_param: [DefaultPasswords, horizon_secret]}
+ horizon::secure_cookies: {get_param: [HorizonSecureCookies]}
memcached_ipv6: {get_param: MemcachedIPv6}
-
if:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ironic API configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ IronicApiPolicies:
+ description: |
+ A hash of policies to configure for Ironic API.
+ e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
IronicBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [IronicBase, role_data, config_settings]
- ironic::api::authtoken::password: {get_param: IronicPassword}
ironic::api::authtoken::project_name: 'service'
+ ironic::api::authtoken::user_domain_name: 'Default'
+ ironic::api::authtoken::project_domain_name: 'Default'
ironic::api::authtoken::username: 'ironic'
- ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ironic::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ironic::api::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
# This is used to build links in responses
ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+ ironic::policy::policies: {get_param: IronicApiPolicies}
tripleo.ironic_api.firewall_rules:
'133 ironic api':
dport:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ironic services configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: ironic_base
config_settings:
ironic::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://ironic:'
- - {get_param: IronicPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ironic'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: ironic
+ password: {get_param: IronicPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ironic
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
ironic::debug: {get_param: Debug}
ironic::rabbit_userid: {get_param: RabbitUserName}
ironic::rabbit_password: {get_param: RabbitPassword}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ironic conductor configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
created yet) and should be changed to an actual UUID in
a post-deployment stack update.
type: string
+ IronicDefaultBootOption:
+ default: 'local'
+ description: How to boot the bare metal instances. Set to 'local' (the
+                 default) to use a local bootloader (requires grub2 for partition
+ images). Set to 'netboot' to make the instances boot from
+ controllers using PXE/iPXE.
+ type: string
+ IronicDefaultNetworkInterface:
+ default: 'flat'
+ description: Network interface implementation to use by default.
+ Set to "flat" (the default) to use one flat provider network.
+ Set to "neutron" to make Ironic interact with the Neutron
+ ML2 driver to enable other network types and certain
+                 advanced networking features. Requires
+ IronicProvisioningNetwork to be correctly set.
+ type: string
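+  # Illustrative environment-file override only (not part of this template);
+  # the values below are placeholders:
+  #   parameter_defaults:
+  #     IronicDefaultBootOption: netboot
+  #     IronicDefaultNetworkInterface: neutron
+  #     IronicProvisioningNetwork: <provisioning network name or UUID>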
IronicEnabledDrivers:
default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo']
description: Enabled Ironic drivers
type: comma_delimited_list
+ IronicEnabledHardwareTypes:
+ default: ['ipmi']
+ description: Enabled Ironic hardware types
+ type: comma_delimited_list
IronicIPXEEnabled:
default: true
description: Whether to use iPXE instead of PXE for deployment.
default: 8088
description: Port to use for serving images when iPXE is used.
type: string
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
+ IronicProvisioningNetwork:
+ default: 'provisioning'
+ description: Name or UUID of the *overcloud* network used for provisioning
+ of bare metal nodes, if IronicDefaultNetworkInterface is
+ set to "neutron". The default value of "provisioning" can be
+ left during the initial deployment (when no networks are
+ created yet) and should be changed to an actual UUID in
+ a post-deployment stack update.
+ type: string
MonitoringSubscriptionIronicConductor:
default: 'overcloud-ironic-conductor'
type: string
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
- # FIXME: I have no idea why neutron_url is in "api" manifest
- - ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
- ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
+ - ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
+ ironic::conductor::provisioning_network: {get_param: IronicProvisioningNetwork}
+ ironic::conductor::default_boot_option: {get_param: IronicDefaultBootOption}
ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
+ ironic::conductor::enabled_hardware_types: {get_param: IronicEnabledHardwareTypes}
# We need an endpoint containing a real IP, not a VIP here
ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::conductor::http_url:
# NOTE(dtantsur): UEFI only works with iPXE currently for us
ironic::drivers::pxe::uefi_pxe_config_template: '$pybasedir/drivers/modules/ipxe_config.template'
ironic::drivers::pxe::uefi_pxe_bootfile_name: 'ipxe.efi'
+ ironic::drivers::interfaces::enabled_console_interfaces: ['ipmitool-socat', 'no-console']
+ ironic::drivers::interfaces::enabled_network_interfaces: ['flat', 'neutron']
+ ironic::drivers::interfaces::default_network_interface: {get_param: IronicDefaultNetworkInterface}
tripleo.ironic_conductor.firewall_rules:
'134 ironic conductor TFTP':
dport: 69
# the VIP, but rather a real IP of the host.
ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
-
+ # Credentials to access other services
+ ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::glance::username: 'ironic'
+ ironic::glance::password: {get_param: IronicPassword}
+ ironic::glance::project_name: 'service'
+ ironic::glance::user_domain_name: 'Default'
+ ironic::glance::project_domain_name: 'Default'
+ ironic::neutron::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::neutron::username: 'ironic'
+ ironic::neutron::password: {get_param: IronicPassword}
+ ironic::neutron::project_name: 'service'
+ ironic::neutron::user_domain_name: 'Default'
+ ironic::neutron::project_domain_name: 'Default'
+ ironic::service_catalog::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::service_catalog::username: 'ironic'
+ ironic::service_catalog::password: {get_param: IronicPassword}
+ ironic::service_catalog::project_name: 'service'
+ ironic::service_catalog::user_domain_name: 'Default'
+ ironic::service_catalog::project_domain_name: 'Default'
+ ironic::swift::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::swift::username: 'ironic'
+ ironic::swift::password: {get_param: IronicPassword}
+ ironic::swift::project_name: 'service'
+ ironic::swift::user_domain_name: 'Default'
+ ironic::swift::project_domain_name: 'Default'
+ # ironic-inspector support is not implemented, but let's configure
+ # the credentials for consistency.
+ ironic::drivers::inspector::enabled: false
+ ironic::drivers::inspector::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::drivers::inspector::username: 'ironic'
+ ironic::drivers::inspector::password: {get_param: IronicPassword}
+ ironic::drivers::inspector::project_name: 'service'
+ ironic::drivers::inspector::user_domain_name: 'Default'
+ ironic::drivers::inspector::project_domain_name: 'Default'
step_config: |
include ::tripleo::profile::base::ironic::conductor
upgrade_tasks:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Keepalived service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
- tripleo.keepalived.firewall_rules:
'106 keepalived vrrp':
proto: vrrp
- -
+ -
if:
- control_iface_empty
- {}
- tripleo::keepalived::control_virtual_interface: {get_param: ControlVirtualInterface}
- -
+ -
if:
- public_iface_empty
- {}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Load kernel modules with kmod and configure kernel options with sysctl.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 1048576
description: Configures sysctl kernel.pid_max key
type: number
+ KernelDisableIPv6:
+ default: 0
+ description: Configures sysctl net.ipv6.{default/all}.disable_ipv6 keys
+ type: number
+ NeighbourGcThreshold1:
+ default: 1024
+ description: Configures sysctl net.ipv4.neigh.default.gc_thresh1 value.
+ This is the minimum number of entries to keep in the ARP
+ cache. The garbage collector will not run if there are
+ fewer than this number of entries in the cache.
+ type: number
+ NeighbourGcThreshold2:
+ default: 2048
+ description: Configures sysctl net.ipv4.neigh.default.gc_thresh2 value.
+ This is the soft maximum number of entries to keep in the
+ ARP cache. The garbage collector will allow the number of
+ entries to exceed this for 5 seconds before collection will
+ be performed.
+ type: number
+ NeighbourGcThreshold3:
+ default: 4096
+ description: Configures sysctl net.ipv4.neigh.default.gc_thresh3 value.
+ This is the hard maximum number of entries to keep in the
+ ARP cache. The garbage collector will always run if there
+ are more than this number of entries in the cache.
+ type: number
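+  # Illustrative sizing for a large deployment (placeholder values, not a
+  # recommendation from this template):
+  #   parameter_defaults:
+  #     NeighbourGcThreshold1: 4096
+  #     NeighbourGcThreshold2: 8192
+  #     NeighbourGcThreshold3: 16384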
outputs:
role_data:
config_settings:
kernel_modules:
nf_conntrack: {}
- ip_conntrack_proto_sctp: {}
+ nf_conntrack_proto_sctp: {}
sysctl_settings:
net.ipv4.tcp_keepalive_intvl:
value: 1
value: 5
net.ipv4.tcp_keepalive_time:
value: 5
+ net.ipv4.conf.default.send_redirects:
+ value: 0
+ net.ipv4.conf.all.send_redirects:
+ value: 0
+ net.ipv4.conf.default.accept_redirects:
+ value: 0
+ net.ipv4.conf.default.secure_redirects:
+ value: 0
+ net.ipv4.conf.all.secure_redirects:
+ value: 0
+ net.ipv4.conf.default.log_martians:
+ value: 1
+ net.ipv4.conf.all.log_martians:
+ value: 1
net.nf_conntrack_max:
value: 500000
net.netfilter.nf_conntrack_max:
value: 500000
+ net.ipv6.conf.default.disable_ipv6:
+ value: {get_param: KernelDisableIPv6}
+ net.ipv6.conf.all.disable_ipv6:
+ value: {get_param: KernelDisableIPv6}
# prevent neutron bridges from autoconfiguring ipv6 addresses
net.ipv6.conf.all.accept_ra:
value: 0
value: 0
net.ipv6.conf.default.autoconf:
value: 0
+ net.ipv6.conf.default.accept_redirects:
+ value: 0
+ net.ipv6.conf.all.accept_redirects:
+ value: 0
net.core.netdev_max_backlog:
value: 10000
kernel.pid_max:
value: {get_param: KernelPidMax}
+ kernel.dmesg_restrict:
+ value: 1
+ fs.suid_dumpable:
+ value: 0
+        # avoid neighbour table overflow on large deployments
+ net.ipv4.neigh.default.gc_thresh1:
+ value: {get_param: NeighbourGcThreshold1}
+ net.ipv4.neigh.default.gc_thresh2:
+ value: {get_param: NeighbourGcThreshold2}
+ net.ipv4.neigh.default.gc_thresh3:
+ value: {get_param: NeighbourGcThreshold3}
step_config: |
include ::tripleo::profile::base::kernel
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Keystone service configured with Puppet
KeystoneTokenProvider:
description: The keystone token format
type: string
- default: 'uuid'
+ default: 'fernet'
constraints:
- allowed_values: ['uuid', 'fernet']
ServiceNetMap:
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
Cron to purge expired tokens - Ensure
default: 'present'
KeystoneCronTokenFlushMinute:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Minute
default: '1'
KeystoneCronTokenFlushHour:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Hour
- default: '0'
+ default: '*'
KeystoneCronTokenFlushMonthday:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Month Day
default: '*'
KeystoneCronTokenFlushMonth:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Month
default: '*'
KeystoneCronTokenFlushWeekday:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Week Day
default: '*'
description: >
Cron to purge expired tokens - User
default: 'keystone'
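+  # With the defaults above (minute '1', all other fields '*') the token flush
+  # cron now runs hourly at minute 1 instead of once a day at 00:01.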
+ KeystonePolicies:
+ description: |
+ A hash of policies to configure for Keystone.
+ e.g. { keystone-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
+ KeystoneLDAPDomainEnable:
+    description: Trigger to call the ldap_backend define from puppet-keystone.
+ type: boolean
+ default: False
+ KeystoneLDAPBackendConfigs:
+ description: Hash containing the configurations for the LDAP backends
+ configured in keystone.
+ type: json
+ default: {}
+ hidden: true
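+  # Hedged sketch of the expected shape (domain name and values are placeholders;
+  # see the ldap_backend define in puppet-keystone for the full option list):
+  #   KeystoneLDAPBackendConfigs:
+  #     tripleoldap:
+  #       url: ldap://ldap.example.com
+  #       user: cn=admin,dc=example,dc=com
+  #       suffix: dc=example,dc=com
+  #       user_tree_dn: ou=Users,dc=example,dc=com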
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
conditions:
keystone_fernet_tokens: {equals: [{get_param: KeystoneTokenProvider}, "fernet"]}
+ keystone_ldap_domain_enabled: {equals: [{get_param: KeystoneLDAPDomainEnable}, True]}
outputs:
role_data:
map_merge:
- get_attr: [ApacheServiceBase, role_data, config_settings]
- keystone::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://keystone:'
- - {get_param: AdminToken}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/keystone'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: keystone
+ password: {get_param: AdminToken}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /keystone
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
keystone::admin_token: {get_param: AdminToken}
keystone::admin_password: {get_param: AdminPassword}
keystone::roles::admin::password: {get_param: AdminPassword}
+ keystone::policy::policies: {get_param: KeystonePolicies}
keystone_ssl_certificate: {get_param: KeystoneSSLCertificate}
keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
keystone::token_provider: {get_param: KeystoneTokenProvider}
content: {get_param: KeystoneFernetKey0}
'/etc/keystone/fernet-keys/1':
content: {get_param: KeystoneFernetKey1}
+ keystone::fernet_replace_keys: false
keystone::debug: {get_param: Debug}
keystone::rabbit_userid: {get_param: RabbitUserName}
keystone::rabbit_password: {get_param: RabbitPassword}
keystone::cron::token_flush::maxdelay: 3600
keystone::roles::admin::service_tenant: 'service'
keystone::roles::admin::admin_tenant: 'admin'
- keystone::cron::token_flush::destination: '/dev/null'
+ keystone::cron::token_flush::destination: '/var/log/keystone/keystone-tokenflush.log'
keystone::config::keystone_config:
ec2/driver:
value: 'keystone.contrib.ec2.backends.sql.Ec2'
keystone::cron::token_flush::maxdelay: {get_param: KeystoneCronTokenFlushMaxDelay}
keystone::cron::token_flush::destination: {get_param: KeystoneCronTokenFlushDestination}
keystone::cron::token_flush::user: {get_param: KeystoneCronTokenFlushUser}
+ -
+ if:
+ - keystone_ldap_domain_enabled
+ -
+ tripleo::profile::base::keystone::ldap_backend_enable: True
+ keystone::using_domain_config: True
+ tripleo::profile::base::keystone::ldap_backends_config:
+ get_param: KeystoneLDAPBackendConfigs
+ - {}
step_config: |
include ::tripleo::profile::base::keystone
keystone::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
- # Ansible tasks to handle upgrade
- upgrade_tasks:
- - name: Stop keystone service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ horizon:
+ if:
+ - keystone_ldap_domain_enabled
+ -
+ horizon::keystone_multidomain_support: true
+ horizon::keystone_default_domain: 'Default'
+ - {}
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ yaql:
+ expression: $.data.apache_upgrade + $.data.keystone_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ keystone_upgrade:
+ - name: Stop keystone service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
-heat_template_version: ocata
+heat_template_version: pike
description: Fluentd base service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: >
-heat_template_version: ocata
+heat_template_version: pike
description: Fluentd client configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: >
type: ./fluentd-base.yaml
properties:
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
-heat_template_version: ocata
+heat_template_version: pike
description: Fluentd logging configuration
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: >
-heat_template_version: ocata
+heat_template_version: pike
description: >
Manila-api service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [ManilaBase, role_data, config_settings]
- manila::keystone::authtoken::password: {get_param: ManilaPassword}
- manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ manila::keystone::authtoken::auth_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
manila::keystone::authtoken::project_name: 'service'
+ manila::keystone::authtoken::user_domain_name: 'Default'
+ manila::keystone::authtoken::project_domain_name: 'Default'
tripleo.manila_api.firewall_rules:
'150 manila':
dport:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Manila Cephfs backend
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 'ceph'
ManilaCephFSNativeCephFSEnableSnapshots:
type: boolean
- default: true
+ default: false
ManilaCephFSDataPoolName:
default: manila_data
type: string
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Manila generic backend.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Manila netapp backend.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Manila base service. Shared by manila-api/scheduler/share services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
manila::db::database_db_max_retries: -1
manila::db::database_max_retries: -1
manila::sql_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://manila:'
- - {get_param: ManilaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/manila'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: manila
+ password: {get_param: ManilaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /manila
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
service_config_settings:
mysql:
manila::db::mysql::password: {get_param: ManilaPassword}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Manila-scheduler service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Manila-share service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Memcached service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: Collectd client service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
CollectdDefaultPlugins:
default:
- disk
CollectdSecurityLevel:
type: string
description: >
- Security level setting for remote collectd connection.
+ Security level setting for remote collectd connection. If it is
+ set to Sign or Encrypt the CollectdPassword and CollectdUsername
+ parameters need to be set.
default: 'None'
constraints:
- allowed_values:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Mistral API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 1
description: The number of workers for the mistral-api.
type: number
+ MistralApiPolicies:
+ description: |
+ A hash of policies to configure for Mistral API.
+ e.g. { mistral-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
MistralBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [MistralBase, role_data, config_settings]
- mistral::api::api_workers: {get_param: MistralWorkers}
mistral::api::bind_host: {get_param: [ServiceNetMap, MistralApiNetwork]}
+ mistral::policy::policies: {get_param: MistralApiPolicies}
tripleo.mistral_api.firewall_rules:
'133 mistral':
dport:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Mistral base service. Shared for all Mistral services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: mistral_base
config_settings:
mistral::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://mistral:'
- - {get_param: MistralPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/mistral'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: mistral
+ password: {get_param: MistralPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /mistral
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
mistral::rabbit_userid: {get_param: RabbitUserName}
mistral::rabbit_password: {get_param: RabbitPassword}
mistral::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
mistral::keystone_password: {get_param: MistralPassword}
mistral::keystone_tenant: 'service'
mistral::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- mistral::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
+ mistral::keystone_ec2_uri:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, KeystoneV3Internal, uri]}
+ - '/ec2tokens'
mistral::identity_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
service_config_settings:
keystone:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Mistral Engine service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Mistral API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: Sensu base service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
MonitoringRabbitHost:
description: RabbitMQ host Sensu has to connect to.
type: string
default: false
description: >
RabbitMQ client subscriber parameter to specify an SSL connection
- to the RabbitMQ host.
+ to the RabbitMQ host. Set MonitoringRabbitUseSSL to true without
+ specifying a private key or cert chain to use SSL transport,
+ but not cert auth.
+ type: string
+ MonitoringRabbitSSLPrivateKey:
+ default: ''
+ description: Private key to be used by Sensu to connect to RabbitMQ host.
+ type: string
+ MonitoringRabbitSSLCertChain:
+ default: ''
+ description: >
+      SSL certificate chain used by Sensu to connect to the RabbitMQ host.
type: string
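+  # Minimal sketch (placeholder values), assuming the underlying sensu puppet
+  # module accepts the certificate material directly:
+  #   MonitoringRabbitUseSSL: true
+  #   MonitoringRabbitSSLPrivateKey: '-----BEGIN RSA PRIVATE KEY----- ...'
+  #   MonitoringRabbitSSLCertChain: '-----BEGIN CERTIFICATE----- ...'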
MonitoringRabbitPassword:
description: The RabbitMQ password used for monitoring purposes.
sensu::rabbitmq_password: {get_param: MonitoringRabbitPassword}
sensu::rabbitmq_port: {get_param: MonitoringRabbitPort}
sensu::rabbitmq_ssl: {get_param: MonitoringRabbitUseSSL}
+ sensu::rabbitmq_ssl_private_key: {get_param: MonitoringRabbitSSLPrivateKey}
+ sensu::rabbitmq_ssl_cert_chain: {get_param: MonitoringRabbitSSLCertChain}
sensu::rabbitmq_user: {get_param: MonitoringRabbitUserName}
sensu::rabbitmq_vhost: {get_param: MonitoringRabbitVhost}
sensu::redact: {get_param: SensuRedactVariables}
-heat_template_version: ocata
+heat_template_version: pike
description: Sensu client configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: >
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- name: Install sensu package if it was disabled
tags: step3
yum: name=sensu state=latest
- when: sensu_client.rc != 0
+ when: sensu_client_enabled.rc != 0
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Analytics Database service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Analytics service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Base parameters for all Contrail Services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Config service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Control service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Database service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Heat plugin adds Contrail specific heat resources enabling heat
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Opencontrail plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Provision Contrail services after deployment
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail TSN Service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute OpenContrail plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: vRouter physical interface
type: string
ContrailVrouterGateway:
- default: '192.0.2.1'
+ default: '192.168.24.1'
description: vRouter default gateway
type: string
ContrailVrouterNetmask:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail WebUI service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Server configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
NeutronWorkers:
default: ''
description: |
- Sets the number of API and RPC workers for the Neutron service. The
- default value results in the configuration being left unset and a
- system-dependent default will be chosen (usually the number of
- processors). Please note that this can result in a large number of
- processes and memory consumption on systems with a large core count. On
- such systems it is recommended that a non-default value be selected that
- matches the load requirements.
+ Sets the number of API and RPC workers for the Neutron service.
+ The default value results in the configuration being left unset
+ and a system-dependent default will be chosen (usually the number
+ of processors). Please note that this can result in a large number
+ of processes and memory consumption on systems with a large core
+ count. On such systems it is recommended that a non-default value
+ be selected that matches the load requirements.
type: string
NeutronPassword:
description: The password for the neutron service and db account, used by neutron agents.
default:
tag: openstack.neutron.api
path: /var/log/neutron/server.log
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ NeutronApiPolicies:
+ description: |
+ A hash of policies to configure for Neutron API.
+ e.g. { neutron-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
# DEPRECATED: the following options are deprecated and are currently maintained
# for backwards compatibility. They will be removed in the Ocata cycle.
removed in Ocata. Future releases will enable L3 HA by default if it is
appropriate for the deployment type. Alternate mechanisms will be
available to override.
- EnableInternalTLS:
- type: boolean
- default: false
-
parameter_groups:
- label: deprecated
description: |
conditions:
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
+ neutron_workers_unset: {equals : [{get_param: NeutronWorkers}, '']}
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
NeutronBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [NeutronBase, role_data, config_settings]
- get_attr: [TLSProxyBase, role_data, config_settings]
- neutron::server::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://neutron:'
- - {get_param: NeutronPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ovs_neutron'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
- neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: neutron
+ password: {get_param: NeutronPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ovs_neutron
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
+ neutron::policy::policies: {get_param: NeutronApiPolicies}
+ neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- neutron::server::api_workers: {get_param: NeutronWorkers}
- neutron::server::rpc_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron::server::enable_proxy_headers_parsing: true
neutron::keystone::authtoken::password: {get_param: NeutronPassword}
- neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneV3Admin, uri ] }
+ neutron::server::notifications::auth_url: { get_param: [ EndpointMap, KeystoneInternal, uri_no_suffix ] }
neutron::server::notifications::tenant_name: 'service'
neutron::server::notifications::project_name: 'service'
neutron::server::notifications::password: {get_param: NovaPassword}
neutron::keystone::authtoken::project_name: 'service'
+ neutron::keystone::authtoken::user_domain_name: 'Default'
+ neutron::keystone::authtoken::project_domain_name: 'Default'
neutron::server::sync_db: true
tripleo.neutron_api.firewall_rules:
'114 neutron api':
- 9696
- 13696
neutron::server::router_distributed: {get_param: NeutronEnableDVR}
+ neutron::server::enable_dvr: {get_param: NeutronEnableDVR}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
- 'localhost'
- {get_param: [ServiceNetMap, NeutronApiNetwork]}
tripleo::profile::base::neutron::server::l3_ha_override: {get_param: NeutronL3HA}
+ -
+ if:
+ - neutron_workers_unset
+ - {}
+ - neutron::server::api_workers: {get_param: NeutronWorkers}
+ neutron::server::rpc_workers: {get_param: NeutronWorkers}
step_config: |
include tripleo::profile::base::neutron::server
service_config_settings:
tags: step1
when: neutron_server_enabled.rc == 0
service: name=neutron-server state=stopped
+ metadata_settings:
+ get_attr: [TLSProxyBase, role_data, metadata_settings]
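The neutron_workers_unset condition introduced above only injects api_workers/rpc_workers into hiera when NeutronWorkers is non-empty, so the service's own packaged defaults apply otherwise. A minimal sketch of pinning the worker count from a plain parameter_defaults environment file; the value is only an example.

# Hypothetical environment file entry.
parameter_defaults:
  NeutronWorkers: '4'   # applied to both neutron::server::api_workers and rpc_workers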
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron base service. Shared for all Neutron agents.
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ DatabaseSyncTimeout:
+ default: 300
+ description: Default timeout, in seconds, for database sync operations.
+ type: number
NeutronDhcpAgentsPerNetwork:
type: number
default: 0
description: The number of neutron dhcp agents to schedule per network
+ NeutronDnsDomain:
+ type: string
+ default: openstacklocal
+ description: Domain to use for building the hostnames.
NeutronCorePlugin:
default: 'ml2'
description: |
description: Set to True to enable debugging on all services.
EnableConfigPurge:
type: boolean
- default: true
+ default: false
description: >
- Remove configuration that is not generated by TripleO. Setting
- to false may result in configuration remnants after updates/upgrades.
+ Remove configuration that is not generated by TripleO. Used to avoid
+ configuration remnants after upgrades.
NeutronGlobalPhysnetMtu:
type: number
default: 1500
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
neutron::debug: {get_param: Debug}
neutron::purge_config: {get_param: EnableConfigPurge}
neutron::allow_overlapping_ips: true
+ neutron::dns_domain: {get_param: NeutronDnsDomain}
neutron::rabbit_heartbeat_timeout_threshold: 60
neutron::host: '%{::fqdn}'
neutron::db::database_db_max_retries: -1
neutron::db::database_max_retries: -1
+ neutron::db::sync::db_sync_timeout: {get_param: DatabaseSyncTimeout}
neutron::global_physnet_mtu: {get_param: NeutronGlobalPhysnetMtu}
- if:
- dhcp_agents_zero
--- /dev/null
+heat_template_version: pike
+
+description: >
+ BGPVPN API service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ BgpvpnServiceProvider:
+ default: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
+ description: Backend to use as a service provider for BGPVPN
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the BGPVPN role.
+ value:
+ service_name: neutron_bgpvpn_api
+ config_settings:
+ neutron::services::bgpvpn::service_providers: {get_param: BgpvpnServiceProvider}
+ step_config: |
+ include ::tripleo::profile::base::neutron::bgpvpn
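Enabling the new neutron_bgpvpn_api service still requires a resource_registry mapping in an environment file; the registry key and template path below follow the usual OS::TripleO::Services naming but are assumptions, since this fragment shows only the service template itself.

# Hypothetical environment file enabling the BGPVPN API service.
resource_registry:
  OS::TripleO::Services::NeutronBgpVpnApi: ../puppet/services/neutron-bgpvpn-api.yaml  # assumed key/path
parameter_defaults:
  # Default Dummy driver from the template; replace with a real backend driver.
  BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'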
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Installs bigswitch agent and enables the services
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+
+outputs:
+ role_data:
+ description: Configure the bigswitch agent services
+ value:
+ service_name: neutron_bigswitch_agent
+ step_config: |
+ include ::tripleo::profile::base::neutron::agents::bigswitch
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute Midonet plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute Nuage plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: The password for the nova service account, used by nova-api.
type: string
hidden: true
+ NuageMetadataPort:
+ description: TCP port on which to listen for metadata server requests
+ type: string
+ default: '9697'
outputs:
role_data:
tripleo::profile::base::neutron::agents::nuage::nova_os_tenant_name: 'service'
tripleo::profile::base::neutron::agents::nuage::nova_os_password: {get_param: NovaPassword}
tripleo::profile::base::neutron::agents::nuage::nova_auth_ip: {get_param: [EndpointMap, KeystoneInternal, host]}
+ tripleo.neutron_compute_plugin_nuage.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '100 metadata agent':
+ dport: {get_param: NuageMetadataPort}
step_config: |
include ::tripleo::profile::base::neutron::agents::nuage
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute OVN agent
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
ovn::controller::ovn_encap_type: {get_param: OVNTunnelEncapType}
ovn::controller::ovn_encap_ip: {get_param: [ServiceNetMap, NeutronApiNetwork]}
ovn::controller::ovn_bridge_mappings: {get_param: NeutronBridgeMappings}
+ nova::compute::force_config_drive: true
tripleo.neutron_compute_plugin_ovn.firewall_rules:
'118 neutron vxlan networks':
proto: 'udp'
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute Plumgrid plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron DHCP agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ L2 Gateway agent configured with Puppet
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ L2gwAgentOvsdbHosts:
+ default: ''
+ description: L2 gateway agent OVSDB server list.
+ type: comma_delimited_list
+ L2gwAgentEnableManager:
+ default: false
+ description: If true, connections can be initiated by the OVSDB server.
+ type: boolean
+ L2gwAgentManagerTableListeningPort:
+ default: 6632
+ description: Port number on which the L2 gateway agent listens for incoming OVSDB connections.
+ type: number
+ L2gwAgentPeriodicInterval:
+ default: 20
+ description: The L2 gateway agent checks connection state with the OVSDB
+ servers. The interval is the number of seconds between attempts.
+ type: number
+ L2gwAgentMaxConnectionRetries:
+ default: 10
+ description: Maximum number of times the L2 gateway agent retries connecting to the OVSDB server.
+ type: number
+ L2gwAgentSocketTimeout:
+ default: 30
+ description: Socket timeout for connections to the OVSDB servers.
+ type: number
+ MonitoringSubscriptionNeutronL2gwAgent:
+ default: 'overcloud-neutron-l2gw-agent'
+ type: string
+ NeutronL2gwAgentLoggingSource:
+ type: json
+ default:
+ tag: openstack.neutron.agent.l2gw
+ path: /var/log/neutron/l2gw-agent.log
+
+conditions:
+ internal_manager_enabled: {equals: [{get_param: L2gwAgentEnableManager}, True]}
+
+outputs:
+ role_data:
+ description: Role data for the L2 Gateway role.
+ value:
+ service_name: neutron_l2gw_agent
+ monitoring_subscription: {get_param: MonitoringSubscriptionNeutronL2gwAgent}
+ logging_source: {get_param: NeutronL2gwAgentLoggingSource}
+ logging_groups:
+ - neutron
+ config_settings:
+ map_merge:
+ - neutron::agents::l2gw::ovsdb_hosts: {get_param: L2gwAgentOvsdbHosts}
+ neutron::agents::l2gw::enable_manager: {get_param: L2gwAgentEnableManager}
+ neutron::agents::l2gw::manager_table_listening_port: {get_param: L2gwAgentManagerTableListeningPort}
+ neutron::agents::l2gw::periodic_interval: {get_param: L2gwAgentPeriodicInterval}
+ neutron::agents::l2gw::max_connection_retries: {get_param: L2gwAgentMaxConnectionRetries}
+ neutron::agents::l2gw::socket_timeout: {get_param: L2gwAgentSocketTimeout}
+ -
+ if:
+ - internal_manager_enabled
+ - tripleo.neutron_l2gw_agent.firewall_rules:
+ '142 neutron l2gw agent input':
+ proto: 'tcp'
+ dport: {get_param: L2gwAgentManagerTableListeningPort}
+ - null
+
+ step_config: |
+ include tripleo::profile::base::neutron::agents::l2gw
+ upgrade_tasks:
+ - name: Check if neutron_l2gw_agent is deployed
+ command: systemctl is-enabled neutron-l2gw-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_l2gw_agent_enabled
+ - name: "PreUpgrade step0,validation: Check service neutron-l2gw-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-l2gw-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_l2gw_agent_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop neutron_l2gw_agent service
+ tags: step1
+ when: neutron_l2gw_agent_enabled.rc == 0
+ service: name=neutron-l2gw-agent state=stopped
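The L2 gateway agent parameters above map one-to-one onto neutron::agents::l2gw::* hiera keys. A minimal deployer-facing sketch follows; the OVSDB endpoint entries are placeholders, not values from this patch.

# Hypothetical environment file for the L2 gateway agent.
parameter_defaults:
  # Placeholder ovsdb_name:ip:port entries; substitute real switch endpoints.
  L2gwAgentOvsdbHosts: ['ovsdb1:192.0.2.20:6632', 'ovsdb2:192.0.2.21:6632']
  L2gwAgentEnableManager: false        # agent initiates connections to the OVSDB servers
  L2gwAgentPeriodicInterval: 20        # seconds between connection-state checks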
--- /dev/null
+heat_template_version: pike
+
+description: >
+ L2 Gateway service plugin configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ L2gwServiceDefaultInterfaceName:
+ default: 'FortyGigE1/0/1'
+ description: default interface name of the L2 gateway
+ type: string
+ L2gwServiceDefaultDeviceName:
+ default: 'Switch1'
+ description: default device name of the L2 gateway
+ type: string
+ L2gwServiceQuotaL2Gateway:
+ default: 5
+ description: Quota for the number of L2 gateways allowed per tenant.
+ type: number
+ L2gwServicePeriodicMonitoringInterval:
+ default: 5
+ description: The periodic interval at which the plugin performs monitoring checks.
+ type: number
+ L2gwServiceProvider:
+ default: ["L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default"]
+ description: Backend to use as a service provider for L2 Gateway
+ type: comma_delimited_list
+
+outputs:
+ role_data:
+ description: Role data for the L2 Gateway role.
+ value:
+ service_name: neutron_l2gw_api
+ config_settings:
+ neutron::services::l2gw::default_interface_name: {get_param: L2gwServiceDefaultInterfaceName}
+ neutron::services::l2gw::default_device_name: {get_param: L2gwServiceDefaultDeviceName}
+ neutron::services::l2gw::quota_l2_gateway: {get_param: L2gwServiceQuotaL2Gateway}
+ neutron::services::l2gw::periodic_monitoring_interval: {get_param: L2gwServicePeriodicMonitoringInterval}
+ neutron::services::l2gw::service_providers: {get_param: L2gwServiceProvider}
+ step_config: |
+ include tripleo::profile::base::neutron::l2gw
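The service-plugin side is configured the same way; a short sketch overriding a couple of the defaults above, with illustrative values only.

# Hypothetical environment file for the L2 gateway service plugin.
parameter_defaults:
  L2gwServiceDefaultDeviceName: 'Switch1'
  L2gwServiceQuotaL2Gateway: 10        # example quota override
  L2gwServiceProvider: ['L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default']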
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron L3 agent for DVR enabled compute nodes
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron L3 agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Metadata agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
tag: openstack.neutron.agent.metadata
path: /var/log/neutron/metadata-agent.log
+conditions:
+ neutron_workers_unset: {equals : [{get_param: NeutronWorkers}, '']}
+
resources:
NeutronBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- neutron::agents::metadata::shared_secret: {get_param: NeutronMetadataProxySharedSecret}
- neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
neutron::agents::metadata::auth_tenant: 'service'
neutron::agents::metadata::metadata_ip: "%{hiera('nova_metadata_vip')}"
+ -
+ if:
+ - neutron_workers_unset
+ - {}
+ - neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
step_config: |
include tripleo::profile::base::neutron::metadata
upgrade_tasks:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Midonet plugin and services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron OVS agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+ OpenVswitchUpgrade:
+ type: ./openvswitch-upgrade.yaml
outputs:
role_data:
step_config: |
include ::tripleo::profile::base::neutron::ovs
upgrade_tasks:
- - name: Check if neutron_ovs_agent is deployed
- command: systemctl is-enabled neutron-openvswitch-agent
- tags: common
- ignore_errors: True
- register: neutron_ovs_agent_enabled
- - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
- shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
- when: neutron_ovs_agent_enabled.rc == 0
- tags: step0,validation
- - name: Stop neutron_ovs_agent service
- tags: step1
- when: neutron_ovs_agent_enabled.rc == 0
- service: name=neutron-openvswitch-agent state=stopped
+ yaql:
+ expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
+ data:
+ ovs_upgrade:
+ get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ neutron_ovs_upgrade:
+ - name: Check if neutron_ovs_agent is deployed
+ command: systemctl is-enabled neutron-openvswitch-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_ovs_agent_enabled
+ - name: "PreUpgrade step0,validation: Check service neutron-openvswitch-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-openvswitch-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_ovs_agent_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop neutron_ovs_agent service
+ tags: step1
+ when: neutron_ovs_agent_enabled.rc == 0
+ service: name=neutron-openvswitch-agent state=stopped
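The rewritten upgrade_tasks above use yaql to splice the shared Open vSwitch upgrade tasks (from openvswitch-upgrade.yaml) in front of the agent-specific ones. A stripped-down sketch of the same pattern, with placeholder task lists, shows what the expression evaluates to.

# Minimal sketch of the yaql list-concatenation pattern used above.
# The task names are placeholders, not the real upgrade tasks.
upgrade_tasks:
  yaql:
    expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
    data:
      ovs_upgrade:
        - name: placeholder OVS package upgrade task
          tags: step2
      neutron_ovs_upgrade:
        - name: placeholder agent stop task
          tags: step1
# Result: one flat list with the OVS tasks first, then the agent tasks.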
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron OVS DPDK configured with Puppet for Compute Role
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+ OpenVswitchUpgrade:
+ type: ./openvswitch-upgrade.yaml
outputs:
role_data:
service_name: neutron_ovs_dpdk_agent
config_settings:
map_merge:
- - get_attr: [NeutronOvsAgent, role_data, config_settings]
+ - map_replace:
+ - get_attr: [NeutronOvsAgent, role_data, config_settings]
+ - keys:
+ tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory}
vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType}
step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
+ upgrade_tasks:
+ get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
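For the DPDK agent the inherited config_settings are run through map_replace so the firewall rules land under a service-specific hiera key. A reduced sketch of that transformation, with made-up rule content, illustrates the rename.

# Sketch of the map_replace key rename (values are placeholders).
map_replace:
  - tripleo.neutron_ovs_agent.firewall_rules:
      '118 neutron vxlan networks': {proto: udp, dport: 4789}
    neutron::agents::ml2::ovs::local_ip: 192.0.2.10
  - keys:
      tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
# Result: the same map, with the firewall_rules entry renamed to
# tripleo.neutron_ovs_dpdk_agent.firewall_rules; other keys are untouched.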
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configure hieradata for Fujitsu C-Fabric plugin configuration
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Fujitsu fossw plugin configuration
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Neutron ML2/OpenDaylight plugin configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OpenDaylightPortBindingController:
+ description: OpenDaylight port binding controller
+ type: string
+ default: 'network-topology'
+
+resources:
+
+ NeutronMl2Base:
+ type: ./neutron-plugin-ml2.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron ML2/ODL plugin.
+ value:
+ service_name: neutron_plugin_ml2_odl
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronMl2Base, role_data, config_settings]
+ - neutron::plugins::ml2::opendaylight::port_binding_controller: {get_param: OpenDaylightPortBindingController}
+ step_config: |
+ include ::tripleo::profile::base::neutron::plugins::ml2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron ML2/OVN plugin configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron ML2 Plugin configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Neutron NSX
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DefaultOverlayTz:
+ description: UUID of the default NSX overlay transport zone.
+ type: string
+ DefaultTier0Router:
+ description: UUID of the default tier0 router that will be used for connecting to
+ tier1 logical routers and configuring external networks.
+ type: string
+ NsxApiManagers:
+ description: IP address of one or more NSX managers separated by commas.
+ type: string
+ NsxApiUser:
+ description: User name of NSX Manager.
+ type: string
+ NsxApiPassword:
+ description: Password of NSX Manager.
+ type: string
+ NativeDhcpMetadata:
+ default: True
+ description: Flag indicating whether to use native DHCP/Metadata services.

+ type: string
+ DhcpProfileUuid:
+ description: This is the UUID of the NSX DHCP Profile that will be used to enable
+ native DHCP service.
+ type: string
+ MetadataProxyUuid:
+ description: This is the UUID of the NSX Metadata Proxy that will be used to enable
+ native metadata service.
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the Neutron NSX plugin
+ value:
+ service_name: neutron_plugin_nsx
+ config_settings:
+ neutron::plugins::nsx_v3::default_overlay_tz: {get_param: DefaultOverlayTz}
+ neutron::plugins::nsx_v3::default_tier0_router: {get_param: DefaultTier0Router}
+ neutron::plugins::nsx_v3::nsx_api_managers: {get_param: NsxApiManagers}
+ neutron::plugins::nsx_v3::nsx_api_user: {get_param: NsxApiUser}
+ neutron::plugins::nsx_v3::nsx_api_password: {get_param: NsxApiPassword}
+ neutron::plugins::nsx_v3::native_dhcp_metadata: {get_param: NativeDhcpMetadata}
+ neutron::plugins::nsx_v3::dhcp_profile_uuid: {get_param: DhcpProfileUuid}
+ neutron::plugins::nsx_v3::metadata_proxy_uuid: {get_param: MetadataProxyUuid}
+
+ step_config: |
+ include tripleo::profile::base::neutron::plugins::nsx_v3
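Most of the NSX parameters above carry no defaults, so an environment file has to supply them. A minimal sketch with placeholder UUIDs, addresses, and credentials follows; none of these values come from the patch.

# Hypothetical environment file for the Neutron NSX plugin.
parameter_defaults:
  NsxApiManagers: '192.0.2.50,192.0.2.51'                     # placeholder manager IPs
  NsxApiUser: 'admin'
  NsxApiPassword: 'replace-me'
  DefaultOverlayTz: '11111111-2222-3333-4444-555555555555'    # placeholder UUID
  DefaultTier0Router: '66666666-7777-8888-9999-000000000000'  # placeholder UUID
  DhcpProfileUuid: 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'     # placeholder UUID
  MetadataProxyUuid: 'ffffffff-0000-1111-2222-333333333333'   # placeholder UUID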
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Nuage plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
# Config specific parameters, to be provided via parameter_defaults
- NeutronNuageOSControllerIp:
- description: IP address of the OpenStack Controller
- type: string
-
NeutronNuageNetPartitionName:
description: Specifies the title that you will see on the VSD
type: string
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::plugins::nuage::nuage_oscontroller_ip: {get_param: NeutronNuageOSControllerIp}
- neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName}
+ - neutron::plugins::nuage::nuage_net_partition_name: {get_param: NeutronNuageNetPartitionName}
neutron::plugins::nuage::nuage_vsd_ip: {get_param: NeutronNuageVSDIp}
neutron::plugins::nuage::nuage_vsd_username: {get_param: NeutronNuageVSDUsername}
neutron::plugins::nuage::nuage_vsd_password: {get_param: NeutronNuageVSDPassword}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Plumgrid plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: neutron_plugin_plumgrid
config_settings:
neutron::plugins::plumgrid::connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://neutron:'
- - {get_param: NeutronPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ovs_neutron'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: neutron
+ password: {get_param: NeutronPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ovs_neutron
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneInternal, host]}
neutron::plugins::plumgrid::admin_password: {get_param: AdminPassword}
neutron::plugins::plumgrid::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron SR-IOV nic agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Neutron ML2/VPP agent configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: >
+ Mapping of service_name -> network name. Typically set via
+ parameter_defaults in the resource registry. This mapping overrides those
+ in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronVPPAgentPhysnets:
+ description: >
+ List of <physical_network>:<VPP Interface>
+ Example: "physnet1:GigabitEthernet2/2/0,physnet2:GigabitEthernet2/3/0"
+ type: comma_delimited_list
+ default: ""
+
+resources:
+
+ NeutronBase:
+ type: ./neutron-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron ML2/VPP agent service.
+ value:
+ service_name: neutron_vpp_agent
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - tripleo::profile::base::neutron::agents::vpp::physnet_mapping: {get_param: NeutronVPPAgentPhysnets}
+ step_config: |
+ include ::tripleo::profile::base::neutron::agents::vpp
\ No newline at end of file
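The VPP agent's only tunable here is the physnet-to-interface mapping, passed straight through to tripleo::profile::base::neutron::agents::vpp::physnet_mapping. A minimal sketch, reusing the interface names from the parameter's own example.

# Hypothetical environment file for the ML2/VPP agent.
parameter_defaults:
  NeutronVPPAgentPhysnets: 'physnet1:GigabitEthernet2/2/0,physnet2:GigabitEthernet2/3/0'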
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 300
description: Timeout for Nova db sync
type: number
+ NovaApiPolicies:
+ description: |
+ A hash of policies to configure for Nova API.
+ e.g. { nova-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
conditions:
nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
# ServiceNetMap: {get_param: ServiceNetMap}
# DefaultPasswords: {get_param: DefaultPasswords}
# EndpointMap: {get_param: EndpointMap}
+ # RoleName: {get_param: RoleName}
+ # RoleParameters: {get_param: RoleParameters}
# EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- 13774
- 8775
nova::keystone::authtoken::project_name: 'service'
+ nova::keystone::authtoken::user_domain_name: 'Default'
+ nova::keystone::authtoken::project_domain_name: 'Default'
nova::keystone::authtoken::password: {get_param: NovaPassword}
- nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ nova::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
nova::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
nova::api::enabled: true
nova::api::default_floating_pool: {get_param: NovaDefaultFloatingPool}
nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
nova::api::instance_name_template: {get_param: InstanceNameTemplate}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
+ nova::policy::policies: {get_param: NovaApiPolicies}
-
if:
- nova_workers_zero
- name: Run puppet apply to set tranport_url in nova.conf
tags: step5
when: is_bootstrap_node
- command: puppet apply --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+ command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
register: puppet_apply_nova_api_upgrade
failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
changed_when: puppet_apply_nova_api_upgrade.rc == 2
- name: Setup cell_v2 (map cell0)
tags: step5
when: is_bootstrap_node
- command: nova-manage cell_v2 map_cell0
+ shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
- name: Setup cell_v2 (create default cell)
tags: step5
when: is_bootstrap_node
command: nova-manage db sync
async: {get_param: NovaDbSyncTimeout}
poll: 10
- - name: Setup cell_v2 (migrate hosts)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 map_cell_and_hosts
- name: Setup cell_v2 (get cell uuid)
tags: step5
when: is_bootstrap_node
shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
register: nova_api_cell_uuid
+ - name: Setup cell_v2 (migrate hosts)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
- name: Setup cell_v2 (migrate instances)
tags: step5
when: is_bootstrap_node
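The reordered upgrade tasks above now look up the cell UUID before discovering hosts, and pass the cell0 connection explicitly when mapping cell0. Condensed into one Ansible task list as a sketch: only the commands shown in this patch are included, and the surrounding tags/when plumbing is simplified.

# Condensed sketch of the cell_v2 bootstrap sequence from the upgrade tasks above.
- name: Map cell0 with an explicit database connection
  shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
- name: Look up the UUID of the default cell
  shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
  register: nova_api_cell_uuid
- name: Discover compute hosts in that cell
  command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose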
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova base service. Shared for all Nova services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ DatabaseSyncTimeout:
+ default: 300
+ description: Default timeout, in seconds, for database sync operations.
+ type: number
Debug:
type: string
default: ''
description: Set to True to enable debugging on all services.
EnableConfigPurge:
type: boolean
- default: true
+ default: false
description: >
- Remove configuration that is not generated by TripleO. Setting
- to false may result in configuration remnants after updates/upgrades.
+ Remove configuration that is not generated by TripleO. Used to avoid
+ configuration remnants after upgrades.
NovaIPv6:
default: false
description: Enable IPv6 features in Nova
nova::placement::os_region_name: {get_param: KeystoneRegion}
nova::placement::os_interface: {get_param: NovaPlacementAPIInterface}
nova::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: nova
+ password: {get_param: NovaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /nova
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
+ nova::cell0_database_connection:
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: nova
+ password: {get_param: NovaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /nova_cell0
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
nova::api_database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova_api:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova_api'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: nova_api
+ password: {get_param: NovaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /nova_api
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
nova::placement_database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova_placement:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova_placement'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: nova_placement
+ password: {get_param: NovaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /nova_placement
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
nova::debug: {get_param: Debug}
nova::purge_config: {get_param: EnableConfigPurge}
nova::network::neutron::neutron_project_name: 'service'
nova::network::neutron::neutron_auth_type: 'v3password'
nova::db::database_db_max_retries: -1
nova::db::database_max_retries: -1
+ nova::db::sync::db_sync_timeout: {get_param: DatabaseSyncTimeout}
+ nova::db::sync_api::db_sync_timeout: {get_param: DatabaseSyncTimeout}
nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
nova::use_ipv6: {get_param: NovaIPv6}
nova::network::neutron::neutron_ovs_bridge: {get_param: NovaOVSBridge}
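The make_url blocks above replace the hand-assembled list_join URLs; for the new cell0 entry, which the map_cell0 upgrade task reads back via hiera, the function composes the same style of connection string from its parts. A worked example of the expansion, assuming placeholder scheme, password, and host values:

# Worked example of the make_url expansion used above for cell0.
# 'mysql+pymysql', 'secret', and '192.0.2.10' are placeholder inputs.
nova::cell0_database_connection:
  make_url:
    scheme: mysql+pymysql
    username: nova
    password: secret
    host: 192.0.2.10
    path: /nova_cell0
    query:
      read_default_file: /etc/my.cnf.d/tripleo.cnf
      read_default_group: tripleo
# Expands to a connection string along these lines:
#   mysql+pymysql://nova:secret@192.0.2.10/nova_cell0?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo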
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Compute service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
For different formats, refer to the nova.conf documentation for
pci_passthrough_whitelist configuration
type: json
- default: {}
+ default: ''
NovaVcpuPinSet:
description: >
A list or range of physical CPU cores to reserve for virtual machine
type: string
description: Nova Compute upgrade level
default: auto
+ MigrationSshKey:
+ type: json
+ description: >
+ SSH key for migration.
+ Expects a dictionary with keys 'public_key' and 'private_key'.
+ Values should be identical to the contents of the SSH public/private key files.
+ default: {}
resources:
NovaBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- nova::compute::libvirt::manage_libvirt_services: false
- nova::compute::pci_passthrough: {get_param: NovaPCIPassthrough}
+ nova::compute::pci_passthrough:
+ str_replace:
+ template: "JSON_PARAM"
+ params:
+ JSON_PARAM: {get_param: NovaPCIPassthrough}
nova::compute::vcpu_pin_set: {get_param: NovaVcpuPinSet}
nova::compute::reserved_host_memory: {get_param: NovaReservedHostMemory}
# we manage migration in nova common puppet profile
nova::compute::libvirt::migration_support: false
tripleo::profile::base::nova::manage_migration: true
+ tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
+ tripleo::profile::base::nova::migration_ssh_localaddrs:
+ - "%{hiera('cold_migration_ssh_inbound_addr')}"
+ - "%{hiera('live_migration_ssh_inbound_addr')}"
+ live_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ cold_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaColdMigrationNetwork]}
tripleo::profile::base::nova::nova_compute_enabled: true
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
params:
LEVEL: {get_param: UpgradeLevelNovaCompute}
+ - name: install openstack-nova-migration
+ tags: step3
+ yum: name=openstack-nova-migration state=latest
- name: Start nova-compute service
tags: step6
service: name=openstack-nova-compute state=started
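The NovaPCIPassthrough default changes from {} to '' and the value is now wrapped in str_replace with a JSON_PARAM placeholder, so whatever JSON the deployer provides reaches hiera as a serialized string. A minimal sketch of a deployer-supplied value; the device name and physical network are placeholders.

# Hypothetical environment file entry.
parameter_defaults:
  NovaPCIPassthrough:
    - devname: "ens2f1"                  # placeholder NIC name
      physical_network: "physnet_sriov"  # placeholder physnet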
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Conductor service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Consoleauth service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Compute service configured with Puppet and using Ironic
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
nova::compute::vnc_enabled: false
nova::ironic::common::password: {get_param: IronicPassword}
nova::ironic::common::project_name: 'service'
- nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ nova::ironic::common::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
nova::ironic::common::username: 'ironic'
nova::ironic::common::api_endpoint: {get_param: [EndpointMap, IronicInternal, uri]}
nova::network::neutron::dhcp_domain: ''
nova::scheduler::filter::scheduler_host_manager: 'ironic_host_manager'
step_config: |
include tripleo::profile::base::nova::compute::ironic
+ upgrade_tasks:
+ - name: Stop openstack-nova-compute service
+ tags: step1
+ service: name=openstack-nova-compute state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
Libvirt service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
MonitoringSubscriptionNovaLibvirt:
default: 'overcloud-nova-libvirt'
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ UseTLSTransportForLiveMigration:
+ type: boolean
+ default: true
+ description: If set to true and if EnableInternalTLS is enabled, it will
+ set the libvirt URI's transport to tls and configure the
+ relevant keys for libvirt.
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
+ LibvirtCACert:
+ type: string
+ default: ''
+ description: This specifies the CA certificate to use for TLS in libvirt.
+ This file will be symlinked to the default CA path in libvirt,
+ which is /etc/pki/CA/cacert.pem. Note that due to limitations
+ in GNU TLS, which is the TLS backend for libvirt, the file must
+ be less than 65K (so we can't use the system's CA bundle).
+ This parameter should be used if the default (which comes from
+ the InternalTLSCAFile parameter) is not desired. The current
+ default reflects TripleO's default CA, which is FreeIPA.
+ It will only be used if internal TLS is enabled.
+
+conditions:
+
+ use_tls_for_live_migration:
+ and:
+ - equals:
+ - {get_param: EnableInternalTLS}
+ - true
+ - equals:
+ - {get_param: UseTLSTransportForLiveMigration}
+ - true
+
+ libvirt_specific_ca_unset:
+ equals:
+ - {get_param: LibvirtCACert}
+ - ''
resources:
NovaBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
tripleo.nova_libvirt.firewall_rules:
'200 nova_libvirt':
dport:
- - 16509
- 16514
- '49152-49215'
- '5900-5999'
+ -
+ if:
+ - use_tls_for_live_migration
+ -
+ generate_service_certificates: true
+ tripleo::profile::base::nova::libvirt_tls: true
+ nova::migration::libvirt::live_migration_inbound_addr:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ tripleo::certmonger::ca::libvirt::origin_ca_pem:
+ if:
+ - libvirt_specific_ca_unset
+ - get_param: InternalTLSCAFile
+ - get_param: LibvirtCACert
+ tripleo::certmonger::libvirt_dirs::certificate_dir: '/etc/pki/libvirt'
+ tripleo::certmonger::libvirt_dirs::key_dir: '/etc/pki/libvirt/private'
+ libvirt_certificates_specs:
+ libvirt-server-cert:
+ service_certificate: '/etc/pki/libvirt/servercert.pem'
+ service_key: '/etc/pki/libvirt/private/serverkey.pem'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ principal:
+ str_replace:
+ template: "libvirt/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ libvirt-client-cert:
+ service_certificate: '/etc/pki/libvirt/clientcert.pem'
+ service_key: '/etc/pki/libvirt/private/clientkey.pem'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ principal:
+ str_replace:
+ template: "libvirt/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ - {}
step_config: |
include tripleo::profile::base::nova::libvirt
+ metadata_settings:
+ if:
+ - use_tls_for_live_migration
+ -
+ - service: libvirt
+ network: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ type: node
+ - null
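A minimal sketch of how these new TLS knobs are meant to be driven from an environment file; the file name is hypothetical and only the parameters introduced above are assumed:

    # hypothetical environment file, e.g. libvirt-tls-example.yaml
    parameter_defaults:
      EnableInternalTLS: true
      UseTLSTransportForLiveMigration: true
      # Only set this when the CA differs from InternalTLSCAFile (/etc/ipa/ca.crt):
      # LibvirtCACert: /etc/pki/custom/libvirt-ca.pem

With both parameters true, the use_tls_for_live_migration condition holds and the if intrinsic merges the certificate specs into config_settings; otherwise the empty map {} is merged and nothing changes.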
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Placement API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Scheduler service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Vncproxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia API service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default:
tag: openstack.octavia.api
path: /var/log/octavia/api.log
+ OctaviaApiPolicies:
+ description: |
+ A hash of policies to configure for Octavia API.
+ e.g. { octavia-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [OctaviaBase, role_data, config_settings]
- octavia::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ octavia::policy::policies: {get_param: OctaviaApiPolicies}
octavia::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://octavia:'
- - {get_param: OctaviaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/octavia'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: octavia
+ password: {get_param: OctaviaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /octavia
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
octavia::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
octavia::keystone::authtoken::project_name: 'service'
octavia::keystone::authtoken::password: {get_param: OctaviaPassword}
- 9876
- 13876
octavia::api::host: {get_param: [ServiceNetMap, OctaviaApiNetwork]}
- neutron::server::service_providers: ['LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default']
step_config: |
include tripleo::profile::base::octavia::api
service_config_settings:
octavia::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ neutron_api:
+ neutron::server::service_providers: ['LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default']
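As a rough illustration of the make_url change above, and assuming the MysqlInternal endpoint resolves to the usual mysql+pymysql scheme and an example host, the composed connection string is equivalent to what the removed list_join produced:

    # illustrative only; scheme and host come from EndpointMap, the password from OctaviaPassword
    mysql+pymysql://octavia:<OctaviaPassword>@<mysql-host>/octavia?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo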
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia base service. Shared for all Octavia services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: Set to True to enable debugging on all services.
EnableConfigPurge:
type: boolean
- default: true
+ default: false
description: >
- Remove configuration that is not generated by TripleO. Setting
- to false may result in configuration remnants after updates/upgrades.
+ Remove configuration that is not generated by TripleO. Used to avoid
+ configuration remnants after upgrades.
RabbitPassword:
description: The password for RabbitMQ
type: string
octavia::debug: {get_param: Debug}
octavia::purge_config: {get_param: EnableConfigPurge}
octavia::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
- tripleo::profile::base::octavia::rabbit_user: {get_param: RabbitUserName}
- tripleo::profile::base::octavia::rabbit_password: {get_param: RabbitPassword}
- tripleo::profile::base::octavia::rabbit_port: {get_param: RabbitClientPort}
+ octavia::rabbit_userid: {get_param: RabbitUserName}
+ octavia::rabbit_password: {get_param: RabbitPassword}
+ octavia::rabbit_port: {get_param: RabbitClientPort}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia Health Manager service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia Housekeeping service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia Worker service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenDaylight SDN Controller.
OpenDaylightFeatures:
description: List of features to install with ODL
type: comma_delimited_list
- default: ["odl-netvirt-openstack","odl-netvirt-ui"]
+ default: ["odl-netvirt-openstack","odl-netvirt-ui","odl-jolokia"]
OpenDaylightApiVirtualIP:
type: string
default: ''
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
outputs:
role_data:
opendaylight::extra_features: {get_param: OpenDaylightFeatures}
opendaylight::enable_dhcp: {get_param: OpenDaylightEnableDHCP}
opendaylight::odl_bind_ip: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
- opendaylight::nb_connection_protocol: {get_param: OpenDayLightConnectionProtocol}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
tripleo.opendaylight_api.firewall_rules:
'137 opendaylight api':
dport:
- {get_param: OpenDaylightPort}
- 6640
- 6653
+ - 2550
step_config: |
include tripleo::profile::base::neutron::opendaylight
+ upgrade_tasks:
+ - name: Check if opendaylight is deployed
+ command: systemctl is-enabled opendaylight
+ tags: common
+ ignore_errors: True
+ register: opendaylight_enabled
+ - name: "PreUpgrade step0,validation: Check service opendaylight is running"
+ shell: /usr/bin/systemctl show 'opendaylight' --property ActiveState | grep '\bactive\b'
+ when: opendaylight_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop opendaylight service
+ tags: step1
+ when: opendaylight_enabled.rc == 0
+ service: name=opendaylight state=stopped
+ - name: Remove ODL snapshots, data, and journal directories
+ file:
+ state: absent
+ path: /opt/opendaylight/{{item}}
+ tags: step2
+ with_items:
+ - snapshots
+ - data
+ - journal
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenDaylight OVS Configuration.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+ OpenVswitchUpgrade:
+ type: ./openvswitch-upgrade.yaml
outputs:
role_data:
opendaylight_check_url: {get_param: OpenDaylightCheckURL}
opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::plugins::ovs::opendaylight::provider_mappings:
- str_replace:
- template: MAPPINGS
- params:
- MAPPINGS: {get_param: OpenDaylightProviderMappings}
+ neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
tripleo.opendaylight_ovs.firewall_rules:
'118 neutron vxlan networks':
proto: 'udp'
proto: 'gre'
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
+ upgrade_tasks:
+ yaql:
+ expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
+ data:
+ ovs_upgrade:
+ get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ opendaylight_upgrade:
+ - name: Check if openvswitch is deployed
+ command: systemctl is-enabled openvswitch
+ tags: common
+ ignore_errors: True
+ register: openvswitch_enabled
+ - name: "PreUpgrade step0,validation: Check service openvswitch is running"
+ shell: /usr/bin/systemctl show 'openvswitch' --property ActiveState | grep '\bactive\b'
+ when: openvswitch_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop openvswitch service
+ tags: step1
+ when: openvswitch_enabled.rc == 0
+ service: name=openvswitch state=stopped
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Openvswitch package special handling for upgrade.
+
+outputs:
+ role_data:
+ description: Upgrade tasks for special handling of the Openvswitch (OVS) upgrade.
+ value:
+ service_name: openvswitch_upgrade
+ upgrade_tasks:
+ - name: Check openvswitch version.
+ tags: step2
+ register: ovs_version
+ ignore_errors: true
+ shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+ - name: Check openvswitch packaging.
+ tags: step2
+ shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+ register: ovs_packaging_issue
+ ignore_errors: true
+ - block:
+ - name: "Ensure empty directory: emptying."
+ file:
+ state: absent
+ path: /root/OVS_UPGRADE
+ - name: "Ensure empty directory: creating."
+ file:
+ state: directory
+ path: /root/OVS_UPGRADE
+ owner: root
+ group: root
+ mode: 0750
+ - name: Download OVS packages.
+ command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+ - name: Get rpm list for manual upgrade of OVS.
+ shell: ls -1 /root/OVS_UPGRADE/*.rpm
+ register: ovs_list_of_rpms
+ - name: Manual upgrade of OVS
+ shell: |
+ rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+ rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+ args:
+ chdir: /root/OVS_UPGRADE
+ with_items:
+ - "{{ovs_list_of_rpms.stdout_lines}}"
+ tags: step2
+ when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+ or
+ ovs_packaging_issue|succeeded"
-heat_template_version: ocata
+heat_template_version: pike
description: >
OVN databases configured with puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ovn::northbound::port: {get_param: OVNNorthboundServerPort}
ovn::southbound::port: {get_param: OVNSouthboundServerPort}
ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+ tripleo.ovn_dbs.firewall_rules:
+ '121 OVN DB server ports':
+ proto: 'tcp'
+ dport:
+ - {get_param: OVNNorthboundServerPort}
+ - {get_param: OVNSouthboundServerPort}
step_config: |
include ::tripleo::profile::base::neutron::ovn_northd
-heat_template_version: ocata
+heat_template_version: pike
description: >
Pacemaker service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
\[(?<pid>[^ ]*)\]
(?<host>[^ ]*)
(?<message>.*)$/
+
+ EnableLoadBalancer:
+ default: true
+ description: Whether to deploy a LoadBalancer on the Controller
+ type: boolean
+
PacemakerResources:
type: comma_delimited_list
description: List of resources managed by pacemaker
- default: ['rabbitmq','haproxy']
+ default: ['rabbitmq', 'galera']
outputs:
role_data:
- name: Check pacemaker cluster running before upgrade
tags: step0,validation
pacemaker_cluster: state=online check_and_fail=true
+ async: 30
+ poll: 4
- name: Stop pacemaker cluster
tags: step2
pacemaker_cluster: state=offline
pacemaker_cluster: state=online
- name: Check pacemaker resource
tags: step4
- pacemaker_resource: state=started resource={{item}} check_mode=true wait_for_resource=true timeout=500
+ pacemaker_is_active:
+ resource: "{{ item }}"
+ max_wait: 500
with_items: {get_param: PacemakerResources}
+ - name: Check pacemaker haproxy resource
+ tags: step4
+ pacemaker_is_active:
+ resource: haproxy
+ max_wait: 500
+ when: {get_param: EnableLoadBalancer}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph RBD mirror service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Backup service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
CinderBackupBackend: {get_param: CinderBackupBackend}
CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
CephClientUserName: {get_param: CephClientUserName}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Volume service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
MySQL with Pacemaker service deployment using puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Redis service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [RedisBase, role_data, config_settings]
- redis::service_manage: false
redis::notify_service: false
+ redis::managed_by_cluster_manager: true
step_config: |
include ::tripleo::profile::pacemaker::database::redis
-heat_template_version: ocata
+heat_template_version: pike
description: >
HAproxy service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
The manila-share service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
RabbitMQ service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- rabbitmq::service_manage: false
step_config: |
include ::tripleo::profile::pacemaker::rabbitmq
- upgrade_tasks:
- - name: get bootstrap nodeid
- tags: common
- command: hiera bootstrap_nodeid
- register: bootstrap_node
- - name: set is_bootstrap_node fact
- tags: common
- set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
- - name: get rabbitmq policy
- tags: common
- shell: pcs resource show rabbitmq | grep -q -E "Attributes:.*\"ha-mode\":\"all\""
- register: rabbit_ha_mode
- when: is_bootstrap_node
- ignore_errors: true
- - name: set migrate_rabbit_ha_mode fact
- tags: common
- set_fact: migrate_rabbit_ha_mode={{rabbit_ha_mode.rc == 0}}
- when: is_bootstrap_node
- - name: Fixup for rabbitmq ha-queues LP#1668600
- tags: step0,pre-upgrade
- shell: |
- nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
- nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
- if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
- echo "ERROR: The nr. of HA queues during the rabbit upgrade is out of range: $nr_queues"
- exit 1
- fi
- pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
- when: is_bootstrap_node and migrate_rabbit_ha_mode
+ metadata_settings:
+ get_attr: [RabbitMQServiceBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
Pacemaker remote service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
- OpenStack Panko API service configured with Puppet
+ OpenStack Panko API service configured with Puppet.
+ Note: this service is deprecated in the Pike release and will
+ be disabled in future releases.
parameters:
ServiceNetMap:
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ PankoApiPolicies:
+ description: |
+ A hash of policies to configure for Panko API.
+ e.g. { panko-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
PankoBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ApacheServiceBase:
type: ./apache.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, PankoApiNetwork]}
+ panko::policy::policies: {get_param: PankoApiPolicies}
panko::api::service_name: 'httpd'
panko::api::enable_proxy_headers_parsing: true
tripleo.panko_api.firewall_rules:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Check if httpd is deployed
- command: systemctl is-enabled httpd
- tags: common
- ignore_errors: True
- register: httpd_enabled
- - name: "PreUpgrade step0,validation: Check if httpd is running"
- shell: >
- /usr/bin/systemctl show 'httpd' --property ActiveState |
- grep '\bactive\b'
- when: httpd_enabled.rc == 0
- tags: step0,validation
- - name: Stop panko-api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
- when: httpd_enabled.rc == 0
- - name: Install openstack-panko-api package if it was not installed
- tags: step3
- yum: name=openstack-panko-api state=latest
+ yaql:
+ expression: $.data.apache_upgrade + $.data.panko_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ panko_api_upgrade:
+ - name: Check if httpd is deployed
+ command: systemctl is-enabled httpd
+ tags: common
+ ignore_errors: True
+ register: httpd_enabled
+ - name: "PreUpgrade step0,validation: Check if httpd is running"
+ shell: >
+ /usr/bin/systemctl show 'httpd' --property ActiveState |
+ grep '\bactive\b'
+ when: httpd_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop panko-api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: httpd_enabled.rc == 0
+ - name: Install openstack-panko-api package if it was not installed
+ tags: step3
+ yum: name=openstack-panko-api state=latest
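The yaql expression used here (and in the OpenDaylight OVS template earlier) simply concatenates two lists of Ansible tasks into one. A minimal standalone sketch of the pattern, with placeholder task names:

    upgrade_tasks:
      yaql:
        expression: $.data.shared_tasks + $.data.service_tasks
        data:
          shared_tasks:
            - name: placeholder task inherited from a shared template
              tags: step1
          service_tasks:
            - name: placeholder service-specific task
              tags: step1

The result is a single flat list, so the shared tasks run ahead of the service-specific ones within each tagged step.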
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Panko service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: panko_base
config_settings:
panko::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://panko:'
- - {get_param: PankoPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/panko'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: panko
+ password: {get_param: PankoPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /panko
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
panko::debug: {get_param: Debug}
panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::project_name: 'service'
+ panko::keystone::authtoken::user_domain_name: 'Default'
+ panko::keystone::authtoken::project_domain_name: 'Default'
panko::keystone::authtoken::password: {get_param: PankoPassword}
- panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ panko::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::auth::auth_password: {get_param: PankoPassword}
panko::auth::auth_region: 'regionOne'
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Qpid dispatch router service configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RabbitUserName:
+ default: guest
+ description: The username for Qdr
+ type: string
+ RabbitPassword:
+ description: The password for Qdr
+ type: string
+ hidden: true
+ RabbitClientPort:
+ description: Listening port for Qdr
+ default: 5672
+ type: number
+ MonitoringSubscriptionQdr:
+ default: 'overcloud-qdr'
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the Qdr role.
+ value:
+ service_name: rabbitmq
+ monitoring_subscription: {get_param: MonitoringSubscriptionQdr}
+ global_config_settings:
+ messaging_notify_service_name: 'amqp'
+ messaging_rpc_service_name: 'amqp'
+ keystone::messaging::amqp::amqp_pre_settled: 'notify'
+ config_settings:
+ tripleo.rabbitmq.firewall_rules:
+ '109 qdr':
+ dport:
+ - {get_param: RabbitClientPort}
+ qdr::listener_addr: {get_param: [ServiceNetMap, QdrNetwork]}
+ # cannot pass qdr::listener_port directly because it needs to be a string
+ # we do the conversion in the puppet layer
+ tripleo::profile::base::qdr::qdr_listener_port: {get_param: RabbitClientPort}
+ tripleo::profile::base::qdr::qdr_username: {get_param: RabbitUserName}
+ tripleo::profile::base::qdr::qdr_password: {get_param: RabbitPassword}
+
+ step_config: |
+ include ::tripleo::profile::base::qdr
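Because this template registers itself under service_name rabbitmq, it is intended to be swapped in for the RabbitMQ service via the resource registry. A hedged sketch of such an environment file; the registry key and relative path are assumptions, not taken from this diff:

    # hypothetical environment file
    resource_registry:
      OS::TripleO::Services::RabbitMQ: ../puppet/services/qdr.yaml
    parameter_defaults:
      RabbitUserName: qdr
      RabbitPassword: <secret>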
-heat_template_version: ocata
+heat_template_version: pike
description: >
RabbitMQ service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
hidden: true
RabbitHAQueues:
description:
- The number of HA queues to be configured in rabbit. The default is 0 which will
- be automatically overridden to CEIL(N/2) where N is the number of nodes running
- rabbitmq.
- default: 0
+ The number of HA queues to be configured in rabbit. The default is -1 which
+ translates to "ha-mode all". The special value 0 will be automatically
+ overridden to CEIL(N/2) where N is the number of nodes running rabbitmq.
+ default: -1
type: number
MonitoringSubscriptionRabbitmq:
default: 'overcloud-rabbitmq'
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
outputs:
role_data:
service_name: rabbitmq
monitoring_subscription: {get_param: MonitoringSubscriptionRabbitmq}
config_settings:
- rabbitmq::file_limit: {get_param: RabbitFDLimit}
- rabbitmq::default_user: {get_param: RabbitUserName}
- rabbitmq::default_pass: {get_param: RabbitPassword}
- rabbit_ipv6: {get_param: RabbitIPv6}
- tripleo.rabbitmq.firewall_rules:
- '109 rabbitmq':
- dport:
- - 4369
- - 5672
- - 25672
- rabbitmq::delete_guest_user: false
- rabbitmq::wipe_db_on_cookie_change: true
- rabbitmq::port: '5672'
- rabbitmq::package_provider: yum
- rabbitmq::package_source: undef
- rabbitmq::repos_ensure: false
- rabbitmq::tcp_keepalive: true
- rabbitmq_environment:
- NODE_PORT: ''
- NODE_IP_ADDRESS: ''
- RABBITMQ_NODENAME: "rabbit@%{::hostname}"
- RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
- 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
- rabbitmq_kernel_variables:
- inet_dist_listen_min: '25672'
- inet_dist_listen_max: '25672'
- rabbitmq_config_variables:
- cluster_partition_handling: 'pause_minority'
- queue_master_locator: '<<"min-masters">>'
- loopback_users: '[]'
- rabbitmq::erlang_cookie:
- yaql:
- expression: $.data.passwords.where($ != '').first()
- data:
- passwords:
- - {get_param: RabbitCookie}
- - {get_param: [DefaultPasswords, rabbit_cookie]}
- # NOTE: bind IP is found in Heat replacing the network name with the
- # local node IP for the given network; replacement examples
- # (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
- rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+ map_merge:
+ -
+ rabbitmq::file_limit: {get_param: RabbitFDLimit}
+ rabbitmq::default_user: {get_param: RabbitUserName}
+ rabbitmq::default_pass: {get_param: RabbitPassword}
+ rabbit_ipv6: {get_param: RabbitIPv6}
+ tripleo.rabbitmq.firewall_rules:
+ '109 rabbitmq':
+ dport:
+ - 4369
+ - 5672
+ - 25672
+ rabbitmq::delete_guest_user: false
+ rabbitmq::wipe_db_on_cookie_change: true
+ rabbitmq::port: '5672'
+ rabbitmq::package_provider: yum
+ rabbitmq::package_source: undef
+ rabbitmq::repos_ensure: false
+ rabbitmq::tcp_keepalive: true
+ rabbitmq_environment:
+ NODE_PORT: ''
+ NODE_IP_ADDRESS: ''
+ RABBITMQ_NODENAME: "rabbit@%{::hostname}"
+ RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+ 'export ERL_EPMD_ADDRESS': "%{hiera('rabbitmq::interface')}"
+ rabbitmq_kernel_variables:
+ inet_dist_listen_min: '25672'
+ inet_dist_listen_max: '25672'
+ rabbitmq_config_variables:
+ cluster_partition_handling: 'pause_minority'
+ queue_master_locator: '<<"min-masters">>'
+ loopback_users: '[]'
+ rabbitmq::erlang_cookie:
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: RabbitCookie}
+ - {get_param: [DefaultPasswords, rabbit_cookie]}
+ # NOTE: bind IP is found in Heat replacing the network name with the
+ # local node IP for the given network; replacement examples
+ # (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ rabbitmq::interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+ rabbitmq::ssl: {get_param: EnableInternalTLS}
+ rabbitmq::ssl_port: '5672'
+ rabbitmq::ssl_depth: 1
+ rabbitmq::ssl_only: {get_param: EnableInternalTLS}
+ rabbitmq::ssl_interface: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ # TODO(jaosorior): Remove this once we set a proper default in
+ # puppet-tripleo
+ tripleo::profile::base::rabbitmq::enable_internal_tls: {get_param: EnableInternalTLS}
+ -
+ if:
+ - internal_tls_enabled
+ - generate_service_certificates: true
+ tripleo::profile::base::rabbitmq::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/rabbitmq.crt'
+ service_key: '/etc/pki/tls/private/rabbitmq.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ principal:
+ str_replace:
+ template: "rabbitmq/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ - {}
step_config: |
include ::tripleo::profile::base::rabbitmq
upgrade_tasks:
- name: Start rabbitmq service
tags: step4
service: name=rabbitmq-server state=started
-
+ metadata_settings:
+ if:
+ - internal_tls_enabled
+ -
+ - service: rabbitmq
+ network: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+ type: node
+ - null
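The RabbitHAQueues values described above amount to a small piece of arithmetic. A worked example, assuming a three-node controller role:

    # RabbitHAQueues: -1  -> ha-mode "all": queues are mirrored on every node
    # RabbitHAQueues:  0  -> CEIL(N/2); with N = 3 controllers, CEIL(3/2) = 2 nodes per queue
    # RabbitHAQueues:  2  -> exactly 2 nodes per queue, regardless of cluster size
    parameter_defaults:
      RabbitHAQueues: 0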
--- /dev/null
+---
+upgrade:
+ - When a service is deployed in WSGI with Apache, make sure the mod_ssl
+ package is deployed during the upgrade process; it is now required
+ by default so Apache can start properly.
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Sahara API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default:
tag: openstack.sahara.api
path: /var/log/sahara/sahara-api.log
+ SaharaApiPolicies:
+ description: |
+ A hash of policies to configure for Sahara API.
+ e.g. { sahara-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
resources:
SaharaBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [SaharaBase, role_data, config_settings]
- sahara::port: {get_param: [EndpointMap, SaharaInternal, port]}
+ sahara::policy::policies: {get_param: SaharaApiPolicies}
sahara::service::api::api_workers: {get_param: SaharaWorkers}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Sahara base service. Shared for all Sahara services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: sahara_base
config_settings:
sahara::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://sahara:'
- - {get_param: SaharaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/sahara'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: sahara
+ password: {get_param: SaharaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /sahara
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
sahara::rabbit_password: {get_param: RabbitPassword}
sahara::rabbit_user: {get_param: RabbitUserName}
sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
sahara::rabbit_port: {get_param: RabbitClientPort}
sahara::debug: {get_param: Debug}
+ # Remove admin_password when https://review.openstack.org/442619 is merged.
sahara::admin_password: {get_param: SaharaPassword}
- sahara::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
- sahara::identity_uri: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
sahara::use_neutron: true
sahara::plugins: {get_param: SaharaPlugins}
sahara::rpc_backend: rabbit
- sahara::admin_tenant_name: 'service'
sahara::db::database_db_max_retries: -1
sahara::db::database_max_retries: -1
+ sahara::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ sahara::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ sahara::keystone::authtoken::password: {get_param: SaharaPassword}
+ sahara::keystone::authtoken::project_name: 'service'
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Sahara Engine service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Configure securetty values
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ TtyValues:
+ default: {}
+ description: Configures console values in securetty
+ type: json
+ constraints:
+ - length: { min: 1}
+
+outputs:
+ role_data:
+ description: Console data for the securetty
+ value:
+ service_name: securetty
+ config_settings:
+ tripleo::profile::base::securetty::tty_list: {get_param: TtyValues}
+ step_config: |
+ include ::tripleo::profile::base::securetty
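Note that TtyValues defaults to an empty value but carries a minimum-length constraint, so a deployment enabling this service must supply a console list. A hedged example; the list itself is illustrative:

    parameter_defaults:
      TtyValues:
        - console
        - tty1
        - tty2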
-heat_template_version: ocata
+heat_template_version: pike
description: >
Utility stack to convert an array of services into a set of combined
description: Mapping of service -> default password. Used to help
pass top level passwords managed by Heat into services.
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ description: Role-specific parameters to be provided to the service
+ default: {}
+ type: json
resources:
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
LoggingConfiguration:
type: OS::TripleO::LoggingConfiguration
# fluentd user.
yaql:
expression: >
- set($.data.groups.flatten()).where($)
+ set(($.data.default + $.data.extra + $.data.role_data.where($ != null).select($.get('logging_groups'))).flatten()).where($)
data:
- groups:
- - [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}]
- - yaql:
- expression: list($.data.role_data.where($ != null).select($.get('logging_groups')).where($ != null))
- data: {role_data: {get_attr: [ServiceChain, role_data]}}
- - [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}]
+ default: {get_attr: [LoggingConfiguration, LoggingDefaultGroups]}
+ extra: {get_attr: [LoggingConfiguration, LoggingExtraGroups]}
+ role_data: {get_attr: [ServiceChain, role_data]}
config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
global_config_settings:
map_merge:
-heat_template_version: ocata
+heat_template_version: pike
description: >
SNMP client configured with Puppet, to facilitate Ceilometer Hardware
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
+ SnmpdBindHost:
+ description: An array of bind host addresses on which the SNMP daemon will listen.
+ type: comma_delimited_list
+ default: ['udp:161','udp6:[::1]:161']
+ SnmpdOptions:
+ description: A string containing the command-line options passed to snmpd
+ type: string
+ default: '-LS0-5d'
outputs:
role_data:
config_settings:
tripleo::profile::base::snmp::snmpd_user: {get_param: SnmpdReadonlyUserName}
tripleo::profile::base::snmp::snmpd_password: {get_param: SnmpdReadonlyUserPassword}
+ snmp::agentaddress: {get_param: SnmpdBindHost}
+ snmp::snmpd_options: {get_param: SnmpdOptions}
tripleo.snmp.firewall_rules:
'127 snmp':
dport: 161
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configure sshd_config
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: ''
description: Configures Banner text in sshd_config
type: string
+ MessageOfTheDay:
+ default: ''
+ description: Configures /etc/motd text
+ type: string
+ SshServerOptions:
+ default:
+ HostKey:
+ - '/etc/ssh/ssh_host_rsa_key'
+ - '/etc/ssh/ssh_host_ecdsa_key'
+ - '/etc/ssh/ssh_host_ed25519_key'
+ SyslogFacility: 'AUTHPRIV'
+ AuthorizedKeysFile: '.ssh/authorized_keys'
+ PasswordAuthentication: 'no'
+ ChallengeResponseAuthentication: 'no'
+ GSSAPIAuthentication: 'yes'
+ GSSAPICleanupCredentials: 'no'
+ UsePAM: 'yes'
+ X11Forwarding: 'yes'
+ UsePrivilegeSeparation: 'sandbox'
+ AcceptEnv:
+ - 'LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES'
+ - 'LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT'
+ - 'LC_IDENTIFICATION LC_ALL LANGUAGE'
+ - 'XMODIFIERS'
+ Subsystem: 'sftp /usr/libexec/openssh/sftp-server'
+ description: Mapping of sshd_config values
+ type: json
outputs:
role_data:
value:
service_name: sshd
config_settings:
- BannerText: {get_param: BannerText}
+ tripleo::profile::base::sshd::bannertext: {get_param: BannerText}
+ tripleo::profile::base::sshd::motd: {get_param: MessageOfTheDay}
+ tripleo::profile::base::sshd::options: {get_param: SshServerOptions}
step_config: |
include ::tripleo::profile::base::sshd
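SshServerOptions is a plain json parameter, so overriding it in an environment file replaces the whole map rather than merging individual keys. A hedged override sketch with illustrative values:

    parameter_defaults:
      BannerText: 'Authorized use only.'
      MessageOfTheDay: 'Managed by TripleO.'
      SshServerOptions:
        HostKey:
          - '/etc/ssh/ssh_host_rsa_key'
          - '/etc/ssh/ssh_host_ed25519_key'
        PasswordAuthentication: 'no'
        UsePAM: 'yes'
        X11Forwarding: 'no'
        Subsystem: 'sftp /usr/libexec/openssh/sftp-server'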
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Proxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Proxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: Timeout for requests going from swift-proxy to swift a/c/o services.
type: number
SwiftWorkers:
- default: 0
+ default: auto
description: Number of workers for Swift service.
- type: number
+ type: string
KeystoneRegion:
type: string
default: 'regionOne'
Rabbit client subscriber parameter to specify
an SSL connection to the RabbitMQ host.
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
conditions:
ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
+ use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
resources:
SwiftBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+ TLSProxyBase:
+ type: OS::TripleO::Services::TLSProxyBase
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
config_settings:
map_merge:
- get_attr: [SwiftBase, role_data, config_settings]
-
+ - get_attr: [TLSProxyBase, role_data, config_settings]
- swift::proxy::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
swift::proxy::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
swift::proxy::authtoken::password: {get_param: SwiftPassword}
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- swift::proxy::proxy_local_net_ip: {get_param: [ServiceNetMap, SwiftProxyNetwork]}
+ tripleo::profile::base::swift::proxy::tls_proxy_bind_ip:
+ get_param: [ServiceNetMap, SwiftProxyNetwork]
+ tripleo::profile::base::swift::proxy::tls_proxy_fqdn:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, SwiftProxyNetwork]}
+ tripleo::profile::base::swift::proxy::tls_proxy_port:
+ get_param: [EndpointMap, SwiftInternal, port]
+ swift::proxy::port: {get_param: [EndpointMap, SwiftInternal, port]}
+ swift::proxy::proxy_local_net_ip:
+ if:
+ - use_tls_proxy
+ - 'localhost'
+ - {get_param: [ServiceNetMap, SwiftProxyNetwork]}
step_config: |
include ::tripleo::profile::base::swift::proxy
service_config_settings:
- name: Stop swift_proxy service
tags: step1
service: name=openstack-swift-proxy state=stopped
+ metadata_settings:
+ get_attr: [TLSProxyBase, role_data, metadata_settings]
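To summarize the effect of the use_tls_proxy condition above: with internal TLS enabled the Swift proxy binds to localhost while the TLS proxy terminates TLS on the network-facing address and forwards to it. A rough sketch of the resulting hiera data; the address, FQDN and port are placeholders:

    # EnableInternalTLS: true (illustrative values only)
    tripleo::profile::base::swift::proxy::tls_proxy_bind_ip: 172.16.2.10
    tripleo::profile::base::swift::proxy::tls_proxy_fqdn: overcloud-controller-0.storage.example.com
    tripleo::profile::base::swift::proxy::tls_proxy_port: 8080
    swift::proxy::port: 8080
    swift::proxy::proxy_local_net_ip: localhost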
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Ringbuilder
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: true
description: 'Use a local directory for Swift storage services when building rings'
type: boolean
+ SwiftRingGetTempurl:
+ default: ''
+ description: A temporary Swift URL to download rings from.
+ type: string
+ SwiftRingPutTempurl:
+ default: ''
+ description: A temporary Swift URL to upload rings to.
+ type: string
conditions:
swift_use_local_dir:
value:
service_name: swift_ringbuilder
config_settings:
+ tripleo::profile::base::swift::ringbuilder::swift_ring_get_tempurl: {get_param: SwiftRingGetTempurl}
+ tripleo::profile::base::swift::ringbuilder::swift_ring_put_tempurl: {get_param: SwiftRingPutTempurl}
tripleo::profile::base::swift::ringbuilder::build_ring: {get_param: SwiftRingBuild}
tripleo::profile::base::swift::ringbuilder::replicas: {get_param: SwiftReplicas}
tripleo::profile::base::swift::ringbuilder::part_power: {get_param: SwiftPartPower}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Storage service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
conditions:
swift_mount_check:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Tacker service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 5672
description: Set rabbit subscriber port, change this if using SSL
type: number
+ TackerPolicies:
+ description: |
+ A hash of policies to configure for Tacker.
+ e.g. { tacker-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
outputs:
role_data:
config_settings:
tacker_password: {get_param: TackerPassword}
tacker::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://tacker:'
- - {get_param: TackerPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/tacker'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: tacker
+ password: {get_param: TackerPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /tacker
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
tacker::debug: {get_param: Debug}
tacker::rpc_backend: rabbit
tacker::server::bind_host: {get_param: [ServiceNetMap, TackerApiNetwork]}
tacker::keystone::authtoken::project_name: 'service'
- tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ tacker::keystone::authtoken::user_domain_name: 'Default'
+ tacker::keystone::authtoken::project_domain_name: 'Default'
+ tacker::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ tacker::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
tacker::db::mysql::password: {get_param: TackerPassword}
tacker::db::mysql::user: tacker
tacker::db::mysql::allowed_hosts:
- '%'
- {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ tacker::policy::policies: {get_param: TackerPolicies}
service_config_settings:
keystone:
tacker::keystone::auth::tenant: 'service'
+ tacker::keystone::auth::region: {get_param: KeystoneRegion}
tacker::keystone::auth::password: {get_param: TackerPassword}
tacker::keystone::auth::public_url: {get_param: [EndpointMap, TackerPublic, uri]}
tacker::keystone::auth::internal_url: {get_param: [EndpointMap, TackerInternal, uri]}
-heat_template_version: ocata
+heat_template_version: pike
description: >
NTP service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Composable Timezone service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
TripleO Firewall settings
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
tripleo::firewall::purge_firewall_rules: {get_param: PurgeFirewallRules}
step_config: |
include ::tripleo::firewall
+ upgrade_tasks:
+ - name: blank ipv6 rule before activating ipv6 firewall.
+ tags: step3
+ shell: cat /etc/sysconfig/ip6tables > /etc/sysconfig/ip6tables.n-o-upgrade; cat</dev/null>/etc/sysconfig/ip6tables
+ args:
+ creates: /etc/sysconfig/ip6tables.n-o-upgrade
-heat_template_version: ocata
+heat_template_version: pike
description: >
TripleO Package installation settings
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Vpp service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
step_config: |
include ::tripleo::profile::base::vpp
upgrade_tasks:
+ - name: Check if vpp is deployed
+ command: systemctl is-enabled vpp
+ tags: common
+ ignore_errors: True
+ register: vpp_enabled
+ - name: "PreUpgrade step0,validation: Check service vpp is running"
+ shell: /usr/bin/systemctl show 'vpp' --property ActiveState | grep '\bactive\b'
+ when: vpp_enabled.rc == 0
+ tags: step0,validation
- name: Stop vpp service
- tags: step2
+ tags: step1
+ when: vpp_enabled.rc == 0
service: name=vpp state=stopped
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Zaqar service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
Debug:
default: ''
description: Set to True to enable debugging on all services.
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ ZaqarPolicies:
+ description: |
+ A hash of policies to configure for Zaqar.
+ e.g. { zaqar-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
+ ZaqarWorkers:
+ type: string
+ description: Set the number of workers for zaqar::wsgi::apache
+ default: '%{::os_workers}'
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ zaqar_workers_zero: {equals: [{get_param: ZaqarWorkers}, 0]}
+
+resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
value:
service_name: zaqar
config_settings:
- zaqar::keystone::authtoken::password: {get_param: ZaqarPassword}
- zaqar::keystone::authtoken::project_name: 'service'
- zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- zaqar::debug: {get_param: Debug}
- zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]}
- zaqar::transport::wsgi::bind: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
- zaqar::message_pipeline: 'zaqar.notification.notifier'
- zaqar::unreliable: true
+ map_merge:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - zaqar::policy::policies: {get_param: ZaqarPolicies}
+ zaqar::keystone::authtoken::password: {get_param: ZaqarPassword}
+ zaqar::keystone::authtoken::project_name: 'service'
+ zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ zaqar::debug: {get_param: Debug}
+ zaqar::server::service_name: 'httpd'
+ zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]}
+ zaqar::wsgi::apache::ssl: false
+ zaqar::wsgi::apache::bind_host: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
+ zaqar::message_pipeline: 'zaqar.notification.notifier'
+ zaqar::unreliable: true
+ zaqar::wsgi::apache::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
+ -
+ if:
+ - zaqar_workers_zero
+ - {}
+ - zaqar::wsgi::apache::workers: {get_param: ZaqarWorkers}
service_config_settings:
keystone:
zaqar::keystone::auth::password: {get_param: ZaqarPassword}
step_config: |
include ::tripleo::profile::base::zaqar
upgrade_tasks:
- - name: Check if zaqar is deployed
- command: systemctl is-enabled openstack-zaqar
- tags: common
- ignore_errors: True
- register: zaqar_enabled
- - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running"
- shell: >
- /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState |
- grep '\bactive\b'
- when: zaqar_enabled.rc == 0
- tags: step0,validation
- - name: Stop zaqar service
- tags: step1
- when: zaqar_enabled.rc == 0
- service: name=openstack-zaqar state=stopped
- - name: Install openstack-zaqar package if it was disabled
- tags: step3
- yum: name=openstack-zaqar state=latest
- when: zaqar_enabled.rc != 0
+ yaql:
+ expression: $.data.apache_upgrade + $.data.zaqar_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ zaqar_upgrade:
+ - name: Check if zaqar is deployed
+ command: systemctl is-enabled openstack-zaqar
+ tags: common
+ ignore_errors: True
+ register: zaqar_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState |
+ grep '\bactive\b'
+ when: zaqar_enabled.rc == 0
+ tags: step0,validation
+ - name: Check for zaqar running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q zaqar_wsgi"
+ register: zaqar_apache
+ ignore_errors: true
+ - name: Stop zaqar service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: zaqar_apache.rc == 0
+ - name: Stop and disable zaqar service (pre-upgrade not under httpd)
+ tags: step1
+ when: zaqar_enabled.rc == 0
+ service: name=openstack-zaqar state=stopped enabled=no
+ - name: Install openstack-zaqar package if it was disabled
+ tags: step3
+ yum: name=openstack-zaqar state=latest
+ when: zaqar_enabled.rc != 0
-heat_template_version: ocata
+heat_template_version: pike
description: 'Upgrade via ansible by applying a step-related tag'
parameters:
--- /dev/null
+---
+features:
+ - Adds the InternalTLSCAFile parameter, which defines which CA file should be
+ used by the internal services to verify that the peer's certificate is
+ trusted. This is applicable if internal TLS is enabled. Currently, it
+ defaults to using the CA file for FreeIPA, which is the default CA.
--- /dev/null
+---
+features:
+ - |
+ If TLS in the internal network is enabled, libvirt's transport defaults to
+ using TLS. This can be changed by setting the ``UseTLSTransportForLiveMigration``
+ parameter, which is ``true`` by default.
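
# Illustrative only, not part of the patch: a minimal environment-file sketch
# showing how the UseTLSTransportForLiveMigration parameter described above
# could be overridden to keep the plain TCP transport.
parameter_defaults:
  UseTLSTransportForLiveMigration: false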
--- /dev/null
+---
+features:
+ - Keystone's default token provider is now fernet instead of UUID
+upgrade:
+ - When upgrading, old tokens will not work anymore due to the provider
+ changing from UUID to fernet.
--- /dev/null
+---
+fixes:
+ - Previously only the VIPs and their associated hostnames were present
+ in the HostsEntry output, due to the hosts_entries output on the
+ hosts-config.yaml nested stack being empty. It was referencing an
+ invalid attribute. See
+ https://bugs.launchpad.net/tripleo/+bug/1683517
+
+
--- /dev/null
+---
+features:
+ - Add support for BGPVPN Neutron service plugin
--- /dev/null
+---
+features:
+ - Add support to configure Ceilometer Agent Ipmi profiles.
--- /dev/null
+---
+security:
+ - |
+ Add an IPv6 disable option and make it configurable so users can disable
+ IPv6 when it is not used; this will decrease the risk of IPv6 attacks.
+ Both net.ipv6.conf.default.disable_ipv6 & net.ipv6.conf.all.disable_ipv6
+ will be explicitly set to the default value (0), which leaves IPv6 enabled.
--- /dev/null
+---
+features:
+ - Add support for L2 Gateway Neutron agent
--- /dev/null
+---
+features:
+ - Add support for L2 Gateway Neutron service plugin
--- /dev/null
+---
+features:
+ - Add the capability to configure LDAP backends for keystone domains.
+ This can be done by using the KeystoneLDAPDomainEnable and
+ KeystoneLDAPBackendConfigs parameters.
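
# Illustrative only, not part of the patch: a hypothetical environment-file
# sketch using the KeystoneLDAPDomainEnable and KeystoneLDAPBackendConfigs
# parameters named above; the domain name and LDAP settings are made up and
# the exact set of backend keys depends on the keystone LDAP backend options.
parameter_defaults:
  KeystoneLDAPDomainEnable: true
  KeystoneLDAPBackendConfigs:
    exampleLDAP:
      url: ldap://ldap.example.com
      user: cn=admin,dc=example,dc=com
      password: SecretPassword
      suffix: dc=example,dc=com
      user_tree_dn: ou=Users,dc=example,dc=com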
--- /dev/null
+---
+features:
+ - Adds support for OpenDaylight HA clustering. Now when specifying
+ three or more ODL roles, ODL will be deployed in a cluster, and
+ use port 2550 for cluster communication.
--- /dev/null
+---
+features:
+ - The relevant parameters have been added to deploy the heat APIs over httpd.
+ This means that the HeatWorkers now affect httpd instead of the heat APIs
+ themselves, and that the apache hieradata will also be deployed on the
+ nodes where the heat APIs run.
--- /dev/null
+---
+features:
+ - Introduce the ability to deploy the qpid-dispatch-router (Qdr) for
+ the oslo.messaging AMQP 1.0 driver backend. The Qdr provides
+ direct messaging (e.g. brokerless) communications for
+ oslo.messaging services. To facilitate simple use for evaluation
+ in an overcloud deployment, the Qdr aliases the RabbitMQ service
+ to provide the messaging backend.
--- /dev/null
+---
+features:
+ - Added Pure Storage FlashArray iSCSI and FC backend support for cinder
--- /dev/null
+---
+features:
+ - Adds DatabaseSyncTimeout parameter to Nova and Neutron templates.
--- /dev/null
+---
+features:
+ - |
+ TripleO is now able to configure role-based access API policies with new
+ parameters for each API service.
+ For example, the Nova API service now has NovaApiPolicies and the value
+ could be { nova-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ It will configure the /etc/nova/policy.json file and set context_is_admin
+ to 'role:admin'. Puppet will take care of this configuration, and the API
+ services are restarted when the file changes.
+ We're also adding augeas resource to the list of Puppet providers that
+ container deployments grab in the catalog to generate configurations, so
+ this feature can be used when deploying TripleO in containers.
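
# Illustrative only, not part of the patch: a minimal environment-file sketch
# using the per-service policies parameters described above, with the example
# key and value taken from the note.
parameter_defaults:
  NovaApiPolicies:
    nova-context_is_admin:
      key: context_is_admin
      value: 'role:admin'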
--- /dev/null
+---
+fixes:
+ - |
+ Updated bigswitch environment file to include the bigswitch agent
+ installation and correct support for the restproxy configuration.
--- /dev/null
+---
+upgrade:
+ - |
+ We no longer change the rabbitmq ha-mode policy during upgrades.
+ The policy chosen at deploy time will remain the same but can be changed
+ manually.
+fixes:
+ - |
+ Due to https://bugs.launchpad.net/tripleo/+bug/1686337 we switch the
+ default rabbitmq ha-mode back to "all". This is to make the installation
+ more robust in the face of network issues.
--- /dev/null
+---
+features:
+ - |
+ By default, do not log a message in syslog for each incoming SNMP query,
+ so the default log level is set to '-LS0-5d'. The operator can customize
+ the log level via a parameter.
--- /dev/null
+---
+fixes:
+ - The initial firewall will now be purged by the deployed-server bootstrap
+ scripts. This is needed to prevent possible issues with bootstrapping the
+ initial Pacemaker cluster. See
+ https://bugs.launchpad.net/tripleo/+bug/1679234
--- /dev/null
+---
+upgrade:
+ - The ``NeutronExternalNetworkBridge`` parameter changed its default value
+ from ``br-ex`` to an empty string value. It means that by default Neutron
+ L3 agent will be able to serve multiple external networks. (It was always
+ the case for those who were using templates with the value of the parameter
+ overridden by an empty string value.)
+deprecations:
+ - The ``NeutronExternalNetworkBridge`` parameter is deprecated and will be
+ removed in a next release.
--- /dev/null
+---
+upgrade:
+ - With expirer deprecated and disabled by default, there is an upgrade
+ impact here. If you had the expirer enabled in ocata and you upgrade to
+ pike, the expirer will not be enabled anymore. If you wish to use the
+ expirer, ensure you include ceilometer-expirer.yaml
+ in your upgrade deploy command. Also note that with the collector
+ disabled, there is no need for expirer to be running.
+deprecations:
+ - Deprecate and turn off the expirer service, as with the collector. Without
+ the collector and standard storage, the expirer has no use.
--- /dev/null
+---
+upgrade:
+ - With collector deprecated and disabled by default, there is an upgrade
+ impact here. If you had the collector enabled in ocata and you upgrade to
+ pike, the collector will not be enabled anymore. If you wish to use the
+ collector, ensure you include ceilometer-collector.yaml
+ in your upgrade deploy command. We recommend switching to using the
+ new pipeline approach with publisher instead.
+deprecations:
+ - Deprecate and disable the ceilometer collector service by default. Instead,
+ use the publisher directly in the pipeline to push data where appropriate.
+ The collector can be manually enabled by passing the ceilometer-collector.yaml
+ environment file from the environments directory to the deploy command.
+ By default, the pipeline publisher pushes data automatically to gnocchi.
--- /dev/null
+---
+deprecations:
+ - The Panko API service is deprecated in the Pike release. Note that this service
+ will remain enabled by default as there is no replacement yet. This will
+ be disabled in future releases.
--- /dev/null
+---
+deprecations:
+ - Deprecate and disable the ceilometer API by default. It can be enabled
+ by passing an environment file to the deploy command.
--- /dev/null
+---
+upgrade:
+ - |
+ The fs.suid_dumpable kernel parameter is now explicitly set to 0 to prevent
+ exposing sensitive data through core dumps of processes with elevated
+ permissions. Deployments that set or depend on non-zero values for
+ fs.suid_dumpable may be affected by upgrading.
+security:
+ - |
+ Explicitly disable core dumps for setuid programs by setting
+ fs.suid_dumpable = 0; this will decrease the risk of unauthorized access
+ to core dump files generated by setuid programs.
--- /dev/null
+---
+upgrade:
+ - The net.ipv4.conf.default.send_redirects & net.ipv4.conf.all.send_redirects
+ are now set to 0 to prevent a compromised host from sending invalid ICMP
+ redirects to other router devices.
+ - The net.ipv4.conf.default.accept_redirects,
+ net.ipv6.conf.default.accept_redirects & net.ipv6.conf.all.accept_redirects
+ are now set to 0 to prevent forged ICMP packet from altering host's routing
+ tables.
+ - The net.ipv4.conf.default.secure_redirects &
+ net.ipv4.conf.all.secure_redirects are now set to 0 to disable acceptance
+ of secure ICMP redirected packets.
+security:
+ - Invalid ICMP redirects may corrupt routing and have users access a system
+ set up by the attacker as opposed to a valid system.
+ - Routing tables may be altered by bogus ICMP redirect messages and send
+ packets to incorrect networks.
+ - Secure ICMP redirects are the same as ICMP redirects, except they come from
+ gateways listed on the default gateway list.
--- /dev/null
+---
+upgrade:
+ - |
+ Disabled cephfs snapshot support (ManilaCephFSNativeCephFSEnableSnapshots
+ parameter) in manila by default.
--- /dev/null
+---
+upgrade:
+ - |
+ Disable the default vhost for apache. This is required for hybrid deployments
+ where WSGI-based services run both on the host and in containers, so that
+ the default ports do not conflict.
--- /dev/null
+---
+features:
+ - |
+ When deploying with environments/docker.yaml, the docker service
+ is now deployed on all predefined roles.
--- /dev/null
+---
+upgrade:
+ - |
+ The net.ipv4.conf.default.log_martians & net.ipv4.conf.all.log_martians are
+ now set to 1 to enable logging of suspicious packets.
+security:
+ - |
+ Logging of suspicious packets allows an administrator to investigate the
+ spoofed packets sent to their system.
--- /dev/null
+---
+features:
+ - Added support for external swift proxy. Users may need to
+ configure endpoints pointing to swift proxy service
+ already available.
--- /dev/null
+---
+security:
+ - |
+ Secure EtcdInitialClusterToken by removing the default value
+ and making the parameter hidden.
+ Fixes `bug 1673266 <https://bugs.launchpad.net/tripleo/+bug/1673266>`__.
--- /dev/null
+---
+fixes:
+ - Expose metric_processing_delay to tweak gnocchi performance.
--- /dev/null
+---
+fixes:
+ - Fixes an issue when using the CinderNfsServers
+ parameter_defaults setting. It now works using a
+ single share as well as a comma-separated list of
+ shares.
--- /dev/null
+---
+fixes:
+ - Fixes firewall rules from neutron OVS agent not being
+ inherited correctly and applied in neutron OVS DPDK
+ template.
--- /dev/null
+---
+fixes:
+ - Fixes OpenDaylightProviderMappings parsing on a
+ comma delimited list.
--- /dev/null
+---
+fixes:
+ - The deployed-server Heat agent configuration script,
+ get-occ-config.sh, is now updated to configure the
+ local data source for os-collect-config instead of
+ configuring /etc/os-collect-config.conf directly. Doing
+ so means that the configuration template for os-apply-config
+ no longer has to be deleted as the file will be rendered
+ correctly with the right data. See
+ https://bugs.launchpad.net/tripleo/+bug/1679705
--- /dev/null
+---
+features:
+ - Deploy Glance with Keystone v3 endpoints and make
+ sure it doesn't rely on Keystone v2 anymore.
--- /dev/null
+---
+features:
+ - Deploy Gnocchi with Keystone v3 endpoints and make
+ sure it doesn't rely on Keystone v2 anymore.
+++ /dev/null
----
-deprecations:
- - The environments/puppet-pacemaker.yaml file is now deprecated and the HA
- deployment is now the default. In order to get the non-HA deployment use
- environments/nonha-arch.yaml explicitly.
--- /dev/null
+---
+fixes:
+ - openstack-selinux is now installed by the deployed-server
+ bootstrap scripts. Previously, it was not installed, so
+ if SELinux was set to enforcing, all OpenStack policy
+ was missing.
--- /dev/null
+---
+features:
+ - |
+ The new ``IronicDefaultBootOption`` configuration allows changing the default
+ boot option to use for bare metal instances in the overcloud.
+upgrade:
+ - |
+ The default boot option for bare metal instances in overcloud was changed
+ to "local". This was already the default for whole-disk images, but for
+ partition images it requires ``grub2`` to be installed on them.
+ Use the new ``IronicDefaultBootOption`` configuration to override, or
+ set ``boot_option`` capability on nodes and flavors.
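
# Illustrative only, not part of the patch: a minimal environment-file sketch
# overriding the new IronicDefaultBootOption parameter described above, here
# restoring the previous netboot behaviour.
parameter_defaults:
  IronicDefaultBootOption: netboot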
--- /dev/null
+---
+features:
+ - |
+ Configuring enabled Ironic hardware types is now possible via new
+ ``IronicEnabledHardwareTypes`` parameter. See this spec for details:
+ http://specs.openstack.org/openstack/ironic-specs/specs/approved/driver-composition-reform.html.
+ - |
+ Bare metal serial console support via ``socat`` utility is enabled for
+ Ironic hardware types supporting it (currently only ``ipmi``).
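
# Illustrative only, not part of the patch: a minimal environment-file sketch
# enabling a single hardware type through the new IronicEnabledHardwareTypes
# parameter mentioned above.
parameter_defaults:
  IronicEnabledHardwareTypes:
    - ipmi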
--- /dev/null
+---
+features:
+ - |
+ Allow setting the Ironic provisioning network UUID or name via new
+ ``IronicProvisioningNetwork`` configuration.
+ - |
+ Enable support for "neutron" Ironic networking plugin, enabling advanced
+ integration with Neutron, such as VLAN/VXLAN network support, bonding and
+ security groups.
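
# Illustrative only, not part of the patch: a minimal environment-file sketch
# pointing the new IronicProvisioningNetwork parameter at a provisioning
# network by name; the network name is made up.
parameter_defaults:
  IronicProvisioningNetwork: provisioning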
--- /dev/null
+---
+fixes:
+ - |
+ Previously the RHEL registration script disabled the satellite repo after
+ installing the necessary packages from it. This makes it awkward to
+ update those packages later, so the repo will no longer be disabled.
--- /dev/null
+---
+fixes:
+ - Since panko is enabled by default, include it in the default dispatcher
+ for ceilometer events.
--- /dev/null
+---
+upgrade:
+ - |
+ Neutron API controller no longer advertises ``dvr`` extension if the
+ cloud is not configured for DVR. This is achieved by setting ``enable_dvr``
+ to match ``NeutronEnableDVR`` setting.
--- /dev/null
+---
+features:
+ - |
+ Add support for cold migration over ssh.
+
+ This enables nova cold migration.
+
+ This also switches to SSH as the default transport for live-migration.
+ The tripleo-common mistral action that generates passwords supplies the
+ MigrationSshKey parameter that enables this.
+deprecations:
+ - |
+ The TCP transport is no longer used for live-migration and the firewall
+ port has been closed.
--- /dev/null
+---
+features:
+ - Add support for NSX Neutron plugin
--- /dev/null
+---
+fixes:
+ - |
+ Octavia API and Neutron Server can now be deployed on separated nodes.
+ See https://bugs.launchpad.net/tripleo/+bug/1687026
--- /dev/null
+---
+features:
+ - Support configuring NeutronBridgeMappings
+ - Set force_config_drive to true as OVN doesn't support metadata service
+ - Add necessary iptables rules to allow Geneve traffic and ovsdb-server
+ traffic for Northbound and Southbound databases.
--- /dev/null
+---
+issues:
+ - During the ovs upgrade from 2.5 to 2.6 we need to work around the classic
+ yum update command by handling the upgrade of the package separately so as
+ not to lose the IPs and the connectivity on the nodes. The workaround is
+ discussed here https://bugs.launchpad.net/tripleo/+bug/1669714
+upgrade:
+ - The upgrade from openvswitch 2.5 to 2.6 is handled gracefully and there should
+ be no user impact; in particular, no restart of the openvswitch service. For more
+ information please see the related bug above which also links the relevant code reviews.
+ The workaround (transparent to the user/doesn't require any input) is to download the OVS
+ package and install with --nopostun and --notriggerun options provided by the rpm binary.
--- /dev/null
+---
+features:
+ - The server resource type, OS::TripleO::Server can now be
+ mapped per role instead of globally. This allows users to
+ mix baremetal (OS::Nova::Server) and
+ deployed-server (OS::Heat::DeployedServer) server resources
+ in the same deployment. See
+ https://blueprints.launchpad.net/tripleo/+spec/pluggable-server-type-per-role
--- /dev/null
+---
+upgrade:
+ - |
+ The default network for the ctlplane changed from 192.0.2.0/24 to
+ 192.168.24.0/24. All references to the ctlplane network in the templates
+ have been updated to reflect this change. When upgrading from a previous
+ release, if the default network was used for the ctlplane (192.0.2.0/24),
+ then it is necessary to provide as input, via environment file, the correct
+ setting for all the parameters that previously defaulted to 192.0.2.x and
+ now default to 192.168.24.x; there is an environment file which could be
+ used on upgrade `environments/updates/update-from-192_0_2-subnet.yaml` to
+ cover a simple scenario but it won't be enough for scenarios using an
+ external load balancer, Contrail or Cisco N1KV. The following is a list of params to
+ be provided on upgrade:
+ From contrail-net.yaml: EC2MetadataIp, ControlPlaneDefaultRoute
+ From external-loadbalancer-vip-v6.yaml: ControlFixedIPs
+ From external-loadbalancer-vip.yaml: ControlFixedIPs
+ From network-environment.yaml: EC2MetadataIp, ControlPlaneDefaultRoute
+ From neutron-ml2-cisco-n1kv.yaml: N1000vVSMIP, N1000vMgmtGatewayIP
+ From contrail-vrouter.yaml: ContrailVrouterGateway
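
# Illustrative only, not part of the patch: a minimal environment-file sketch
# for an upgrade that keeps the old 192.0.2.0/24 ctlplane subnet, overriding
# two of the parameters listed above; the values shown are examples and must
# match the existing deployment.
parameter_defaults:
  EC2MetadataIp: 192.0.2.1
  ControlPlaneDefaultRoute: 192.0.2.1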
--- /dev/null
+---
+upgrade:
+ - |
+ The kernel.dmesg_restrict is now set to 1 to prevent exposure of sensitive
+ kernel address information with unprivileged access. Deployments that set
+ or depend on values other than 1 for kernel.dmesg_restrict may be affected
+ by upgrading.
+security:
+ - |
+ The kernel syslog contains sensitive kernel address information, so
+ kernel.dmesg_restrict is set to prevent unprivileged access to this information.
--- /dev/null
+---
+fixes:
+ - Add knobs to limit memory consumed by mongodb with systemd
--- /dev/null
+---
+features:
+ - |
+ Adds tags to roles that allow an operator to specify custom tags to use
+ when trying to find functionality available from a role. Currently a role
+ with both the 'primary' and 'controller' tags is considered to be the primary
+ role. Historically the role named 'Controller' was the 'primary' role and
+ this primary designation is used to determine items like memcache ip
+ addresses. If no roles have both the 'primary' and 'controller' tags,
+ the first role specified in the roles_data.yaml is used as the primary
+ role.
+upgrade:
+ - |
+ If using custom roles data, the logic was changed to leverage the first
+ role listed in the roles_data.yaml file to be the primary role. This can
+ be worked around by adding the 'primary' and 'controller' tags to the
+ custom controller role in your roles_data.yaml to ensure that the defined
+ custom controller role is still considered the primary role.
--- /dev/null
+---
+features:
+ - Sahara is now deployed with keystone_authtoken parameters and moves
+ forward to the Keystone v3 API.
--- /dev/null
+---
+features:
+ - Role-specific information is added to the service templates to enable
+ role-specific decisions in the services.
--- /dev/null
+---
+fixes:
+ - We need the ceilometer user even in cases where the ceilometer API is disabled.
+ This is to ensure other ceilometer services can still authenticate
+ with keystone.
--- /dev/null
+---
+features:
+ - |
+ Adds a new parameter to the SNMP profile, SnmpdBindHost,
+ so users can change the binding addresses of the SNMP daemon.
+ The parameter is an array and defaults to the values that
+ were previously hardcoded in puppet-tripleo.
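
# Illustrative only, not part of the patch: a minimal environment-file sketch
# overriding the SnmpdBindHost array described above; the listen addresses
# shown are made-up examples.
parameter_defaults:
  SnmpdBindHost:
    - 'udp:161'
    - 'udp6:[::1]:161'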
--- /dev/null
+---
+fixes:
+ - The ``pci_passthrough`` hiera value should be passed as a string
+ (`bug 1675036 <https://bugs.launchpad.net/tripleo/+bug/1675036>`__).
--- /dev/null
+---
+features:
+ - SSH host key exchange. The ssh host keys are collected from each host,
+ combined, and written to /etc/ssh/ssh_known_hosts.
--- /dev/null
+---
+features:
+ - |
+ Added the ability to manage the MOTD banner.
+ Enabled the SSHD composable service by default. Puppet-ssh manages the sshd config.
--- /dev/null
+---
+fixes:
+ - The stack name can now be overridden in the get-occ-config.sh script
+ for deployed-server's by setting the $STACK_NAME variable in the
+ environment.
--- /dev/null
+---
+fixes:
+ - This commit merges both [Pre|Post]Puppet and [Pre|Post]Config
+ resources, giving an agnostic name for the configuration
+ steps. The [Pre|Post]Puppet resource is removed and should not
+ be used anymore.
--- /dev/null
+---
+fixes:
+ - The token flush cron job has been modified to run hourly instead of once
+ a day. This is because this was causing issues with larger deployments, as
+ the operation would take too long and sometimes even fail because of the
+ transaction being so large. Note that this only affects people using the
+ UUID token provider.
--- /dev/null
+---
+features:
+ - |
+ Adds a new boolean parameter for RHEL Registration called
+ 'UpdateOnRHELRegistration' that when enabled will trigger a yum update
+ on the node after the registration process completes.
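
# Illustrative only, not part of the patch: a minimal environment-file sketch
# enabling the UpdateOnRHELRegistration behaviour described above.
parameter_defaults:
  UpdateOnRHELRegistration: true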
--- /dev/null
+---
+features:
+ - Add name and description fields to plan-environment.yaml
--- /dev/null
+---
+upgrade:
+ - |
+ The new StackUpdateType parameter is now set to UPGRADE
+ when a major version upgrade is in progress. This enables application
+ configuration via puppet to distinguish a major version upgrade from a
+ normal stack update (e.g. for minor updates or reconfiguration) by
+ inspecting the stack_update_type hiera value. In the future other values may be added to
+ flag e.g. minor updates vs. reconfiguration, but for now only UPGRADE is considered.
--- /dev/null
+---
+features:
+ - Adds support for networking-vpp ML2 mechanism driver and agent.
--- /dev/null
+---
+features:
+ - Run the Zaqar WSGI service over httpd in Puppet.
# built documents.
#
# The full version, including alpha/beta/rc tags.
-release = '6.0.0.0b3'
+release = '7.0.0.0b1'
# The short X.Y version.
-version = '6.0.0'
+version = '7.0.0'
# The full version, including alpha/beta/rc tags.
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=1.8 # Apache-2.0
+pbr!=2.1.0,>=2.0.0 # Apache-2.0
Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
six>=1.9.0 # MIT
# ServicesDefault: (list) optional default list of services to be deployed
# on the role, defaults to an empty list. Sets the default for the
# {{role.name}}Services parameter in overcloud.yaml
-
-- name: Controller # the 'primary' role goes first
+#
+# tags: (list) list of tags used by other parts of the deployment process to
+# find the role for a specific type of functionality. Currently a role
+# with both 'primary' and 'controller' is used as the primary role for the
+# deployment process. If no roles have both 'primary' and 'controller', the
+# first role in this file is used as the primary role.
+#
+- name: Controller
CountDefault: 1
+ tags:
+ - primary
+ - controller
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephMds
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::CinderBackendDellPs
+ - OS::TripleO::Services::CinderBackendDellSc
+ - OS::TripleO::Services::CinderBackendNetApp
+ - OS::TripleO::Services::CinderBackendScaleIO
- OS::TripleO::Services::Congress
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronBgpVpnApi
- OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL2gwApi
- OS::TripleO::Services::NeutronL3Agent
- OS::TripleO::Services::NeutronMetadataAgent
- OS::TripleO::Services::NeutronApi
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::NeutronL2gwAgent
- OS::TripleO::Services::RabbitMQ
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::ExternalSwiftProxy
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- - OS::TripleO::Services::CeilometerApi
- - OS::TripleO::Services::CeilometerCollector
- - OS::TripleO::Services::CeilometerExpirer
- OS::TripleO::Services::CeilometerAgentCentral
- OS::TripleO::Services::CeilometerAgentNotification
- OS::TripleO::Services::Horizon
- OS::TripleO::Services::OctaviaHousekeeping
- OS::TripleO::Services::OctaviaWorker
- OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::NeutronVppAgent
+ - OS::TripleO::Services::Docker
- name: Compute
CountDefault: 1
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CephExternal
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
- name: BlockStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
- name: ObjectStorage
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
- name: CephStorage
ServicesDefault:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
+ - OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Docker
-- name: Undercloud # the 'primary' role goes first
+- name: Undercloud
CountDefault: 1
disable_constraints: True
+ tags:
+ - primary
+ - controller
ServicesDefault:
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronOvsAgent
- OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::UndercloudAodhApi
+ - OS::TripleO::Services::UndercloudAodhEvaluator
+ - OS::TripleO::Services::UndercloudAodhNotifier
+ - OS::TripleO::Services::UndercloudAodhListener
+ - OS::TripleO::Services::UndercloudGnocchiApi
+ - OS::TripleO::Services::UndercloudGnocchiMetricd
+ - OS::TripleO::Services::UndercloudGnocchiStatsd
+ - OS::TripleO::Services::UndercloudPankoApi
+ - OS::TripleO::Services::UndercloudCeilometerAgentCentral
+ - OS::TripleO::Services::UndercloudCeilometerAgentNotification
if grep -q "^# HEAT_HOSTS_START" "$file"; then
temp=$(mktemp)
- awk -v v="$entries" '/^# HEAT_HOSTS_START/ {
- print $0
- print v
- f=1
- }f &&!/^# HEAT_HOSTS_END$/{next}/^# HEAT_HOSTS_END$/{f=0}!f' "$file" > "$temp"
- echo "INFO: Updating hosts file $file, check below for changes"
- diff "$file" "$temp" || true
- cat "$temp" > "$file"
+ (
+ sed '/^# HEAT_HOSTS_START/,$d' "$file"
+ echo -ne "\n# HEAT_HOSTS_START - Do not edit manually within this section!\n"
+ echo "$entries"
+ echo -ne "# HEAT_HOSTS_END\n\n"
+ sed '1,/^# HEAT_HOSTS_END/d' "$file"
+ ) > "$temp"
+ echo "INFO: Updating hosts file $file, check below for changes"
+ diff "$file" "$temp" || true
+ cat "$temp" > "$file"
else
echo -ne "\n# HEAT_HOSTS_START - Do not edit manually within this section!\n" >> "$file"
echo "$entries" >> "$file"
pass
setuptools.setup(
- setup_requires=['pbr>=1.8'],
+ setup_requires=['pbr>=2.0.0'],
pbr=True)
parser.add_argument('-r', '--roles-data', metavar='ROLES_DATA',
help="""relative path to the roles_data.yaml file.""",
default='roles_data.yaml')
+ parser.add_argument('-n', '--network-data', metavar='NETWORK_DATA',
+ help="""relative path to the network_data.yaml file.""",
+ default='network_data.yaml')
parser.add_argument('--safe',
action='store_true',
help="""Enable safe mode (do not overwrite files).""",
out_f.write(r_template)
-def process_templates(template_path, role_data_path, output_dir, overwrite):
+def process_templates(template_path, role_data_path, output_dir,
+ network_data_path, overwrite):
with open(role_data_path) as role_data_file:
role_data = yaml.safe_load(role_data_file)
+ with open(network_data_path) as network_data_file:
+ network_data = yaml.safe_load(network_data_file)
+
j2_excludes_path = os.path.join(template_path, 'j2_excludes.yaml')
with open(j2_excludes_path) as role_data_file:
j2_excludes = yaml.safe_load(role_data_file)
print("jinja2 rendering normal template %s" % f)
with open(file_path) as j2_template:
template_data = j2_template.read()
- j2_data = {'roles': role_data}
+ j2_data = {'roles': role_data,
+ 'networks': network_data}
out_f = os.path.basename(f).replace('.j2.yaml', '.yaml')
out_f_path = os.path.join(out_dir, out_f)
_j2_render_to_file(template_data, j2_data, out_f_path,
opts = parse_opts(sys.argv)
role_data_path = os.path.join(opts.base_path, opts.roles_data)
+network_data_path = os.path.join(opts.base_path, opts.network_data)
-process_templates(opts.base_path, role_data_path, opts.output_dir, (not opts.safe))
+process_templates(opts.base_path, role_data_path, opts.output_dir,
+ network_data_path, (not opts.safe))
print("Error couldn't find run-os-net-config.sh relative to filename")
exit_usage()
- for r in six.iteritems(tpl.get('resources', {})):
+ for r in tpl.get('resources', {}).items():
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
import yaml
-required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords']
+required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
+ 'RoleName', 'RoleParameters']
envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
'tls-endpoints-public-ip.yaml',
'tls-everywhere-endpoints-dns.yaml']
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
+REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
+ 'config_settings', 'step_config']
+OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
+ 'service_config_settings', 'host_prep_tasks',
+ 'metadata_settings', 'kolla_config']
+REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
+ 'config_image']
+OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags']
+
def exit_usage():
print('Usage %s <yaml file or directory>' % sys.argv[0])
return 1
return 0
+
def validate_mysql_connection(settings):
no_op = lambda *args: False
error_status = [0]
return error_status[0]
+def validate_docker_service(filename, tpl):
+ if 'outputs' in tpl and 'role_data' in tpl['outputs']:
+ if 'value' not in tpl['outputs']['role_data']:
+ print('ERROR: invalid role_data for filename: %s'
+ % filename)
+ return 1
+ role_data = tpl['outputs']['role_data']['value']
+
+ for section_name in REQUIRED_DOCKER_SECTIONS:
+ if section_name not in role_data:
+ print('ERROR: %s is required in role_data for %s.'
+ % (section_name, filename))
+ return 1
+
+ for section_name in role_data.keys():
+ if section_name in REQUIRED_DOCKER_SECTIONS:
+ continue
+ else:
+ if section_name in OPTIONAL_DOCKER_SECTIONS:
+ continue
+ else:
+ print('ERROR: %s is extra in role_data for %s.'
+ % (section_name, filename))
+ return 1
+
+ if 'puppet_config' in role_data:
+ puppet_config = role_data['puppet_config']
+ for key in puppet_config:
+ if key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
+ continue
+ else:
+ if key in OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS:
+ continue
+ else:
+ print('ERROR: %s should not be in puppet_config section.'
+ % key)
+ return 1
+ for key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
+ if key not in puppet_config:
+ print('ERROR: %s is required in puppet_config for %s.'
+ % (key, filename))
+ return 1
+
+ if 'parameters' in tpl:
+ for param in required_params:
+ if param not in tpl['parameters']:
+ print('ERROR: parameter %s is required for %s.'
+ % (param, filename))
+ return 1
+ return 0
+
+
def validate_service(filename, tpl):
if 'outputs' in tpl and 'role_data' in tpl['outputs']:
if 'value' not in tpl['outputs']['role_data']:
% filename)
return 1
+ # qdr aliases rabbitmq service to provide alternative messaging backend
if (filename.startswith('./puppet/services/') and
- filename != './puppet/services/services.yaml'):
+ filename not in ['./puppet/services/services.yaml',
+ './puppet/services/qdr.yaml']):
retval = validate_service(filename, tpl)
+ if (filename.startswith('./docker/services/') and
+ filename != './docker/services/services.yaml'):
+ retval = validate_docker_service(filename, tpl)
+
if filename.endswith('hyperconverged-ceph.yaml'):
retval = validate_hci_compute_services_default(filename, tpl)
[testenv]
usedevelop = True
+install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
echo "SUCCESS"
}
+# Verify the FQDN from the nova/ironic deployment matches
+# FQDN in the heat templates.
+function fqdn_check() {
+ HOSTNAME=$(hostname)
+ SHORT_NAME=$(hostname -s)
+ FQDN_FROM_HOSTS=$(awk '$3 == "'${SHORT_NAME}'"{print $2}' /etc/hosts)
+ echo -n "Checking hostname vs /etc/hosts entry..."
+ if [[ $HOSTNAME != $FQDN_FROM_HOSTS ]]; then
+ echo "FAILURE"
+ echo -e "System hostname: ${HOSTNAME}\nEntry from /etc/hosts: ${FQDN_FROM_HOSTS}\n"
+ exit 1
+ fi
+ echo "SUCCESS"
+}
+
+# Verify at least one time source is available.
+function ntp_check() {
+ NTP_SERVERS=$(hiera ntp::servers nil |tr -d '[],"')
+ if [[ "$NTP_SERVERS" != "nil" ]];then
+ echo -n "Testing NTP..."
+ NTP_SUCCESS=0
+ for NTP_SERVER in $NTP_SERVERS; do
+ set +e
+ NTPDATE_OUT=$(ntpdate -qud $NTP_SERVER 2>&1)
+ NTPDATE_EXIT=$?
+ set -e
+ if [[ "$NTPDATE_EXIT" == "0" ]];then
+ NTP_SUCCESS=1
+ break
+ else
+ NTPDATE_OUT_FULL="$NTPDATE_OUT_FULL $NTPDATE_OUT"
+ fi
+ done
+ if [[ "$NTP_SUCCESS" == "0" ]];then
+ echo "FAILURE"
+ echo "$NTPDATE_OUT_FULL"
+ exit 1
+ fi
+ echo "SUCCESS"
+ fi
+}
+
ping_controller_ips "$ping_test_ips"
ping_default_gateways
+if [[ $validate_fqdn == "True" ]];then
+ fqdn_check
+fi
+if [[ $validate_ntp == "True" ]];then
+ ntp_check
+fi