pip-log.txt
# Unit test / coverage reports
+cover
.coverage
.tox
+.testrepository
nosetests.xml
# Translations
--- /dev/null
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./tripleo_heat_templates ./tripleo_heat_templates $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
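+# Illustrative usage note (assumes python-testrepository is installed):
+# "testr run" executes the test_command above, and "testr list-tests"
+# enumerates the tests discovered under ./tripleo_heat_templates.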
--- /dev/null
+parameter_defaults:
+ CephPoolDefaultSize: 1
+++ /dev/null
-# NOTE: This is an environment specific for containers CI. Mainly we
-# deploy non-pacemakerized overcloud. Once we are able to deploy and
-# upgrade pacemakerized and containerized overcloud, we should remove
-# this file and use normal CI multinode environments/scenarios.
-
-resource_registry:
- OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
- OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
-
- # NOTE: This is needed because of upgrades from Ocata to Pike. We
- # deploy the initial environment with Ocata templates, and
- # overcloud-resource-registry.yaml there doesn't have this Docker
- # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
- # remove this.
- OS::TripleO::Services::Docker: OS::Heat::None
-
-parameter_defaults:
- ControllerServices:
- - OS::TripleO::Services::CephMon
- - OS::TripleO::Services::CephOSD
- - OS::TripleO::Services::CinderApi
- - OS::TripleO::Services::CinderScheduler
- - OS::TripleO::Services::CinderVolume
- - OS::TripleO::Services::Docker
- - OS::TripleO::Services::Kernel
- - OS::TripleO::Services::Keystone
- - OS::TripleO::Services::GlanceApi
- - OS::TripleO::Services::HeatApi
- - OS::TripleO::Services::HeatApiCfn
- - OS::TripleO::Services::HeatApiCloudwatch
- - OS::TripleO::Services::HeatEngine
- - OS::TripleO::Services::MySQL
- - OS::TripleO::Services::MySQLClient
- - OS::TripleO::Services::NeutronDhcpAgent
- - OS::TripleO::Services::NeutronL3Agent
- - OS::TripleO::Services::NeutronMetadataAgent
- - OS::TripleO::Services::NeutronServer
- - OS::TripleO::Services::NeutronCorePlugin
- - OS::TripleO::Services::NeutronOvsAgent
- - OS::TripleO::Services::RabbitMQ
- - OS::TripleO::Services::HAproxy
- - OS::TripleO::Services::Keepalived
- - OS::TripleO::Services::Memcached
- - OS::TripleO::Services::Pacemaker
- - OS::TripleO::Services::NovaConductor
- - OS::TripleO::Services::NovaApi
- - OS::TripleO::Services::NovaPlacement
- - OS::TripleO::Services::NovaMetadata
- - OS::TripleO::Services::NovaScheduler
- - OS::TripleO::Services::Ntp
- - OS::TripleO::Services::SwiftProxy
- - OS::TripleO::Services::SwiftStorage
- - OS::TripleO::Services::SwiftRingBuilder
- - OS::TripleO::Services::Snmp
- - OS::TripleO::Services::Timezone
- - OS::TripleO::Services::TripleoPackages
- - OS::TripleO::Services::NovaCompute
- - OS::TripleO::Services::NovaLibvirt
- - OS::TripleO::Services::Sshd
- ControllerExtraConfig:
- nova::compute::libvirt::services::libvirt_virt_type: qemu
- nova::compute::libvirt::libvirt_virt_type: qemu
- # Required for Centos 7.3 and Qemu 2.6.0
- nova::compute::libvirt::libvirt_cpu_mode: 'none'
- #NOTE(gfidente): not great but we need this to deploy on ext4
- #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
- ceph::profile::params::osd_max_object_name_len: 256
- ceph::profile::params::osd_max_object_namespace_len: 64
- SwiftCeilometerPipelineEnabled: False
- Debug: True
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
- OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml
- OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml
- OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml
+ # TODO: Barbican is not yet containerized: https://review.openstack.org/#/c/474327
+ # OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
+ OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
+ OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
- OS::TripleO::Services::SaharaApi: ../../puppet/services/sahara-api.yaml
- OS::TripleO::Services::SaharaEngine: ../../puppet/services/sahara-engine.yaml
- OS::TripleO::Services::MistralApi: ../../puppet/services/mistral-api.yaml
- OS::TripleO::Services::MistralEngine: ../../puppet/services/mistral-engine.yaml
- OS::TripleO::Services::MistralExecutor: ../../puppet/services/mistral-executor.yaml
+ OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+ OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
+ OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+ OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+ OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ # TODO deploy ceph with ceph-ansible: https://review.openstack.org/#/c/465066/
OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
- OS::TripleO::Services::ManilaApi: ../../puppet/services/manila-api.yaml
- OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
+ # NOTE: being containerized here: https://review.openstack.org/#/c/471527/
OS::TripleO::Services::ManilaShare: ../../puppet/services/manila-share.yaml
OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+ # TODO: containerize NeutronBgpVpnApi
OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
--- /dev/null
+This will contain some common templates, but it needs to be added to the RPM spec first.
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
+ deployment_swift_data:
+ type: json
+ default: {}
resources:
deployed-server:
properties:
name: {get_param: name}
software_config_transport: {get_param: software_config_transport}
+ deployment_swift_data: {get_param: deployment_swift_data}
UpgradeInitConfig:
type: OS::Heat::SoftwareConfig
- {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
name:
value: {get_attr: [HostsEntryDeployment, hostname]}
+ os_collect_config:
+ value: {get_attr: [deployed-server, os_collect_config]}
if [ -n "$PUPPET_TAGS" ]; then
TAGS="--tags \"$PUPPET_TAGS\""
fi
+
+ # workaround LP1696283
+ mkdir -p /etc/ssh
+ touch /etc/ssh/ssh_known_hosts
+
FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
# Disables archiving
if [ -z "$NO_ARCHIVE" ]; then
- rm -Rf /var/lib/config-data/${NAME}
-
- # copying etc should be enough for most services
- mkdir -p /var/lib/config-data/${NAME}/etc
- cp -a /etc/* /var/lib/config-data/${NAME}/etc/
-
- # workaround LP1696283
- mkdir -p /var/lib/config-data/${NAME}/etc/ssh
- touch /var/lib/config-data/${NAME}/etc/ssh/ssh_known_hosts
-
- if [ -d /root/ ]; then
- cp -a /root/ /var/lib/config-data/${NAME}/root/
- fi
- if [ -d /var/lib/ironic/tftpboot/ ]; then
- mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
- cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/tftpboot/
- fi
- if [ -d /var/lib/ironic/httpboot/ ]; then
- mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
- cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/httpboot/
- fi
-
- # apache services may files placed in /var/www/
- if [ -d /var/www/ ]; then
- mkdir -p /var/lib/config-data/${NAME}/var/www
- cp -a /var/www/* /var/lib/config-data/${NAME}/var/www/
- fi
+ archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
+ rsync_srcs=""
+ for d in "${archivedirs[@]}"; do
+ if [ -d "$d" ]; then
+ rsync_srcs+=" $d"
+ fi
+ done
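+      # rsync -R (--relative) preserves the full source path under the
+      # destination, e.g. /etc is archived to /var/lib/config-data/${NAME}/etc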
+ rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}
+
+ # Also make a copy of files modified during puppet run
+ # This is useful for debugging
+ mkdir -p /var/lib/config-data/puppet-generated/${NAME}
+ rsync -a -R -0 --delay-updates --delete-after \
+ --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \
+ / /var/lib/config-data/puppet-generated/${NAME}
# Write a checksum of the config-data dir, this is used as a
# salt to trigger container restart when the config changes
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ ctlplane_service_ips:
+ type: json
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
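+# For illustration only: with a hypothetical role list of [Controller,
+# Compute], the loops above render the step 1 condition roughly as
+#
+#   WorkflowTasks_Step1_Enabled:
+#     or:
+#       - not:
+#           equals:
+#             - get_param: [role_data, Controller, service_workflow_tasks, step1]
+#             - ''
+#       - False
+#       - not:
+#           equals:
+#             - get_param: [role_data, Compute, service_workflow_tasks, step1]
+#             - ''
+#       - False
+#
+# i.e. a step is enabled when at least one role defines non-empty
+# service_workflow_tasks for it.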
resources:
- name: bootstrap_server_id
config: {get_file: deploy-steps-playbook.yaml}
+{%- for step in range(1, deploy_steps_max) %}
+# BEGIN service_workflow_tasks handling
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on:
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
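+          # The yaql expression below drops roles whose service_workflow_tasks
+          # is '' (the parameter default), picks each remaining role's
+          # step{{step}} entry, drops nulls, and flattens the per-role lists
+          # into a single task list.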
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
+
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
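+          # always_update re-runs the UPDATE action (and thus the workflow)
+          # on every stack update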
+# END service_workflow_tasks handling
+{% endfor %}
+
{% for role in roles %}
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
- name: Write kolla config json files
copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
with_dict: "{{kolla_config}}"
- - name: Install paunch FIXME remove when packaged
- shell: |
- yum -y install python-pip
- pip install paunch
########################################################
# Bootstrap tasks, only performed on bootstrap_server_id
########################################################
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
- {% for dep in roles %}
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+      # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
- {% endfor %}
- {% endif %}
+ {% endfor %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
{% endfor %}
# END CONFIG STEPS
- {{role.name}}PostConfig:
- type: OS::TripleO::Tasks::{{role.name}}PostConfig
+ # Note, this should be the last step to execute configuration changes.
+ # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+ # after all the previous deployment steps.
+ {{role.name}}ExtraConfigPost:
depends_on:
{% for dep in roles %}
- {{dep.name}}Deployment_Step5
{% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
properties:
- servers: {get_param: servers}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
+ servers: {get_param: [servers, {{role.name}}]}
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- {{role.name}}ExtraConfigPost:
+ # The {{role.name}}PostConfig steps are in charge of
+ # quiescing all services, i.e. in the Controller case,
+ # we should run a full service reload.
+ {{role.name}}PostConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PostConfig
depends_on:
{% for dep in roles %}
- - {{dep.name}}PostConfig
+ - {{dep.name}}ExtraConfigPost
{% endfor %}
- type: OS::TripleO::NodeExtraConfigPost
properties:
- servers: {get_param: [servers, {{role.name}}]}
+ servers: {get_param: servers}
+ input_values:
+ update_identifier: {get_param: DeployIdentifier}
+
{% endfor %}
def docker_arg_map(key, value):
value = str(value).encode('ascii', 'ignore')
+ if len(value) == 0:
+ return ''
+
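+    # guard against empty values, which would otherwise render dangling
+    # arguments such as "--env="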
return {
'environment': "--env=%s" % value,
# 'image': value,
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
aodh_init_log:
- start_order: 0
image: *aodh_image
user: root
volumes:
- /var/log/containers/aodh:/var/log/aodh
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
+ step_3:
aodh_db_sync:
- start_order: 1
image: *aodh_image
net: host
privileged: false
--- /dev/null
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Ceilometer Agent IPMI service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCeilometerIpmiImage:
+ description: image
+ default: 'centos-binary-ceilometer-ipmi:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CeilometerAgentIpmiBase:
+ type: ../../puppet/services/ceilometer-agent-ipmi.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+    description: Role data for the Ceilometer Agent IPMI role.
+ value:
+ service_name: {get_attr: [CeilometerAgentIpmiBase, role_data, service_name]}
+ config_settings: {get_attr: [CeilometerAgentIpmiBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CeilometerAgentIpmiBase, role_data, step_config]
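+      # (&step_config is a YAML anchor; the *step_config alias under
+      # puppet_config below re-uses the same value)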
+ service_config_settings: {get_attr: [CeilometerAgentIpmiBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ceilometer
+ puppet_tags: ceilometer_config
+ step_config: *step_config
+ config_image: &ceilometer_agent_ipmi_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerIpmiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ceilometer-agent-ipmi.json:
+ command: /usr/bin/ceilometer-polling --polling-namespaces ipmi
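+          # kolla's container entrypoint reads this config.json (bind-mounted
+          # into the container), applies any permissions entries, then execs
+          # the command above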
+ docker_config:
+ step_3:
+ ceilometer_init_log:
+ start_order: 0
+ image: *ceilometer_agent_ipmi_image
+ user: root
+ command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+ volumes:
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ step_4:
+ ceilometer_agent_ipmi:
+ image: *ceilometer_agent_ipmi_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_5:
+ ceilometer_gnocchi_upgrade:
+ start_order: 1
+ image: *ceilometer_agent_ipmi_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ command: "/usr/bin/bootstrap_host_exec ceilometer su ceilometer -s /bin/bash -c '/usr/bin/ceilometer-upgrade --skip-metering-database'"
+ upgrade_tasks:
+ - name: Stop and disable ceilometer agent ipmi service
+ tags: step2
+ service: name=openstack-ceilometer-agent-ipmi state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderApiImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CinderBase:
+ type: ../../puppet/services/cinder-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder API role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CinderBase, role_data, step_config]
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_api.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_2:
+ cinder_api_init_logs:
+ image: &cinder_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderApiImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_3:
+ cinder_api_db_sync:
+ image: *cinder_api_image
+ net: host
+ privileged: false
+ detach: false
+ user: root
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/log/containers/cinder:/var/log/cinder
+ command:
+ - '/usr/bin/bootstrap_host_exec'
+ - 'cinder_api'
+ - "su cinder -s /bin/bash -c 'cinder-manage db sync'"
+ step_4:
+ cinder_api:
+ image: *cinder_api_image
+ net: host
+ privileged: false
+ restart: always
+      # NOTE(mandre): kolla image changes the user to 'cinder'; we need it
+      # to be root to run httpd
+ user: root
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/cinder_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/lib/config-data/cinder/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/cinder/var/www/:/var/www/:ro
+ - /var/log/containers/cinder:/var/log/cinder
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/cinder
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable cinder_api service
+ tags: step2
+ service: name=httpd state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Backup service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderBackupImage:
+ description: image
+ default: 'centos-binary-cinder-backup:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CinderBase:
+ type: ../../puppet/services/cinder-backup.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Backup role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CinderBase, role_data, step_config]
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_backup.json:
+ command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/lib/cinder
+ owner: cinder:cinder
+ recurse: true
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_3:
+ cinder_backup_init_logs:
+ start_order: 0
+ image: &cinder_backup_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderBackupImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_4:
+ cinder_backup:
+ image: *cinder_backup_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/cinder_backup.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/lib/config-data/ceph/etc/ceph/:/etc/ceph/:ro #FIXME: we need to generate a ceph.conf with puppet for this
+ - /dev/:/dev/
+ - /run/:/run/
+ - /sys:/sys
+ - /lib/modules:/lib/modules:ro
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/cinder:/var/lib/cinder
+ - /var/log/containers/cinder:/var/log/cinder
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/lib/cinder
+ - /var/log/containers/cinder
+ upgrade_tasks:
+ - name: Stop and disable cinder_backup service
+ tags: step2
+ service: name=openstack-cinder-backup state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Scheduler service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderSchedulerImage:
+ description: image
+ default: 'centos-binary-cinder-scheduler:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CinderBase:
+ type: ../../puppet/services/cinder-scheduler.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Scheduler role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CinderBase, role_data, step_config]
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_scheduler.json:
+ command: /usr/bin/cinder-scheduler --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_2:
+ cinder_scheduler_init_logs:
+ image: &cinder_scheduler_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderSchedulerImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_4:
+ cinder_scheduler:
+ image: *cinder_scheduler_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/cinder_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/log/containers/cinder:/var/log/cinder
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/cinder
+ upgrade_tasks:
+ - name: Stop and disable cinder_scheduler service
+ tags: step2
+ service: name=openstack-cinder-scheduler state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Volume service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderVolumeImage:
+ description: image
+ default: 'centos-binary-cinder-volume:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ # custom parameters for the Cinder volume role
+ CinderEnableIscsiBackend:
+ default: true
+    description: Whether or not to enable the iSCSI backend for Cinder
+ type: boolean
+ CinderLVMLoopDeviceSize:
+ default: 10280
+ description: The size of the loopback file used by the cinder LVM driver.
+ type: number
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CinderBase:
+ type: ../../puppet/services/cinder-volume.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Volume role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CinderBase, role_data, step_config]
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_volume.json:
+ command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_3:
+ cinder_volume_init_logs:
+ start_order: 0
+ image: &cinder_volume_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderVolumeImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_4:
+ cinder_volume:
+ image: *cinder_volume_image
+ net: host
+ privileged: true
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/cinder_volume.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+ - /var/lib/config-data/ceph/etc/ceph/:/etc/ceph/:ro #FIXME: we need to generate a ceph.conf with puppet for this
+ - /dev/:/dev/
+ - /run/:/run/
+ - /sys:/sys
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/cinder:/var/lib/cinder
+ - /var/log/containers/cinder:/var/log/cinder
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/cinder
+ - /var/lib/cinder
+ - name: cinder_enable_iscsi_backend fact
+ set_fact:
+ cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
+ - name: cinder create LVM volume group dd
+ command:
+ list_join:
+ - ''
+ - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
+ - str_replace:
+ template: VALUE
+ params:
+ VALUE: {get_param: CinderLVMLoopDeviceSize}
+ - 'M'
+ args:
+ creates: /var/lib/cinder/cinder-volumes
+ when: cinder_enable_iscsi_backend
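+        # With the default CinderLVMLoopDeviceSize of 10280, the list_join
+        # above renders:
+        #   dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek=10280M
+        # i.e. a ~10G sparse file backing the volume group.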
+ - name: cinder create LVM volume group
+ shell: |
+ if ! losetup /dev/loop2; then
+ losetup /dev/loop2 /var/lib/cinder/cinder-volumes
+ fi
+ if ! pvdisplay | grep cinder-volumes; then
+ pvcreate /dev/loop2
+ fi
+ if ! vgdisplay | grep cinder-volumes; then
+ vgcreate cinder-volumes /dev/loop2
+ fi
+ args:
+ executable: /bin/bash
+ creates: /dev/loop2
+ when: cinder_enable_iscsi_backend
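+        # Together these two tasks provision the loopback-backed
+        # "cinder-volumes" VG used by the iSCSI backend: attach the sparse
+        # file to /dev/loop2, then create the PV and VG on it; the
+        # pvdisplay/vgdisplay greps keep the shell idempotent.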
+ upgrade_tasks:
+ - name: Stop and disable cinder_volume service
+ tags: step2
+ service: name=openstack-cinder-volume state=stopped enabled=no
description: Role data for the collectd role.
value:
service_name: {get_attr: [CollectdBase, role_data, service_name]}
- config_settings: {get_attr: [CollectdBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - get_attr: [CollectdBase, role_data, config_settings]
+ - tripleo::profile::base::metrics::collectd::enable_file_logging: true
+ collectd::plugin::logfile::log_file: /var/log/collectd/collectd.log
step_config: &step_config
get_attr: [CollectdBase, role_data, step_config]
service_config_settings: {get_attr: [CollectdBase, role_data, service_config_settings]}
kolla_config:
/var/lib/kolla/config_files/collectd.json:
command: /usr/sbin/collectd -f
+ permissions:
+ - path: /var/log/collectd
+ owner: collectd:collectd
+ recurse: true
docker_config:
step_3:
collectd:
-
- /var/run/docker.sock:/var/run/docker.sock:rw
- /var/lib/kolla/config_files/collectd.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/collectd/etc/collectd/:/etc/collectd/:ro
+ - /var/lib/config-data/collectd/etc/collectd.conf:/etc/collectd.conf:ro
+ - /var/lib/config-data/collectd/etc/collectd.d:/etc/collectd.d:ro
+ - /var/log/containers/collectd:/var/log/collectd:rw
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/collectd
+ state: directory
upgrade_tasks:
- name: Stop and disable collectd service
tags: step2
service: name=collectd.service state=stopped enabled=no
-
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
congress_init_logs:
- start_order: 0
image: &congress_image
list_join:
- '/'
volumes:
- /var/log/containers/congress:/var/log/congress
command: ['/bin/bash', '-c', 'chown -R congress:congress /var/log/congress']
+ step_3:
congress_db_sync:
- start_order: 1
image: *congress_image
net: host
privileged: false
description: >
Contains a static list of common things necessary for containers
+parameters:
+
+ # Required parameters
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
outputs:
volumes:
description: Common volumes for the containers.
value:
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- # required for bootstrap_host_exec
- - /etc/puppet:/etc/puppet:ro
- # OpenSSL trusted CAs
- - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
- - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
- - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
- - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
- # Syslog socket
- - /dev/log:/dev/log
- - /etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro
+ list_concat:
+ - - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ # required for bootstrap_host_exec
+ - /etc/puppet:/etc/puppet:ro
+ # OpenSSL trusted CAs
+ - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
+ - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
+ - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
+ - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
+ # Syslog socket
+ - /dev/log:/dev/log
+ - /etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro
+ - if:
+ - internal_tls_enabled
+ - - {get_param: InternalTLSCAFile}
+ - null
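+        # when internal TLS is enabled this appends the CA file to the common
+        # volumes; otherwise the null branch is ignored by list_concat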
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Configuration for containerized MySQL clients
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMysqlImage:
+ description: image
+ default: 'centos-binary-mariadb:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
+
+outputs:
+ role_data:
+ description: Role for setting mysql client parameters
+ value:
+ service_name: mysql_client
+ config_settings:
+ tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS}
+ tripleo::profile::base::database::mysql::client::ssl_ca: {get_param: InternalTLSCAFile}
+ # BEGIN DOCKER SETTINGS #
+ step_config: ""
+ puppet_config:
+ config_volume: mysql_client
+ puppet_tags: file # set this even though file is the default
+ step_config: "include ::tripleo::profile::base::database::mysql::client"
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+ # no need for a docker config, this service only generates configuration files
+ docker_config: {}
recurse: true
docker_config:
# Kolla_bootstrap runs before permissions set by kolla_config
- step_2:
+ step_1:
mysql_init_logs:
- start_order: 0
image: *mysql_image
privileged: false
user: root
volumes:
- /var/log/containers/mysql:/var/log/mariadb
command: ['/bin/bash', '-c', 'chown -R mysql:mysql /var/log/mariadb']
+ step_2:
mysql_bootstrap:
- start_order: 1
detach: false
image: *mysql_image
net: host
step_1:
redis_init_logs:
start_order: 0
+ detach: false
image: *redis_image
privileged: false
user: root
- /var/log/containers/redis:/var/log/redis
command: ['/bin/bash', '-c', 'chown -R redis:redis /var/log/redis']
redis:
+ start_order: 1
image: *redis_image
net: host
privileged: false
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized EC2 API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerEc2ApiImage:
+ description: image
+ default: 'centos-binary-ec2-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ Ec2ApiPuppetBase:
+ type: ../../puppet/services/ec2-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the EC2 API role.
+ value:
+ service_name: {get_attr: [Ec2ApiPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [Ec2ApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ec2api
+ puppet_tags: ec2api_api_paste_ini,ec2api_config
+ step_config: *step_config
+ config_image: &ec2_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ec2_api.json:
+ command: /usr/bin/ec2-api
+ permissions:
+ - path: /var/log/ec2api
+ owner: ec2api:ec2api
+ recurse: true
+ /var/lib/kolla/config_files/ec2_api_metadata.json:
+ command: /usr/bin/ec2-api-metadata
+ permissions:
+ - path: /var/log/ec2api # default log dir for metadata service as well
+ owner: ec2api:ec2api
+ recurse: true
+ docker_config:
+ # db sync runs before permissions set by kolla_config
+ step_2:
+ ec2_api_init_logs:
+ image: *ec2_api_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/ec2_api:/var/log/ec2api
+ # mount ec2_api_metadata to "ec2api-metadata" only here to fix
+ # permissions of both directories in one go
+ - /var/log/containers/ec2_api_metadata:/var/log/ec2api-metadata
+ command: ['/bin/bash', '-c', 'chown -R ec2api:ec2api /var/log/ec2api /var/log/ec2api-metadata']
+ step_3:
+ ec2_api_db_sync:
+ image: *ec2_api_image
+ net: host
+ detach: false
+ privileged: false
+ user: root
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+ - /var/log/containers/ec2_api:/var/log/ec2api
+ command: "/usr/bin/bootstrap_host_exec ec2_api su ec2api -s /bin/bash -c '/usr/bin/ec2-api-manage db_sync'"
+ step_4:
+ ec2_api:
+ image: *ec2_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ec2_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+ - /var/log/containers/ec2_api:/var/log/ec2api
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ ec2_api_metadata:
+ image: *ec2_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ec2_api_metadata.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+ - /var/log/containers/ec2_api_metadata:/var/log/ec2api
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent log directories
+ file:
+ path: /var/log/containers/{{ item }}
+ state: directory
+ with_items:
+ - ec2_api
+ - ec2_api_metadata
+ upgrade_tasks:
+ - name: Stop and disable EC2-API services
+ tags: step2
+ service: name={{ item }} state=stopped enabled=no
+ with_items:
+ - openstack-ec2-api
+ - openstack-ec2-api-metadata
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
# Kolla_bootstrap/db_sync runs before permissions set by kolla_config
- step_3:
+ step_2:
glance_init_logs:
- start_order: 0
image: *glance_image
privileged: false
user: root
volumes:
- /var/log/containers/glance:/var/log/glance
command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
+ step_3:
glance_api_db_sync:
- start_order: 1
image: *glance_image
net: host
privileged: false
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
gnocchi_init_log:
- start_order: 0
image: *gnocchi_image
user: root
volumes:
- /var/log/containers/gnocchi:/var/log/gnocchi
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
+ step_3:
gnocchi_db_sync:
- start_order: 1
image: *gnocchi_image
net: host
detach: false
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
heat_init_log:
- start_order: 0
image: *heat_engine_image
user: root
volumes:
- /var/log/containers/heat:/var/log/heat
command: ['/bin/bash', '-c', 'chown -R heat:heat /var/log/heat']
+ step_3:
heat_engine_db_sync:
- start_order: 1
image: *heat_engine_image
net: host
privileged: false
owner: apache:apache
recurse: false
docker_config:
- step_3:
+ step_2:
horizon_fix_perms:
image: *horizon_image
user: root
volumes:
- /var/log/containers/horizon:/var/log/horizon
- /var/lib/config-data/horizon/etc/:/etc/
+ step_3:
horizon:
- start_order: 1
image: *horizon_image
net: host
privileged: false
config_settings:
map_merge:
- get_attr: [IronicApiBase, role_data, config_settings]
+ - apache::default_vhost: false
step_config: &step_config
get_attr: [IronicApiBase, role_data, step_config]
service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
- [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/ironic_api.json:
- command: /usr/bin/ironic-api
+ command: /usr/sbin/httpd -DFOREGROUND
permissions:
- path: /var/log/ironic
owner: ironic:ironic
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
ironic_init_logs:
- start_order: 0
image: &ironic_image
list_join:
- '/'
volumes:
- /var/log/containers/ironic:/var/log/ironic
command: ['/bin/bash', '-c', 'chown -R ironic:ironic /var/log/ironic']
+ step_3:
ironic_db_sync:
start_order: 1
image: *ironic_image
start_order: 10
image: *ironic_image
net: host
- privileged: false
+ user: root
restart: always
volumes:
list_concat:
-
- /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/ironic/etc/ironic:/etc/ironic:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf/:/etc/httpd/conf/:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
+ - /var/lib/config-data/ironic/var/www/:/var/www/:ro
- /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Iscsid service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerIscsidImage:
+ description: image
+ default: 'centos-binary-iscsid:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+outputs:
+ role_data:
+    description: Role data for the Iscsid role.
+ value:
+ service_name: iscsid
+ config_settings: {}
+ step_config: ''
+ service_config_settings: {}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: iscsid
+ #puppet_tags: file
+ step_config: ''
+ config_image: &iscsid_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIscsidImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/iscsid.json:
+ command: /usr/sbin/iscsid -f
+ docker_config:
+ step_3:
+ iscsid:
+ start_order: 2
+ image: *iscsid_image
+ net: host
+ privileged: true
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro
+ - /dev/:/dev/
+ - /run/:/run/
+ - /sys:/sys
+ - /lib/modules:/lib/modules:ro
+ - /etc/iscsi:/etc/iscsi
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /etc/iscsi
+ file:
+ path: /etc/iscsi
+ state: directory
+ - name: stat /lib/systemd/system/iscsid.socket
+ stat: path=/lib/systemd/system/iscsid.socket
+ register: stat_iscsid_socket
+ - name: Stop and disable iscsid.socket service
+ service: name=iscsid.socket state=stopped enabled=no
+ when: stat_iscsid_socket.stat.exists
+ upgrade_tasks:
+ - name: stat /lib/systemd/system/iscsid.service
+ stat: path=/lib/systemd/system/iscsid.service
+ register: stat_iscsid_service
+ - name: Stop and disable iscsid service
+ tags: step2
+ service: name=iscsid state=stopped enabled=no
+ when: stat_iscsid_service.stat.exists
+ - name: stat /lib/systemd/system/iscsid.socket
+ stat: path=/lib/systemd/system/iscsid.socket
+ register: stat_iscsid_socket
+ - name: Stop and disable iscsid.socket service
+ tags: step2
+ service: name=iscsid.socket state=stopped enabled=no
+ when: stat_iscsid_socket.stat.exists
+ metadata_settings: {}
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
# Kolla_bootstrap/db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
keystone_init_log:
- start_order: 0
image: *keystone_image
user: root
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
volumes:
- /var/log/containers/keystone:/var/log/keystone
+ step_3:
keystone_db_sync:
- start_order: 1
image: *keystone_image
net: host
privileged: false
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Manila API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerManilaApiImage:
+ description: image
+ default: 'centos-binary-manila-api:latest'
+ type: string
+ DockerManilaConfigImage:
+ description: image
+ default: 'centos-binary-manila-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ ManilaApiPuppetBase:
+ type: ../../puppet/services/manila-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Manila API role.
+ value:
+ service_name: {get_attr: [ManilaApiPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [ManilaApiPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ {get_attr: [ManilaApiPuppetBase, role_data, step_config]}
+ service_config_settings: {get_attr: [ManilaApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: manila
+ puppet_tags: manila_config,manila_api_paste_ini
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerManilaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/manila_api.json:
+ command: /usr/bin/manila-api --config-file /usr/share/manila/manila-dist.conf --config-file /etc/manila/manila.conf
+ permissions:
+ - path: /var/log/manila
+ owner: manila:manila
+ recurse: true
+ docker_config:
+ step_2:
+ manila_init_logs:
+ image: &manila_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerManilaApiImage} ]
+ user: root
+ volumes:
+ - /var/log/containers/manila:/var/log/manila
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R manila:manila /var/log/manila']
+ step_3:
+ manila_api_db_sync:
+ user: root
+ image: *manila_api_image
+ net: host
+ detach: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+ - /var/log/containers/manila:/var/log/manila
+ command: "/usr/bin/bootstrap_host_exec manila_api su manila -s /bin/bash -c '/usr/bin/manila-manage db sync'"
+ step_4:
+ manila_api:
+ image: *manila_api_image
+ net: host
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/manila_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+ - /var/log/containers/manila:/var/log/manila
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: Create persistent manila logs directory
+ file:
+ path: /var/log/containers/manila
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable manila_api service
+ tags: step2
+ service: name=openstack-manila-api state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Manila Scheduler service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerManilaSchedulerImage:
+ description: image
+ default: 'centos-binary-manila-scheduler:latest'
+ type: string
+ DockerManilaConfigImage:
+ description: image
+ default: 'centos-binary-manila-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ ManilaSchedulerPuppetBase:
+ type: ../../puppet/services/manila-scheduler.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Manila Scheduler role.
+ value:
+ service_name: {get_attr: [ManilaSchedulerPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [ManilaSchedulerPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ {get_attr: [ManilaSchedulerPuppetBase, role_data, step_config]}
+ service_config_settings: {get_attr: [ManilaSchedulerPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: manila
+ puppet_tags: manila_config,manila_scheduler_paste_ini
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerManilaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/manila_scheduler.json:
+ command: /usr/bin/manila-scheduler --config-file /usr/share/manila/manila-dist.conf --config-file /etc/manila/manila.conf
+ permissions:
+ - path: /var/log/manila
+ owner: manila:manila
+ recurse: true
+ docker_config:
+ step_4:
+ manila_scheduler:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerManilaSchedulerImage} ]
+ net: host
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/manila_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+ - /var/log/containers/manila:/var/log/manila
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: Create persistent manila logs directory
+ file:
+ path: /var/log/containers/manila
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable manila_scheduler service
+ tags: step2
+ service: name=openstack-manila-scheduler state=stopped enabled=no
step_1:
memcached_init_logs:
start_order: 0
+ detach: false
image: *memcached_image
privileged: false
user: root
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
mistral_init_logs:
- start_order: 0
image: &mistral_image
list_join:
- '/'
volumes:
- /var/log/containers/mistral:/var/log/mistral
command: ['/bin/bash', '-c', 'chown -R mistral:mistral /var/log/mistral']
+ step_3:
mistral_db_sync:
- start_order: 1
+ start_order: 0
image: *mistral_image
net: host
privileged: false
- /var/log/containers/mistral:/var/log/mistral
command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head'"
mistral_db_populate:
- start_order: 2
+ start_order: 1
image: *mistral_image
net: host
privileged: false
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Multipathd service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMultipathdImage:
+ description: image
+ default: 'centos-binary-multipathd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+outputs:
+ role_data:
+ description: Role data for the Multipathd role.
+ value:
+ service_name: multipathd
+ config_settings: {}
+ step_config: ''
+ service_config_settings: {}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: multipathd
+ #puppet_tags: file
+ step_config: ''
+ config_image: &multipathd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/multipathd.json:
+ command: /usr/sbin/multipathd -d
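+ # NOTE: -d keeps multipathd in the foreground, as expected for the
+ # container's main process.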
+ docker_config:
+ step_3:
+ multipathd:
+ start_order: 1
+ image: *multipathd_image
+ net: host
+ privileged: true
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro
+ - /dev/:/dev/
+ - /run/:/run/
+ - /sys:/sys
+ - /lib/modules:/lib/modules:ro
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/cinder:/var/lib/cinder
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ upgrade_tasks:
+ - name: Stop and disable multipathd service
+ tags: step2
+ service: name=multipathd state=stopped enabled=no
+ metadata_settings: {}
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
neutron_init_logs:
- start_order: 0
image: &neutron_api_image
list_join:
- '/'
volumes:
- /var/log/containers/neutron:/var/log/neutron
command: ['/bin/bash', '-c', 'chown -R neutron:neutron /var/log/neutron']
+ step_3:
neutron_db_sync:
- start_order: 1
image: *neutron_api_image
net: host
privileged: false
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
nova_init_logs:
- start_order: 0
image: &nova_api_image
list_join:
- '/'
volumes:
- /var/log/containers/nova:/var/log/nova
command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
+ step_3:
nova_api_db_sync:
- start_order: 1
+ start_order: 0
image: *nova_api_image
net: host
detach: false
# to be capable of upgrading a baremetal setup. This is to ensure the name
# of the cell is 'default'
nova_api_map_cell0:
- start_order: 2
+ start_order: 1
image: *nova_api_image
net: host
detach: false
volumes: *nova_api_volumes
command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 map_cell0'"
nova_api_create_default_cell:
- start_order: 3
+ start_order: 2
image: *nova_api_image
net: host
detach: false
user: root
command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 create_cell --name=default'"
nova_db_sync:
- start_order: 4
+ start_order: 3
image: *nova_api_image
net: host
detach: false
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Nova Consoleauth service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaConsoleauthImage:
+ description: image
+ default: 'centos-binary-nova-consoleauth:latest'
+ type: string
+ DockerNovaConfigImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ NovaConsoleauthPuppetBase:
+ type: ../../puppet/services/nova-consoleauth.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Consoleauth service.
+ value:
+ service_name: {get_attr: [NovaConsoleauthPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaConsoleauthPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NovaConsoleauthPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [NovaConsoleauthPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_consoleauth.json:
+ command: /usr/bin/nova-consoleauth
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
+ docker_config:
+ step_4:
+ nova_consoleauth:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConsoleauthImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_consoleauth.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /var/log/containers/nova:/var/log/nova
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova_consoleauth service
+ tags: step2
+ service: name=openstack-nova-consoleauth state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Nova Vncproxy service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNovaVncProxyImage:
+ description: image
+ default: 'centos-binary-nova-novncproxy:latest'
+ type: string
+ DockerNovaConfigImage:
+ description: image
+ default: 'centos-binary-nova-base:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ NovaVncProxyPuppetBase:
+ type: ../../puppet/services/nova-vnc-proxy.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Nova Vncproxy service.
+ value:
+ service_name: {get_attr: [NovaVncProxyPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [NovaVncProxyPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NovaVncProxyPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [NovaVncProxyPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: nova
+ puppet_tags: nova_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/nova_vnc_proxy.json:
+ command: /usr/bin/nova-novncproxy --web /usr/share/novnc/
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
+ docker_config:
+ step_4:
+ nova_vnc_proxy:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaVncProxyImage} ]
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_vnc_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /var/log/containers/nova:/var/log/nova
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable nova_vnc_proxy service
+ tags: step2
+ service: name=openstack-nova-novncproxy state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Backup service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderBackupImage:
+ description: image
+ default: 'centos-binary-cinder-backup:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ CinderBackupBackend:
+ default: swift
+ description: The short name of the Cinder Backup backend to use.
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'ceph']
+ CinderBackupRbdPoolName:
+ default: backups
+ type: string
+ CephClientUserName:
+ default: openstack
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ CinderBackupBase:
+ type: ../../../puppet/services/cinder-backup.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ CinderBackupBackend: {get_param: CinderBackupBackend}
+ CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
+ CephClientUserName: {get_param: CephClientUserName}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Backup role.
+ value:
+ service_name: {get_attr: [CinderBackupBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [CinderBackupBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::cinder::backup_bundle::cinder_backup_docker_image: &cinder_backup_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderBackupImage} ]
+ cinder::backup::manage_service: false
+ cinder::backup::enabled: false
+ step_config: ""
+ service_config_settings: {get_attr: [CinderBackupBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: {get_attr: [CinderBackupBase, role_data, step_config]}
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_backup.json:
+ command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/lib/cinder
+ owner: cinder:cinder
+ recurse: true
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_3:
+ cinder_backup_init_logs:
+ start_order: 0
+ image: *cinder_backup_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_5:
+ cinder_backup_init_bundle:
+ start_order: 1
+ detach: false
+ net: host
+ user: root
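+ # NOTE: the command below is a one-off "puppet apply" that creates the
+ # pacemaker bundle resources for cinder-backup; str_replace expands
+ # the TAGS and CONFIG placeholders in the template string.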
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 5}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file_line,concat,augeas,TAGS --debug -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location'
+ CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::backup_bundle'
+ image: *cinder_backup_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/lib/cinder
+ - /var/log/containers/cinder
+ upgrade_tasks:
+ - name: Stop and disable cinder_backup service
+ tags: step2
+ service: name=openstack-cinder-backup state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Volume service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderVolumeImage:
+ description: image
+ default: 'centos-binary-cinder-volume:latest'
+ type: string
+ # we configure all cinder services in the same cinder base container
+ DockerCinderConfigImage:
+ description: image
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ # custom parameters for the Cinder volume role
+ CinderEnableIscsiBackend:
+ default: true
+ description: Whether to enable the iSCSI backend for Cinder
+ type: boolean
+ CinderLVMLoopDeviceSize:
+ default: 10280
+ description: The size, in MB, of the loopback file used by the Cinder LVM driver.
+ type: number
+
+resources:
+
+ CinderBase:
+ type: ../../../puppet/services/cinder-volume.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Volume role.
+ value:
+ service_name: {get_attr: [CinderBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [CinderBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::cinder::volume_bundle::cinder_volume_docker_image: &cinder_volume_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderVolumeImage} ]
+ cinder::volume::manage_service: false
+ cinder::volume::enabled: false
+ cinder::host: hostgroup
+ step_config: ""
+ service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: {get_attr: [CinderBase, role_data, step_config]}
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_volume.json:
+ command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_3:
+ cinder_volume_init_logs:
+ start_order: 0
+ image: *cinder_volume_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_5:
+ cinder_volume_init_bundle:
+ start_order: 0
+ detach: false
+ net: host
+ user: root
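+ # NOTE: same one-off "puppet apply" pattern as cinder_backup_init_bundle
+ # above, here creating the cinder-volume bundle resources.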
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 5}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file_line,concat,augeas,TAGS --debug -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location'
+ CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::volume_bundle'
+ image: *cinder_volume_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/cinder
+ - /var/lib/cinder
+ #FIXME: all of this should be conditional on the CinderEnableIscsiBackend value being set to true
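+ # NOTE: with count=0 and seek=<size>M, the dd below only allocates a
+ # sparse file of CinderLVMLoopDeviceSize MB; no data is written.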
+ - name: cinder create LVM volume group dd
+ command:
+ list_join:
+ - ''
+ - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
+ - str_replace:
+ template: VALUE
+ params:
+ VALUE: {get_param: CinderLVMLoopDeviceSize}
+ - 'M'
+ args:
+ creates: /var/lib/cinder/cinder-volumes
+ - name: cinder create LVM volume group
+ shell: |
+ if ! losetup /dev/loop2; then
+ losetup /dev/loop2 /var/lib/cinder/cinder-volumes
+ fi
+ if ! pvdisplay | grep cinder-volumes; then
+ pvcreate /dev/loop2
+ fi
+ if ! vgdisplay | grep cinder-volumes; then
+ vgcreate cinder-volumes /dev/loop2
+ fi
+ args:
+ executable: /bin/bash
+ creates: /dev/loop2
+ upgrade_tasks:
+ - name: Stop and disable cinder_volume service
+ tags: step2
+ service: name=openstack-cinder-volume state=stopped enabled=no
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
- step_config:
- list_join:
- - "\n"
- - - &noop_pcmk "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
- - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
+ step_config: ""
service_config_settings: {get_attr: [HAProxyBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
puppet_config:
list_join:
- "\n"
- - "exec {'wait-for-settle': command => '/bin/true' }"
- - &noop_firewall "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
- - *noop_pcmk
+ - "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
+ - "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
- 'include ::tripleo::profile::pacemaker::haproxy_bundle'
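+ # NOTE: the noop class/define and noop_resource calls above stub out
+ # firewall and pacemaker resources, so the config-generation run inside
+ # the container does not try to manage iptables or the cluster itself.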
config_image: *haproxy_image
kolla_config:
detach: false
net: host
user: root
+ privileged: true
command:
- '/bin/bash'
- '-c'
- - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
- "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
params:
- TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+ TAGS: 'tripleo::firewall::rule,pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
CONFIG:
list_join:
- ';'
- - - *noop_firewall
- - 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::haproxy_bundle'
+ - - 'include ::tripleo::profile::base::pacemaker'
+ - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
image: *haproxy_image
volumes:
+ # puppet saves iptables rules in /etc/sysconfig
+ - /etc/sysconfig:/etc/sysconfig:rw
+ # saving rules requires access to /usr/libexec/iptables/iptables.init, so just bind-mount
+ # the necessary bits and prevent systemd from trying to reload the service in the container
+ - /usr/libexec/iptables:/usr/libexec/iptables:ro
+ - /usr/libexec/initscripts/legacy-actions:/usr/libexec/initscripts/legacy-actions:ro
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
- /etc/puppet:/tmp/puppet-etc:ro
owner: panko:panko
recurse: true
docker_config:
- step_3:
+ step_2:
panko_init_log:
- start_order: 0
image: *panko_image
user: root
volumes:
- /var/log/containers/panko:/var/log/panko
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
+ step_3:
panko_db_sync:
- start_order: 1
image: *panko_image
net: host
detach: false
step_1:
rabbitmq_init_logs:
start_order: 0
+ detach: false
image: *rabbitmq_image
privileged: false
user: root
command: ['/bin/bash', '-c', 'chown -R rabbitmq:rabbitmq /var/log/rabbitmq']
rabbitmq_bootstrap:
start_order: 1
+ detach: false
image: *rabbitmq_image
net: host
privileged: false
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Sahara API service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSaharaApiImage:
+ description: image
+ default: 'centos-binary-sahara-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ SaharaApiPuppetBase:
+ type: ../../puppet/services/sahara-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Sahara API role.
+ value:
+ service_name: {get_attr: [SaharaApiPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [SaharaApiPuppetBase, role_data, config_settings]
+ - sahara::sync_db: false
+ step_config: &step_config
+ get_attr: [SaharaApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [SaharaApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: sahara
+ puppet_tags: sahara_api_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+ step_config: *step_config
+ config_image: &sahara_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSaharaApiImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/sahara-api.json:
+ command: /usr/bin/sahara-api --config-file /etc/sahara/sahara.conf
+ permissions:
+ - path: /var/lib/sahara
+ owner: sahara:sahara
+ recurse: true
+ - path: /var/log/sahara
+ owner: sahara:sahara
+ recurse: true
+ docker_config:
+ step_3:
+ sahara_db_sync:
+ image: *sahara_image
+ net: host
+ privileged: false
+ detach: false
+ volumes: &sahara_volumes
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/sahara-api.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+ - /lib/modules:/lib/modules:ro
+ - /var/lib/sahara:/var/lib/sahara
+ - /var/log/containers/sahara:/var/log/sahara
+ command: "/usr/bin/bootstrap_host_exec sahara_api su sahara -s /bin/bash -c 'sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head'"
+ step_4:
+ sahara_api:
+ image: *sahara_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: *sahara_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/lib/sahara
+ file:
+ path: /var/lib/sahara
+ state: directory
+ - name: create persistent sahara logs directory
+ file:
+ path: /var/log/containers/sahara
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable sahara_api service
+ tags: step2
+ service: name=openstack-sahara-api state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Sahara Engine service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSaharaEngineImage:
+ description: image
+ default: 'centos-binary-sahara-engine:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ SaharaEnginePuppetBase:
+ type: ../../puppet/services/sahara-engine.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Sahara Engine role.
+ value:
+ service_name: {get_attr: [SaharaEnginePuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [SaharaEnginePuppetBase, role_data, config_settings]
+ - sahara::sync_db: false
+ step_config: &step_config
+ get_attr: [SaharaEnginePuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [SaharaEnginePuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: sahara
+ puppet_tags: sahara_engine_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+ step_config: *step_config
+ config_image: &sahara_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSaharaEngineImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/sahara-engine.json:
+ command: /usr/bin/sahara-engine --config-file /etc/sahara/sahara.conf
+ permissions:
+ - path: /var/lib/sahara
+ owner: sahara:sahara
+ recurse: true
+ - path: /var/log/sahara
+ owner: sahara:sahara
+ recurse: true
+ docker_config:
+ step_4:
+ sahara_engine:
+ image: *sahara_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: &sahara_volumes
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/sahara-engine.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+ - /var/lib/sahara:/var/lib/sahara
+ - /var/log/containers/sahara:/var/log/sahara
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/lib/sahara
+ file:
+ path: /var/lib/sahara
+ state: directory
+ - name: create persistent sahara logs directory
+ file:
+ path: /var/log/containers/sahara
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable sahara_engine service
+ tags: step2
+ service: name=openstack-sahara-engine state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Containerized Sensu client service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerSensuClientImage:
+ description: image
+ default: 'centos-binary-sensu-client:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ SensuDockerCheckCommand:
+ type: string
+ default: |
+ for i in $(docker ps --format '{{.ID}}'); do
+ if result=$(docker inspect --format='{{.State.Health.Status}}' $i 2>/dev/null); then
+ if [ "$result" != 'healthy' ]; then
+ echo "$(docker inspect --format='{{.Name}}' $i) ($i): $(docker inspect --format='{{json .State}}' $i)" && exit 2;
+ fi
+ fi
+ done
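+ # NOTE: the default check above iterates over all running containers
+ # and exits 2 (critical, in Sensu/Nagios convention) as soon as one
+ # reports a non-healthy Docker health status.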
+ SensuDockerCheckInterval:
+ type: number
+ description: The frequency in seconds the docker health check is executed.
+ default: 10
+ SensuDockerCheckHandlers:
+ default: []
+ description: The Sensu event handlers to use for events
+ created by the docker health check.
+ type: comma_delimited_list
+ SensuDockerCheckOccurrences:
+ type: number
+ description: The number of event occurrences before the sensu-plugin-aware handler takes action.
+ default: 3
+ SensuDockerCheckRefresh:
+ type: number
+ description: The number of seconds sensu-plugin-aware handlers should wait before taking a second action.
+ default: 90
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ SensuClientBase:
+ type: ../../puppet/services/monitoring/sensu-client.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Sensu client role.
+ value:
+ service_name: {get_attr: [SensuClientBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [SensuClientBase, role_data, config_settings]
+ - sensu::checks:
+ check-docker-health:
+ standalone: true
+ command: {get_param: SensuDockerCheckCommand}
+ interval: {get_param: SensuDockerCheckInterval}
+ handlers: {get_param: SensuDockerCheckHandlers}
+ occurrences: {get_param: SensuDockerCheckOccurrences}
+ refresh: {get_param: SensuDockerCheckRefresh}
+ step_config: &step_config
+ get_attr: [SensuClientBase, role_data, step_config]
+ service_config_settings: {get_attr: [SensuClientBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: sensu
+ puppet_tags: sensu_rabbitmq_config,sensu_client_config,sensu_check_config,sensu_check
+ step_config: *step_config
+ config_image: &sensu_client_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSensuClientImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/sensu-client.json:
+ command: /usr/bin/sensu-client -d /etc/sensu/conf.d/ -l /var/log/sensu/sensu-client.log
+ permissions:
+ - path: /var/log/sensu
+ owner: sensu:sensu
+ recurse: true
+ docker_config:
+ step_3:
+ sensu_client:
+ image: *sensu_client_image
+ net: host
+ privileged: true
+ # NOTE(mmagr): the kolla image changes the user to 'sensu'; we need to
+ # run as root to have rw permission on docker.sock so the
+ # "docker inspect" command can succeed
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/run/docker.sock:/var/run/docker.sock:rw
+ - /var/lib/kolla/config_files/sensu-client.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/sensu/etc/sensu/:/etc/sensu/:ro
+ - /var/log/containers/sensu:/var/log/sensu:rw
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/sensu
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable sensu-client service
+ tags: step2
+ service: name=sensu-client.service state=stopped enabled=no
+++ /dev/null
-heat_template_version: pike
-
-description: >
- Utility stack to convert an array of services into a set of combined
- role configs.
-
-parameters:
- Services:
- default: []
- description: |
- List nested stack service templates.
- type: comma_delimited_list
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
- DefaultPasswords:
- default: {}
- description: Mapping of service -> default password. Used to help
- pass top level passwords managed by Heat into services.
- type: json
- RoleName:
- default: ''
- description: Role name on which the service is applied
- type: string
- RoleParameters:
- default: {}
- description: Parameters specific to the role
- type: json
-
-resources:
-
- PuppetServices:
- type: ../../puppet/services/services.yaml
- properties:
- Services: {get_param: Services}
- ServiceNetMap: {get_param: ServiceNetMap}
- EndpointMap: {get_param: EndpointMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- RoleName: {get_param: RoleName}
- RoleParameters: {get_param: RoleParameters}
-
- ServiceChain:
- type: OS::Heat::ResourceChain
- properties:
- resources: {get_param: Services}
- concurrent: true
- resource_properties:
- ServiceNetMap: {get_param: ServiceNetMap}
- EndpointMap: {get_param: EndpointMap}
- DefaultPasswords: {get_param: DefaultPasswords}
- RoleName: {get_param: RoleName}
- RoleParameters: {get_param: RoleParameters}
-
-outputs:
- role_data:
- description: Combined Role data for this set of services.
- value:
- service_names:
- {get_attr: [PuppetServices, role_data, service_names]}
- monitoring_subscriptions:
- {get_attr: [PuppetServices, role_data, monitoring_subscriptions]}
- logging_sources:
- {get_attr: [PuppetServices, role_data, logging_sources]}
- logging_groups:
- {get_attr: [PuppetServices, role_data, logging_groups]}
- service_config_settings:
- {get_attr: [PuppetServices, role_data, service_config_settings]}
- config_settings:
- {get_attr: [PuppetServices, role_data, config_settings]}
- global_config_settings:
- {get_attr: [PuppetServices, role_data, global_config_settings]}
- step_config:
- {get_attr: [ServiceChain, role_data, step_config]}
- puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
- kolla_config:
- map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
- docker_config:
- {get_attr: [ServiceChain, role_data, docker_config]}
- docker_puppet_tasks:
- {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
- host_prep_tasks:
- yaql:
- # Note we use distinct() here to filter any identical tasks
- expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
- data: {get_attr: [ServiceChain, role_data]}
- upgrade_tasks:
- yaql:
- # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
- expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
- data: {get_attr: [ServiceChain, role_data]}
- upgrade_batch_tasks:
- yaql:
- # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
- expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
- data: {get_attr: [ServiceChain, role_data]}
- service_metadata_settings:
- get_attr: [PuppetServices, role_data, service_metadata_settings]
default: true
description: 'Use a local directory for Swift storage services when building rings'
type: boolean
+ SwiftRingGetTempurl:
+ default: ''
+ description: A temporary Swift URL to download rings from.
+ type: string
+ SwiftRingPutTempurl:
+ default: ''
+ description: A temporary Swift URL to upload rings to.
+ type: string
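+ # NOTE: these tempurls let the ringbuilder download the existing rings
+ # before rebuilding and upload the result afterwards, keeping the
+ # rings consistent across nodes.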
resources:
description: Role data for Swift Ringbuilder configuration in containers.
value:
service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]}
- config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+ - tripleo::profile::base::swift::ringbuilder::skip_consistency_check: true
step_config: &step_config
get_attr: [SwiftRingbuilderBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
puppet_config:
config_volume: 'swift'
- puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
+ puppet_tags: exec,fetch_swift_ring_tarball,extract_swift_ring_tarball,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance,create_swift_ring_tarball,upload_swift_ring_tarball
step_config: *step_config
config_image:
list_join:
via parameter_defaults in the resource registry. This
mapping overrides those in ServiceNetMapDefaults.
type: json
+ SwiftRawDisks:
+ default: {}
+ description: 'A hash of additional raw devices to use as Swift backend (e.g. {sdb: {}})'
+ type: json
+
resources:
description: Role data for the swift storage services.
value:
service_name: {get_attr: [SwiftStorageBase, role_data, service_name]}
- config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - {get_attr: [SwiftStorageBase, role_data, config_settings]}
+ # FIXME (cschwede): re-enable this once checks works inside containers
+ - swift::storage::all::mount_check: false
step_config: &step_config
get_attr: [SwiftStorageBase, role_data, step_config]
service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]}
with_items:
- /var/log/containers/swift
- /srv/node
+ - name: Format and mount devices defined in SwiftRawDisks
+ mount:
+ name: /srv/node/{{ item }}
+ src: /dev/{{ item }}
+ fstype: xfs
+ opts: noatime
+ state: mounted
+ with_items:
+ - repeat:
+ template: 'DEVICE'
+ for_each:
+ DEVICE: {get_param: SwiftRawDisks}
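+ # NOTE: the repeat function above yields one mount entry per key in
+ # the SwiftRawDisks hash, e.g. sdb -> /srv/node/sdb.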
upgrade_tasks:
- name: Stop and disable swift storage services
tags: step2
recurse: true
docker_config:
# db sync runs before permissions set by kolla_config
- step_3:
+ step_2:
tacker_init_logs:
- start_order: 0
image: &tacker_image
list_join:
- '/'
volumes:
- /var/log/containers/tacker:/var/log/tacker
command: ['/bin/bash', '-c', 'chown -R tacker:tacker /var/log/tacker']
+ step_3:
tacker_db_sync:
- start_order: 1
image: *tacker_image
net: host
privileged: false
CinderDellScSecondarySanLogin: 'Admin'
CinderDellScSecondarySanPassword: ''
CinderDellScSecondaryScApiPort: 3033
+ CinderDellScExcludedDomainIp: ''
+# *************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/storage/cinder-netapp-config.yaml
+# instead.
+# *************************************************************************************
# A Heat environment file which can be used to enable a
# Cinder NetApp backend, configured via puppet
resource_registry:
OS::TripleO::PostDeploySteps: ../docker/post.yaml
OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
- OS::TripleO::Services: ../docker/services/services.yaml
-
parameter_defaults:
- # Defaults to 'tripleoupstream'. Specify a local docker registry
- # Example: 192.168.24.1:8787/tripleoupstream
- DockerNamespace: tripleoupstream
- DockerNamespaceIsRegistry: false
+ # To specify a local docker registry, enable these
+ # where 192.168.24.1 is the host running docker-distribution
+ #DockerNamespace: 192.168.24.1:8787/tripleoupstream
+ #DockerNamespaceIsRegistry: true
ComputeServices:
- OS::TripleO::Services::CACerts
# This can be used when you don't want to run puppet on the host,
# e.g. Atomic, but it has been replaced with OS::TripleO::Services::Docker
# OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
- OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
# The compute node still needs extra initialization steps
OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
+ OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+
#NOTE (dprince) add roles to be docker enabled as we support them
OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
OS::TripleO::Services::NovaApi: ../docker/services/nova-api.yaml
OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
+ OS::TripleO::Services::NovaConsoleauth: ../docker/services/nova-consoleauth.yaml
OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
+ OS::TripleO::Services::NovaVncProxy: ../docker/services/nova-vnc-proxy.yaml
OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
- OS::TripleO::Services::HAProxy: ../docker/services/haproxy.yaml
+ OS::TripleO::Services::MySQLClient: ../docker/services/database/mysql-client.yaml
OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
OS::TripleO::Services::CeilometerAgentCentral: ../docker/services/ceilometer-agent-central.yaml
+ OS::TripleO::Services::CeilometerAgentIpmi: ../docker/services/ceilometer-agent-ipmi.yaml
OS::TripleO::Services::CeilometerAgentCompute: ../docker/services/ceilometer-agent-compute.yaml
OS::TripleO::Services::CeilometerAgentNotification: ../docker/services/ceilometer-agent-notification.yaml
OS::TripleO::Services::Horizon: ../docker/services/horizon.yaml
+ OS::TripleO::Services::Iscsid: ../docker/services/iscsid.yaml
+ OS::TripleO::Services::Multipathd: ../docker/services/multipathd.yaml
+ # FIXME: Had to remove these to unblock containers CI. They should be put back when fixed.
+ # OS::TripleO::Services::CinderApi: ../docker/services/cinder-api.yaml
+ # OS::TripleO::Services::CinderScheduler: ../docker/services/cinder-scheduler.yaml
+ # OS::TripleO::Services::CinderBackup: ../docker/services/cinder-backup.yaml
+ # OS::TripleO::Services::CinderVolume: ../docker/services/cinder-volume.yaml
OS::TripleO::PostDeploySteps: ../docker/post.yaml
OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
- OS::TripleO::Services: ../docker/services/services.yaml
-
parameter_defaults:
# To specify a local docker registry, enable these
# where 192.168.24.1 is the host running docker-distribution
+# ********************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/enable-tls.yaml instead.
+# ********************************************************************************
# Use this environment to pass in certificates for SSL deployments.
# For these values to take effect, one of the tls-endpoints-*.yaml environments
# must also be used.
parameter_defaults:
+ HorizonSecureCookies: True
SSLCertificate: |
The contents of your certificate go here
SSLIntermediateCertificate: ''
--- /dev/null
+resource_registry:
+{% for role in roles %}
+ OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/host_config_and_reboot.yaml
+{% endfor %}
+
+#parameter_defaults:
+ # Note: There are no global parameters which can be applied to all roles,
+ # as these configurations have to be specific to each role.
+
+ # Sample parameters for Compute and ComputeOvsDpdk roles
+ #ComputeParameters:
+ #KernelArgs: ""
+ #TunedProfileName: ""
+ #HostIsolatedCoreList: ""
+ #ComputeOvsDpdkParameters:
+ #KernelArgs: ""
+ #TunedProfileName: ""
+ #HostIsolatedCoreList: ""
+++ /dev/null
-resource_registry:
-# Create the registry only for roles with the word "Compute" in it. Like ComputeOvsDpdk, ComputeSriov, etc.,
-{%- for role in roles -%}
-{% if "Compute" in role.name %}
- OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/{{role.name.lower()}}-host_config_and_reboot.yaml
-{%- endif -%}
-{% endfor %}
-
-#parameter_defaults:
- # Sample parameters for Compute and ComputeOvsDpdk roles
- #ComputeKernelArgs: ""
- #ComputeTunedProfileName: ""
- #ComputeHostCpuList: ""
- #ComputeOvsDpdkKernelArgs: ""
- #ComputeOvsDpdkTunedProfileName: ""
- #ComputeOvsDpdkHostCpuList: ""
- OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::Docker
+ - OS::TripleO::Services::Iscsid
+# **************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/inject-trust-anchor-hiera.yaml
+# instead.
+# **************************************************************************************
parameter_defaults:
CAMap:
first-ca-name:
+# ********************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/inject-trust-anchor.yaml
+# instead.
+# ********************************************************************************
parameter_defaults:
SSLRootCertificate: |
The contents of your root CA certificate go here
--- /dev/null
+{%- set primary_role = [roles[0]] -%}
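+{#- Default to the first role; the loop below replaces it with the role
+    tagged both 'primary' and 'controller' when one exists. -#}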
+{%- for role in roles -%}
+ {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+ {%- set _ = primary_role.pop() -%}
+ {%- set _ = primary_role.append(role) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# Enable the creation of Neutron networks for isolated Overcloud
+# traffic and configure each role to assign ports (related
+# to that role) on these networks.
+# primary role is: {{primary_role_name}}
+resource_registry:
+ # networks as defined in network_data.yaml
+ {%- for network in networks if network.enabled|default(true) %}
+ OS::TripleO::Network::{{network.name}}: ../network/{{network.name_lower|default(network.name.lower())}}.yaml
+ {%- endfor %}
+
+ # Port assignments for the VIPs
+ {%- for network in networks if network.vip %}
+ OS::TripleO::Network::Ports::{{network.name}}VipPort: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+ {%- endfor %}
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+ OS::TripleO::{{primary_role_name}}::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+{%- for role in roles %}
+ # Port assignments for the {{role.name}}
+ {%- for network in networks %}
+ {%- if network.name in role.networks|default([]) and network.enabled|default(true) %}
+ OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+ {%- else %}
+ OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/noop.yaml
+ {%- endif %}
+ {%- endfor %}
+{%- endfor %}
+++ /dev/null
-# Enable the creation of Neutron networks for isolated Overcloud
-# traffic and configure each role to assign ports (related
-# to that role) on these networks.
-resource_registry:
- OS::TripleO::Network::External: ../network/external.yaml
- OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
- OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
- OS::TripleO::Network::Storage: ../network/storage.yaml
- OS::TripleO::Network::Tenant: ../network/tenant.yaml
- # Management network is optional and disabled by default.
- # To enable it, include environments/network-management.yaml
- #OS::TripleO::Network::Management: ../network/management.yaml
-
- # Port assignments for the VIPs
- OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
- OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
-
- # Port assignments for the controller role
- OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
- OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
- #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the compute role
- OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
- OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml
- #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the ceph storage role
- OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
- OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the swift storage role
- OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
- # Port assignments for the block storage role
- OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
- OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
- OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
- OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
- OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
- #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable the Neutron MidoNet Services
+# description: |
+# A Heat environment that can be used to deploy MidoNet Services
+parameter_defaults:
+ # The Cassandra native transport (CQL) port for client connections
+ # Type: string
+ CassandraClientPort: 9042
+
+ # The port for the Thrift RPC service, which is used for client connections
+ # Type: string
+ CassandraClientPortThrift: 9160
+
+ # The SSL port for encrypted communication. Unused unless enabled in encryption_options
+ # Type: string
+ CassandraSslStoragePort: 7001
+
+ # The Cassandra port for inter-node communication
+ # Type: string
+ CassandraStoragePort: 7000
+
+ # Name of the tunnel zone used to tunnel packets
+ # Type: string
+ TunnelZoneName: tunnelzone_tripleo
+
+ # Type of the tunnels on the overlay. Choose between `gre` and `vxlan`
+ # Type: string
+ TunnelZoneType: vxlan
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # Whether to enable the Cassandra cluster on the Controller
+ # Type: boolean
+ EnableCassandraOnController: True
+
+ # Whether to enable the Zookeeper cluster on the Controller
+ # Type: boolean
+ EnableZookeeperOnController: True
+
+ # The core plugin for Neutron. The value should be the entrypoint to be loaded
+ # from the neutron.core_plugins namespace.
+ # Type: string
+ NeutronCorePlugin: midonet.neutron.plugin_v1.MidonetPluginV2
+
+ # If True, DHCP provides a metadata route to VMs.
+ # Type: boolean
+ NeutronEnableIsolatedMetadata: True
+
+ # *********************
+ # End static parameters
+ # *********************
+resource_registry:
+ OS::TripleO::AllNodesExtraConfig: ../../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../../net-config-linux-bridge.yaml
+ OS::TripleO::Services::ComputeNeutronCorePlugin: ../../puppet/services/neutron-compute-plugin-midonet.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+# ******************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/networking/neutron-midonet
+# instead.
+# ******************************************************************************
# A Heat environment that can be used to deploy MidoNet Services
resource_registry:
OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
--- /dev/null
+# A Heat environment file which can be used to enable OVN
+# extensions, configured via puppet
+resource_registry:
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginML2OVN
+ OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-ovn.yaml
+ OS::TripleO::Services::OVNDBs: ../puppet/services/pacemaker/ovn-dbs.yaml
+ # Disabling Neutron services that overlap with OVN
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+
+parameter_defaults:
+ NeutronMechanismDrivers: ovn
+ OVNVifType: ovs
+ OVNNeutronSyncMode: log
+ OVNQosDriver: ovn-qos
+ OVNTunnelEncapType: geneve
+ NeutronEnableDHCPAgent: false
+ NeutronTypeDrivers: 'geneve,vxlan,vlan,flat'
+ NeutronNetworkType: 'geneve'
+ NeutronServicePlugins: 'qos,ovn-router'
+ NeutronVniRanges: ['1:65536', ]
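+
+# As a usage sketch (file path illustrative), this environment is passed to
+# the deploy command alongside the base templates:
+#   openstack overcloud deploy --templates -e <path-to-this-environment>.yaml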
--- /dev/null
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR and DPDK
+resource_registry:
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+ OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
+ OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+
+parameter_defaults:
+ NeutronEnableForceMetadata: true
+ NeutronMechanismDrivers: 'opendaylight_v2'
+ NeutronServicePlugins: 'odl-router_v2'
+ NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+ ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+ ## This can be done using ComputeKernelArgs as shown below.
+ ComputeParameters:
+ #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ ## Attempting to deploy DPDK without appropriate values for the parameters below may lead to unstable deployments
+ ## due to CPU contention of DPDK PMD threads.
+ OvsEnableDpdk: True
+ ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on overcloud compute nodes and set the following parameters:
+ #OvsDpdkSocketMemory: "" # Sets the amount of hugepage memory to assign per NUMA node.
+ # It is recommended to use the socket closest to the PCIe slot used for the
+ # desired DPDK NIC. The format is a comma-separated per-socket string, such as:
+ # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+ #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+ #OvsPmdCoreList: "" # List or range of CPU cores for PMD threads to be pinned to. Note: the
+ # NIC's socket locality, the number of hyper-threaded logical cores, and the
+ # desired number of PMD threads all play a role in configuring this setting.
+ # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+ # If hyper-threading is enabled, specify both logical cores belonging to each
+ # physical core. Also, specifying more than one core spawns multiple PMD
+ # threads, which may improve dataplane performance.
+ #NovaVcpuPinSet: "" # Cores to pin Nova instances to. For maximum performance, select cores
+ # on the same NUMA node(s) selected for previous settings.
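+ # A filled-in sketch of the settings above for a hypothetical dual-socket
+ # host with the DPDK NIC on socket 0 (all values illustrative, not tuned
+ # for any particular hardware):
+ #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048 isolcpus=2-19"
+ #OvsDpdkSocketMemory: "1024,0"
+ #OvsDpdkDriverType: "vfio-pci"
+ #OvsPmdCoreList: "2,3"
+ #NovaVcpuPinSet: "4-19"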
-## A Heat environment that can be used to deploy DPDK with OVS
+# A Heat environment that can be used to deploy DPDK with OVS
+# Deploying DPDK requires enabling hugepages for the overcloud nodes
resource_registry:
OS::TripleO::Services::ComputeNeutronOvsAgent: ../puppet/services/neutron-ovs-dpdk-agent.yaml
parameter_defaults:
- ## NeutronDpdkCoreList and NeutronDpdkMemoryChannels are REQUIRED settings.
- ## Attempting to deploy DPDK without appropriate values will cause deployment to fail or lead to unstable deployments.
- #NeutronDpdkCoreList: ""
- #NeutronDpdkMemoryChannels: ""
-
NeutronDatapathType: "netdev"
NeutronVhostuserSocketDir: "/var/lib/vhost_sockets"
-
- #NeutronDpdkSocketMemory: ""
- #NeutronDpdkDriverType: "vfio-pci"
- #NovaReservedHostMemory: 4096
- #NovaVcpuPinSet: ""
-
+ NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+ ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+ ## This can be done using ComputeKernelArgs as shown below.
+ #ComputeParameters:
+ #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ ## Attempting to deploy DPDK without appropriate values for the parameters below may lead to unstable deployments
+ ## due to CPU contention of DPDK PMD threads.
+ ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on overcloud compute nodes and set the following parameters:
+ #OvsDpdkSocketMemory: "" # Sets the amount of hugepage memory to assign per NUMA node.
+ # It is recommended to use the socket closest to the PCIe slot used for the
+ # desired DPDK NIC. The format is a comma-separated per-socket string, such as:
+ # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+ #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+ #OvsPmdCoreList: "" # List or range of CPU cores for PMD threads to be pinned to. Note: the
+ # NIC's socket locality, the number of hyper-threaded logical cores, and the
+ # desired number of PMD threads all play a role in configuring this setting.
+ # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+ # If hyper-threading is enabled, specify both logical cores belonging to each
+ # physical core. Also, specifying more than one core spawns multiple PMD
+ # threads, which may improve dataplane performance.
+ #NovaVcpuPinSet: "" # Cores to pin Nova instances to. For maximum performance, select cores
+ # on the same NUMA node(s) selected for previous settings.
--- /dev/null
+# An environment which creates an Overcloud without the use of pacemaker
+# (i.e. only with keepalived and systemd for all resources)
+resource_registry:
+ OS::TripleO::Tasks::ControllerPreConfig: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPostConfig: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: OS::Heat::None
+
+ OS::TripleO::Services::CinderVolume: ../puppet/services/cinder-volume.yaml
+ OS::TripleO::Services::RabbitMQ: ../puppet/services/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../puppet/services/haproxy.yaml
+ OS::TripleO::Services::Redis: ../puppet/services/database/redis.yaml
+ OS::TripleO::Services::MySQL: ../puppet/services/database/mysql.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+ OS::TripleO::Services::Pacemaker: OS::Heat::None
+ OS::TripleO::Services::PacemakerRemote: OS::Heat::None
+
--- /dev/null
+resource_registry:
+ OS::TripleO::AllNodes::SoftwareConfig: OS::Heat::None
+ OS::TripleO::PostDeploySteps: OS::Heat::None
+ OS::TripleO::DefaultPasswords: OS::Heat::None
+ OS::TripleO::RandomString: OS::Heat::None
+ OS::TripleO::AllNodesDeployment: OS::Heat::None
+
+parameter_defaults:
+ # Deploy no services
+{% for role in roles %}
+ {{role.name}}Services: []
+{% endfor %}
+
+ # Consistent Hostname format
+ ControllerHostnameFormat: overcloud-controller-%index%
+ ComputeHostnameFormat: overcloud-novacompute-%index%
+ ObjectStorageHostnameFormat: overcloud-objectstorage-%index%
+ CephStorageHostnameFormat: overcloud-cephstorage-%index%
+ BlockStorageHostnameFormat: overcloud-blockstorage-%index%
--- /dev/null
+parameter_defaults:
+ # Consistent Hostname format
+ ControllerDeployedServerHostnameFormat: overcloud-controller-%index%
+ ComputeDeployedServerHostnameFormat: overcloud-novacompute-%index%
+ ObjectStorageDeployedServerHostnameFormat: overcloud-objectstorage-%index%
+ CephStorageDeployedServerHostnameFormat: overcloud-cephstorage-%index%
+ BlockStorageDeployedServerHostnameFormat: overcloud-blockstorage-%index%
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Custom Hostnames
+# description: |
+# Hostname format for each role
+# Note: %index% is translated into the index of the node, e.g. 0/1/2,
+# and %stackname% is replaced with OS::stack_name in the template below.
+# If you want to use the heat generated names, pass '' (empty string).
+parameter_defaults:
+ # Format for BlockStorage node hostnames. Note: %index% is translated into the index of the node, e.g. 0/1/2, and %stackname% is replaced with the stack name, e.g. overcloud.
+ # Type: string
+ BlockStorageHostnameFormat: '%stackname%-blockstorage-%index%'
+
+ # Format for CephStorage node hostnames. Note: %index% is translated into the index of the node, e.g. 0/1/2, and %stackname% is replaced with the stack name, e.g. overcloud.
+ # Type: string
+ CephStorageHostnameFormat: '%stackname%-cephstorage-%index%'
+
+ # Format for Compute node hostnames. Note: %index% is translated into the index of the node, e.g. 0/1/2, and %stackname% is replaced with the stack name, e.g. overcloud.
+ # Type: string
+ ComputeHostnameFormat: '%stackname%-novacompute-%index%'
+
+ # Format for Controller node hostnames. Note: %index% is translated into the index of the node, e.g. 0/1/2, and %stackname% is replaced with the stack name, e.g. overcloud.
+ # Type: string
+ ControllerHostnameFormat: '%stackname%-controller-%index%'
+
+ # Format for ObjectStorage node hostnames. Note: %index% is translated into the index of the node, e.g. 0/1/2, and %stackname% is replaced with the stack name, e.g. overcloud.
+ # Type: string
+ ObjectStorageHostnameFormat: '%stackname%-objectstorage-%index%'
+
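+ # As a worked example: for a stack named overcloud (the default) with three
+ # Controller nodes, the format above yields overcloud-controller-0,
+ # overcloud-controller-1 and overcloud-controller-2.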
+# ******************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/storage/ceph-external.yaml
+# instead.
+# ******************************************************************************
# A Heat environment file which can be used to enable the
# use of an externally managed Ceph cluster.
resource_registry:
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+ OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
--- /dev/null
+
+resource_registry:
+ OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
resource_registry:
OS::TripleO::Services::UndercloudCeilometerAgentCentral: ../../docker/services/ceilometer-agent-central.yaml
OS::TripleO::Services::UndercloudCeilometerAgentNotification: ../../docker/services/ceilometer-agent-notification.yaml
+ OS::TripleO::Services::UndercloudCeilometerAgentIpmi: ../../docker/services/ceilometer-agent-ipmi.yaml
resource_registry:
OS::TripleO::Services::IronicApi: ../../puppet/services/ironic-api.yaml
OS::TripleO::Services::IronicConductor: ../../puppet/services/ironic-conductor.yaml
- OS::TripleO::Services::IronicPxe: ../../puppet/services/ironic-pxe.yaml
OS::TripleO::Services::NovaIronic: ../../puppet/services/nova-ironic.yaml
+parameter_defaults:
+ NovaSchedulerDiscoverHostsInCellsInterval: 15
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable SSL on OpenStack Public Endpoints
+# description: |
+# Use this environment to pass in certificates for SSL deployments.
+# For these values to take effect, one of the tls-endpoints-*.yaml environments
+# must also be used.
+parameter_defaults:
+ # The content of the SSL certificate (without the key) in PEM format.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ SSLCertificate: |
+ The contents of your certificate go here
+
+ # The content of an SSL intermediate CA certificate in PEM format.
+ # Type: string
+ SSLIntermediateCertificate: ''
+
+ # The content of the SSL Key in PEM format.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ SSLKey: |
+ The contents of the private key go here
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # The filepath of the certificate as it will be stored on the controller.
+ # Type: string
+ DeployedSSLCertificatePath: /etc/pki/tls/private/overcloud_endpoint.pem
+
+ # *********************
+ # End static parameters
+ # *********************
+resource_registry:
+ OS::TripleO::NodeTLSData: ../../puppet/extraconfig/tls/tls-cert-inject.yaml
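+
+# As a sketch for test deployments only (command and filenames illustrative;
+# production should use a CA-signed certificate), a self-signed pair for
+# SSLCertificate/SSLKey can be generated with:
+#   openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
+#     -keyout server.key -out server.crt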
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Inject SSL Trust Anchor on Overcloud Nodes
+# description: |
+# When using an SSL certificate signed by a CA that is not in the default
+# list of CAs, this environment allows adding a custom CA certificate to
+# the overcloud nodes.
+parameter_defaults:
+ # Map containing the CA certs and information needed for deploying them.
+ # Type: json
+ CAMap:
+ first-ca-name:
+ content: |
+ The content of the CA cert goes here
+ second-ca-name:
+ content: |
+ The content of the CA cert goes here
+
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Inject SSL Trust Anchor on Overcloud Nodes
+# description: |
+# When using an SSL certificate signed by a CA that is not in the default
+# list of CAs, this environment allows adding a custom CA certificate to
+# the overcloud nodes.
+parameter_defaults:
+ # The content of a CA's SSL certificate file in PEM format. This is evaluated on the client side.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ SSLRootCertificate: |
+ The contents of your certificate go here
+
+resource_registry:
+ OS::TripleO::NodeTLSCAData: ../../puppet/extraconfig/tls/ca-inject.yaml
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy Public SSL Endpoints as DNS Names
+# description: |
+# Use this environment when deploying an SSL-enabled overcloud where the public
+# endpoint is a DNS name.
+parameter_defaults:
+ # Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.
+ # Type: json
+ EndpointMap:
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+ MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+
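+# As a worked example: CLOUDNAME is substituted with the deployment's public
+# DNS name and IP_ADDRESS with the relevant VIP, so with a cloud name of
+# overcloud.example.com (illustrative) KeystonePublic resolves to
+# https://overcloud.example.com:13000.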
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy Public SSL Endpoints as IP Addresses
+# description: |
+# Use this environment when deploying an SSL-enabled overcloud where the public
+# endpoint is an IP address.
+parameter_defaults:
+ # Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.
+ # Type: json
+ EndpointMap:
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'IP_ADDRESS'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'IP_ADDRESS'}
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'}
+ GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'IP_ADDRESS'}
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'IP_ADDRESS'}
+ HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'IP_ADDRESS'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'IP_ADDRESS'}
+ IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'IP_ADDRESS'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'IP_ADDRESS'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'IP_ADDRESS'}
+ ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'IP_ADDRESS'}
+ MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'IP_ADDRESS'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'IP_ADDRESS'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'IP_ADDRESS'}
+ NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'IP_ADDRESS'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+ SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'IP_ADDRESS'}
+
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy All SSL Endpoints as DNS Names
+# description: |
+# Use this environment when deploying an overcloud where all the endpoints are
+# DNS names and TLS is enabled on all endpoint types.
+parameter_defaults:
+ # Mapping of service endpoint -> protocol. Typically set via parameter_defaults in the resource registry.
+ # Type: json
+ EndpointMap:
+ AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+ CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressInternal: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+ HeatAdmin: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatInternal: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnInternal: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
+ KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ ManilaAdmin: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaInternal: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+ MistralAdmin: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+ MistralInternal: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'CLOUDNAME'}
+ NeutronAdmin: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronInternal: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementInternal: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerInternal: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketInternal: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable the Cinder NetApp Backend
+# description: |
+# A Heat environment file which can be used to enable a
+# Cinder NetApp backend, configured via puppet
+parameter_defaults:
+ #
+ # Type: string
+ CinderNetappBackendName: tripleo_netapp
+
+ #
+ # Type: string
+ CinderNetappControllerIps: ''
+
+ #
+ # Type: string
+ CinderNetappCopyOffloadToolPath: ''
+
+ #
+ # Type: string
+ CinderNetappEseriesHostType: linux_dm_mp
+
+ #
+ # Type: string
+ CinderNetappHostType: ''
+
+ #
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CinderNetappLogin: <None>
+
+ #
+ # Type: string
+ CinderNetappNfsMountOptions: ''
+
+ #
+ # Type: string
+ CinderNetappNfsShares: ''
+
+ #
+ # Type: string
+ CinderNetappNfsSharesConfig: /etc/cinder/shares.conf
+
+ #
+ # Type: string
+ CinderNetappPartnerBackendName: ''
+
+ #
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CinderNetappPassword: <None>
+
+ #
+ # Type: string
+ CinderNetappSaPassword: ''
+
+ #
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CinderNetappServerHostname: <None>
+
+ #
+ # Type: string
+ CinderNetappServerPort: 80
+
+ #
+ # Type: string
+ CinderNetappSizeMultiplier: 1.2
+
+ #
+ # Type: string
+ CinderNetappStorageFamily: ontap_cluster
+
+ #
+ # Type: string
+ CinderNetappStoragePools: ''
+
+ #
+ # Type: string
+ CinderNetappStorageProtocol: nfs
+
+ #
+ # Type: string
+ CinderNetappTransportType: http
+
+ #
+ # Type: string
+ CinderNetappVfiler: ''
+
+ #
+ # Type: string
+ CinderNetappVolumeList: ''
+
+ #
+ # Type: string
+ CinderNetappVserver: ''
+
+ #
+ # Type: string
+ CinderNetappWebservicePath: /devmgr/v2
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ #
+ # Type: boolean
+ CinderEnableNetappBackend: True
+
+ # *********************
+ # End static parameters
+ # *********************
+resource_registry:
+ OS::TripleO::ControllerExtraConfigPre: ../../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
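+# A minimal sketch of the user-supplied settings: only the parameters marked
+# mandatory above need values (all values illustrative):
+#   parameter_defaults:
+#     CinderNetappLogin: admin
+#     CinderNetappPassword: secret
+#     CinderNetappServerHostname: netapp.example.com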
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable Cinder NFS Backend
+# description: |
+# Configure and include this environment to enable the use of an NFS
+# share as the backend for Cinder.
+parameter_defaults:
+ # Whether or not to enable the iSCSI backend for Cinder
+ # Type: boolean
+ CinderEnableIscsiBackend: False
+
+ # Whether or not to enable the NFS backend for Cinder
+ # Type: boolean
+ CinderEnableNfsBackend: True
+
+ # Mount options for NFS mounts used by the Cinder NFS backend. Effective when CinderEnableNfsBackend is true.
+ # Type: string
+ CinderNfsMountOptions: ''
+
+ # NFS servers used by the Cinder NFS backend. Effective when CinderEnableNfsBackend is true.
+ # Type: comma_delimited_list
+ CinderNfsServers: 192.168.122.1:/export/cinder
+
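+ # As a sketch, CinderNfsServers accepts several shares as a comma-delimited
+ # list (hosts and exports illustrative):
+ #CinderNfsServers: '192.168.122.1:/export/cinder,192.168.122.2:/export/cinder'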
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable Ceph Storage Backend
+# description: |
+# Include this environment to enable Ceph as the backend for
+# Cinder, Nova, Gnocchi, and Glance.
+parameter_defaults:
+ # The short name of the Cinder Backup backend to use.
+ # Type: string
+ CinderBackupBackend: rbd
+
+ # Whether or not to enable the iSCSI backend for Cinder
+ # Type: boolean
+ CinderEnableIscsiBackend: False
+
+ # Whether or not to enable the RBD backend for Cinder
+ # Type: boolean
+ CinderEnableRbdBackend: True
+
+ # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GlanceBackend: rbd
+
+ # The short name of the Gnocchi backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GnocchiBackend: rbd
+
+ # Whether or not to enable the RBD backend for Nova
+ # Type: boolean
+ NovaEnableRbdBackend: True
+
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Deploy Using an External Ceph Cluster
+# description: |
+# A Heat environment file which can be used to enable the
+# use of an externally managed Ceph cluster.
+parameter_defaults:
+ # The Ceph admin client key. Can be created with ceph-authtool --gen-print-key.
+ # Type: string
+ CephAdminKey: ''
+
+ # The Ceph client key. Can be created with ceph-authtool --gen-print-key. Currently only used for external Ceph deployments to create the openstack user keyring.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CephClientKey: <None>
+
+ #
+ # Type: string
+ CephClientUserName: openstack
+
+ # The Ceph cluster FSID. Must be a UUID.
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ CephClusterFSID: <None>
+
+ # List of externally managed Ceph Mon Host IPs. Only used for external Ceph deployments.
+ # Type: string
+ CephExternalMonHost: ''
+
+ # Whether or not to enable the iSCSI backend for Cinder
+ # Type: boolean
+ CinderEnableIscsiBackend: False
+
+ # Whether or not to enable the RBD backend for Cinder
+ # Type: boolean
+ CinderEnableRbdBackend: True
+
+ #
+ # Type: string
+ CinderRbdPoolName: volumes
+
+ # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GlanceBackend: rbd
+
+ #
+ # Type: string
+ GlanceRbdPoolName: images
+
+ # The short name of the Gnocchi backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GnocchiBackend: rbd
+
+ #
+ # Type: string
+ GnocchiRbdPoolName: metrics
+
+ # Whether or not to enable the RBD backend for Nova
+ # Type: boolean
+ NovaEnableRbdBackend: True
+
+ #
+ # Type: string
+ NovaRbdPoolName: vms
+
+ # The default features enabled when creating a block device image. Only applies to format 2 images. Set to '1' for Jewel clients using older Ceph servers.
+ # Type: string
+ RbdDefaultFeatures: ''
+
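+ # As a sketch, the mandatory values above can be collected on the external
+ # cluster with standard Ceph tooling (commands illustrative):
+ #   ceph fsid                        # -> CephClusterFSID
+ #   ceph-authtool --gen-print-key    # -> CephClientKey
+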
+resource_registry:
+ OS::TripleO::Services::CephClient: OS::Heat::None
+ OS::TripleO::Services::CephExternal: ../../puppet/services/ceph-external.yaml
+ OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephOSD: OS::Heat::None
--- /dev/null
+# *******************************************************************
+# This file was created automatically by the sample environment
+# generator. Developers should use `tox -e genconfig` to update it.
+# Users are recommended to make changes to a copy of the file instead
+# of the original, if any customizations are needed.
+# *******************************************************************
+# title: Enable Glance NFS Backend
+# description: |
+# Configure and include this environment to enable the use of an NFS
+# share as the backend for Glance.
+parameter_defaults:
+ # NFS mount options for image storage (when GlanceNfsEnabled is true)
+ # Type: string
+ GlanceNfsOptions: intr,context=system_u:object_r:glance_var_lib_t:s0
+
+ # NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ # Type: string
+ GlanceNfsShare: ''
+
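+ # As a sketch, GlanceNfsShare takes the usual host:/export form (values
+ # illustrative):
+ #GlanceNfsShare: '192.168.122.1:/export/images'
+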
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # Type: string
+ GlanceBackend: file
+
+ # When using GlanceBackend 'file', mount an NFS share for image storage.
+ # Type: boolean
+ GlanceNfsEnabled: True
+
+ # *********************
+ # End static parameters
+ # *********************
+# *************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/tls-endpoints-public-dns.yaml
+# instead.
+# *************************************************************************************
# Use this environment when deploying an SSL-enabled overcloud where the public
# endpoint is a DNS name.
parameter_defaults:
OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
- PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
- PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
- PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'}
SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+# *************************************************************************************
+# DEPRECATED: Use tripleo-heat-templates/environments/ssl/tls-endpoints-public-ip.yaml
+# instead.
+# *************************************************************************************
# Use this environment when deploying an SSL-enabled overcloud where the public
# endpoint is an IP address.
parameter_defaults:
OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
- PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
- PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
- PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+ PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13977', host: 'IP_ADDRESS'}
SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
- IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
- IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorInternal: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
- PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
- PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
- PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
+ PankoInternal: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
+ PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'}
SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
HeatConvergenceEngine: false
HeatMaxResourcesPerStack: -1
HeatMaxJsonBodySize: 2097152
+ IronicInspectorInterface: br-ctlplane
+ IronicInspectorIpRange: '192.168.24.100,192.168.24.200'
lineinfile:
dest: /etc/tuned/cpu-partitioning-variables.conf
regexp: '^isolated_cores=.*'
- line: 'isolated_cores={{ _HOST_CPUS_LIST_ }}'
- when: _HOST_CPUS_LIST_|default("") != ""
+ line: 'isolated_cores={{ _TUNED_CORES_ }}'
+ when: _TUNED_CORES_|default("") != ""
- - name: Tune-d provile activation
+ - name: Tune-d profile activation
shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }}
become: true
when: _TUNED_PROFILE_NAME_|default("") != ""
when:
- item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') != "lo"
# This condition will list all the interfaces except the one with valid IP (which is Provisioning network at this stage)
- # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4']['address'] is undefined
- - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4']['address'] is undefined
+ # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4'] is undefined
+ - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4'] is undefined
with_items:
- "{{ ifcfg_files.files }}"
parameters:
server:
type: string
+ # Deprecated parameters: these are deprecated in favor of role-specific parameters.
+ # Use: extraconfig/pre_network/host_config_and_reboot.yaml.
+ # Deprecated in Pike and will be removed in Queens.
{{role}}KernelArgs:
type: string
default: ""
type: string
default: ""
+parameter_groups:
+ - label: deprecated
+ parameters:
+ - {{role}}KernelArgs
+ - {{role}}TunedProfileName
+ - {{role}}HostCpusList
+
conditions:
param_exists:
or:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ All configurations that require a reboot should be initiated via PreNetworkConfig. After
+ this configuration is applied, the corresponding node will be rebooted.
+
+parameters:
+ server:
+ type: string
+ RoleParameters:
+ type: json
+ description: Role Specific parameters
+ default: {}
+ ServiceNames:
+ type: comma_delimited_list
+ default: []
+ IsolCpusList:
+ default: "0"
+ description: List of cores to be isolated by tuned
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]+"
+ OvsEnableDpdk:
+ default: false
+ description: Whether or not to enable DPDK in OVS
+ type: boolean
+ OvsDpdkCoreList:
+ description: >
+ List of cores to be used for DPDK lcore threads. Note that these threads
+ are used by the OVS control path for validation and handling functions.
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ""
+ OvsDpdkMemoryChannels:
+ description: Number of memory channels per socket to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ""
+ OvsDpdkSocketMemory:
+ default: ""
+ description: >
+ Sets the amount of hugepage memory to assign per NUMA node. It is
+ recommended to use the socket closest to the PCIe slot used for the
+ desired DPDK NIC. The format should be in "<socket 0 mem>, <socket 1
+ mem>, <socket n mem>", where the value is specified in MB. For example:
+ "1024,0".
+ type: string
+ OvsDpdkDriverType:
+ default: "vfio-pci"
+ description: >
+ DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+ this UIO/PMD driver.
+ type: string
+ OvsPmdCoreList:
+ description: >
+ A list or range of CPU cores to pin PMD threads to. Note that NIC
+ location relative to cores on a socket, the number of hyper-threaded
+ logical cores, and the desired number of PMD threads can all influence
+ this setting. These cores should be on the same socket where
+ OvsDpdkSocketMemory is assigned. If using hyperthreading, specify both
+ logical cores that make up a physical core. Specifying more than one
+ core spawns multiple PMD threads, which may improve dataplane
+ performance.
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ type: string
+ default: ""
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Queens cycle.
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]+"
+ default: '0'
+ NeutronDpdkCoreList:
+ description: List of cores to be used for DPDK Poll Mode Driver
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkMemoryChannels:
+ description: Number of memory channels to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ''
+ NeutronDpdkSocketMemory:
+ default: ''
+ description: Memory allocated for each socket
+ type: string
+ NeutronDpdkDriverType:
+ default: "vfio-pci"
+ description: DPDK Driver type
+ type: string
+
+conditions:
+ is_host_config_required: {not: {equals: [{get_param: [RoleParameters, KernelArgs]}, ""]}}
+ # YAQL is enabled in conditions with https://review.openstack.org/#/c/467506/
+ is_dpdk_config_required:
+ or:
+ - yaql:
+ expression: $.data.service_names.contains('neutron_ovs_dpdk_agent')
+ data:
+ service_names: {get_param: ServiceNames}
+ - {get_param: OvsEnableDpdk}
+ - {get_param: [RoleParameters, OvsEnableDpdk]}
+ is_reboot_config_required:
+ or:
+ - is_host_config_required
+ - is_dpdk_config_required
+ l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+ pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+ mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+ socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+ driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+ isol_cpus_empty: {equals: [{get_param: IsolCpusList}, '0']}
+
+resources:
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
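+        # map_replace resolution: values present in RoleParameters win; any
+        # entry not set there is replaced by the computed values below, which
+        # fall back to the DEPRECATED parameters when the new ones are empty.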
+ map_replace:
+ - map_replace:
+ - IsolCpusList: IsolCpusList
+ OvsDpdkCoreList: OvsDpdkCoreList
+ OvsDpdkMemoryChannels: OvsDpdkMemoryChannels
+ OvsDpdkSocketMemory: OvsDpdkSocketMemory
+ OvsDpdkDriverType: OvsDpdkDriverType
+ OvsPmdCoreList: OvsPmdCoreList
+ - values: {get_param: [RoleParameters]}
+ - values:
+ IsolCpusList: {if: [isol_cpus_empty, {get_param: HostCpusList}, {get_param: IsolCpusList}]}
+ OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+ OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+ OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+ OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+ OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+ HostParametersConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: is_host_config_required
+ properties:
+ group: ansible
+ inputs:
+ - name: _KERNEL_ARGS_
+ - name: _TUNED_PROFILE_NAME_
+ - name: _TUNED_CORES_
+ outputs:
+ - name: result
+ config:
+ get_file: ansible_host_config.yaml
+
+ HostParametersDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: is_host_config_required
+ properties:
+ name: HostParametersDeployment
+ server: {get_param: server}
+ config: {get_resource: HostParametersConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ input_values:
+ _KERNEL_ARGS_: {get_param: [RoleParameters, KernelArgs]}
+ _TUNED_PROFILE_NAME_: {get_param: [RoleParameters, TunedProfileName]}
+ _TUNED_CORES_: {get_param: [RoleParameters, IsolCpusList]}
+
+ EnableDpdkConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: is_dpdk_config_required
+ properties:
+ group: script
+ config:
+ str_replace:
+ template: |
+ #!/bin/bash
+ set -x
+ # DO NOT use --detailed-exitcodes
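+            # (with detailed exitcodes puppet exits 2 when changes are
+            # applied, which the deployment would treat as a failure)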
+ puppet apply --logdest console \
+ --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+ -e '
+ class {"vswitch::dpdk":
+ host_core_list => "$HOST_CORES",
+ pmd_core_list => "$PMD_CORES",
+ memory_channels => "$MEMORY_CHANNELS",
+ socket_mem => "$SOCKET_MEMORY",
+ }
+ '
+ params:
+ $HOST_CORES: {get_attr: [RoleParametersValue, value, OvsDpdkCoreList]}
+ $PMD_CORES: {get_attr: [RoleParametersValue, value, OvsPmdCoreList]}
+ $MEMORY_CHANNELS: {get_attr: [RoleParametersValue, value, OvsDpdkMemoryChannels]}
+ $SOCKET_MEMORY: {get_attr: [RoleParametersValue, value, OvsDpdkSocketMemory]}
+
+ EnableDpdkDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: is_dpdk_config_required
+ properties:
+ name: EnableDpdkDeployment
+ server: {get_param: server}
+ config: {get_resource: EnableDpdkConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+
+ RebootConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: is_reboot_config_required
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ # Stop os-collect-config to avoid any race collecting another
+ # deployment before reboot happens
+ systemctl stop os-collect-config.service
+ /sbin/reboot
+
+ RebootDeployment:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: HostParametersDeployment
+ condition: is_reboot_config_required
+ properties:
+ name: RebootDeployment
+ server: {get_param: server}
+ config: {get_resource: RebootConfig}
+ actions: ['CREATE'] # Only do this on CREATE
+ signal_transport: NO_SIGNAL
+
+outputs:
+ result:
+ condition: is_host_config_required
+ value:
+ get_attr: [HostParametersDeployment, result]
+ stdout:
+ condition: is_host_config_required
+ value:
+ get_attr: [HostParametersDeployment, deploy_stdout]
+ stderr:
+ condition: is_host_config_required
+ value:
+ get_attr: [HostParametersDeployment, deploy_stderr]
+ status_code:
+ condition: is_host_config_required
+ value:
+ get_attr: [HostParametersDeployment, deploy_status_code]
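+
+# Example wiring (a sketch; the registry path is the one referenced by this
+# change, the parameter values are illustrative only):
+#
+# resource_registry:
+#   OS::TripleO::Compute::PreNetworkConfig: extraconfig/pre_network/host_config_and_reboot.yaml
+# parameter_defaults:
+#   ComputeParameters:
+#     KernelArgs: "default_hugepagesz=1GB hugepagesz=1G hugepages=32"
+#     TunedProfileName: "cpu-partitioning"
+#     IsolCpusList: "2-19"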
}
function is_bootstrap_node {
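+  # hiera and facter may disagree on hostname case, so compare lowercased values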
- if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid | tr '[:upper:]' '[:lower:]')" = "$(facter hostname | tr '[:upper:]' '[:lower:]')" ]; then
log_debug "Node is bootstrap"
echo "true"
fi
resources:
-{%- for role in roles -%}
-{% if "controller" in role.tags %}
+{%- for role in roles %}
+ {%- if 'controller' in role.tags %}
{{role.name}}PostPuppetMaintenanceModeConfig:
type: OS::Heat::SoftwareConfig
properties:
properties:
servers: {get_param: [servers, {{role.name}}]}
input_values: {get_param: input_values}
-{%- endif -%}
-{% endfor %}
+ {%- endif %}
+{%- endfor %}
# of packages to update (the check for -z "$update_identifier" guarantees that this
# is run only on overcloud stack update -i)
if [[ "$pacemaker_status" == "active" && \
- "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name)" == "$(facter hostname)" ]] ; then \
+ "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name | tr '[:upper:]' '[:lower:]')" == "$(facter hostname | tr '[:upper:]' '[:lower:]')" ]] ; then \
# OCF scripts don't cope with -eu
echo "Verifying if we need to fix up any IPv6 VIPs"
set +eu
parameters:
BondInterfaceOvsOptions:
default: ''
- description: 'The ovs_options string for the bond interface. Set things like
-
- lacp=active and/or bond_mode=balance-slb using this option.
-
- '
+ description: The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
- description: 'The balance-tcp bond mode is known to cause packet loss and
-
+ description: The balance-tcp bond mode is known to cause packet loss and
should not be used in BondInterfaceOvsOptions.
-
- '
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
ExternalNetworkVlanID:
default: 10
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: bond_mode=active-backup
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: bond_mode=active-backup
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
- description: 'The balance-tcp bond mode is known to cause packet loss and
-
+ description: The balance-tcp bond mode is known to cause packet loss and
should not be used in BondInterfaceOvsOptions.
-
- '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
net_param: Public
Admin:
net_param: PankoApi
- port: 8779
+ port: 8977
Cinder:
Internal:
OctaviaAdmin: {protocol: http, port: '9876', host: IP_ADDRESS}
OctaviaInternal: {protocol: http, port: '9876', host: IP_ADDRESS}
OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
- PankoAdmin: {protocol: http, port: '8779', host: IP_ADDRESS}
- PankoInternal: {protocol: http, port: '8779', host: IP_ADDRESS}
- PankoPublic: {protocol: http, port: '8779', host: IP_ADDRESS}
+ PankoAdmin: {protocol: http, port: '8977', host: IP_ADDRESS}
+ PankoInternal: {protocol: http, port: '8977', host: IP_ADDRESS}
+ PankoPublic: {protocol: http, port: '8977', host: IP_ADDRESS}
SaharaAdmin: {protocol: http, port: '8386', host: IP_ADDRESS}
SaharaInternal: {protocol: http, port: '8386', host: IP_ADDRESS}
SaharaPublic: {protocol: http, port: '8386', host: IP_ADDRESS}
resources:
VipPort:
- type: OS::Neutron::Port
+ type: OS::TripleO::Network::Ports::ControlPlaneVipPort
properties:
network: {get_param: ControlPlaneNetwork}
name: {get_param: PortName}
SERVICE: {get_attr: [EnabledServicesValue, value]}
- values: {get_param: ServiceNetMap}
- values: {get_attr: [NetIpMapValue, value]}
+ ctlplane_service_ips:
+ description: >
+ Map of enabled services to a list of their ctlplane IP addresses
+ value:
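+      # drop services whose ctlplane IP list is empty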
+ yaql:
+ expression: dict($.data.map.items().where(len($[1]) > 0))
+ data:
+ map:
+ map_merge:
+ repeat:
+ template:
+ SERVICE_ctlplane_node_ips: {get_param: ControlPlaneIpList}
+ for_each:
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
service_hostnames:
description: >
Map of enabled services to a list of hostnames where they're running
}
if [ -n '$network_config' ]; then
- if [ -z "${disable_configure_safe_defaults:-''}" ]; then
+ if [ -z "${disable_configure_safe_defaults:-}" ]; then
trap configure_safe_defaults EXIT
fi
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
CongressApiNetwork: internal_api
- GlanceApiNetwork: storage
+ GlanceApiNetwork: internal_api
IronicApiNetwork: ctlplane
IronicNetwork: ctlplane
IronicInspectorNetwork: ctlplane
OS::TripleO::PostDeploySteps: puppet/post.yaml
OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
+ OS::TripleO::AllNodesDeployment: OS::Heat::StructuredDeployments
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
+ OS::TripleO::RandomString: OS::Heat::RandomString
# Tasks (for internal TripleO usage)
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml
# services
- OS::TripleO::Services: puppet/services/services.yaml
+ OS::TripleO::Services: services.yaml
OS::TripleO::Services::Apache: puppet/services/apache.yaml
OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
OS::TripleO::Services::CephMds: OS::Heat::None
# Undercloud Telemetry services
OS::TripleO::Services::UndercloudCeilometerAgentCentral: OS::Heat::None
OS::TripleO::Services::UndercloudCeilometerAgentNotification: OS::Heat::None
+ OS::TripleO::Services::UndercloudCeilometerAgentIpmi: OS::Heat::None
#Gnocchi services
OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
OS::TripleO::Services::MistralExecutor: OS::Heat::None
OS::TripleO::Services::IronicApi: OS::Heat::None
OS::TripleO::Services::IronicConductor: OS::Heat::None
+ OS::TripleO::Services::IronicInspector: OS::Heat::None
OS::TripleO::Services::NovaIronic: OS::Heat::None
OS::TripleO::Services::TripleoPackages: puppet/services/tripleo-packages.yaml
OS::TripleO::Services::TripleoFirewall: puppet/services/tripleo-firewall.yaml
OS::TripleO::Services::NeutronVppAgent: OS::Heat::None
OS::TripleO::Services::Docker: OS::Heat::None
OS::TripleO::Services::CertmongerUser: OS::Heat::None
+ OS::TripleO::Services::Iscsid: OS::Heat::None
parameter_defaults:
EnablePackageInstall: false
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
ServerMetadata:
default: {}
description: >
HOST: {get_param: CloudNameStorageManagement}
HeatAuthEncryptionKey:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
PcsdPassword:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 16
HorizonSecret:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 10
servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}AllNodesDeployment:
- type: OS::Heat::StructuredDeployments
+ type: OS::TripleO::AllNodesDeployment
depends_on:
{% for role_inner in roles %}
- {{role_inner.name}}HostsDeployment
MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]}
+ RoleParameters: {get_param: {{role.name}}Parameters}
{% endfor %}
{% for role in roles %}
UpdateIdentifier: {get_param: UpdateIdentifier}
MysqlRootPassword:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 10
RabbitCookie:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 20
salt: {get_param: RabbitCookieSalt}
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
+ stack_name: {get_param: 'OS::stack_name'}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ ctlplane_service_ips:
+ # Note (shardy) this somewhat complex yaql may be replaced
+ # with a map_deep_merge function in ocata. It merges the
+ # list of maps, but appends to colliding lists when a service
+ # is deployed on more than one role
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, ctlplane_service_ips]}
+{% endfor %}
role_data:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{% endfor %}
+ ServerOsCollectConfigData:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{% endfor %}
+ ServerOsCollectConfigData:
+ description: The os-collect-config configuration associated with each server resource
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+ VipMap:
+ description: Mapping of each network to VIP addresses. Also includes the Redis VIP.
+ value:
+ map_merge:
+ - {get_attr: [VipMap, net_ip_map]}
+ - redis: {get_attr: [RedisVirtualIP, ip_address]}
--- /dev/null
+=================================
+Samples for plan-environment.yaml
+=================================
+
+The ``plan-environment.yaml`` file provides the details of the plan to be
+deployed by TripleO. Along with the details of the heat environments and
+parameters, it is also possible to provide workflow-specific parameters to
+the TripleO mistral workflows. A new section ``workflow_parameters`` has
+been added for this purpose. It provides a clear separation between heat
+environment parameters and workflow-only parameters. These customized plan
+environment files can be provided with the ``-p`` option to the
+``openstack overcloud deploy`` and ``openstack overcloud plan create``
+commands. The sample format for providing workflow-specific parameters is::
+
+ workflow_parameters:
+ tripleo.derive_params.v1.derive_parameters:
+ # DPDK Parameters
+ number_of_pmd_cpu_threads_per_numa_node: 2
+
+
+All the parameters specified under the workflow name are passed as
+``user_input`` to the workflow when it is invoked from tripleoclient.
\ No newline at end of file
--- /dev/null
+version: 1.0
+
+name: overcloud
+description: >
+ Default Deployment plan
+template: overcloud.yaml
+environments:
+ - path: overcloud-resource-registry-puppet.yaml
+workflow_parameters:
+ tripleo.derive_params.v1.derive_parameters:
+ ######### DPDK Parameters #########
+ # Specifies the minimum number of CPU threads to be allocated for DPDK
+ # PMD threads. The actual allocation is based on the network config: if
+ # a DPDK port is associated with a NUMA node, then this value is used;
+ # otherwise 0.
+ number_of_pmd_cpu_threads_per_numa_node: 4
+ # Percentage of memory to be configured as huge pages. Out of the
+ # total available memory (excluding the NovaReservedHostMemory), the
+ # specified percentage of the remainder is configured as huge pages.
+ huge_page_allocation_percentage: 90
+ ######### HCI Parameters #########
+ hci_profile: default
+ hci_profile_config:
+ default:
+ average_guest_memory_size_in_mb: 2048
+ average_guest_cpu_utilization_percentage: 50
+ many_small_vms:
+ average_guest_memory_size_in_mb: 1024
+ average_guest_cpu_utilization_percentage: 20
+ few_large_vms:
+ average_guest_memory_size_in_mb: 4096
+ average_guest_cpu_utilization_percentage: 80
+ nfv_default:
+ average_guest_memory_size_in_mb: 8192
+ average_guest_cpu_utilization_percentage: 90
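+      # Select a different profile by setting hci_profile above to one of
+      # these keys, e.g. nfv_default.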
type: string
cloud_name_ctlplane:
type: string
- # FIXME(shardy) this can be comma_delimited_list when
- # https://bugs.launchpad.net/heat/+bug/1617019 is fixed
enabled_services:
- type: string
+ type: comma_delimited_list
controller_ips:
type: comma_delimited_list
logging_groups:
map_merge:
- tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
- tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
- - enabled_services: {get_param: enabled_services}
+ - enabled_services:
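+          # yaql distinct() de-duplicates the enabled services list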
+ yaql:
+ expression: $.data.distinct()
+ data: {get_param: enabled_services}
# This writes out a mapping of service_name_enabled: 'true'
# For any services not enabled, hiera foo_enabled will
# return nil, as it's undefined
# https://bugs.launchpad.net/heat/+bug/1617203
SERVICE_enabled: 'true'
for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
+ SERVICE: {get_param: enabled_services}
# Dynamically generate per-service network data
# This works as follows (outer->inner functions)
# yaql - filters services where no mapping exists in ServiceNetMap
template:
SERVICE_network: SERVICE_network
for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
+ SERVICE: {get_param: enabled_services}
- values: {get_param: ServiceNetMap}
# Keystone doesn't provide separate entries for the public
# and admin endpoints, so we need to add them here manually
template:
SERVICE_vip: SERVICE_network
for_each:
- SERVICE:
- str_split: [',', {get_param: enabled_services}]
+ SERVICE: {get_param: enabled_services}
- values: {get_param: ServiceNetMap}
- values: {get_param: NetVipMap}
- keystone_admin_api_vip:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
BlockStorageServerMetadata:
default: {}
description: >
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
BlockStorage:
- {get_param: BlockStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
type: OS::TripleO::BlockStorage::PreNetworkConfig
properties:
server: {get_resource: BlockStorage}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [BlockStorage, os_collect_config]}
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
CephStorageServerMetadata:
default: {}
description: >
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
CephStorage:
- {get_param: CephStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: CephStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
type: OS::TripleO::CephStorage::PreNetworkConfig
properties:
server: {get_resource: CephStorage}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [CephStorage, os_collect_config]}
type: string
NeutronPublicInterface:
default: nic1
- description: A port to add to the NeutronPhysicalBridge.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
NodeIndex:
type: number
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
NovaComputeServerMetadata:
default: {}
description: >
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
server_not_blacklisted:
not:
equals:
- {get_param: NovaComputeServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
type: OS::TripleO::Compute::PreNetworkConfig
properties:
server: {get_resource: NovaCompute}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
NovaComputeDeployment:
type: OS::TripleO::SoftwareDeployment
value:
{get_resource: NovaCompute}
condition: server_not_blacklisted
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [NovaCompute, os_collect_config]}
type: string
constraints:
- custom_constraint: nova.keypair
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing external networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
ControllerServerMetadata:
default: {}
description: >
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
parameter_groups:
- label: deprecated
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
-
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
- {get_param: ControllerServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ControllerSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
type: OS::TripleO::Controller::PreNetworkConfig
properties:
server: {get_resource: Controller}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
- {get_param: NetworkDeploymentActions}
- []
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
# Resource for site-specific injection of root certificate
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Hook for site-specific additional pre-deployment config, e.g extra hieradata
ControllerExtraConfigPre:
tls_cert_modulus_md5:
description: MD5 checksum of the TLS Certificate Modulus
value: {get_attr: [NodeTLSData, cert_modulus_md5]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [Controller, os_collect_config]}
for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
curl --globoff -o $TMP_DATA/file_data "$URL"
if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
- yum install -y $TMP_DATA/file_data
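+      # yum requires a .rpm suffix to treat the argument as a local file
+      # rather than a package name, so rename before installing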
+ mv $TMP_DATA/file_data $TMP_DATA/file_data.rpm
+ yum install -y $TMP_DATA/file_data.rpm
+ rm $TMP_DATA/file_data.rpm
elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
pushd /
tar xvzf $TMP_DATA/file_data
popd
else
- echo "ERROR: Unsupported file format."
+ echo "ERROR: Unsupported file format: $URL"
exit 1
fi
- rm $TMP_DATA/file_data
+ if [ -f $TMP_DATA/file_data ]; then
+ rm $TMP_DATA/file_data
+ fi
done
else
echo "No artifact_urls was set. Skipping..."
parameters:
servers:
type: json
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
{%- endfor %}
properties:
servers: {get_param: servers}
+ stack_name: {get_param: stack_name}
role_data: {get_param: role_data}
outputs:
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
SwiftStorageServerMetadata:
default: {}
description: >
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ default: {}
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
- {get_param: SwiftStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
type: OS::TripleO::ObjectStorage::PreNetworkConfig
properties:
server: {get_resource: SwiftStorage}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
SwiftStorageHieraDeploy:
type: OS::Heat::StructuredDeployment
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [SwiftStorage, os_collect_config]}
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
+ ctlplane_service_ips:
+ type: json
-resources:
{% include 'puppet-steps.j2' %}
+{% set deploy_steps_max = 6 %}
+
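+# A WorkflowTasks step is enabled when at least one role defines a non-empty
+# service_workflow_tasks entry for that step.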
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
+
+resources:
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
{% for role in roles %}
StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
# Step through a series of configuration steps
-{% for step in range(1, 6) %}
+{% for step in range(1, deploy_steps_max) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+ # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
{% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
{% endfor %}
- {% endif %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
update_identifier: {get_param: DeployIdentifier}
{% endfor %}
+ # Note, this should be the last step to execute configuration changes.
+ # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+ # after all the previous deployment steps.
+ {{role.name}}ExtraConfigPost:
+ depends_on:
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step5
+ {% endfor %}
+ type: OS::TripleO::NodeExtraConfigPost
+ properties:
+ servers: {get_param: [servers, {{role.name}}]}
+
+ # The {{role.name}}PostConfig steps are in charge of
+ # quiescing all services, i.e. in the Controller case,
+ # we should run a full service reload.
{{role.name}}PostConfig:
type: OS::TripleO::Tasks::{{role.name}}PostConfig
depends_on:
{% for dep in roles %}
- - {{dep.name}}Deployment_Step5
+ - {{dep.name}}ExtraConfigPost
{% endfor %}
properties:
servers: {get_param: servers}
input_values:
update_identifier: {get_param: DeployIdentifier}
- # Note, this should come last, so use depends_on to ensure
- # this is created after any other resources.
- {{role.name}}ExtraConfigPost:
+
+{% endfor %}
+
+# BEGIN service_workflow_tasks handling
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
depends_on:
- {% for dep in roles %}
- - {{dep.name}}PostConfig
- {% endfor %}
- type: OS::TripleO::NodeExtraConfigPost
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
properties:
- servers: {get_param: [servers, {{role.name}}]}
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
{% endfor %}
+# END service_workflow_tasks handling
constraints:
- custom_constraint: nova.keypair
{% endif %}
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing tenant networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
default: 'localdomain'
type: string
description: >
- The DNS domain used for the hosts. This should match the dhcp_domain
- configured in the Undercloud neutron. Defaults to localdomain.
+ The DNS domain used for the hosts. This must match the
+ overcloud_domain_name configured on the undercloud.
{{role}}ServerMetadata:
default: {}
description: >
Map of server hostnames to blacklist from any triggered
deployments. If the value is 1, the server will be blacklisted. This
parameter is generated from the parent template.
+ RoleParameters:
+ type: json
+ description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
{{role}}:
- type: OS::TripleO::{{role.name}}Server
+ type: OS::TripleO::{{role}}Server
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
- {get_param: {{role}}ServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: {{role}}SchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
type: OS::TripleO::{{role}}::PreNetworkConfig
properties:
server: {get_resource: {{role}}}
+ RoleParameters: {get_param: RoleParameters}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
server: {get_resource: {{role}}}
actions: {get_param: NetworkDeploymentActions}
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
actions:
if:
fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+ fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
# Resource for site-specific injection of root certificate
NodeTLSCAData:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [{{role}}, os_collect_config]}
5) Service activation (Pacemaker)
+It is also possible to use Mistral actions or workflows together with
+a deployment step; these are executed before the main configuration run.
+To describe actions or workflows from within a service, use:
+
+ * service_workflow_tasks: One or more workflow task properties
+
+which expects a map where the key is the step and the value is a list of
+dictionaries, each describing a workflow task, for example::
+
+ service_workflow_tasks:
+ step2:
+ - name: echo
+ action: std.echo output=Hello
+ step3:
+ - name: external
+ workflow: my-pre-existing-workflow-name
+ input:
+ workflow_param1: value
+ workflow_param2: value
+
+The Heat guide for the `OS::Mistral::Workflow task property
+<https://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Mistral::Workflow-prop-tasks>`_
+has more details about the expected dictionary.
+
Batch Upgrade Steps
-------------------
CinderDellScSecondaryScApiPort:
type: number
default: 3033
+ CinderDellScExcludedDomainIp:
+ type: string
+ default: ''
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
cinder::backend::dellsc_iscsi::secondary_san_login: {get_param: CinderDellScSecondarySanLogin}
cinder::backend::dellsc_iscsi::secondary_san_password: {get_param: CinderDellScSecondarySanPassword}
cinder::backend::dellsc_iscsi::secondary_sc_api_port: {get_param: CinderDellScSecondaryScApiPort}
+ cinder::backend::dellsc_iscsi::excluded_domain_ip: {get_param: CinderDellScExcludedDomainIp}
step_config: |
include ::tripleo::profile::base::cinder::volume
CinderNetappWebservicePath:
type: string
default: '/devmgr/v2'
+ CinderNetappNasSecureFileOperations:
+ type: string
+ default: 'false'
+ CinderNetappNasSecureFilePermissions:
+ type: string
+ default: 'false'
# DEPRECATED options for compatibility with older versions
CinderNetappEseriesHostType:
type: string
cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+ cinder::backend::netapp::nas_secure_file_operations: {get_param: CinderNetappNasSecureFileOperations}
+ cinder::backend::netapp::nas_secure_file_permissions: {get_param: CinderNetappNasSecureFilePermissions}
step_config: |
include ::tripleo::profile::base::cinder::volume
NFS servers used by Cinder NFS backend. Effective when
CinderEnableNfsBackend is true.
type: comma_delimited_list
+ CinderNasSecureFileOperations:
+    default: 'false'
+ description: >
+ Controls whether security enhanced NFS file operations are enabled.
+ Valid values are 'auto', 'true' or 'false'. Effective when
+ CinderEnableNfsBackend is true.
+ type: string
+ CinderNasSecureFilePermissions:
+    default: 'false'
+ description: >
+ Controls whether security enhanced NFS file permissions are enabled.
+ Valid values are 'auto', 'true' or 'false'. Effective when
+ CinderEnableNfsBackend is true.
+ type: string
CinderRbdPoolName:
default: volumes
type: string
tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_operations: {get_param: CinderNasSecureFileOperations}
+ tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_permissions: {get_param: CinderNasSecureFilePermissions}
tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
template: "%{hiera('cloud_name_NETWORK')}"
params:
NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ dnsnames:
+ - str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ - str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
principal:
str_replace:
template: "mysql/%{hiera('cloud_name_NETWORK')}"
- service: mysql
network: {get_param: [ServiceNetMap, MysqlNetwork]}
type: vip
+ - service: mysql
+ network: {get_param: [ServiceNetMap, MysqlNetwork]}
+ type: node
- null
upgrade_tasks:
- name: Check for galera root password
- 26379
step_config: |
include ::tripleo::profile::base::database::redis
+ upgrade_tasks:
+ - name: Check if redis is deployed
+ command: systemctl is-enabled redis
+ tags: common
+ ignore_errors: True
+ register: redis_enabled
+ - name: "PreUpgrade step0,validation: Check if redis is running"
+ shell: >
+ /usr/bin/systemctl show 'redis' --property ActiveState |
+ grep '\bactive\b'
+ when: redis_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop redis service
+ tags: step1
+ when: redis_enabled.rc == 0
+ service: name=redis state=stopped
+ - name: Install redis package if it was disabled
+ tags: step3
+ yum: name=redis state=latest
+ when: redis_enabled.rc != 0
default: 30
description: Delay between processing metrics.
type: number
+ NumberOfStorageSacks:
+ default: 128
+ description: Number of storage sacks to create.
+ type: number
GnocchiPassword:
description: The password for the gnocchi service and db account.
type: string
query:
read_default_file: /etc/my.cnf.d/tripleo.cnf
read_default_group: tripleo
- gnocchi::db::sync::extra_opts: ''
+ gnocchi::db::sync::extra_opts:
+ str_replace:
+ template: " --sacks-number NUM_SACKS"
+ params:
+ NUM_SACKS: {get_param: NumberOfStorageSacks}
gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 3
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
+ HAProxyStatsEnabled:
+ default: true
+ description: Whether or not to enable the HAProxy stats interface.
+ type: boolean
RedisPassword:
description: The password for Redis
type: string
tripleo::haproxy::redis_password: {get_param: RedisPassword}
tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
tripleo::haproxy::crl_file: {get_param: InternalTLSCRLPEMFile}
+ tripleo::haproxy::haproxy_stats: {get_param: HAProxyStatsEnabled}
tripleo::profile::base::haproxy::certificates_specs:
map_merge:
- get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
HorizonSecureCookies:
description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon
type: boolean
- default: true
+ default: false
MemcachedIPv6:
default: false
description: Enable IPv6 features in Memcached.
horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
horizon::vhost_extra_params:
- add_listen: false
priority: 10
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
options: ['FollowSymLinks','MultiViews']
e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
default: {}
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
IronicBase:
type: ./ironic-base.yaml
properties:
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- ironic::api::authtoken::password: {get_param: IronicPassword}
ironic::api::authtoken::project_name: 'service'
ironic::api::authtoken::user_domain_name: 'Default'
ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
# This is used to build links in responses
ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+ ironic::api::service_name: 'httpd'
ironic::policy::policies: {get_param: IronicApiPolicies}
+ ironic::wsgi::apache::bind_host: {get_param: [ServiceNetMap, IronicApiNetwork]}
+ ironic::wsgi::apache::port: {get_param: [EndpointMap, IronicInternal, port]}
+ ironic::wsgi::apache::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, IronicApiNetwork]}
+ ironic::wsgi::apache::ssl: {get_param: EnableInternalTLS}
tripleo.ironic_api.firewall_rules:
'133 ironic api':
dport:
- '%'
- "%{hiera('mysql_bind_host')}"
upgrade_tasks:
- - name: Stop ironic_api service
+ - name: Stop ironic_api service (before httpd support)
+ tags: step1
+ service: name=openstack-ironic-api state=stopped enabled=no
+ - name: Stop ironic_api service (running under httpd)
tags: step1
- service: name=openstack-ironic-api state=stopped
+ service: name=httpd state=stopped
ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
# Credentials to access other services
+ ironic::cinder::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::cinder::username: 'ironic'
+ ironic::cinder::password: {get_param: IronicPassword}
+ ironic::cinder::project_name: 'service'
+ ironic::cinder::user_domain_name: 'Default'
+ ironic::cinder::project_domain_name: 'Default'
ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
ironic::glance::username: 'ironic'
ironic::glance::password: {get_param: IronicPassword}
--- /dev/null
+heat_template_version: ocata
+
+description: >
+ OpenStack Ironic Inspector configured with Puppet (EXPERIMENTAL)
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ MonitoringSubscriptionIronicInspector:
+ default: 'overcloud-ironic-inspector'
+ type: string
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ Debug:
+ default: ''
+ description: Set to True to enable debugging on all services.
+ type: string
+ IronicInspectorInterface:
+ default: br-ex
+ description: |
+ Network interface on which inspection dnsmasq will listen. Should allow
+ access to untagged traffic from nodes booted for inspection. The default
+ value only makes sense if you don't modify any networking configuration.
+ type: string
+ IronicInspectorIPXEEnabled:
+ default: true
+ description: Whether to use iPXE for inspection.
+ type: boolean
+ IronicInspectorIpRange:
+ description: |
+ Temporary IP range that will be given to nodes during the inspection
+ process. This should not overlap with any range that Neutron's DHCP
+    gives away, but it has to be routable back to the ironic-inspector API.
+    This option has no meaningful default and is therefore required.
+ type: string
+ IronicInspectorUseSwift:
+ default: true
+ description: Whether to use Swift for storing introspection data.
+ type: boolean
+ IronicIPXEPort:
+ default: 8088
+ description: Port to use for serving images when iPXE is used.
+ type: string
+ IronicPassword:
+ description: The password for the Ironic service and db account, used by the Ironic services
+ type: string
+ hidden: true
+
+conditions:
+  enable_ipxe: {equals: [{get_param: IronicInspectorIPXEEnabled}, true]}
+  use_swift: {equals: [{get_param: IronicInspectorUseSwift}, true]}
+
+outputs:
+ role_data:
+ description: Role data for the Ironic Inspector role.
+ value:
+ service_name: ironic_inspector
+ monitoring_subscription: {get_param: MonitoringSubscriptionIronicInspector}
+ config_settings:
+ map_merge:
+ - ironic::inspector::listen_address: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+ ironic::inspector::dnsmasq_local_ip: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+ ironic::inspector::dnsmasq_ip_range: {get_param: IronicInspectorIpRange}
+ ironic::inspector::dnsmasq_interface: {get_param: IronicInspectorInterface}
+ ironic::inspector::debug: {get_param: Debug}
+ ironic::inspector::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ ironic::inspector::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::inspector::authtoken::username: 'ironic'
+ ironic::inspector::authtoken::password: {get_param: IronicPassword}
+ ironic::inspector::authtoken::project_name: 'service'
+ ironic::inspector::authtoken::user_domain_name: 'Default'
+ ironic::inspector::authtoken::project_domain_name: 'Default'
+ tripleo.ironic_inspector.firewall_rules:
+ '137 ironic-inspector':
+ dport:
+ - 5050
+ ironic::inspector::ironic_username: 'ironic'
+ ironic::inspector::ironic_password: {get_param: IronicPassword}
+ ironic::inspector::ironic_tenant_name: 'service'
+ ironic::inspector::ironic_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::inspector::ironic_max_retries: 6
+ ironic::inspector::ironic_retry_interval: 10
+ ironic::inspector::ironic_user_domain_name: 'Default'
+ ironic::inspector::ironic_project_domain_name: 'Default'
+ ironic::inspector::http_port: {get_param: IronicIPXEPort}
+ ironic::inspector::db::database_connection:
+ list_join:
+ - ''
+ - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+ - '://ironic-inspector:'
+ - {get_param: IronicPassword}
+ - '@'
+ - {get_param: [EndpointMap, MysqlInternal, host]}
+ - '/ironic-inspector'
+ - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ -
+ if:
+ - enable_ipxe
+ - ironic::inspector::pxe_transfer_protocol: 'http'
+ - {}
+ -
+ if:
+ - use_swift
+ - ironic::inspector::store_data: 'swift'
+ ironic::inspector::swift_username: 'ironic'
+ ironic::inspector::swift_password: {get_param: IronicPassword}
+ ironic::inspector::swift_tenant_name: 'service'
+ ironic::inspector::swift_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ ironic::inspector::swift_user_domain_name: 'Default'
+ ironic::inspector::swift_project_domain_name: 'Default'
+ - {}
+ step_config: |
+ include ::tripleo::profile::base::ironic_inspector
+ service_config_settings:
+ keystone:
+ ironic::keystone::auth_inspector::tenant: 'service'
+ ironic::keystone::auth_inspector::public_url: {get_param: [EndpointMap, IronicInspectorPublic, uri]}
+ ironic::keystone::auth_inspector::internal_url: {get_param: [EndpointMap, IronicInspectorInternal, uri]}
+ ironic::keystone::auth_inspector::admin_url: {get_param: [EndpointMap, IronicInspectorAdmin, uri]}
+ ironic::keystone::auth_inspector::password: {get_param: IronicPassword}
+ ironic::keystone::auth_inspector::region: {get_param: KeystoneRegion}
+ mysql:
+ ironic::inspector::db::mysql::password: {get_param: IronicPassword}
+ ironic::inspector::db::mysql::user: ironic-inspector
+ ironic::inspector::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+ ironic::inspector::db::mysql::dbname: ironic-inspector
+ ironic::inspector::db::mysql::allowed_hosts:
+ - '%'
+ - "%{hiera('mysql_bind_host')}"
description: The second Keystone credential key. Must be a valid key.
KeystoneFernetKey0:
type: string
- description: The first Keystone fernet key. Must be a valid key.
+ default: ''
+ description: (DEPRECATED) The first Keystone fernet key. Must be a valid key.
KeystoneFernetKey1:
type: string
- description: The second Keystone fernet key. Must be a valid key.
+ default: ''
+ description: (DEPRECATED) The second Keystone fernet key. Must be a valid key.
+ KeystoneFernetKeys:
+ type: json
+ description: Mapping containing keystone's fernet keys and their paths.
+ KeystoneFernetMaxActiveKeys:
+ type: number
+ description: The maximum active keys in the keystone fernet key repository.
+ default: 5
+ ManageKeystoneFernetKeys:
+ type: boolean
+ default: true
+ description: Whether TripleO should manage the keystone fernet keys or not.
+ If set to true, the fernet keys will get the values from the
+ saved keys repository in mistral (the KeystoneFernetKeys
+ variable). If set to false, only the stack creation
+ initializes the keys, but subsequent updates won't touch them.
KeystoneLoggingSource:
type: json
default:
default: {}
hidden: true
+parameter_groups:
+- label: deprecated
+ description: |
+ The following parameters are deprecated and will be removed. They should not
+ be relied on for new deployments. If you have concerns regarding deprecated
+ parameters, please contact the TripleO development team on IRC or the
+ OpenStack mailing list.
+ parameters:
+ - KeystoneFernetKey0
+ - KeystoneFernetKey1
+
resources:
ApacheServiceBase:
keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
keystone::token_provider: {get_param: KeystoneTokenProvider}
keystone::enable_fernet_setup: {if: [keystone_fernet_tokens, true, false]}
+ keystone::fernet_max_active_keys: {get_param: KeystoneFernetMaxActiveKeys}
keystone::enable_proxy_headers_parsing: true
keystone::enable_credential_setup: true
keystone::credential_keys:
content: {get_param: KeystoneCredential0}
'/etc/keystone/credential-keys/1':
content: {get_param: KeystoneCredential1}
- keystone::fernet_keys:
- '/etc/keystone/fernet-keys/0':
- content: {get_param: KeystoneFernetKey0}
- '/etc/keystone/fernet-keys/1':
- content: {get_param: KeystoneFernetKey1}
- keystone::fernet_replace_keys: false
+ keystone::fernet_keys: {get_param: KeystoneFernetKeys}
+ keystone::fernet_replace_keys: {get_param: ManageKeystoneFernetKeys}
keystone::debug:
if:
- service_debug_unset
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
neutron_ovs_upgrade:
- name: Check if neutron_ovs_agent is deployed
command: systemctl is-enabled neutron-openvswitch-agent
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- HostCpusList:
- default: "0"
- description: List of cores to be used for host process
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]+"
- NeutronDpdkCoreList:
- default: ""
- description: List of cores to be used for DPDK Poll Mode Driver
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]*"
- NeutronDpdkMemoryChannels:
- default: ""
- description: Number of memory channels to be used for DPDK
- type: string
- constraints:
- - allowed_pattern: "[0-9]*"
- NeutronDpdkSocketMemory:
- default: ""
- description: Memory allocated for each socket
- type: string
- NeutronDpdkDriverType:
- default: "vfio-pci"
- description: DPDK Driver type
- type: string
# The parameters below have to be set in the neutron agent only for compute
# nodes. As of now there is no other use case for these parameters except
# DPDK. They should be moved to a compute-only OVS agent if any other use
# case arises.
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
-
# Merging role-specific parameters (RoleParameters) with the default parameters.
# RoleParameters will have the precedence over the default parameters.
RoleParametersValue:
- map_replace:
- neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
- vswitch::dpdk::driver_type: NeutronDpdkDriverType
- vswitch::dpdk::host_core_list: HostCpusList
- vswitch::dpdk::pmd_core_list: NeutronDpdkCoreList
- vswitch::dpdk::memory_channels: NeutronDpdkMemoryChannels
- vswitch::dpdk::socket_mem: NeutronDpdkSocketMemory
- values: {get_param: [RoleParameters]}
- values:
NeutronDatapathType: {get_param: NeutronDatapathType}
NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
- NeutronDpdkDriverType: {get_param: NeutronDpdkDriverType}
- HostCpusList: {get_param: HostCpusList}
- NeutronDpdkCoreList: {get_param: NeutronDpdkCoreList}
- NeutronDpdkMemoryChannels: {get_param: NeutronDpdkMemoryChannels}
- NeutronDpdkSocketMemory: {get_param: NeutronDpdkSocketMemory}
+
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- keys:
tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
+ - get_attr: [Ovs, role_data, config_settings]
- get_attr: [RoleParametersValue, value]
step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
upgrade_tasks:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
+ # Merging role-specific parameters (RoleParameters) with the default parameters.
+ # RoleParameters will have the precedence over the default parameters.
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - map_replace:
+ - neutron::agents::ml2::sriov::physical_device_mappings: NeutronPhysicalDevMappings
+ neutron::agents::ml2::sriov::exclude_devices: NeutronExcludeDevices
+ tripleo::host::sriov::number_of_vfs: NeutronSriovNumVFs
+ - values: {get_param: [RoleParameters]}
+ - values:
+ NeutronPhysicalDevMappings: {get_param: NeutronPhysicalDevMappings}
+ NeutronExcludeDevices: {get_param: NeutronExcludeDevices}
+ NeutronSriovNumVFs: {get_param: NeutronSriovNumVFs}
+
outputs:
role_data:
description: Role data for the Neutron SR-IOV nic agent service.
config_settings:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- - neutron::agents::ml2::sriov::physical_device_mappings: {get_param: NeutronPhysicalDevMappings}
- neutron::agents::ml2::sriov::exclude_devices: {get_param: NeutronExcludeDevices}
- tripleo::host::sriov::number_of_vfs: {get_param: NeutronSriovNumVFs}
+ - get_attr: [RoleParametersValue, value]
step_config: |
include ::tripleo::profile::base::neutron::sriov
register: bootstrap_node
- name: set is_bootstrap_node fact
tags: common
- set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
+ set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- name: Extra migration for nova tripleo/+bug/1656791
tags: step0,pre-upgrade
when: is_bootstrap_node
default:
tag: openstack.nova.scheduler
path: /var/log/nova/nova-scheduler.log
+ NovaSchedulerDiscoverHostsInCellsInterval:
+ type: number
+ default: -1
+ description: >
+ This value controls how often (in seconds) the scheduler should
+ attempt to discover new hosts that have been added to cells.
+ The default value of -1 disables the periodic task completely.
+ It is recommended to set this parameter for deployments using Ironic.
resources:
NovaBase:
- nova::ram_allocation_ratio: '1.0'
nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters}
nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
+ nova::scheduler::discover_hosts_in_cells_interval: {get_param: NovaSchedulerDiscoverHostsInCellsInterval}
step_config: |
include tripleo::profile::base::nova::scheduler
upgrade_tasks:
type: json
resources:
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
value:
service_name: opendaylight_ovs
config_settings:
- opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
- opendaylight::username: {get_param: OpenDaylightUsername}
- opendaylight::password: {get_param: OpenDaylightPassword}
- opendaylight_check_url: {get_param: OpenDaylightCheckURL}
- opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
- neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
- tripleo.opendaylight_ovs.firewall_rules:
- '118 neutron vxlan networks':
- proto: 'udp'
- dport: 4789
- '136 neutron gre networks':
- proto: 'gre'
+ map_merge:
+ - opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+ opendaylight::username: {get_param: OpenDaylightUsername}
+ opendaylight::password: {get_param: OpenDaylightPassword}
+ opendaylight_check_url: {get_param: OpenDaylightCheckURL}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
+ neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+ neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
+ tripleo.opendaylight_ovs.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '136 neutron gre networks':
+ proto: 'gre'
+ - get_attr: [Ovs, role_data, config_settings]
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
upgrade_tasks:
expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
opendaylight_upgrade:
- name: Check if openvswitch is deployed
command: systemctl is-enabled openvswitch
+++ /dev/null
-heat_template_version: pike
-
-description: >
- Openvswitch package special handling for upgrade.
-
-outputs:
- role_data:
- description: Upgrade task for special handling of Openvswitch (OVS) upgrade.
- value:
- service_name: openvswitch_upgrade
- upgrade_tasks:
- - name: Check openvswitch version.
- tags: step2
- register: ovs_version
- ignore_errors: true
- shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
- - name: Check openvswitch packaging.
- tags: step2
- shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
- register: ovs_packaging_issue
- ignore_errors: true
- - block:
- - name: "Ensure empty directory: emptying."
- file:
- state: absent
- path: /root/OVS_UPGRADE
- - name: "Ensure empty directory: creating."
- file:
- state: directory
- path: /root/OVS_UPGRADE
- owner: root
- group: root
- mode: 0750
- - name: Download OVS packages.
- command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
- - name: Get rpm list for manual upgrade of OVS.
- shell: ls -1 /root/OVS_UPGRADE/*.rpm
- register: ovs_list_of_rpms
- - name: Manual upgrade of OVS
- shell: |
- rpm -U --test {{item}} 2>&1 | grep "already installed" || \
- rpm -U --replacepkgs --notriggerun --nopostun {{item}};
- args:
- chdir: /root/OVS_UPGRADE
- with_items:
- - "{{ovs_list_of_rpms.stdout_lines}}"
- tags: step2
- when: "'2.5.0-14' in '{{ovs_version.stdout}}'
- or
- ovs_packaging_issue|succeeded"
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Open vSwitch Configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OvsDpdkCoreList:
+ description: >
+    List of cores to be used for DPDK lcore threads. Note that these threads
+    are used by the OVS control path for validation and handling functions.
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ""
+ OvsDpdkMemoryChannels:
+ description: Number of memory channels per socket to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ""
+ OvsDpdkSocketMemory:
+ default: ""
+ description: >
+ Sets the amount of hugepage memory to assign per NUMA node. It is
+ recommended to use the socket closest to the PCIe slot used for the
+ desired DPDK NIC. The format should be in "<socket 0 mem>, <socket 1
+ mem>, <socket n mem>", where the value is specified in MB. For example:
+ "1024,0".
+ type: string
+ OvsDpdkDriverType:
+ default: "vfio-pci"
+ description: >
+ DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+ this UIO/PMD driver.
+ type: string
+ OvsPmdCoreList:
+ description: >
+    A list or range of CPU cores for PMD threads to be pinned to. Note that
+    NIC placement relative to the socket, the number of hyper-threaded
+    logical cores, and the desired number of PMD threads can all play a role
+    in configuring this setting. These cores should be on the same socket
+    where OvsDpdkSocketMemory is assigned. If hyper-threading is used,
+    specify both logical cores that make up a physical core. Also,
+    specifying more than one core will spawn multiple PMD threads, which
+    may improve dataplane performance.
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ type: string
+ default: ""
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Queens cycle.
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkCoreList:
+ description: List of cores to be used for DPDK Poll Mode Driver
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkMemoryChannels:
+ description: Number of memory channels to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ''
+ NeutronDpdkSocketMemory:
+ default: ''
+ description: Memory allocated for each socket
+ type: string
+ NeutronDpdkDriverType:
+ default: "vfio-pci"
+ description: DPDK Driver type
+ type: string
+
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params, they will be removed.
+ parameters:
+ - HostCpusList
+ - NeutronDpdkCoreList
+ - NeutronDpdkMemoryChannels
+ - NeutronDpdkSocketMemory
+ - NeutronDpdkDriverType
+
+conditions:
+ l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+ pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+ mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+ socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+ driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+
+outputs:
+ role_data:
+ description: Role data for the Open vSwitch service.
+ value:
+ service_name: openvswitch
+ config_settings:
+ map_replace:
+ - map_replace:
+ - vswitch::dpdk::driver_type: OvsDpdkDriverType
+ vswitch::dpdk::host_core_list: OvsDpdkCoreList
+ vswitch::dpdk::pmd_core_list: OvsPmdCoreList
+ vswitch::dpdk::memory_channels: OvsDpdkMemoryChannels
+ vswitch::dpdk::socket_mem: OvsDpdkSocketMemory
+ - values: {get_param: [RoleParameters]}
+ - values:
+ OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+ OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+ OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+ OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+ OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+ upgrade_tasks:
+ - name: Check openvswitch version.
+ tags: step2
+ register: ovs_version
+ ignore_errors: true
+ shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+ - name: Check openvswitch packaging.
+ tags: step2
+ shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+ register: ovs_packaging_issue
+ ignore_errors: true
+ - block:
+ - name: "Ensure empty directory: emptying."
+ file:
+ state: absent
+ path: /root/OVS_UPGRADE
+ - name: "Ensure empty directory: creating."
+ file:
+ state: directory
+ path: /root/OVS_UPGRADE
+ owner: root
+ group: root
+ mode: 0750
+ - name: Download OVS packages.
+ command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+ - name: Get rpm list for manual upgrade of OVS.
+ shell: ls -1 /root/OVS_UPGRADE/*.rpm
+ register: ovs_list_of_rpms
+ - name: Manual upgrade of OVS
+ shell: |
+ rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+ rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+ args:
+ chdir: /root/OVS_UPGRADE
+ with_items:
+ - "{{ovs_list_of_rpms.stdout_lines}}"
+ tags: step2
+ when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+ or
+ ovs_packaging_issue|succeeded"
ovn::northbound::port: {get_param: OVNNorthboundServerPort}
ovn::southbound::port: {get_param: OVNSouthboundServerPort}
ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+ tripleo::haproxy::ovn_dbs_manage_lb: true
tripleo.ovn_dbs.firewall_rules:
'121 OVN DB server ports':
proto: 'tcp'
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
resources:
# internal_api_subnet - > IP/CIDR
tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr:
get_param: [ServiceNetMap, MysqlNetwork]
+ tripleo::profile::pacemaker::database::mysql::ca_file:
+ get_param: InternalTLSCAFile
step_config: |
include ::tripleo::profile::pacemaker::database::mysql
metadata_settings:
--- /dev/null
+heat_template_version: ocata
+
+description: >
+ OVN databases configured with puppet in HA mode
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OVNNorthboundServerPort:
+ description: Port of the OVN Northbound DB server
+ type: number
+ default: 6641
+ OVNSouthboundServerPort:
+ description: Port of the OVN Southbound DB server
+ type: number
+ default: 6642
+
+resources:
+
+ OVNDBsBase:
+ type: ../ovn-dbs.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the OVN northd service
+ value:
+ service_name: ovn_dbs
+ config_settings:
+ map_merge:
+ - get_attr: [OVNDBsBase, role_data, config_settings]
+ - tripleo::haproxy::ovn_dbs_manage_lb: false
+ tripleo::profile::pacemaker::ovn_northd::nb_db_port: {get_param: OVNNorthboundServerPort}
+ tripleo::profile::pacemaker::ovn_northd::sb_db_port: {get_param: OVNSouthboundServerPort}
+ step_config: |
+ include ::tripleo::profile::pacemaker::ovn_northd
MonitoringSubscriptionPacemakerRemote:
default: 'overcloud-pacemaker_remote'
type: string
+ EnableFencing:
+ default: false
+ description: Whether to enable fencing in Pacemaker or not.
+ type: boolean
+ FencingConfig:
+ default: {}
+ description: |
+ Pacemaker fencing configuration. The JSON should have
+ the following structure:
+ {
+ "devices": [
+ {
+ "agent": "AGENT_NAME",
+ "host_mac": "HOST_MAC_ADDRESS",
+ "params": {"PARAM_NAME": "PARAM_VALUE"}
+ }
+ ]
+ }
+ For instance:
+ {
+ "devices": [
+ {
+ "agent": "fence_xvm",
+ "host_mac": "52:54:00:aa:bb:cc",
+ "params": {
+ "multicast_address": "225.0.0.12",
+ "port": "baremetal_0",
+ "manage_fw": true,
+ "manage_key_file": true,
+ "key_file": "/etc/fence_xvm.key",
+ "key_file_password": "abcdef"
+ }
+ }
+ ]
+ }
+ type: json
PacemakerRemoteLoggingSource:
type: json
default:
proto: 'tcp'
dport:
- 3121
+ tripleo::fencing::config: {get_param: FencingConfig}
+ enable_fencing: {get_param: EnableFencing}
tripleo::profile::base::pacemaker_remote::remote_authkey: {get_param: PacemakerRemoteAuthkey}
step_config: |
include ::tripleo::profile::base::pacemaker_remote
tripleo.panko_api.firewall_rules:
'140 panko-api':
dport:
- - 8779
- - 13779
+ - 8977
+ - 13977
panko::api::host:
str_replace:
template:
--- /dev/null
+---
+features:
+  - This introduces the ManageKeystoneFernetKeys parameter, which tells
+    heat/puppet whether it should replace the existing fernet keys on a
+    stack deployment. This is useful if the deployer wants to do key
+    rotations out of band.
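+    A minimal sketch of opting out of key management (an illustrative
+    environment file; everything else keeps its defaults)::
+
+      parameter_defaults:
+        ManageKeystoneFernetKeys: false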
--- /dev/null
+---
+features:
+ - The HAProxy stats interface can now be enabled/disabled with the
+ HAProxyStatsEnabled flag. Note that it's still enabled by default.
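+    To turn the stats interface off, a deployer can set (a minimal
+    sketch)::
+
+      parameter_defaults:
+        HAProxyStatsEnabled: false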
--- /dev/null
+---
+features:
+  - The KeystoneFernetKeys parameter was introduced; it can take any number
+    of keys as long as they are in the right format. It is generated by the
+    same mechanism as the rest of the passwords, so its value is also
+    available via mistral's "password" environment variable. This will also
+    allow rotations to be made via mistral and via stack updates.
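+    The expected format mirrors the layout the keystone template previously
+    hard-coded; a minimal sketch with placeholder key material::
+
+      parameter_defaults:
+        KeystoneFernetKeys:
+          /etc/keystone/fernet-keys/0:
+            content: <first fernet key>
+          /etc/keystone/fernet-keys/1:
+            content: <second fernet key>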
+deprecations:
+ - The individual keystone fernet key parameters (KeystoneFernetKey0 and
+ KeystoneFernetKey1) were deprecated in favor of KeystoneFernetKeys.
--- /dev/null
+---
+features:
+ - Add parameters to control the Cinder NAS security settings associated
+ with the NFS and NetApp Cinder back ends. The settings are disabled
+ by default.
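+    For example, to opt in on the generic NFS back end (a sketch; valid
+    values are 'auto', 'true' and 'false', and the settings only take
+    effect when CinderEnableNfsBackend is true)::
+
+      parameter_defaults:
+        CinderNasSecureFileOperations: 'auto'
+        CinderNasSecureFilePermissions: 'auto'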
--- /dev/null
+---
+features:
+  - Added the new DeploymentSwiftDataMap parameter, which is used to set the
+    deployment_swift_data property on the Server resources. The parameter is
+    a map where the keys are the Heat-assigned hostnames, and each value is
+    a map of the container/object name in Swift.
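+    A minimal sketch (the hostname, container and object names below are
+    illustrative)::
+
+      parameter_defaults:
+        DeploymentSwiftDataMap:
+          overcloud-controller-0:
+            container: overcloud-controller-0
+            object: deployment-data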
--- /dev/null
+---
+features:
+ - Adds a new output, ServerOsCollectConfigData, which is the
+ os-collect-config configuration associated with each server resource.
+    This can be used to [pre]configure the os-collect-config agents on
+    deployed-servers.
--- /dev/null
+---
+fixes:
+ - |
+    When ``environments/services/ironic.yaml`` is used, enable the periodic
+    task in nova-scheduler to automatically discover new nodes. Otherwise a
+    user has to run a nova management command on the controllers each time.
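+    A sketch of the equivalent manual setting (the interval is in seconds;
+    120 is an illustrative value, and -1 disables the task)::
+
+      parameter_defaults:
+        NovaSchedulerDiscoverHostsInCellsInterval: 120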
--- /dev/null
+---
+features:
+ - |
+ Add an example role ``roles/IronicConductor.yaml`` for a node with only
+ ironic-conductor and its (i)PXE service.
--- /dev/null
+---
+fixes:
+  - Changed the panko API port to run on 8977 instead of 8779, which is
+    reserved for trove; the change avoids the conflict.
--- /dev/null
+---
+features:
+ - Added a custom plan-environment file for providing workflow specific
+ inputs for the derived parameters workflow.
--- /dev/null
+---
+features:
+ - DPDK is enabled in OvS before the NetworkDeployment to ensure DPDK
+ is ready to handle new port additions.
+upgrade:
+  - A new parameter, ServiceNames, is added to the PreNetworkConfig
+    resource. All templates associated with PreNetworkConfig should add
+    this new parameter during the upgrade.
--- /dev/null
+---
+fixes:
+ - Incorrect network used for Glance API service.
--- /dev/null
+---
+fixes:
+ - |
+ Fix support for RPMs to be installed via DeployArtifactURLs. LP#1697102
--- /dev/null
+---
+features:
+ - |
+ Add basic support for **ironic-inspector** in the overcloud. It is highly
+ experimental and is not yet recommended for production use.
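+    A minimal sketch of enabling it (the template path is an assumption
+    following the usual ``puppet/services`` layout; ``IronicInspectorIpRange``
+    has no default and must be set, and the range below is illustrative)::
+
+      resource_registry:
+        OS::TripleO::Services::IronicInspector: ../puppet/services/ironic-inspector.yaml
+
+      parameter_defaults:
+        IronicInspectorIpRange: 192.168.24.100,192.168.24.120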
--- /dev/null
+---
+features:
+ - KeystoneFernetMaxActiveKeys was introduced as a parameter to the keystone
+ profile. It sets the max_active_keys value of the keystone.conf file and
+ will subsequently be used by mistral to purge the keys in a mistral task.
--- /dev/null
+---
+features:
+  - Add support for configuring the number of storage sacks in gnocchi.
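+    For example (a sketch; the template default is 128)::
+
+      parameter_defaults:
+        NumberOfStorageSacks: 256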
--- /dev/null
+---
+features:
+ - Support HA for OVN db servers and ovn-northd using Pacemaker.
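+    A minimal sketch of switching to the Pacemaker-managed variant
+    (assuming the standard OVNDBs service key in the resource registry)::
+
+      resource_registry:
+        OS::TripleO::Services::OVNDBs: ../puppet/services/pacemaker/ovn-dbs.yaml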
--- /dev/null
+---
+features:
+ - PreNetworkConfig is modified to support role-specific parameters.
+upgrade:
+ - PreNetworkConfig takes a new parameter, RoleParameters. All the templates
+ associated with PreNetworkConfig should add this new parameter during
+ upgrade.
+deprecations:
+  - Parameters {{role}}KernelArgs, {{role}}TunedProfileName and
+    {{role}}HostCpusList are deprecated. Role-specific parameters with the
+    same names have been added as replacements.
--- /dev/null
+---
+features:
+ - Adds common openvswitch service template to be
+ inherited by other services.
+ - Adds environment file to be used for deploying
+ OpenDaylight + OVS DPDK.
+  - Adds first-boot and OVS configuration scripts.
+deprecations:
+ - The ``HostCpusList`` parameter is deprecated in
+ favor of ``OvsDpdkCoreList`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkCoreList`` parameter is deprecated in
+ favor of ``OvsPmdCoreList`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkMemoryChannels`` parameter is deprecated in
+ favor of ``OvsDpdkMemoryChannels`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkSocketMemory`` parameter is deprecated in
+ favor of ``OvsDpdkSocketMemory`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkDriverType`` parameter is deprecated in
+ favor of ``OvsDpdkDriverType`` and will be removed
+ in a future release.
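+    A sketch of migrating an existing environment file from the deprecated
+    names to the new ones (the values shown are illustrative)::
+
+      parameter_defaults:
+        OvsDpdkCoreList: "0,2"          # was HostCpusList
+        OvsPmdCoreList: "1,3"           # was NeutronDpdkCoreList
+        OvsDpdkMemoryChannels: "4"      # was NeutronDpdkMemoryChannels
+        OvsDpdkSocketMemory: "1024,0"   # was NeutronDpdkSocketMemory
+        OvsDpdkDriverType: "vfio-pci"   # was NeutronDpdkDriverType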
--- /dev/null
+---
+features:
+ - |
+ It is now possible to trigger Mistral workflows or workflow actions
+ before a deployment step is applied. This can be defined within the
+ scope of a service template and is described as a task property
+ for the Heat OS::Mistral::Workflow resource, for more details also
+ see the puppet/services/README.rst file.
\ No newline at end of file
--- /dev/null
+---
+features:
+ - Add 2 new example environments to facilitate deploying split-stack,
+ environments/overcloud-baremetal.j2.yaml and
+ environments/overcloud-services.yaml. The environments are used to deploy two
+ separate Heat stacks, one for just the baremetal+network configuration and one
+ for the service configuration.
--- /dev/null
+---
+features:
+ - Add VipMap output to the top level stack output. VipMap is a mapping from
+ each network to the VIP address on that network. Also includes the Redis
+ VIP.
- name: BlockStorage
description: |
Cinder Block Storage node role
+ networks:
+ - InternalApi
+ - Storage
+ - StorageMgmt
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::Ntp
- name: CephStorage
description: |
Ceph OSD Storage node role
+ networks:
+ - Storage
+ - StorageMgmt
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::CACerts
description: |
Basic Compute Node role
CountDefault: 1
+ networks:
+ - InternalApi
+ - Tenant
+ - Storage
HostnameFormatDefault: '%stackname%-novacompute-%index%'
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::ComputeNeutronOvsAgent
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronLinuxbridgeAgent
tags:
- primary
- controller
+ networks:
+ - External
+ - InternalApi
+ - Storage
+ - StorageMgmt
+ - Tenant
HostnameFormatDefault: '%stackname%-controller-%index%'
ServicesDefault:
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::Horizon
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
tags:
- primary
- controller
+ networks:
+ - External
+ - InternalApi
+ - Storage
+ - StorageMgmt
+ - Tenant
HostnameFormatDefault: '%stackname%-controller-%index%'
ServicesDefault:
- OS::TripleO::Services::AodhApi
- name: Database
description: |
Standalone database role with the database being managed via Pacemaker
+ networks:
+ - InternalApi
HostnameFormatDefault: '%stackname%-database-%index%'
ServicesDefault:
- OS::TripleO::Services::AuditD
--- /dev/null
+###############################################################################
+# Role: IronicConductor #
+###############################################################################
+- name: IronicConductor
+ description: |
+ Ironic Conductor node role
+ HostnameFormatDefault: '%stackname%-ironic-%index%'
+ ServicesDefault:
+ - OS::TripleO::Services::AuditD
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Collectd
+ - OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SensuClient
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::TripleoPackages
- name: Messaging
description: |
Standalone messaging role with RabbitMQ being managed via Pacemaker
+ networks:
+ - InternalApi
HostnameFormatDefault: '%stackname%-messaging-%index%'
ServicesDefault:
- OS::TripleO::Services::AuditD
description: |
Standalone networking role to run Neutron services on their own. Includes
Pacemaker integration via PacemakerRemote
+ networks:
+ - InternalApi
HostnameFormatDefault: '%stackname%-networker-%index%'
ServicesDefault:
- OS::TripleO::Services::AuditD
- name: ObjectStorage
description: |
Swift Object Storage node role
+ networks:
+ - InternalApi
+ - Storage
+ - StorageMgmt
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::AuditD
* description: (string) a few sentences describing the role and information
pertaining to the usage of the role.
+ * networks: (list), optional list of networks which the role will have
+   access to when network isolation is enabled. The names should match
+   those defined in network_data.yaml; see the example below.
+
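+For example, a storage-only role would declare just the storage networks
+(a minimal sketch, mirroring the CephStorage role in roles_data)::
+
+  - name: CephStorage
+    networks:
+      - Storage
+      - StorageMgmt
+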
Working with Roles
==================
The tripleoclient provides a series of commands that can be used to view
- name: Telemetry
description: |
Telemetry role that has all the telemetry services.
+ networks:
+ - InternalApi
HostnameFormatDefault: '%stackname%-telemetry-%index%'
ServicesDefault:
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
- OS::TripleO::Services::IronicPxe
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::MistralApi
tags:
- primary
- controller
+ networks:
+ - External
+ - InternalApi
+ - Storage
+ - StorageMgmt
+ - Tenant
HostnameFormatDefault: '%stackname%-controller-%index%'
ServicesDefault:
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::Horizon
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
description: |
Basic Compute Node role
CountDefault: 1
+ networks:
+ - InternalApi
+ - Tenant
+ - Storage
HostnameFormatDefault: '%stackname%-novacompute-%index%'
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::ComputeNeutronOvsAgent
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronLinuxbridgeAgent
- name: BlockStorage
description: |
Cinder Block Storage node role
+ networks:
+ - InternalApi
+ - Storage
+ - StorageMgmt
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::BlockStorageCinderVolume
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Docker
- OS::TripleO::Services::FluentdClient
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::Ntp
- name: ObjectStorage
description: |
Swift Object Storage node role
+ networks:
+ - InternalApi
+ - Storage
+ - StorageMgmt
disable_upgrade_deployment: True
ServicesDefault:
- OS::TripleO::Services::AuditD
- name: CephStorage
description: |
Ceph OSD Storage node role
+ networks:
+ - Storage
+ - StorageMgmt
ServicesDefault:
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::IronicApi
- OS::TripleO::Services::IronicConductor
+ - OS::TripleO::Services::IronicInspector
- OS::TripleO::Services::IronicPxe
+ - OS::TripleO::Services::Iscsid
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::MistralApi
- OS::TripleO::Services::UndercloudAodhListener
- OS::TripleO::Services::UndercloudAodhNotifier
- OS::TripleO::Services::UndercloudCeilometerAgentCentral
+ - OS::TripleO::Services::UndercloudCeilometerAgentIpmi
- OS::TripleO::Services::UndercloudCeilometerAgentNotification
- OS::TripleO::Services::UndercloudGnocchiApi
- OS::TripleO::Services::UndercloudGnocchiMetricd
--- /dev/null
+Sample Environment Generator
+----------------------------
+
+This is a tool to automate the generation of our sample environment
+files. It takes a yaml file as input, and based on the environments
+defined in that file generates a number of sample environment files
+from the parameters in the Heat templates.
+
+Usage
+=====
+
+The simplest case is when an existing sample environment needs to be
+updated to reflect changes in the templates. Use the tox ``genconfig``
+target to do this::
+
+ tox -e genconfig
+
+.. note:: The tool should be run from the root directory of the
+ ``tripleo-heat-templates`` project.
+
+If a new sample environment is needed, it should be added to the
+appropriate file in the ``sample-env-generator/`` directory. The existing
+entries in the files can be used as examples, and a more detailed
+explanation of the different available keys is below:
+
+Top-level:
+- **environments**: This is the top-level key in the file. All other keys
+ below should appear in a list of dictionaries that define environments.
+
+Environment-specific:
+- **name**: the output file will be this name + .yaml, in the
+ ``environments`` directory.
+- **title**: a human-readable title for the environment.
+- **description**: A description of the environment. Will be included
+ as a comment at the top of the sample file.
+- **files**: The Heat templates containing the parameter definitions
+ for the environment. Should be specified as a path relative to the
+ root of the ``tripleo-heat-templates`` project. For example:
+  ``puppet/extraconfig/tls/tls-cert-inject.yaml:``. Each filename key
+  should map to a YAML dictionary that contains a ``parameters`` entry.
+- **parameters**: There should be one ``parameters`` entry per file in the
+ ``files`` section (see the example configuration below).
+ This can be either a list of parameters related to
+ the environment, which is necessary for templates like
+ overcloud.yaml, or the string 'all', which indicates that all
+ parameters from the file should be included.
+- **static**: Can be used to specify that certain parameters must
+ not be changed. Examples would be the EnableSomething params
+ in the templates. When writing a sample config for Something,
+ ``EnableSomething: True`` would be a static param, since it
+ would be nonsense to include the environment with it set to any other
+ value.
+- **sample_values**: Sometimes it is useful to include a sample value
+ for a parameter that is not the parameter's actual default.
+ An example of this is the SSLCertificate param in the enable-tls
+ environment file.
+- **resource_registry**: Many environments also need to pass
+ resource_registry entries when they are used. This can be used
+ to specify that in the configuration file.
+- **children**: For environments that share a lot of common values but may
+ need minor variations for different use cases, sample environment entries
+ can be nested. ``children`` takes a list of environments with the same
+ structure as the top-level ``environments`` key. The main difference is
+ that all keys are optional, and any that are omitted will be inherited from
+  the parent environment definition (see the example below).
+
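+For instance, a parent environment with one child that only enables an
+extra sample value might look like (a minimal sketch)::
+
+  environments:
+    -
+      name: example
+      title: Example Environment
+      description: The common settings.
+      files:
+        example.yaml:
+          parameters: all
+      children:
+        - name: example-variant
+          description: The common settings with the feature enabled.
+          sample_values:
+            EnableExample: True
+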
+Some behavioral notes:
+
+- Parameters without default values will be marked as mandatory to indicate
+ that the user must set a value for them.
+- It is no longer recommended to set parameters using the ``parameters``
+ section. Instead, all parameters should be set as ``parameter_defaults``
+ which will work regardless of whether the parameter is top-level or nested.
+ Therefore, the tool will always set parameters in the ``parameter_defaults``
+ section.
+- Parameters whose name begins with the _ character are treated as private.
+ This indicates that the parameter value will be passed in from another
+ template and does not need to be exposed directly to the user.
+
+If adding a new environment, don't forget to add the new file to the
+git repository so it will be included with the review.
+
+Example
+=======
+
+Given a Heat template named ``example.yaml`` that looks like::
+
+ parameters:
+ EnableExample:
+ default: False
+ description: Enable the example feature
+ type: boolean
+ ParamOne:
+ default: one
+ description: First example param
+ type: string
+ ParamTwo:
+ description: Second example param
+ type: number
+ _PrivateParam:
+ default: does not matter
+ description: Will not show up
+ type: string
+
+And an environment generator entry that looks like::
+
+ environments:
+ -
+ name: example
+ title: Example Environment
+ description: |
+ An example environment demonstrating how to use the sample
+ environment generator. This text will be included at the top
+ of the generated file as a comment.
+ files:
+ example.yaml:
+ parameters: all
+ sample_values:
+ EnableExample: True
+ static:
+ - EnableExample
+ resource_registry:
+ OS::TripleO::ExampleData: ../extraconfig/example.yaml
+
+The generated environment file would look like::
+
+ # *******************************************************************
+ # This file was created automatically by the sample environment
+ # generator. Developers should use `tox -e genconfig` to update it.
+ # Users are recommended to make changes to a copy of the file instead
+ # of the original, if any customizations are needed.
+ # *******************************************************************
+ # title: Example Environment
+ # description: |
+ # An example environment demonstrating how to use the sample
+ # environment generator. This text will be included at the top
+ # of the generated file as a comment.
+ parameter_defaults:
+ # First example param
+ # Type: string
+ ParamOne: one
+
+ # Second example param
+ # Mandatory. This parameter must be set by the user.
+ # Type: number
+ ParamTwo: <None>
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # Enable the example feature
+ # Type: boolean
+ EnableExample: True
+
+ # *********************
+ # End static parameters
+ # *********************
+ resource_registry:
+ OS::TripleO::ExampleData: ../extraconfig/example.yaml
--- /dev/null
+environments:
+ -
+ name: networking/neutron-midonet
+ title: Enable the Neutron MidoNet Services
+ description: A Heat environment that can be used to deploy MidoNet Services
+ files:
+ puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml:
+ parameters: all
+ puppet/services/neutron-base.yaml:
+ parameters:
+ - NeutronCorePlugin
+ puppet/services/neutron-dhcp.yaml:
+ parameters:
+ - NeutronEnableIsolatedMetadata
+ sample_values:
+ NeutronCorePlugin: 'midonet.neutron.plugin_v1.MidonetPluginV2'
+ NeutronEnableIsolatedMetadata: true
+ EnableZookeeperOnController: true
+ EnableCassandraOnController: true
+ static:
+ - NeutronCorePlugin
+ - NeutronEnableIsolatedMetadata
+ - EnableZookeeperOnController
+ - EnableCassandraOnController
+ resource_registry:
+ OS::TripleO::AllNodesExtraConfig: ../../puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
+ OS::TripleO::Controller::Net::SoftwareConfig: ../../net-config-linux-bridge.yaml
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginMidonet
+ OS::TripleO::Services::ComputeNeutronCorePlugin: ../../puppet/services/neutron-compute-plugin-midonet.yaml
--- /dev/null
+environments:
+ -
+ name: predictable-placement/custom-hostnames
+ title: Custom Hostnames
+ files:
+ overcloud.yaml:
+ parameters:
+ - ControllerHostnameFormat
+ - ComputeHostnameFormat
+ - BlockStorageHostnameFormat
+ - ObjectStorageHostnameFormat
+ - CephStorageHostnameFormat
+ description: |
+ Hostname format for each role
+    Note %index% is translated into the index of the node, e.g. 0/1/2 etc.,
+ and %stackname% is replaced with OS::stack_name in the template below.
+ If you want to use the heat generated names, pass '' (empty string).
--- /dev/null
+environments:
+ -
+ name: ssl/enable-tls
+ title: Enable SSL on OpenStack Public Endpoints
+ description: |
+ Use this environment to pass in certificates for SSL deployments.
+ For these values to take effect, one of the tls-endpoints-*.yaml environments
+ must also be used.
+ files:
+ puppet/extraconfig/tls/tls-cert-inject.yaml:
+ parameters: all
+ static:
+ # This should probably be private, but for testing static params I'm
+ # setting it as such for now.
+ - DeployedSSLCertificatePath
+ sample_values:
+ SSLCertificate: |-
+ |
+ The contents of your certificate go here
+ SSLKey: |-
+ |
+ The contents of the private key go here
+ resource_registry:
+ OS::TripleO::NodeTLSData: ../../puppet/extraconfig/tls/tls-cert-inject.yaml
+ - name: ssl/inject-trust-anchor
+ title: Inject SSL Trust Anchor on Overcloud Nodes
+ description: |
+ When using an SSL certificate signed by a CA that is not in the default
+ list of CAs, this environment allows adding a custom CA certificate to
+ the overcloud nodes.
+ files:
+ puppet/extraconfig/tls/ca-inject.yaml:
+ parameters:
+ - SSLRootCertificate
+ sample_values:
+ SSLRootCertificate: |-
+ |
+ The contents of your certificate go here
+ resource_registry:
+ OS::TripleO::NodeTLSCAData: ../../puppet/extraconfig/tls/ca-inject.yaml
+ children:
+ - name: ssl/inject-trust-anchor-hiera
+ files:
+ puppet/services/ca-certs.yaml:
+ parameters:
+ - CAMap
+ # Need to clear this so we don't inherit the parent registry
+ resource_registry: {}
+ sample_values:
+ CAMap: |-2
+
+ first-ca-name:
+ content: |
+ The content of the CA cert goes here
+ second-ca-name:
+ content: |
+ The content of the CA cert goes here
+ -
+ name: ssl/tls-endpoints-public-ip
+ title: Deploy Public SSL Endpoints as IP Addresses
+ description: |
+ Use this environment when deploying an SSL-enabled overcloud where the public
+ endpoint is an IP address.
+ files:
+ network/endpoints/endpoint_map.yaml:
+ parameters:
+ - EndpointMap
+ sample_values:
+ # NOTE(bnemec): This is a bit odd, but it's the only way I've found that
+ # works. The |-2 tells YAML to strip two spaces off the indentation of
+ # the value, which because it's indented six spaces gets us to the four
+ # that we actually want. Note that zero is not a valid value here, so
+ # two seemed like the most sane option.
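+ # As an illustration only: a value written as "EndpointMap: |-2", followed
+ # by a blank line and content indented six spaces, parses to a string that
+ # begins with a newline and keeps four spaces of indentation, which is the
+ # layout the value needs under parameter_defaults in the generated file.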
+ EndpointMap: |-2
+
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'IP_ADDRESS'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'IP_ADDRESS'}
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'IP_ADDRESS'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'IP_ADDRESS'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'IP_ADDRESS'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'IP_ADDRESS'}
+ GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'IP_ADDRESS'}
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'IP_ADDRESS'}
+ HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'IP_ADDRESS'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'IP_ADDRESS'}
+ IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'IP_ADDRESS'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'IP_ADDRESS'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'IP_ADDRESS'}
+ ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'IP_ADDRESS'}
+ MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'IP_ADDRESS'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'IP_ADDRESS'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'IP_ADDRESS'}
+ NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'IP_ADDRESS'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+ SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'IP_ADDRESS'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'IP_ADDRESS'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'IP_ADDRESS'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'IP_ADDRESS'}
+ -
+ name: ssl/tls-endpoints-public-dns
+ title: Deploy Public SSL Endpoints as DNS Names
+ description: |
+ Use this environment when deploying an SSL-enabled overcloud where the public
+ endpoint is a DNS name.
+ files:
+ network/endpoints/endpoint_map.yaml:
+ parameters:
+ - EndpointMap
+ sample_values:
+ # NOTE(bnemec): This is a bit odd, but it's the only way I've found that
+ # works. The |-2 tells YAML to strip two spaces off the indentation of
+ # the value, which because it's indented six spaces gets us to the four
+ # that we actually want. Note that zero is not a valid value here, so
+ # two seemed like the most sane option.
+ EndpointMap: |-2
+
+ AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanInternal: {protocol: 'http', port: '9311', host: 'IP_ADDRESS'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+ CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressInternal: {protocol: 'http', port: '1789', host: 'IP_ADDRESS'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiInternal: {protocol: 'http', port: '8788', host: 'IP_ADDRESS'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+ HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnInternal: {protocol: 'http', port: '8000', host: 'IP_ADDRESS'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
+ KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ ManilaAdmin: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaInternal: {protocol: 'http', port: '8786', host: 'IP_ADDRESS'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+ MistralAdmin: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralInternal: {protocol: 'http', port: '8989', host: 'IP_ADDRESS'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'IP_ADDRESS'}
+ NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementInternal: {protocol: 'http', port: '8778', host: 'IP_ADDRESS'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerInternal: {protocol: 'http', port: '9890', host: 'IP_ADDRESS'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarInternal: {protocol: 'http', port: '8888', host: 'IP_ADDRESS'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketInternal: {protocol: 'ws', port: '9000', host: 'IP_ADDRESS'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ -
+ name: ssl/tls-everywhere-endpoints-dns
+ title: Deploy All SSL Endpoints as DNS Names
+ description: |
+ Use this environment when deploying an overcloud where all the endpoints
+ are DNS names and TLS is used on all endpoint types.
+ files:
+ network/endpoints/endpoint_map.yaml:
+ parameters:
+ - EndpointMap
+ sample_values:
+ # NOTE(bnemec): This is a bit odd, but it's the only way I've found that
+ # works. The |-2 tells YAML to strip two spaces off the indentation of
+ # the value, which because it's indented six spaces gets us to the four
+ # that we actually want. Note that zero is not a valid value here, so
+ # two seemed like the most sane option.
+ EndpointMap: |-2
+
+ AodhAdmin: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhInternal: {protocol: 'https', port: '8042', host: 'CLOUDNAME'}
+ AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
+ BarbicanAdmin: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanInternal: {protocol: 'https', port: '9311', host: 'CLOUDNAME'}
+ BarbicanPublic: {protocol: 'https', port: '13311', host: 'CLOUDNAME'}
+ CeilometerAdmin: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerInternal: {protocol: 'https', port: '8777', host: 'CLOUDNAME'}
+ CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
+ CephRgwAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ CephRgwPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ CinderAdmin: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderInternal: {protocol: 'https', port: '8776', host: 'CLOUDNAME'}
+ CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
+ CongressAdmin: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressInternal: {protocol: 'https', port: '1789', host: 'CLOUDNAME'}
+ CongressPublic: {protocol: 'https', port: '13789', host: 'CLOUDNAME'}
+ ContrailAnalyticsApiAdmin: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiInternal: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsApiPublic: {protocol: 'http', port: '8081', host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpAdmin: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpInternal: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorHttpPublic: {protocol: 'http', port: '8089',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshAdmin: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshInternal: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsCollectorSandeshPublic: {protocol: 'http', port: '8086',
+ host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpAdmin: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpInternal: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsHttpPublic: {protocol: 'http', port: '8090', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisAdmin: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisInternal: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailAnalyticsRedisPublic: {protocol: 'http', port: '6379', host: 'IP_ADDRESS'}
+ ContrailConfigAdmin: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigInternal: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailConfigPublic: {protocol: 'http', port: '8082', host: 'IP_ADDRESS'}
+ ContrailDiscoveryAdmin: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryInternal: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailDiscoveryPublic: {protocol: 'http', port: '5998', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpPublic: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsAdmin: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsInternal: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ ContrailWebuiHttpsPublic: {protocol: 'http', port: '8143', host: 'IP_ADDRESS'}
+ Ec2ApiAdmin: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiInternal: {protocol: 'https', port: '8788', host: 'CLOUDNAME'}
+ Ec2ApiPublic: {protocol: 'https', port: '13788', host: 'CLOUDNAME'}
+ GlanceAdmin: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlanceInternal: {protocol: 'https', port: '9292', host: 'CLOUDNAME'}
+ GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
+ GnocchiAdmin: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiInternal: {protocol: 'https', port: '8041', host: 'CLOUDNAME'}
+ GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
+ HeatAdmin: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatInternal: {protocol: 'https', port: '8004', host: 'CLOUDNAME'}
+ HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
+ HeatCfnAdmin: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnInternal: {protocol: 'https', port: '8000', host: 'CLOUDNAME'}
+ HeatCfnPublic: {protocol: 'https', port: '13005', host: 'CLOUDNAME'}
+ HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
+ IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
+ IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
+ KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
+ KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
+ KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
+ ManilaAdmin: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaInternal: {protocol: 'https', port: '8786', host: 'CLOUDNAME'}
+ ManilaPublic: {protocol: 'https', port: '13786', host: 'CLOUDNAME'}
+ MistralAdmin: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+ MistralInternal: {protocol: 'https', port: '8989', host: 'CLOUDNAME'}
+ MistralPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ MysqlInternal: {protocol: 'mysql+pymysql', port: '3306', host: 'CLOUDNAME'}
+ NeutronAdmin: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronInternal: {protocol: 'https', port: '9696', host: 'CLOUDNAME'}
+ NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
+ NovaAdmin: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaInternal: {protocol: 'https', port: '8774', host: 'CLOUDNAME'}
+ NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
+ NovaPlacementAdmin: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementInternal: {protocol: 'https', port: '8778', host: 'CLOUDNAME'}
+ NovaPlacementPublic: {protocol: 'https', port: '13778', host: 'CLOUDNAME'}
+ NovaVNCProxyAdmin: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyInternal: {protocol: 'https', port: '6080', host: 'CLOUDNAME'}
+ NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
+ OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
+ OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
+ PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
+ PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+ SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
+ SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
+ SwiftAdmin: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftInternal: {protocol: 'https', port: '8080', host: 'CLOUDNAME'}
+ SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}
+ TackerAdmin: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerInternal: {protocol: 'https', port: '9890', host: 'CLOUDNAME'}
+ TackerPublic: {protocol: 'https', port: '13989', host: 'CLOUDNAME'}
+ ZaqarAdmin: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarInternal: {protocol: 'https', port: '8888', host: 'CLOUDNAME'}
+ ZaqarPublic: {protocol: 'https', port: '13888', host: 'CLOUDNAME'}
+ ZaqarWebSocketAdmin: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketInternal: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
+ ZaqarWebSocketPublic: {protocol: 'wss', port: '9000', host: 'CLOUDNAME'}
--- /dev/null
+environments:
+ -
+ name: storage/enable-ceph
+ title: Enable Ceph Storage Backend
+ files:
+ puppet/services/cinder-volume.yaml:
+ parameters:
+ - CinderEnableIscsiBackend
+ - CinderEnableRbdBackend
+ puppet/services/cinder-backup.yaml:
+ parameters:
+ - CinderBackupBackend
+ puppet/services/nova-compute.yaml:
+ parameters:
+ - NovaEnableRbdBackend
+ puppet/services/glance-api.yaml:
+ parameters:
+ - GlanceBackend
+ puppet/services/gnocchi-api.yaml:
+ parameters:
+ - GnocchiBackend
+ sample_values:
+ CinderEnableIscsiBackend: False
+ CinderEnableRbdBackend: True
+ CinderBackupBackend: rbd
+ NovaEnableRbdBackend: True
+ GlanceBackend: rbd
+ GnocchiBackend: rbd
+ description: |
+ Include this environment to enable Ceph as the backend for
+ Cinder, Nova, Gnocchi, and Glance.
+ -
+ name: storage/cinder-nfs
+ title: Enable Cinder NFS Backend
+ files:
+ puppet/services/cinder-volume.yaml:
+ parameters:
+ - CinderNfsMountOptions
+ - CinderNfsServers
+ - CinderEnableNfsBackend
+ - CinderEnableIscsiBackend
+ sample_values:
+ CinderEnableNfsBackend: True
+ CinderEnableIscsiBackend: False
+ CinderNfsServers: '192.168.122.1:/export/cinder'
+ description: |
+ Configure and include this environment to enable the use of an NFS
+ share as the backend for Cinder.
+ -
+ name: storage/glance-nfs
+ title: Enable Glance NFS Backend
+ files:
+ puppet/services/glance-api.yaml:
+ parameters:
+ - GlanceBackend
+ - GlanceNfsEnabled
+ - GlanceNfsShare
+ - GlanceNfsOptions
+ sample_values:
+ GlanceBackend: file
+ GlanceNfsEnabled: True
+ static:
+ - GlanceBackend
+ - GlanceNfsEnabled
+ description: |
+ Configure and include this environment to enable the use of an NFS
+ share as the backend for Glance.
+ -
+ name: storage/external-ceph
+ title: Deploy Using an External Ceph Cluster
+ files:
+ puppet/services/nova-compute.yaml:
+ parameters:
+ - NovaRbdPoolName
+ - NovaEnableRbdBackend
+ - CephClientUserName
+ puppet/services/cinder-volume.yaml:
+ parameters:
+ - CinderRbdPoolName
+ - CinderEnableIscsiBackend
+ - CinderEnableRbdBackend
+ puppet/services/glance-api.yaml:
+ parameters:
+ - GlanceRbdPoolName
+ - GlanceBackend
+ puppet/services/gnocchi-api.yaml:
+ parameters:
+ - GnocchiBackend
+ puppet/services/gnocchi-base.yaml:
+ parameters:
+ - GnocchiRbdPoolName
+ puppet/services/ceph-external.yaml:
+ parameters:
+ - CephClusterFSID
+ - CephClientKey
+ - CephExternalMonHost
+ - RbdDefaultFeatures
+ puppet/services/ceph-base.yaml:
+ parameters:
+ - CephAdminKey
+ sample_values:
+ CinderEnableIscsiBackend: False
+ CinderEnableRbdBackend: True
+ NovaEnableRbdBackend: True
+ GlanceBackend: rbd
+ GnocchiBackend: rbd
+ NovaRbdPoolName: vms
+ CinderRbdPoolName: volumes
+ GlanceRbdPoolName: images
+ GnocchiRbdPoolName: metrics
+ CephClientUserName: openstack
+ CephAdminKey: ''
+ description: |
+ A Heat environment file which can be used to enable the
+ use of an externally managed Ceph cluster.
+ resource_registry:
+ OS::TripleO::Services::CephExternal: ../../puppet/services/ceph-external.yaml
+ OS::TripleO::Services::CephMon: OS::Heat::None
+ OS::TripleO::Services::CephClient: OS::Heat::None
+ OS::TripleO::Services::CephOSD: OS::Heat::None
+ -
+ name: storage/cinder-netapp-config
+ title: Enable the Cinder NetApp Backend
+ description: |
+ A Heat environment file which can be used to enable a
+ Cinder NetApp backend, configured via Puppet.
+ files:
+ puppet/services/cinder-backend-netapp.yaml:
+ parameters: all
+ static:
+ - CinderEnableNetappBackend
+ resource_registry:
+ OS::TripleO::ControllerExtraConfigPre: ../../puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
+#FIXME move into common when specfile adds it
heat_template_version: pike
description: >
yaql:
expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
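+ # As with service_config_settings above, fold each service's
+ # service_workflow_tasks map into a single deep-merged map
+ # (reduce with mergeWith).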
+ service_workflow_tasks:
+ yaql:
+ expression: $.data.role_data.where($ != null).select($.get('service_workflow_tasks')).where($ != null).reduce($1.mergeWith($2), {})
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
step_config: {get_attr: [ServiceChain, role_data, step_config]}
upgrade_tasks:
yaql:
expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
data: {get_attr: [ServiceChain, role_data]}
service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
+
+ # Keys to support docker/services
+ puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
+ kolla_config:
+ map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
+ docker_config:
+ {get_attr: [ServiceChain, role_data, docker_config]}
+ docker_puppet_tasks:
+ {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
+ host_prep_tasks:
+ yaql:
+ # Note: we use distinct() here to drop duplicate tasks
+ expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
+ data: {get_attr: [ServiceChain, role_data]}
sphinx!=1.6.1,>=1.5.1 # BSD
oslosphinx>=4.7.0 # Apache-2.0
reno!=2.3.1,>=1.8.0 # Apache-2.0
+coverage!=4.4,>=4.0 # Apache-2.0
+fixtures>=3.0.0 # Apache-2.0/BSD
+python-subunit>=0.0.18 # Apache-2.0/BSD
+testrepository>=0.0.18 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+testtools>=1.4.0 # MIT
+mock>=2.0 # BSD
+oslotest>=1.10.0 # Apache-2.0
required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
'RoleName', 'RoleParameters']
+# NOTE(bnemec): The duplication in this list is intentional. For the
+# transition to generated environments we have two copies of these files,
+# so they need to be listed twice. Once the deprecated versions are removed,
+# the duplicate entries can be removed as well.
envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
+ 'tls-endpoints-public-ip.yaml',
+ 'tls-everywhere-endpoints-dns.yaml',
+ 'tls-endpoints-public-dns.yaml',
'tls-endpoints-public-ip.yaml',
'tls-everywhere-endpoints-dns.yaml']
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
'config_image']
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags']
+# Mapping of parameter names to a list of the fields we should _not_ enforce
+# consistency across files on. This should only contain parameters whose
+# definition we cannot change for backwards compatibility reasons. New
+# parameters to the templates should not be added to this list.
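+# For example, ManagementNetCidr is defined in multiple files with differing
+# 'default' values; excluding 'default' overwrites that field on every copy
+# before the comparison, so only the remaining fields (type, description,
+# and so on) must match across files.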
+PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
+ 'ManagementAllocationPools': ['default'],
+ 'ExternalNetCidr': ['default'],
+ 'ExternalAllocationPools': ['default'],
+ 'StorageNetCidr': ['default'],
+ 'StorageAllocationPools': ['default'],
+ 'StorageMgmtNetCidr': ['default'],
+ 'StorageMgmtAllocationPools': ['default'],
+ }
def exit_usage():
return 0
-def validate(filename):
+def validate(filename, param_map):
+ """Validate a Heat template
+
+ :param filename: The path to the file to validate
+ :param param_map: A dict which will be populated with the details of the
+ parameters in the template. The dict will have the
+ following structure:
+
+ {'ParameterName': [
+ {'filename': ./file1.yaml,
+ 'data': {'description': '',
+ 'type': string,
+ 'default': '',
+ ...}
+ },
+ {'filename': ./file2.yaml,
+ 'data': {'description': '',
+ 'type': string,
+ 'default': '',
+ ...}
+ },
+ ...
+ ]}
+ """
print('Validating %s' % filename)
retval = 0
try:
    # qdr aliases the rabbitmq service to provide an alternative messaging backend
if (filename.startswith('./puppet/services/') and
- filename not in ['./puppet/services/services.yaml',
- './puppet/services/qdr.yaml']):
+ filename not in ['./puppet/services/qdr.yaml']):
retval = validate_service(filename, tpl)
- if (filename.startswith('./docker/services/') and
- filename != './docker/services/services.yaml'):
+ if filename.startswith('./docker/services/'):
retval = validate_docker_service(filename, tpl)
if filename.endswith('hyperconverged-ceph.yaml'):
return 1
# yaml is OK, now walk the parameters and output a warning for unused ones
if 'heat_template_version' in tpl:
- for p in tpl.get('parameters', {}):
+ for p, data in tpl.get('parameters', {}).items():
+ definition = {'data': data, 'filename': filename}
+ param_map.setdefault(p, []).append(definition)
if p in required_params:
continue
str_p = '\'%s\'' % p
failed_files = []
base_endpoint_map = None
env_endpoint_maps = list()
+param_map = {}
for base_path in path_args:
if os.path.isdir(base_path):
for f in files:
if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
file_path = os.path.join(subdir, f)
- failed = validate(file_path)
+ failed = validate(file_path, param_map)
if failed:
failed_files.append(file_path)
exit_val |= failed
if env_endpoint_map:
env_endpoint_maps.append(env_endpoint_map)
elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
- failed = validate(base_path)
+ failed = validate(base_path, param_map)
if failed:
failed_files.append(base_path)
exit_val |= failed
else:
print("%s matches base endpoint map" % env_endpoint_map['file'])
else:
- print("ERROR: Can't validate endpoint maps since a file is missing. "
- "If you meant to delete one of these files you should update this "
- "tool as well.")
+ print("ERROR: Did not find expected number of environments containing the "
+ "EndpointMap parameter. If you meant to add or remove one of these "
+ "environments then you also need to update this tool.")
if not base_endpoint_map:
failed_files.append(ENDPOINT_MAP_FILE)
if len(env_endpoint_maps) != len(envs_containing_endpoint_map):
failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
exit_val |= 1
+# Validate that duplicate parameters defined in multiple files all have the
+# same definition.
+mismatch_count = 0
+for p, defs in param_map.items():
+ # Nothing to validate if the parameter is only defined once
+ if len(defs) == 1:
+ continue
+ check_data = [d['data'] for d in defs]
+ # Override excluded fields so they don't affect the result
+ exclusions = PARAMETER_DEFINITION_EXCLUSIONS.get(p, [])
+ ex_dict = {}
+ for field in exclusions:
+ ex_dict[field] = 'IGNORED'
+ for d in check_data:
+ d.update(ex_dict)
+ # If all items in the list are not == the first, then the check fails
+ if check_data.count(check_data[0]) != len(check_data):
+ mismatch_count += 1
+ # TODO(bnemec): Make this a hard failure once all the templates have
+ # been fixed.
+ #exit_val |= 1
+ #failed_files.extend([d['filename'] for d in defs])
+ print('Mismatched parameter definitions found for "%s"' % p)
+ print('Definitions found:')
+ for d in defs:
+ print(' %s:\n %s' % (d['filename'], d['data']))
+print('Mismatched parameter definitions: %d' % mismatch_count)
+
if failed_files:
print('Validation failed on:')
for f in failed_files:
[tox]
minversion = 1.6
skipsdist = True
+envlist = py35,py27,pep8
[testenv]
usedevelop = True
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
+commands = python setup.py testr --slowest --testr-args='{posargs}'
[testenv:venv]
commands = {posargs}
[testenv:releasenotes]
commands = bash -c tools/releasenotes_tox.sh
+
+[testenv:cover]
+commands = python setup.py test --coverage --coverage-package-name=tripleo_heat_templates --testr-args='{posargs}'
+
+[testenv:genconfig]
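+# Render the *.j2.yaml templates and regenerate the sample environments in
+# environments/ from the definitions under sample-env-generator/.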
+commands =
+ python ./tools/process-templates.py
+ python ./tripleo_heat_templates/environment_generator.py sample-env-generator/
--- /dev/null
+#!/usr/bin/env python
+
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import os
+import sys
+import yaml
+
+
+_PARAM_FORMAT = u""" # %(description)s
+ %(mandatory)s# Type: %(type)s
+ %(name)s:%(default)s
+"""
+_STATIC_MESSAGE_START = (
+ ' # ******************************************************\n'
+ ' # Static parameters - these are values that must be\n'
+ ' # included in the environment but should not be changed.\n'
+ ' # ******************************************************\n'
+ )
+_STATIC_MESSAGE_END = (' # *********************\n'
+ ' # End static parameters\n'
+ ' # *********************\n'
+ )
+_FILE_HEADER = (
+ '# *******************************************************************\n'
+ '# This file was created automatically by the sample environment\n'
+ '# generator. Developers should use `tox -e genconfig` to update it.\n'
+    '# Users should make changes to a copy of the file instead\n'
+ '# of the original, if any customizations are needed.\n'
+ '# *******************************************************************\n'
+ )
+# Certain parameter names can't be changed, but shouldn't be shown because
+# they are never intended for direct user input.
+_PRIVATE_OVERRIDES = ['server', 'servers', 'NodeIndex', 'DefaultPasswords']
+# Hidden params are not included by default when the 'all' option is used,
+# but can be explicitly included by referencing them in sample_values or
+# static. This allows us to generate sample environments using them when
+# necessary, but they won't be improperly included by accident.
+_HIDDEN_PARAMS = ['EndpointMap', 'RoleName', 'RoleParameters',
+ 'ServiceNetMap',
+ ]
+
+
+def _create_output_dir(target_file):
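+    # Tolerate a pre-existing directory; this mirrors
+    # os.makedirs(..., exist_ok=True), which is not available on Python 2.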
+ try:
+ os.makedirs(os.path.dirname(target_file))
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+
+def _generate_environment(input_env, parent_env=None):
+ if parent_env is None:
+ parent_env = {}
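+    # A child environment starts from a copy of its parent's definition,
+    # minus the 'children' key; anything the child defines itself then
+    # overrides the inherited values.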
+ env = dict(parent_env)
+ env.pop('children', None)
+ env.update(input_env)
+ parameter_defaults = {}
+ param_names = []
+ sample_values = env.get('sample_values', {})
+ static_names = env.get('static', [])
+ for template_file, template_data in env['files'].items():
+ with open(template_file) as f:
+ f_data = yaml.safe_load(f)
+ f_params = f_data['parameters']
+ parameter_defaults.update(f_params)
+ if template_data['parameters'] == 'all':
+            new_names = list(f_params)
+            for hidden in _HIDDEN_PARAMS:
+                # static_names is a list, so convert the dict keys to a
+                # list too; list + dict_keys raises TypeError on Python 3.
+                if (hidden not in static_names + list(sample_values.keys())
+                        and hidden in new_names):
+                    new_names.remove(hidden)
+ else:
+ new_names = template_data['parameters']
+ missing_params = [name for name in new_names
+ if name not in f_params]
+ if missing_params:
+ raise RuntimeError('Did not find specified parameter names %s '
+ 'in file %s for environment %s' %
+ (missing_params, template_file,
+ env['name']))
+ param_names += new_names
+
+ static_defaults = {k: v for k, v in parameter_defaults.items()
+ if k in param_names and
+ k in static_names
+ }
+ parameter_defaults = {k: v for k, v in parameter_defaults.items()
+ if k in param_names and
+ k not in _PRIVATE_OVERRIDES and
+ not k.startswith('_') and
+ k not in static_names
+ }
+
+ for k, v in sample_values.items():
+ if k in parameter_defaults:
+ parameter_defaults[k]['sample'] = v
+ if k in static_defaults:
+ static_defaults[k]['sample'] = v
+
+ def write_sample_entry(f, name, value):
+ default = value.get('default')
+ mandatory = ''
+ if default is None:
+ mandatory = ('# Mandatory. This parameter must be set by the '
+ 'user.\n ')
+ default = '<None>'
+ if value.get('sample') is not None:
+ default = value['sample']
+ # We ultimately cast this to str for output anyway
+ default = str(default)
+ if default == '':
+ default = "''"
+ # If the default value is something like %index%, yaml won't
+ # parse the output correctly unless we wrap it in quotes.
+ # However, not all default values can be wrapped so we need to
+ # do it conditionally.
+ if default.startswith('%'):
+ default = "'%s'" % default
+ if not default.startswith('\n'):
+ default = ' ' + default
+
+ values = {'name': name,
+ 'type': value['type'],
+ 'description':
+ value.get('description', '').rstrip().replace('\n',
+ '\n # '),
+ 'default': default,
+ 'mandatory': mandatory,
+ }
+ f.write(_PARAM_FORMAT % values + '\n')
+
+ target_file = os.path.join('environments', env['name'] + '.yaml')
+ _create_output_dir(target_file)
+ with open(target_file, 'w') as env_file:
+ env_file.write(_FILE_HEADER)
+        # TODO(bnemec): Once Heat allows the title and description to live in
+        # the environment itself, write them as top-level keys in the YAML
+        # instead of as comments.
+ env_title = env.get('title', '')
+ env_file.write(u'# title: %s\n' % env_title)
+ env_desc = env.get('description', '')
+ env_file.write(u'# description: |\n')
+ for line in env_desc.splitlines():
+ env_file.write(u'# %s\n' % line)
+
+ if parameter_defaults:
+ env_file.write(u'parameter_defaults:\n')
+ for name, value in sorted(parameter_defaults.items()):
+ write_sample_entry(env_file, name, value)
+        if static_defaults:
+            env_file.write(_STATIC_MESSAGE_START)
+            for name, value in sorted(static_defaults.items()):
+                write_sample_entry(env_file, name, value)
+            env_file.write(_STATIC_MESSAGE_END)
+
+ if env.get('resource_registry'):
+ env_file.write(u'resource_registry:\n')
+ for res, value in sorted(env.get('resource_registry', {}).items()):
+ env_file.write(u' %s: %s\n' % (res, value))
+ print('Wrote sample environment "%s"' % target_file)
+
+ for e in env.get('children', []):
+ _generate_environment(e, env)
+
+
+def generate_environments(config_path):
+ if os.path.isdir(config_path):
+ config_files = os.listdir(config_path)
+ config_files = [os.path.join(config_path, i) for i in config_files
+ if os.path.splitext(i)[1] == '.yaml']
+ else:
+ config_files = [config_path]
+ for config_file in config_files:
+ print('Reading environment definitions from %s' % config_file)
+ with open(config_file) as f:
+ config = yaml.safe_load(f)
+ for env in config['environments']:
+ _generate_environment(env)
+
+
+def usage(exit_code=1):
+ print('Usage: %s [<filename.yaml> | <directory>]' % sys.argv[0])
+ sys.exit(exit_code)
+
+
+def main():
+ try:
+ config_path = sys.argv[1]
+ except IndexError:
+ usage()
+ generate_environments(config_path)
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+import tempfile
+
+import mock
+from oslotest import base
+import six
+import testscenarios
+
+from tripleo_heat_templates import environment_generator
+
+load_tests = testscenarios.load_tests_apply_scenarios
+
+basic_template = '''
+parameters:
+ FooParam:
+ default: foo
+ description: Foo description
+ type: string
+ BarParam:
+ default: 42
+ description: Bar description
+ type: number
+ EndpointMap:
+ default: {}
+ description: Parameter that should not be included by default
+ type: json
+resources:
+ # None
+'''
+basic_private_template = '''
+parameters:
+ FooParam:
+ default: foo
+ description: Foo description
+ type: string
+ _BarParam:
+ default: 42
+ description: Bar description
+ type: number
+resources:
+ # None
+'''
+mandatory_template = '''
+parameters:
+ FooParam:
+ description: Mandatory param
+ type: string
+resources:
+ # None
+'''
+index_template = '''
+parameters:
+ FooParam:
+ description: Param with %index% as its default
+ type: string
+ default: '%index%'
+resources:
+ # None
+'''
+multiline_template = '''
+parameters:
+ FooParam:
+ description: |
+ Parameter with
+ multi-line description
+ type: string
+ default: ''
+resources:
+ # None
+'''
+
+
+class GeneratorTestCase(base.BaseTestCase):
+ content_scenarios = [
+ ('basic',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+''',
+ }),
+ ('basic-one-param',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters:
+ - FooParam
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+''',
+ }),
+ ('basic-static-param',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ static:
+ - BarParam
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # *********************
+ # End static parameters
+ # *********************
+''',
+ }),
+ ('basic-static-param-sample',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ static:
+ - BarParam
+ sample_values:
+ BarParam: 1
+ FooParam: ''
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Foo description
+ # Type: string
+ FooParam: ''
+
+ # ******************************************************
+ # Static parameters - these are values that must be
+ # included in the environment but should not be changed.
+ # ******************************************************
+ # Bar description
+ # Type: number
+ BarParam: 1
+
+ # *********************
+ # End static parameters
+ # *********************
+''',
+ }),
+ ('basic-private',
+ {'template': basic_private_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+''',
+ }),
+ ('mandatory',
+ {'template': mandatory_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Mandatory param
+ # Mandatory. This parameter must be set by the user.
+ # Type: string
+ FooParam: <None>
+
+''',
+ }),
+ ('basic-sample',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ sample_values:
+ FooParam: baz
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # Foo description
+ # Type: string
+ FooParam: baz
+
+''',
+ }),
+ ('basic-resource-registry',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ resource_registry:
+ OS::TripleO::FakeResource: fake-filename.yaml
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+resource_registry:
+ OS::TripleO::FakeResource: fake-filename.yaml
+''',
+ }),
+ ('basic-hidden',
+ {'template': basic_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ sample_values:
+ EndpointMap: |-2
+
+ foo: bar
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Bar description
+ # Type: number
+ BarParam: 42
+
+ # Parameter that should not be included by default
+ # Type: json
+ EndpointMap:
+ foo: bar
+
+ # Foo description
+ # Type: string
+ FooParam: foo
+
+''',
+ }),
+ ('missing-param',
+ {'template': basic_template,
+ 'exception': RuntimeError,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters:
+ - SomethingNonexistent
+''',
+ 'expected_output': None,
+ }),
+ ('percent-index',
+ {'template': index_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Param with %index% as its default
+ # Type: string
+ FooParam: '%index%'
+
+''',
+ }),
+ ('nested',
+ {'template': multiline_template,
+ 'exception': None,
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+ children:
+ - name: nested
+ title: Nested Environment
+ description: Nested description
+ sample_values:
+ FooParam: bar
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Parameter with
+ # multi-line description
+ # Type: string
+ FooParam: ''
+
+''',
+ 'nested_output': '''# title: Nested Environment
+# description: |
+# Nested description
+parameter_defaults:
+ # Parameter with
+ # multi-line description
+ # Type: string
+ FooParam: bar
+
+''',
+ }),
+ ('multi-line-desc',
+ {'template': multiline_template,
+ 'exception': None,
+ 'nested_output': '',
+ 'input_file': '''environments:
+ -
+ name: basic
+ title: Basic Environment
+ description: Basic description
+ files:
+ foo.yaml:
+ parameters: all
+''',
+ 'expected_output': '''# title: Basic Environment
+# description: |
+# Basic description
+parameter_defaults:
+ # Parameter with
+ # multi-line description
+ # Type: string
+ FooParam: ''
+
+''',
+ }),
+ ]
+
+ @classmethod
+ def generate_scenarios(cls):
+ cls.scenarios = testscenarios.multiply_scenarios(
+ cls.content_scenarios)
+
+ def test_generator(self):
+ fake_input = io.StringIO(six.text_type(self.input_file))
+ fake_template = io.StringIO(six.text_type(self.template))
+ _, fake_output_path = tempfile.mkstemp()
+ fake_output = open(fake_output_path, 'w')
+ with mock.patch('tripleo_heat_templates.environment_generator.open',
+ create=True) as mock_open:
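+            # open() is called in this order: the environment definition,
+            # the referenced template, then the output file; a nested child
+            # environment re-reads the template and writes a second output.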
+ mock_se = [fake_input, fake_template, fake_output]
+ if self.nested_output:
+ _, fake_nested_output_path = tempfile.mkstemp()
+ fake_nested_output = open(fake_nested_output_path, 'w')
+ fake_template2 = io.StringIO(six.text_type(self.template))
+ mock_se = [fake_input, fake_template, fake_output,
+ fake_template2, fake_nested_output]
+ mock_open.side_effect = mock_se
+ if not self.exception:
+ environment_generator.generate_environments('ignored.yaml')
+ else:
+ self.assertRaises(self.exception,
+ environment_generator.generate_environments,
+ 'ignored.yaml')
+ return
+ expected = environment_generator._FILE_HEADER + self.expected_output
+ with open(fake_output_path) as f:
+ self.assertEqual(expected, f.read())
+ if self.nested_output:
+ with open(fake_nested_output_path) as f:
+ expected = (environment_generator._FILE_HEADER +
+ self.nested_output)
+ self.assertEqual(expected, f.read())
+
+GeneratorTestCase.generate_scenarios()