--- /dev/null
+parameter_defaults:
+ CephPoolDefaultSize: 1
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
- OS::TripleO::Services::BarbicanApi: ../../puppet/services/barbican-api.yaml
- OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml
- OS::TripleO::Services::Ec2Api: ../../puppet/services/ec2-api.yaml
+ # TODO: Barbican is not yet containerized: https://review.openstack.org/#/c/474327
+ # OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
+ OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
+ OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
- OS::TripleO::Services::SaharaApi: ../../puppet/services/sahara-api.yaml
- OS::TripleO::Services::SaharaEngine: ../../puppet/services/sahara-engine.yaml
- OS::TripleO::Services::MistralApi: ../../puppet/services/mistral-api.yaml
- OS::TripleO::Services::MistralEngine: ../../puppet/services/mistral-engine.yaml
- OS::TripleO::Services::MistralExecutor: ../../puppet/services/mistral-executor.yaml
+ OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+ OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
+ OS::TripleO::Services::MistralApi: ../../docker/services/mistral-api.yaml
+ OS::TripleO::Services::MistralEngine: ../../docker/services/mistral-engine.yaml
+ OS::TripleO::Services::MistralExecutor: ../../docker/services/mistral-executor.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ # TODO: deploy ceph with ceph-ansible: https://review.openstack.org/#/c/465066/
OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
OS::TripleO::Services::SwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: OS::Heat::None
OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
- OS::TripleO::Services::ManilaApi: ../../puppet/services/manila-api.yaml
- OS::TripleO::Services::ManilaScheduler: ../../puppet/services/manila-scheduler.yaml
+ OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
+ OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
+ # NOTE: being containerized here: https://review.openstack.org/#/c/471527/
OS::TripleO::Services::ManilaShare: ../../puppet/services/manila-share.yaml
OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
+ # TODO: containerize NeutronBgpVpnApi
OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
Command or script snippet to run on all overcloud nodes to
initialize the upgrade process. E.g. a repository switch.
default: ''
+ deployment_swift_data:
+ type: json
+ default: {}
resources:
deployed-server:
properties:
name: {get_param: name}
software_config_transport: {get_param: software_config_transport}
+ deployment_swift_data: {get_param: deployment_swift_data}
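+ # NOTE: assumed to mirror the deployment_swift_data property of OS::Nova::Server,
+ # i.e. the Swift container/object used to store deployment data for this server.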
UpgradeInitConfig:
type: OS::Heat::SoftwareConfig
- {get_attr: [ControlPlanePort, fixed_ips, 0, ip_address]}
name:
value: {get_attr: [HostsEntryDeployment, hostname]}
+ os_collect_config:
+ value: {get_attr: [deployed-server, os_collect_config]}
shell: python /var/lib/docker-puppet/docker-puppet.py
environment:
NET_HOST: 'true'
+ DEBUG: '{{docker_puppet_debug}}'
when: step == "1"
changed_when: false
check_mode: no
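+ # docker_puppet_debug is wired through from the DockerPuppetDebug heat parameter;
+ # any non-empty value makes docker-puppet.py log at DEBUG level (see below).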
import multiprocessing
log = logging.getLogger()
-log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
-ch.setLevel(logging.DEBUG)
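+# Environment values are strings, so any non-empty DEBUG value (e.g. 'True')
+# enables debug logging; the default of '' leaves the level at INFO.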
+if os.environ.get('DEBUG', False):
+ log.setLevel(logging.DEBUG)
+ ch.setLevel(logging.DEBUG)
+else:
+ log.setLevel(logging.INFO)
+ ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
if not manifest or not config_image:
continue
- log.debug('config_volume %s' % config_volume)
- log.debug('puppet_tags %s' % puppet_tags)
- log.debug('manifest %s' % manifest)
- log.debug('config_image %s' % config_image)
- log.debug('volumes %s' % volumes)
+ log.info('config_volume %s' % config_volume)
+ log.info('puppet_tags %s' % puppet_tags)
+ log.info('manifest %s' % manifest)
+ log.info('config_image %s' % config_image)
+ log.info('volumes %s' % volumes)
# We key off of config volume for all configs.
if config_volume in configs:
# Append puppet tags and manifest.
subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
cmd_stdout, cmd_stderr = subproc.communicate()
- if cmd_stdout:
- log.debug(cmd_stdout)
- if cmd_stderr:
- log.debug(cmd_stderr)
if subproc.returncode != 0:
log.error('Failed running docker-puppet.py for %s' % config_volume)
+ if cmd_stdout:
+ log.error(cmd_stdout)
+ if cmd_stderr:
+ log.error(cmd_stderr)
else:
+ if cmd_stdout:
+ log.debug(cmd_stdout)
+ if cmd_stderr:
+ log.debug(cmd_stderr)
# only delete successful runs, for debugging
rm_container('docker-puppet-%s' % config_volume)
return subproc.returncode
servers:
type: json
description: Mapping of Role name e.g. Controller to a list of servers
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g. Controller to the per-role data
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ DockerPuppetDebug:
+ type: string
+ default: ''
+ description: Set to True to enable debug logging with docker-puppet.py
+ ctlplane_service_ips:
+ type: json
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
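+# A given step's workflow resources below are only created when at least one role
+# defines a non-empty service_workflow_tasks entry for that step.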
resources:
- name: role_name
- name: update_identifier
- name: bootstrap_server_id
+ - name: docker_puppet_debug
config: {get_file: deploy-steps-playbook.yaml}
+{%- for step in range(1, deploy_steps_max) %}
+# BEGIN service_workflow_tasks handling
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on:
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
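+ # The yaql expression drops roles whose service_workflow_tasks is empty, picks each
+ # remaining role's step{{step}} entry, discards nulls and flattens the result into
+ # a single task list for the workflow.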
+
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
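+ # The ExternalResource runs the workflow on both CREATE and UPDATE, passing the
+ # ctlplane service IPs in the workflow environment; always_update re-triggers it
+ # on every stack update.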
+# END service_workflow_tasks handling
+{% endfor %}
+
{% for role in roles %}
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
- {% for dep in roles %}
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+ # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
- {% endfor %}
- {% endif %}
+ {% endfor %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
role_name: {{role.name}}
update_identifier: {get_param: DeployIdentifier}
bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
+ docker_puppet_debug: {get_param: DockerPuppetDebug}
{% endfor %}
# END CONFIG STEPS
DockerNamespace:
type: string
default: tripleoupstream
+ description: namespace
DockerNamespaceIsRegistry:
type: boolean
default: false
description: image
default: 'centos-binary-aodh-api:latest'
type: string
+ DockerAodhConfigImage:
+ description: The container image to use for the aodh config_volume
+ default: 'centos-binary-aodh-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: aodh
puppet_tags: aodh_api_paste_ini,aodh_config
step_config: *step_config
- config_image: &aodh_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/aodh_api.json:
command: /usr/sbin/httpd -DFOREGROUND
# db sync runs before permissions set by kolla_config
step_2:
aodh_init_log:
- image: *aodh_image
+ image: &aodh_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhApiImage} ]
user: root
volumes:
- /var/log/containers/aodh:/var/log/aodh
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
step_3:
aodh_db_sync:
- image: *aodh_image
+ image: *aodh_api_image
net: host
privileged: false
detach: false
command: "/usr/bin/bootstrap_host_exec aodh_api su aodh -s /bin/bash -c /usr/bin/aodh-dbsync"
step_4:
aodh_api:
- image: *aodh_image
+ image: *aodh_api_image
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-aodh-evaluator:latest'
type: string
+ DockerAodhConfigImage:
+ description: The container image to use for the aodh config_volume
+ default: 'centos-binary-aodh-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: aodh
puppet_tags: aodh_config
step_config: *step_config
- config_image: &aodh_evaluator_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/aodh_evaluator.json:
command: /usr/bin/aodh-evaluator
docker_config:
step_4:
aodh_evaluator:
- image: *aodh_evaluator_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhEvaluatorImage} ]
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-aodh-listener:latest'
type: string
+ DockerAodhConfigImage:
+ description: The container image to use for the aodh config_volume
+ default: 'centos-binary-aodh-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: aodh
puppet_tags: aodh_config
step_config: *step_config
- config_image: &aodh_listener_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/aodh_listener.json:
command: /usr/bin/aodh-listener
docker_config:
step_4:
aodh_listener:
- image: *aodh_listener_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhListenerImage} ]
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-aodh-notifier:latest'
type: string
+ DockerAodhConfigImage:
+ description: The container image to use for the aodh config_volume
+ default: 'centos-binary-aodh-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: aodh
puppet_tags: aodh_config
step_config: *step_config
- config_image: &aodh_notifier_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/aodh_notifier.json:
command: /usr/bin/aodh-notifier
docker_config:
step_4:
aodh_notifier:
- image: *aodh_notifier_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerAodhNotifierImage} ]
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-ceilometer-central:latest'
type: string
+ DockerCeilometerConfigImage:
+ description: The container image to use for the ceilometer config_volume
+ default: 'centos-binary-ceilometer-central:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: ceilometer
puppet_tags: ceilometer_config
step_config: *step_config
- config_image: &ceilometer_agent_central_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerCentralImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/ceilometer_agent_central.json:
command: /usr/bin/ceilometer-polling --polling-namespaces central
step_3:
ceilometer_init_log:
start_order: 0
- image: *ceilometer_agent_central_image
+ image: &ceilometer_agent_central_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerCentralImage} ]
user: root
command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
volumes:
description: image
default: 'centos-binary-ceilometer-compute:latest'
type: string
+ DockerCeilometerConfigImage:
+ description: The container image to use for the ceilometer config_volume
+ default: 'centos-binary-ceilometer-central:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: ceilometer
puppet_tags: ceilometer_config
step_config: *step_config
- config_image: &ceilometer_agent_compute_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerComputeImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/ceilometer_agent_compute.json:
command: /usr/bin/ceilometer-polling --polling-namespaces compute
docker_config:
step_4:
ceilometer_agent_compute:
- image: *ceilometer_agent_compute_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerComputeImage} ]
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-ceilometer-ipmi:latest'
type: string
+ DockerCeilometerConfigImage:
+ description: The container image to use for the ceilometer config_volume
+ default: 'centos-binary-ceilometer-central:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: ceilometer
puppet_tags: ceilometer_config
step_config: *step_config
- config_image: &ceilometer_agent_ipmi_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerIpmiImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/ceilometer-agent-ipmi.json:
command: /usr/bin/ceilometer-polling --polling-namespaces ipmi
step_3:
ceilometer_init_log:
start_order: 0
- image: *ceilometer_agent_ipmi_image
+ image: &ceilometer_agent_ipmi_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerIpmiImage} ]
user: root
command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
volumes:
- /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_5:
- ceilometer_gnocchi_upgrade:
- start_order: 1
- image: *ceilometer_agent_ipmi_image
- net: host
- detach: false
- privileged: false
- volumes:
- list_concat:
- - {get_attr: [ContainersCommon, volumes]}
- -
- - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
- - /var/log/containers/ceilometer:/var/log/ceilometer
- command: "/usr/bin/bootstrap_host_exec ceilometer su ceilometer -s /bin/bash -c '/usr/bin/ceilometer-upgrade --skip-metering-database'"
upgrade_tasks:
- name: Stop and disable ceilometer agent ipmi service
tags: step2
description: image
default: 'centos-binary-ceilometer-notification:latest'
type: string
+ DockerCeilometerConfigImage:
+ description: The container image to use for the ceilometer config_volume
+ default: 'centos-binary-ceilometer-central:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: ceilometer
puppet_tags: ceilometer_config
step_config: *step_config
- config_image: &ceilometer_agent_notification_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerNotificationImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/ceilometer_agent_notification.json:
command: /usr/bin/ceilometer-agent-notification
step_3:
ceilometer_init_log:
start_order: 0
- image: *ceilometer_agent_notification_image
+ image: &ceilometer_agent_notification_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerNotificationImage} ]
user: root
command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
volumes:
- /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_5:
- ceilometer_gnocchi_upgrade:
- start_order: 1
- image: *ceilometer_agent_notification_image
- net: host
- detach: false
- privileged: false
- volumes:
- list_concat:
- - {get_attr: [ContainersCommon, volumes]}
- -
- - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
- - /var/log/containers/ceilometer:/var/log/ceilometer
- command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
upgrade_tasks:
- name: Stop and disable ceilometer agent notification service
tags: step2
description: image
default: 'centos-binary-cinder-api:latest'
type: string
- # we configure all cinder services in the same cinder base container
DockerCinderConfigImage:
- description: image
+ description: The container image to use for the cinder config_volume
default: 'centos-binary-cinder-api:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-cinder-backup:latest'
type: string
- # we configure all cinder services in the same cinder base container
DockerCinderConfigImage:
- description: image
+ description: The container image to use for the cinder config_volume
default: 'centos-binary-cinder-api:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-cinder-scheduler:latest'
type: string
- # we configure all cinder services in the same cinder base container
DockerCinderConfigImage:
- description: image
+ description: The container image to use for the cinder config_volume
default: 'centos-binary-cinder-api:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-cinder-volume:latest'
type: string
- # we configure all cinder services in the same cinder base container
DockerCinderConfigImage:
- description: image
+ description: The container image to use for the cinder config_volume
default: 'centos-binary-cinder-api:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-collectd:latest'
type: string
+ DockerCollectdConfigImage:
+ description: The container image to use for the collectd config_volume
+ default: 'centos-binary-collectd:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: Role data for the collectd role.
value:
service_name: {get_attr: [CollectdBase, role_data, service_name]}
- config_settings: {get_attr: [CollectdBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - get_attr: [CollectdBase, role_data, config_settings]
+ - tripleo::profile::base::metrics::collectd::enable_file_logging: true
+ collectd::plugin::logfile::log_file: /var/log/collectd/collectd.log
step_config: &step_config
get_attr: [CollectdBase, role_data, step_config]
service_config_settings: {get_attr: [CollectdBase, role_data, service_config_settings]}
config_volume: collectd
puppet_tags: collectd_client_config
step_config: *step_config
- config_image: &collectd_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerCollectdImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerCollectdConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/collectd.json:
command: /usr/sbin/collectd -f
+ permissions:
+ - path: /var/log/collectd
+ owner: collectd:collectd
+ recurse: true
docker_config:
step_3:
collectd:
- image: *collectd_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCollectdImage} ]
net: host
privileged: true
restart: always
-
- /var/run/docker.sock:/var/run/docker.sock:rw
- /var/lib/kolla/config_files/collectd.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/collectd/etc/collectd/:/etc/collectd/:ro
+ - /var/lib/config-data/collectd/etc/collectd.conf:/etc/collectd.conf:ro
+ - /var/lib/config-data/collectd/etc/collectd.d:/etc/collectd.d:ro
+ - /var/log/containers/collectd:/var/log/collectd:rw
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/collectd
+ state: directory
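+ # collectd now logs to /var/log/collectd/collectd.log inside the container, which
+ # is bind mounted from /var/log/containers/collectd on the host (created above).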
upgrade_tasks:
- name: Stop and disable collectd service
tags: step2
service: name=collectd.service state=stopped enabled=no
-
default: 'centos-binary-congress-api:latest'
type: string
DockerCongressConfigImage:
- description: image
+ description: The container image to use for the congress config_volume
default: 'centos-binary-congress-api:latest'
type: string
EndpointMap:
# db sync runs before permissions set by kolla_config
step_2:
congress_init_logs:
- image: &congress_image
+ image: &congress_api_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerCongressApiImage} ]
command: ['/bin/bash', '-c', 'chown -R congress:congress /var/log/congress']
step_3:
congress_db_sync:
- image: *congress_image
+ image: *congress_api_image
net: host
privileged: false
detach: false
step_4:
congress_api:
start_order: 15
- image: *congress_image
+ image: *congress_api_image
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-mongodb:latest'
type: string
+ DockerMongodbConfigImage:
+ description: The container image to use for the mongodb config_volume
+ default: 'centos-binary-mongodb:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: mongodb
puppet_tags: file # set this even though file is the default
step_config: *step_config
- config_image: &mongodb_image
+ config_image: &mongodb_config_image
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerMongodbConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/mongodb.json:
command: /usr/bin/mongod --unixSocketPrefix=/var/run/mongodb --config /etc/mongod.conf run
docker_config:
step_2:
mongodb:
- image: *mongodb_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMongodbImage} ]
net: host
privileged: false
volumes: &mongodb_volumes
config_volume: 'mongodb_init_tasks'
puppet_tags: 'mongodb_database,mongodb_user,mongodb_replset'
step_config: 'include ::tripleo::profile::base::database::mongodb'
- config_image: *mongodb_image
+ config_image: *mongodb_config_image
volumes:
- /var/lib/mongodb:/var/lib/mongodb
- /var/log/containers/mongodb:/var/log/mongodb
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Configuration for containerized MySQL clients
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMysqlClientConfigImage:
+ description: The container image to use for the mysql_client config_volume
+ default: 'centos-binary-mariadb:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
+
+outputs:
+ role_data:
+ description: Role for setting mysql client parameters
+ value:
+ service_name: mysql_client
+ config_settings:
+ tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
+ tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS}
+ tripleo::profile::base::database::mysql::client::ssl_ca: {get_param: InternalTLSCAFile}
+ # BEGIN DOCKER SETTINGS #
+ step_config: ""
+ puppet_config:
+ config_volume: mysql_client
+ puppet_tags: file # set this even though file is the default
+ step_config: "include ::tripleo::profile::base::database::mysql::client"
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMysqlClientConfigImage} ]
+ # no need for a docker config, this service only generates configuration files
+ docker_config: {}
description: image
default: 'centos-binary-mariadb:latest'
type: string
+ DockerMysqlConfigImage:
+ description: The container image to use for the mysql config_volume
+ default: 'centos-binary-mariadb:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: mysql
puppet_tags: file # set this even though file is the default
step_config: *step_config
- config_image: &mysql_image
+ config_image: &mysql_config_image
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerMysqlConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/mysql.json:
command: /usr/bin/mysqld_safe
# Kolla_bootstrap runs before permissions set by kolla_config
step_1:
mysql_init_logs:
- image: *mysql_image
+ image: &mysql_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMysqlImage} ]
privileged: false
user: root
volumes:
config_volume: 'mysql_init_tasks'
puppet_tags: 'mysql_database,mysql_grant,mysql_user'
step_config: 'include ::tripleo::profile::base::database::mysql'
- config_image: *mysql_image
+ config_image: *mysql_config_image
volumes:
- /var/lib/mysql:/var/lib/mysql/:ro
- /var/log/containers/mysql:/var/log/mariadb
description: image
default: 'centos-binary-redis:latest'
type: string
+ DockerRedisConfigImage:
+ description: The container image to use for the redis config_volume
+ default: 'centos-binary-redis:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
# https://github.com/arioch/puppet-redis/commit/1c004143223e660cbd433422ff8194508aab9763
puppet_tags: 'exec'
step_config: *step_config
- config_image: &redis_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerRedisImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerRedisConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/redis.json:
command: /usr/bin/redis-server /etc/redis.conf
redis_init_logs:
start_order: 0
detach: false
- image: *redis_image
+ image: &redis_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRedisImage} ]
privileged: false
user: root
volumes:
description: image
default: 'centos-binary-ec2-api:latest'
type: string
+ DockerEc2ApiConfigImage:
+ description: The container image to use for the ec2api config_volume
+ default: 'centos-binary-ec2-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: ec2api
puppet_tags: ec2api_api_paste_ini,ec2api_config
step_config: *step_config
- config_image: &ec2_api_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/ec2_api.json:
command: /usr/bin/ec2-api
# db sync runs before permissions set by kolla_config
step_2:
ec2_api_init_logs:
- image: *ec2_api_image
+ image: &ec2_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiImage} ]
privileged: false
user: root
volumes:
description: image
default: 'centos-binary-etcd:latest'
type: string
+ DockerEtcdConfigImage:
+ description: The container image to use for the etcd config_volume
+ default: 'centos-binary-etcd:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
puppet_config:
config_volume: etcd
step_config: *step_config
- config_image: &etcd_image
+ config_image: &etcd_config_image
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerEtcdImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerEtcdConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/etcd.json:
command: /usr/bin/etcd --config-file /etc/etcd/etcd.yml
docker_config:
step_2:
etcd:
- image: *etcd_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerEtcdImage} ]
net: host
privileged: false
restart: always
config_volume: 'etcd_init_tasks'
puppet_tags: 'etcd_key'
step_config: 'include ::tripleo::profile::base::etcd'
- config_image: *etcd_image
+ config_image: *etcd_config_image
volumes:
- /var/lib/config-data/etcd/etc/etcd/:/etc/etcd:ro
- /var/lib/etcd:/var/lib/etcd:ro
description: image
default: 'centos-binary-glance-api:latest'
type: string
+ DockerGlanceApiConfigImage:
+ description: The container image to use for the glance_api config_volume
+ default: 'centos-binary-glance-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: glance_api
puppet_tags: glance_api_config,glance_api_paste_ini,glance_swift_config,glance_cache_config
step_config: *step_config
- config_image: &glance_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/glance_api.json:
command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
# Kolla_bootstrap/db_sync runs before permissions set by kolla_config
step_2:
glance_init_logs:
- image: *glance_image
+ image: &glance_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGlanceApiImage} ]
privileged: false
user: root
volumes:
command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
step_3:
glance_api_db_sync:
- image: *glance_image
+ image: *glance_api_image
net: host
privileged: false
detach: false
map_merge:
- glance_api:
start_order: 2
- image: *glance_image
+ image: *glance_api_image
net: host
privileged: false
restart: always
- internal_tls_enabled
- glance_api_tls_proxy:
start_order: 2
- image: *glance_image
+ image: *glance_api_image
net: host
user: root
restart: always
description: image
default: 'centos-binary-gnocchi-api:latest'
type: string
+ DockerGnocchiConfigImage:
+ description: The container image to use for the gnocchi config_volume
+ default: 'centos-binary-gnocchi-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: gnocchi
puppet_tags: gnocchi_api_paste_ini,gnocchi_config
step_config: *step_config
- config_image: &gnocchi_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/gnocchi_api.json:
command: /usr/sbin/httpd -DFOREGROUND
# db sync runs before permissions set by kolla_config
step_2:
gnocchi_init_log:
- image: *gnocchi_image
+ image: &gnocchi_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiApiImage} ]
user: root
volumes:
- /var/log/containers/gnocchi:/var/log/gnocchi
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
step_3:
gnocchi_db_sync:
- image: *gnocchi_image
+ image: *gnocchi_api_image
net: host
detach: false
privileged: false
command: "/usr/bin/bootstrap_host_exec gnocchi_api su gnocchi -s /bin/bash -c '/usr/bin/gnocchi-upgrade --skip-storage'"
step_4:
gnocchi_api:
- image: *gnocchi_image
+ image: *gnocchi_api_image
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-gnocchi-metricd:latest'
type: string
+ DockerGnocchiConfigImage:
+ description: The container image to use for the gnocchi config_volume
+ default: 'centos-binary-gnocchi-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: gnocchi
puppet_tags: gnocchi_config
step_config: *step_config
- config_image: &gnocchi_metricd_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/gnocchi_metricd.json:
command: /usr/bin/gnocchi-metricd
docker_config:
step_4:
gnocchi_metricd:
- image: *gnocchi_metricd_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiMetricdImage} ]
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-gnocchi-statsd:latest'
type: string
+ DockerGnocchiConfigImage:
+ description: The container image to use for the gnocchi config_volume
+ default: 'centos-binary-gnocchi-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: gnocchi
puppet_tags: gnocchi_config
step_config: *step_config
- config_image: &gnocchi_statsd_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/gnocchi_statsd.json:
command: /usr/bin/gnocchi-statsd
docker_config:
step_4:
gnocchi_statsd:
- image: *gnocchi_statsd_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerGnocchiStatsdImage} ]
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-haproxy:latest'
type: string
+ DockerHAProxyConfigImage:
+ description: The container image to use for the haproxy config_volume
+ default: 'centos-binary-haproxy:latest'
+ type: string
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
config_volume: haproxy
puppet_tags: haproxy_config
step_config: *step_config
- config_image: &haproxy_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/haproxy.json:
command: haproxy -f /etc/haproxy/haproxy.cfg
docker_config:
step_1:
haproxy:
- image: *haproxy_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
net: host
privileged: false
restart: always
default: 'centos-binary-heat-api-cfn:latest'
type: string
# puppet needs the heat-wsgi-api-cfn binary from centos-binary-heat-api-cfn
- DockerHeatConfigImage:
- description: image
+ DockerHeatApiCfnConfigImage:
+ description: The container image to use for the heat_api_cfn config_volume
default: 'centos-binary-heat-api-cfn:latest'
type: string
EndpointMap:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiCfnConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/heat_api_cfn.json:
command: /usr/sbin/httpd -DFOREGROUND
default: 'centos-binary-heat-api:latest'
type: string
# puppet needs the heat-wsgi-api binary from centos-binary-heat-api
- DockerHeatConfigImage:
- description: image
+ DockerHeatApiConfigImage:
+ description: The container image to use for the heat_api config_volume
default: 'centos-binary-heat-api:latest'
type: string
EndpointMap:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatApiConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/heat_api.json:
command: /usr/sbin/httpd -DFOREGROUND
description: image
default: 'centos-binary-heat-engine:latest'
type: string
+ DockerHeatConfigImage:
+ description: The container image to use for the heat config_volume
+ default: 'centos-binary-heat-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: heat
puppet_tags: heat_config,file,concat,file_line
step_config: *step_config
- config_image: &heat_engine_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/heat_engine.json:
command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
# db sync runs before permissions set by kolla_config
step_2:
heat_init_log:
- image: *heat_engine_image
+ image: &heat_engine_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHeatEngineImage} ]
user: root
volumes:
- /var/log/containers/heat:/var/log/heat
description: image
default: 'centos-binary-horizon:latest'
type: string
+ DockerHorizonConfigImage:
+ description: The container image to use for the horizon config_volume
+ default: 'centos-binary-horizon:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: horizon
puppet_tags: horizon_config
step_config: {get_attr: [HorizonBase, role_data, step_config]}
- config_image: &horizon_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerHorizonImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerHorizonConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/horizon.json:
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
step_2:
horizon_fix_perms:
- image: *horizon_image
+ image: &horizon_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHorizonImage} ]
user: root
# NOTE Set ownership for /var/log/horizon/horizon.log file here,
# otherwise it's created by root when generating django cache.
default: 'centos-binary-ironic-api:latest'
type: string
DockerIronicConfigImage:
- description: image
+ description: The container image to use for the ironic config_volume
default: 'centos-binary-ironic-pxe:latest'
type: string
EndpointMap:
config_settings:
map_merge:
- get_attr: [IronicApiBase, role_data, config_settings]
+ - apache::default_vhost: false
step_config: &step_config
get_attr: [IronicApiBase, role_data, step_config]
service_config_settings: {get_attr: [IronicApiBase, role_data, service_config_settings]}
- [ {get_param: DockerNamespace}, {get_param: DockerIronicConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/ironic_api.json:
- command: /usr/bin/ironic-api
+ command: /usr/sbin/httpd -DFOREGROUND
permissions:
- path: /var/log/ironic
owner: ironic:ironic
# db sync runs before permissions set by kolla_config
step_2:
ironic_init_logs:
- image: &ironic_image
+ image: &ironic_api_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
step_3:
ironic_db_sync:
start_order: 1
- image: *ironic_image
+ image: *ironic_api_image
net: host
privileged: false
detach: false
step_4:
ironic_api:
start_order: 10
- image: *ironic_image
+ image: *ironic_api_image
net: host
- privileged: false
+ user: root
restart: always
volumes:
list_concat:
-
- /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/ironic/etc/ironic:/etc/ironic:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf/:/etc/httpd/conf/:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf.d/:/etc/httpd/conf.d/:ro
+ - /var/lib/config-data/ironic/etc/httpd/conf.modules.d/:/etc/httpd/conf.modules.d/:ro
+ - /var/lib/config-data/ironic/var/www/:/var/www/:ro
- /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
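+ # NOTE: ironic-api now runs under httpd (see the kolla command above), so the
+ # puppet-generated Apache configuration and document root are bind mounted read-only.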
default: 'centos-binary-ironic-conductor:latest'
type: string
DockerIronicConfigImage:
- description: image
+ description: The container image to use for the ironic config_volume
default: 'centos-binary-ironic-pxe:latest'
type: string
EndpointMap:
default: 'centos-binary-ironic-pxe:latest'
type: string
DockerIronicConfigImage:
- description: image
+ description: The container image to use for the ironic config_volume
default: 'centos-binary-ironic-pxe:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-iscsid:latest'
type: string
+ DockerIscsidConfigImage:
+ description: The container image to use for the iscsid config_volume
+ default: 'centos-binary-iscsid:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: iscsid
#puppet_tags: file
step_config: ''
- config_image: &iscsid_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerIscsidImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerIscsidConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/iscsid.json:
command: /usr/sbin/iscsid -f
step_3:
iscsid:
start_order: 2
- image: *iscsid_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerIscsidImage} ]
net: host
privileged: true
restart: always
description: image
default: 'centos-binary-keystone:latest'
type: string
+ DockerKeystoneConfigImage:
+ description: The container image to use for the keystone config_volume
+ default: 'centos-binary-keystone:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: keystone
puppet_tags: keystone_config
step_config: *step_config
- config_image: &keystone_image
+ config_image: &keystone_config_image
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/keystone.json:
command: /usr/sbin/httpd -DFOREGROUND
# Kolla_bootstrap/db sync runs before permissions set by kolla_config
step_2:
keystone_init_log:
- image: *keystone_image
+ image: &keystone_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerKeystoneImage} ]
user: root
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
volumes:
config_volume: 'keystone_init_tasks'
puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
step_config: 'include ::tripleo::profile::base::keystone'
- config_image: *keystone_image
+ config_image: *keystone_config_image
host_prep_tasks:
- name: create persistent logs directory
file:
default: 'centos-binary-manila-api:latest'
type: string
DockerManilaConfigImage:
- description: image
- default: 'centos-binary-manila-base:latest'
+ description: The container image to use for the manila config_volume
+ default: 'centos-binary-manila-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
resources:
+ ContainersCommon:
+ type: ./containers-common.yaml
+
ManilaApiPuppetBase:
type: ../../puppet/services/manila-api.yaml
properties:
owner: manila:manila
recurse: true
docker_config:
- step_3:
- manila_api_db_sync:
- user: root
+ step_2:
+ manila_init_logs:
image: &manila_api_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerManilaApiImage} ]
+ user: root
+ volumes:
+ - /var/log/containers/manila:/var/log/manila
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R manila:manila /var/log/manila']
+ step_3:
+ manila_api_db_sync:
+ user: root
+ image: *manila_api_image
net: host
detach: false
volumes:
- - /var/lib/config-data/manila/etc/manila/:/etc/manila:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - logs:/var/log
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+ - /var/log/containers/manila:/var/log/manila
command: "/usr/bin/bootstrap_host_exec manila_api su manila -s /bin/bash -c '/usr/bin/manila-manage db sync'"
step_4:
manila_api:
net: host
restart: always
volumes:
- - /var/lib/kolla/config_files/manila_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- - /var/log/containers/manila:/var/log/manila
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/manila_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+ - /var/log/containers/manila:/var/log/manila
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
default: 'centos-binary-manila-scheduler:latest'
type: string
DockerManilaConfigImage:
- description: image
+ description: The container image to use for the manila config_volume
default: 'centos-binary-manila-api:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-memcached:latest'
type: string
+ DockerMemcachedConfigImage:
+ description: The container image to use for the memcached config_volume
+ default: 'centos-binary-memcached:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: 'memcached'
puppet_tags: 'file'
step_config: *step_config
- config_image: &memcached_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedConfigImage} ]
kolla_config: {}
docker_config:
step_1:
memcached_init_logs:
start_order: 0
detach: false
- image: *memcached_image
+ image: &memcached_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMemcachedImage} ]
privileged: false
user: root
volumes:
default: 'centos-binary-mistral-api:latest'
type: string
DockerMistralConfigImage:
- description: image
+ description: The container image to use for the mistral config_volume
default: 'centos-binary-mistral-api:latest'
type: string
EndpointMap:
# db sync runs before permissions set by kolla_config
step_2:
mistral_init_logs:
- image: &mistral_image
+ image: &mistral_api_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
step_3:
mistral_db_sync:
start_order: 0
- image: *mistral_image
+ image: *mistral_api_image
net: host
privileged: false
detach: false
command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head'"
mistral_db_populate:
start_order: 1
- image: *mistral_image
+ image: *mistral_api_image
net: host
privileged: false
detach: false
step_4:
mistral_api:
start_order: 15
- image: *mistral_image
+ image: *mistral_api_image
net: host
privileged: false
restart: always
default: 'centos-binary-mistral-engine:latest'
type: string
DockerMistralConfigImage:
- description: image
+ description: The container image to use for the mistral config_volume
default: 'centos-binary-mistral-api:latest'
type: string
EndpointMap:
default: 'centos-binary-mistral-executor:latest'
type: string
DockerMistralConfigImage:
- description: image
+ description: The container image to use for the mistral config_volume
default: 'centos-binary-mistral-api:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-multipathd:latest'
type: string
+ DockerMultipathdConfigImage:
+ description: The container image to use for the multipathd config_volume
+ default: 'centos-binary-multipathd:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: multipathd
#puppet_tags: file
step_config: ''
- config_image: &multipathd_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/multipathd.json:
command: /usr/sbin/multipathd -d
step_3:
multipathd:
start_order: 1
- image: *multipathd_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdImage} ]
net: host
privileged: true
restart: always
description: image
default: 'centos-binary-neutron-server:latest'
type: string
- # we configure all neutron services in the same neutron
DockerNeutronConfigImage:
- description: image
+ description: The container image to use for the neutron config_volume
default: 'centos-binary-neutron-server:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-neutron-dhcp-agent:latest'
type: string
- # we configure all neutron services in the same neutron
DockerNeutronConfigImage:
- description: image
+ description: The container image to use for the neutron config_volume
default: 'centos-binary-neutron-server:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-neutron-l3-agent:latest'
type: string
- # we configure all neutron services in the same neutron
DockerNeutronConfigImage:
- description: image
+ description: The container image to use for the neutron config_volume
default: 'centos-binary-neutron-server:latest'
type: string
ServiceNetMap:
description: image
default: 'centos-binary-neutron-metadata-agent:latest'
type: string
- # we configure all neutron services in the same neutron
DockerNeutronConfigImage:
- description: image
+ description: The container image to use for the neutron config_volume
default: 'centos-binary-neutron-server:latest'
type: string
ServiceNetMap:
default: 'centos-binary-neutron-openvswitch-agent:latest'
type: string
DockerNeutronConfigImage:
- description: image
+ description: The container image to use for the neutron config_volume
default: 'centos-binary-neutron-server:latest'
type: string
ServiceNetMap:
docker_config:
step_4:
neutron_ovs_agent:
- image: &neutron_ovs_agent_image
+ image:
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
default: 'tripleoupstream'
type: string
DockerNeutronConfigImage:
- description: image
+ description: The container image to use for the neutron config_volume
default: 'centos-binary-neutron-server:latest'
type: string
DefaultPasswords:
default: 'centos-binary-nova-api:latest'
type: string
DockerNovaConfigImage:
- description: image
+ description: The container image to use for the nova config_volume
default: 'centos-binary-nova-base:latest'
type: string
EndpointMap:
map_merge:
- get_attr: [NovaApiBase, role_data, config_settings]
- apache::default_vhost: false
+ nova_wsgi_enabled: false
+ nova::api::service_name: '%{::nova::params::api_service_name}'
+ nova::wsgi::apache_api::ssl: false
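+ # Assumption: these overrides run nova-api as a standalone (non-WSGI) service
+ # inside its container instead of under the Apache vhost, hence ssl is disabled
+ # on the unused apache_api class.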
step_config: &step_config
list_join:
- "\n"
description: image
default: 'centos-binary-nova-compute:latest'
type: string
+ DockerNovaLibvirtConfigImage:
+ description: The container image to use for the nova_libvirt config_volume
+ default: 'centos-binary-nova-compute:latest'
+ type: string
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
config_volume: nova_libvirt
puppet_tags: nova_config,nova_paste_api_ini
step_config: *step_config
- config_image: &nova_compute_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaLibvirtConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_compute.json:
command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
# FIXME: run discover hosts here
step_4:
nova_compute:
- image: *nova_compute_image
+ image: &nova_compute_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
net: host
privileged: true
user: nova
default: 'centos-binary-nova-conductor:latest'
type: string
DockerNovaConfigImage:
- description: image
+ description: The container image to use for the nova config_volume
default: 'centos-binary-nova-base:latest'
type: string
EndpointMap:
docker_config:
step_4:
nova_conductor:
- image: &nova_conductor_image
+ image:
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerNovaConductorImage} ]
default: 'centos-binary-nova-consoleauth:latest'
type: string
DockerNovaConfigImage:
- description: image
+ description: The container image to use for the nova config_volume
default: 'centos-binary-nova-base:latest'
type: string
EndpointMap:
description: namespace
default: 'tripleoupstream'
type: string
- DockerNovaComputeImage:
+ DockerNovaComputeIronicImage:
description: image
default: 'centos-binary-nova-compute-ironic:latest'
type: string
DockerNovaConfigImage:
- description: image
+ description: The container image to use for the nova config_volume
default: 'centos-binary-nova-base:latest'
type: string
ServiceNetMap:
image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaComputeIronicImage} ]
net: host
privileged: true
user: root
description: namespace
default: 'tripleoupstream'
type: string
- DockerLibvirtImage:
+ DockerNovaLibvirtImage:
description: image
default: 'centos-binary-nova-libvirt:latest'
type: string
# we configure libvirt via the nova-compute container due to coupling
# in the puppet modules
- DockerNovaConfigImage:
- description: image
+ DockerNovaLibvirtConfigImage:
+ description: The container image to use for the nova_libvirt config_volume
default: 'centos-binary-nova-compute:latest'
type: string
EnablePackageInstall:
step_config: *step_config
config_image:
list_join:
- - '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaLibvirtConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_libvirt.json:
command:
image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerLibvirtImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaLibvirtImage} ]
net: host
pid: host
privileged: true
description: image
default: 'centos-binary-nova-placement-api:latest'
type: string
+ DockerNovaPlacementConfigImage:
+ description: The container image to use for the nova_placement config_volume
+ default: 'centos-binary-nova-placement-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: nova_placement
puppet_tags: nova_config
step_config: *step_config
- config_image: &nova_placement_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_placement.json:
command: /usr/sbin/httpd -DFOREGROUND
step_3:
nova_placement:
start_order: 1
- image: *nova_placement_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNovaPlacementImage} ]
net: host
user: root
restart: always
default: 'centos-binary-nova-scheduler:latest'
type: string
DockerNovaConfigImage:
- description: image
+ description: The container image to use for the nova config_volume
default: 'centos-binary-nova-base:latest'
type: string
EndpointMap:
default: 'centos-binary-nova-novncproxy:latest'
type: string
DockerNovaConfigImage:
- description: image
+ description: The container image to use for the nova config_volume
default: 'centos-binary-nova-base:latest'
type: string
EndpointMap:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Octavia service configured with Puppet
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerOctaviaApiImage:
+ description: image
+ default: 'centos-binary-octavia-api:latest'
+ type: string
+ DockerOctaviaConfigImage:
+ description: The container image to use for the octavia config_volume
+ default: 'centos-binary-octavia-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ OctaviaApiPuppetBase:
+ type: ../../puppet/services/octavia-api.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Octavia API role.
+ value:
+ service_name: {get_attr: [OctaviaApiPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [OctaviaApiPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [OctaviaApiPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [OctaviaApiPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: octavia
+ puppet_tags: octavia_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/octavia_api.json:
+ command: /usr/bin/octavia-api --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/api.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-api
+ /var/lib/kolla/config_files/octavia_api_tls_proxy.json:
+ command: /usr/sbin/httpd -DFOREGROUND
+ docker_config:
+ # Kolla_bootstrap/db_sync runs before permissions set by kolla_config
+ step_2:
+ octavia_api_init_dirs:
+ start_order: 0
+ image: &octavia_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaApiImage} ]
+ user: root
+ volumes:
+ - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+ - /var/log/containers/octavia:/var/log/octavia
+ command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-api; chown -R octavia:octavia /etc/octavia/conf.d/octavia-api; chown -R octavia:octavia /var/log/octavia']
+ step_3:
+ octavia_db_sync:
+ start_order: 0
+ image: *octavia_api_image
+ net: host
+ privileged: false
+ detach: false
+ user: root
+ volumes: &octavia_volumes
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/octavia_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+ - /var/log/containers/octavia:/var/log/octavia
+ command: "/usr/bin/bootstrap_host_exec octavia_api su octavia -s /bin/bash -c '/usr/bin/octavia-db-manage upgrade head'"
+ step_4:
+ map_merge:
+ - octavia_api:
+ start_order: 2
+ image: *octavia_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: *octavia_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - if:
+ - internal_tls_enabled
+ - octavia_api_tls_proxy:
+ start_order: 2
+ image: *octavia_api_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/octavia_api_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/octavia/etc/httpd/:/etc/httpd/:ro
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - {}
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/octavia
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable octavia_api service
+ tags: step2
+ service: name=openstack-octavia-api state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Octavia health-manager service configured with Puppet
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerOctaviaHealthManagerImage:
+ description: image
+ default: 'centos-binary-octavia-health-manager:latest'
+ type: string
+ DockerOctaviaConfigImage:
+ description: The container image to use for the octavia config_volume
+ default: 'centos-binary-octavia-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ OctaviaHealthManagerPuppetBase:
+ type: ../../puppet/services/octavia-health-manager.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Octavia health-manager role.
+ value:
+ service_name: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [OctaviaHealthManagerPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [OctaviaHealthManagerPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: octavia
+ puppet_tags: octavia_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/octavia_health_manager.json:
+ command: /usr/bin/octavia-health-manager --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/health-manager.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-health-manager
+ docker_config:
+ step_2:
+ octavia_health_manager_init_dirs:
+ start_order: 0
+ image: &octavia_health_manager_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaHealthManagerImage} ]
+ user: root
+ volumes:
+ - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+ command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-health-manager; chown -R octavia:octavia /etc/octavia/conf.d/octavia-health-manager']
+ step_4:
+ octavia_health_manager:
+ start_order: 2
+ image: *octavia_health_manager_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/octavia_health_manager.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+ - /var/log/containers/octavia:/var/log/octavia
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/octavia
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable octavia_health_manager service
+ tags: step2
+ service: name=openstack-octavia-health-manager state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Octavia service configured with Puppet
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerOctaviaHousekeepingImage:
+ description: image
+ default: 'centos-binary-octavia-housekeeping:latest'
+ type: string
+ DockerOctaviaConfigImage:
+ description: The container image to use for the octavia config_volume
+ default: 'centos-binary-octavia-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ OctaviaHousekeepingPuppetBase:
+ type: ../../puppet/services/octavia-housekeeping.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Octavia housekeeping role.
+ value:
+ service_name: {get_attr: [OctaviaHousekeepingPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [OctaviaHousekeepingPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [OctaviaHousekeepingPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [OctaviaHousekeepingPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: octavia
+ puppet_tags: octavia_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/octavia_housekeeping.json:
+ command: /usr/bin/octavia-housekeeping --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/housekeeping.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-housekeeping
+ docker_config:
+ step_2:
+ octavia_housekeeping_init_dirs:
+ start_order: 0
+ image: &octavia_housekeeping_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaHousekeepingImage} ]
+ user: root
+ volumes:
+ - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+ command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-housekeeping; chown -R octavia:octavia /etc/octavia/conf.d/octavia-housekeeping']
+ step_4:
+ octavia_housekeeping:
+ start_order: 2
+ image: *octavia_housekeeping_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/octavia_housekeeping.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+ - /var/log/containers/octavia:/var/log/octavia
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/octavia
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable octavia_housekeeping service
+ tags: step2
+ service: name=openstack-octavia-housekeeping state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Octavia worker service configured with Puppet
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerOctaviaWorkerImage:
+ description: image
+ default: 'centos-binary-octavia-worker:latest'
+ type: string
+ DockerOctaviaConfigImage:
+ description: The container image to use for the octavia config_volume
+ default: 'centos-binary-octavia-api:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ OctaviaWorkerPuppetBase:
+ type: ../../puppet/services/octavia-worker.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Octavia worker role.
+ value:
+ service_name: {get_attr: [OctaviaWorkerPuppetBase, role_data, service_name]}
+ config_settings: {get_attr: [OctaviaWorkerPuppetBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [OctaviaWorkerPuppetBase, role_data, step_config]
+ service_config_settings: {get_attr: [OctaviaWorkerPuppetBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: octavia
+ puppet_tags: octavia_config
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/octavia_worker.json:
+ command: /usr/bin/octavia-worker --config-file /usr/share/octavia/octavia-dist.conf --config-file /etc/octavia/octavia.conf --log-file /var/log/octavia/worker.log --config-dir /etc/octavia/conf.d/common --config-dir /etc/octavia/conf.d/octavia-worker
+ docker_config:
+ step_2:
+ octavia_worker_init_dirs:
+ start_order: 0
+ image: &octavia_worker_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOctaviaWorkerImage} ]
+ user: root
+ volumes:
+ - /var/lib/config-data/octavia/etc/octavia:/etc/octavia/
+ command: ['/bin/bash', '-c', 'mkdir -p /etc/octavia/conf.d/octavia-worker; chown -R octavia:octavia /etc/octavia/conf.d/octavia-worker']
+ step_4:
+ octavia_worker:
+ start_order: 2
+ image: *octavia_worker_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/octavia_worker.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/octavia/etc/octavia/:/etc/octavia/:ro
+ - /var/log/containers/octavia:/var/log/octavia
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/octavia
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable octavia_worker service
+ tags: step2
+ service: name=openstack-octavia-worker state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Cinder Backup service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCinderBackupImage:
+ description: image
+ default: 'centos-binary-cinder-backup:latest'
+ type: string
+ DockerCinderConfigImage:
+ description: The container image to use for the cinder config_volume
+ default: 'centos-binary-cinder-api:latest'
+ type: string
+ CinderBackupBackend:
+ default: swift
+ description: The short name of the Cinder Backup backend to use.
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'ceph']
+ CinderBackupRbdPoolName:
+ default: backups
+ type: string
+ CephClientUserName:
+ default: openstack
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+
+resources:
+
+ CinderBackupBase:
+ type: ../../../puppet/services/cinder-backup.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ CinderBackupBackend: {get_param: CinderBackupBackend}
+ CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
+ CephClientUserName: {get_param: CephClientUserName}
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Backup role.
+ value:
+ service_name: {get_attr: [CinderBackupBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [CinderBackupBase, role_data, config_settings]
+ - tripleo::profile::pacemaker::cinder::backup_bundle::cinder_backup_docker_image: &cinder_backup_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderBackupImage} ]
+ cinder::backup::manage_service: false
+ cinder::backup::enabled: false
+ step_config: ""
+ service_config_settings: {get_attr: [CinderBackupBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: cinder
+ puppet_tags: cinder_config,file,concat,file_line
+ step_config: {get_attr: [CinderBackupBase, role_data, step_config]}
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/cinder_backup.json:
+ command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+ permissions:
+ - path: /var/lib/cinder
+ owner: cinder:cinder
+ recurse: true
+ - path: /var/log/cinder
+ owner: cinder:cinder
+ recurse: true
+ docker_config:
+ step_3:
+ cinder_backup_init_logs:
+ start_order: 0
+ image: *cinder_backup_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/cinder:/var/log/cinder
+ command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+ step_5:
+ cinder_backup_init_bundle:
+ start_order: 1
+ detach: false
+ net: host
+ user: root
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 5}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file_line,concat,augeas,TAGS --debug -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location'
+ CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::backup_bundle'
+ image: *cinder_backup_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/lib/cinder
+ - /var/log/containers/cinder
+ upgrade_tasks:
+ - name: Stop and disable cinder_backup service
+ tags: step2
+ service: name=openstack-cinder-backup state=stopped enabled=no
description: image
default: 'centos-binary-cinder-volume:latest'
type: string
- # we configure all cinder services in the same cinder base container
DockerCinderConfigImage:
- description: image
+ description: The container image to use for the cinder config_volume
default: 'centos-binary-cinder-api:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-mariadb:latest'
type: string
+ DockerClustercheckConfigImage:
+ description: The container image to use for the clustercheck config_volume
+ default: 'centos-binary-mariadb:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: clustercheck
puppet_tags: file # set this even though file is the default
step_config: "include ::tripleo::profile::pacemaker::clustercheck"
- config_image: &clustercheck_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/clustercheck.json:
command: /usr/sbin/xinetd -dontfork
step_2:
clustercheck:
start_order: 1
- image: *clustercheck_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckImage} ]
restart: always
net: host
volumes:
description: image
default: 'centos-binary-mariadb:latest'
type: string
+ DockerMysqlConfigImage:
+ description: The container image to use for the mysql config_volume
+ default: 'centos-binary-mariadb:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
- - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }"
- "exec {'wait-for-settle': command => '/bin/true' }"
- "include ::tripleo::profile::pacemaker::database::mysql_bundle"
- config_image: *mysql_image
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerMysqlConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/mysql.json:
command: /usr/sbin/pacemaker_remoted
description: image
default: 'centos-binary-redis:latest'
type: string
+ DockerRedisConfigImage:
+ description: The container image to use for the redis config_volume
+ default: 'centos-binary-redis:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
puppet_tags: 'exec'
step_config:
get_attr: [RedisBase, role_data, step_config]
- config_image: *redis_image
+ config_image: &redis_config_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRedisConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/redis.json:
command: /usr/sbin/pacemaker_remoted
params:
TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::redis_bundle'
- image: *redis_image
+ image: *redis_config_image
volumes:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
description: image
default: 'centos-binary-haproxy:latest'
type: string
+ DockerHAProxyConfigImage:
+ description: The container image to use for the haproxy config_volume
+ default: 'centos-binary-haproxy:latest'
+ type: string
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
- "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
- "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
- 'include ::tripleo::profile::pacemaker::haproxy_bundle'
- config_image: *haproxy_image
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/haproxy.json:
command: haproxy -f /etc/haproxy/haproxy.cfg
description: image
default: 'centos-binary-rabbitmq:latest'
type: string
+ DockerRabbitmqConfigImage:
+ description: The container image to use for the rabbitmq config_volume
+ default: 'centos-binary-rabbitmq:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: rabbitmq
puppet_tags: file
step_config: *step_config
- config_image: *rabbitmq_image
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/rabbitmq.json:
command: /usr/sbin/pacemaker_remoted
description: image
default: 'centos-binary-panko-api:latest'
type: string
+ DockerPankoConfigImage:
+ description: The container image to use for the panko config_volume
+ default: 'centos-binary-panko-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: panko
puppet_tags: panko_api_paste_ini,panko_config
step_config: *step_config
- config_image: &panko_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerPankoConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/panko_api.json:
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
step_2:
panko_init_log:
- image: *panko_image
+ image: &panko_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerPankoApiImage} ]
user: root
volumes:
- /var/log/containers/panko:/var/log/panko
command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
step_3:
panko_db_sync:
- image: *panko_image
+ image: *panko_api_image
net: host
detach: false
privileged: false
step_4:
panko_api:
start_order: 2
- image: *panko_image
+ image: *panko_api_image
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-rabbitmq:latest'
type: string
+ DockerRabbitmqConfigImage:
+ description: The container image to use for the rabbitmq config_volume
+ default: 'centos-binary-rabbitmq:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
puppet_config:
config_volume: rabbitmq
step_config: *step_config
- config_image: &rabbitmq_image
+ config_image: &rabbitmq_config_image
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/rabbitmq.json:
command: /usr/lib/rabbitmq/bin/rabbitmq-server
rabbitmq_init_logs:
start_order: 0
detach: false
- image: *rabbitmq_image
+ image: &rabbitmq_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRabbitmqImage} ]
privileged: false
user: root
volumes:
config_volume: 'rabbit_init_tasks'
puppet_tags: 'rabbitmq_policy,rabbitmq_user'
step_config: 'include ::tripleo::profile::base::rabbitmq'
- config_image: *rabbitmq_image
+ config_image: *rabbitmq_config_image
volumes:
- /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
- /var/lib/rabbitmq:/var/lib/rabbitmq:ro
description: image
default: 'centos-binary-sahara-api:latest'
type: string
+ DockerSaharaConfigImage:
+ description: The container image to use for the sahara config_volume
+ default: 'centos-binary-sahara-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: sahara
puppet_tags: sahara_api_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
step_config: *step_config
- config_image: &sahara_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSaharaApiImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerSaharaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/sahara-api.json:
command: /usr/bin/sahara-api --config-file /etc/sahara/sahara.conf
docker_config:
step_3:
sahara_db_sync:
- image: *sahara_image
+ image: &sahara_api_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSaharaApiImage} ]
net: host
privileged: false
detach: false
command: "/usr/bin/bootstrap_host_exec sahara_api su sahara -s /bin/bash -c 'sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head'"
step_4:
sahara_api:
- image: *sahara_image
+ image: *sahara_api_image
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-sahara-engine:latest'
type: string
+ DockerSaharaConfigImage:
+ description: The container image to use for the sahara config_volume
+ default: 'centos-binary-sahara-api:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: sahara
puppet_tags: sahara_engine_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
step_config: *step_config
- config_image: &sahara_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSaharaEngineImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerSaharaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/sahara-engine.json:
command: /usr/bin/sahara-engine --config-file /etc/sahara/sahara.conf
docker_config:
step_4:
sahara_engine:
- image: *sahara_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSaharaEngineImage} ]
net: host
privileged: false
restart: always
description: image
default: 'centos-binary-sensu-client:latest'
type: string
+ DockerSensuConfigImage:
+ description: The container image to use for the sensu config_volume
+ default: 'centos-binary-sensu-client:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: sensu
puppet_tags: sensu_rabbitmq_config,sensu_client_config,sensu_check_config,sensu_check
step_config: *step_config
- config_image: &sensu_client_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSensuClientImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerSensuConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/sensu-client.json:
- command: /usr/bin/sensu-client -d /etc/sensu/conf.d/
+ command: /usr/bin/sensu-client -d /etc/sensu/conf.d/ -l /var/log/sensu/sensu-client.log
+ permissions:
+ - path: /var/log/sensu
+ owner: sensu:sensu
+ recurse: true
docker_config:
step_3:
sensu_client:
- image: *sensu_client_image
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSensuClientImage} ]
net: host
privileged: true
# NOTE(mmagr) kolla image changes the user to 'sensu', we need it
- /var/run/docker.sock:/var/run/docker.sock:rw
- /var/lib/kolla/config_files/sensu-client.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/sensu/etc/sensu/:/etc/sensu/:ro
+ - /var/log/containers/sensu:/var/log/sensu:rw
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/sensu
+ state: directory
upgrade_tasks:
- name: Stop and disable sensu-client service
tags: step2
description: image
default: 'centos-binary-swift-proxy-server:latest'
type: string
+ DockerSwiftConfigImage:
+ description: The container image to use for the swift config_volume
+ default: 'centos-binary-swift-proxy-server:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: swift
puppet_tags: swift_proxy_config
step_config: *step_config
- config_image: &swift_proxy_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/swift_proxy.json:
command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
step_4:
map_merge:
- swift_proxy:
- image: *swift_proxy_image
+ image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
net: host
user: swift
restart: always
description: namespace
default: 'tripleoupstream'
type: string
- DockerSwiftProxyImage:
- description: image
+ DockerSwiftConfigImage:
+ description: The container image to use for the swift config_volume
default: 'centos-binary-swift-proxy-server:latest'
type: string
ServiceNetMap:
config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftConfigImage} ]
kolla_config: {}
docker_config: {}
description: image
default: 'centos-binary-swift-object:latest'
type: string
+ DockerSwiftConfigImage:
+ description: The container image to use for the swift config_volume
+ default: 'centos-binary-swift-proxy-server:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: swift
puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
step_config: *step_config
- config_image: &swift_proxy_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/swift_account_auditor.json:
command: /usr/bin/swift-account-auditor /etc/swift/account-server.conf
- /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_expirer:
- image: *swift_proxy_image
+ image: &swift_proxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerSwiftProxyImage} ]
net: host
user: swift
restart: always
default: 'centos-binary-tacker:latest'
type: string
DockerTackerConfigImage:
- description: image
+ description: The container image to use for the tacker config_volume
default: 'centos-binary-tacker:latest'
type: string
EndpointMap:
description: image
default: 'centos-binary-zaqar:latest'
type: string
+ DockerZaqarConfigImage:
+ description: The container image to use for the zaqar config_volume
+ default: 'centos-binary-zaqar:latest'
+ type: string
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_volume: zaqar
puppet_tags: zaqar_config
step_config: *step_config
- config_image: &zaqar_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerZaqarConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/zaqar.json:
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
step_4:
zaqar:
- image: *zaqar_image
+ image: &zaqar_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
net: host
privileged: false
restart: always
OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
+ OS::TripleO::Services::MySQLClient: ../docker/services/database/mysql-client.yaml
OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
OS::TripleO::Services::Horizon: ../docker/services/horizon.yaml
OS::TripleO::Services::Iscsid: ../docker/services/iscsid.yaml
OS::TripleO::Services::Multipathd: ../docker/services/multipathd.yaml
- OS::TripleO::Services::CinderApi: ../docker/services/cinder-api.yaml
- OS::TripleO::Services::CinderScheduler: ../docker/services/cinder-scheduler.yaml
# FIXME: Had to remove these to unblock containers CI. They should be put back when fixed.
+ # OS::TripleO::Services::CinderApi: ../docker/services/cinder-api.yaml
+ # OS::TripleO::Services::CinderScheduler: ../docker/services/cinder-scheduler.yaml
# OS::TripleO::Services::CinderBackup: ../docker/services/cinder-backup.yaml
# OS::TripleO::Services::CinderVolume: ../docker/services/cinder-volume.yaml
--- /dev/null
+# A Heat environment that can be used to deploy OpenDaylight with L3 DVR and DPDK
+resource_registry:
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+ OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
+ OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+
+parameter_defaults:
+ NeutronEnableForceMetadata: true
+ NeutronMechanismDrivers: 'opendaylight_v2'
+ NeutronServicePlugins: 'odl-router_v2'
+ NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+ ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+ ## This can be done using ComputeKernelArgs as shown below.
+ ComputeParameters:
+ #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+ ## due to CPU contention of DPDK PMD threads.
+ OvsEnableDpdk: True
+ ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on compute overcloud nodes and set the following parameters:
+ #OvsDpdkSocketMemory: "" # Sets the amount of hugepage memory to assign per NUMA node.
+ # It is recommended to use the socket closest to the PCIe slot used for the
+ # desired DPDK NIC. Format should be comma separated per socket string such as:
+ # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+ #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+ #OvsPmdCoreList: "" # List or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ # location to cores on socket, number of hyper-threaded logical cores, and
+ # desired number of PMD threads can all play a role in configuring this setting.
+ # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+ # If using hyperthreading then specify both logical cores that would equal the
+ # physical core. Also, specifying more than one core will trigger multiple PMD
+ # threads to be spawned, which may improve dataplane performance.
+ #NovaVcpuPinSet: "" # Cores to pin Nova instances to. For maximum performance, select cores
+ # on the same NUMA node(s) selected for previous settings.
-## A Heat environment that can be used to deploy DPDK with OVS
+# A Heat environment that can be used to deploy DPDK with OVS
+# Deploying DPDK requires enabling hugepages for the overcloud nodes
resource_registry:
OS::TripleO::Services::ComputeNeutronOvsAgent: ../puppet/services/neutron-ovs-dpdk-agent.yaml
parameter_defaults:
- ## NeutronDpdkCoreList and NeutronDpdkMemoryChannels are REQUIRED settings.
- ## Attempting to deploy DPDK without appropriate values will cause deployment to fail or lead to unstable deployments.
- #NeutronDpdkCoreList: ""
- #NeutronDpdkMemoryChannels: ""
-
NeutronDatapathType: "netdev"
NeutronVhostuserSocketDir: "/var/lib/vhost_sockets"
-
- #NeutronDpdkSocketMemory: ""
- #NeutronDpdkDriverType: "vfio-pci"
- #NovaReservedHostMemory: 4096
- #NovaVcpuPinSet: ""
-
+ NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
+ ## Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
+ ## It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
+ ## This can be done using ComputeKernelArgs as shown below.
+ #ComputeParameters:
+ #ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
+ ## due to CPU contention of DPDK PMD threads.
+ ## It is highly recommended to enable isolcpus (via ComputeKernelArgs) on compute overcloud nodes and set the following parameters:
+ #OvsDpdkSocketMemory: "" # Sets the amount of hugepage memory to assign per NUMA node.
+ # It is recommended to use the socket closest to the PCIe slot used for the
+ # desired DPDK NIC. Format should be comma separated per socket string such as:
+ # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
+ #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
+ #OvsPmdCoreList: "" # List or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ # location to cores on socket, number of hyper-threaded logical cores, and
+ # desired number of PMD threads can all play a role in configuring this setting.
+ # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
+ # If using hyperthreading then specify both logical cores that would equal the
+ # physical core. Also, specifying more than one core will trigger multiple PMD
+ # threads to be spawned, which may improve dataplane performance.
+ #NovaVcpuPinSet: "" # Cores to pin Nova instances to. For maximum performance, select cores
+ # on the same NUMA node(s) selected for previous settings.
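+ ## Example (hypothetical values for a dual-socket host with the DPDK NIC on
+ ## socket 0; adjust to the actual hardware layout before use):
+ #ComputeParameters:
+ #  ComputeKernelArgs: "intel_iommu=on default_hugepagesz=2MB hugepagesz=2MB hugepages=2048"
+ #  OvsDpdkSocketMemory: "1024,0"
+ #  OvsPmdCoreList: "2,22,3,23"
+ #  NovaVcpuPinSet: "4-21,24-41"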
--- /dev/null
+# An environment which creates an Overcloud without the use of pacemaker
+# (i.e. only with keepalived and systemd for all resources)
+resource_registry:
+ OS::TripleO::Tasks::ControllerPreConfig: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPostConfig: OS::Heat::None
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: OS::Heat::None
+
+ OS::TripleO::Services::CinderVolume: ../puppet/services/cinder-volume.yaml
+ OS::TripleO::Services::RabbitMQ: ../puppet/services/rabbitmq.yaml
+ OS::TripleO::Services::HAproxy: ../puppet/services/haproxy.yaml
+ OS::TripleO::Services::Redis: ../puppet/services/database/redis.yaml
+ OS::TripleO::Services::MySQL: ../puppet/services/database/mysql.yaml
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+ OS::TripleO::Services::Pacemaker: OS::Heat::None
+ OS::TripleO::Services::PacemakerRemote: OS::Heat::None
+
--- /dev/null
+resource_registry:
+ OS::TripleO::AllNodes::SoftwareConfig: OS::Heat::None
+ OS::TripleO::PostDeploySteps: OS::Heat::None
+ OS::TripleO::DefaultPasswords: OS::Heat::None
+ OS::TripleO::RandomString: OS::Heat::None
+ OS::TripleO::AllNodesDeployment: OS::Heat::None
+
+parameter_defaults:
+ # Deploy no services
+{% for role in roles %}
+ {{role.name}}Services: []
+{% endfor %}
+
+ # Consistent Hostname format
+ ControllerHostnameFormat: overcloud-controller-%index%
+ ComputeHostnameFormat: overcloud-novacompute-%index%
+ ObjectStorageHostnameFormat: overcloud-objectstorage-%index%
+ CephStorageHostnameFormat: overcloud-cephstorage-%index%
+ BlockStorageHostnameFormat: overcloud-blockstorage-%index%
--- /dev/null
+parameter_defaults:
+ # Consistent Hostname format
+ ControllerDeployedServerHostnameFormat: overcloud-controller-%index%
+ ComputeDeployedServerHostnameFormat: overcloud-novacompute-%index%
+ ObjectStorageDeployedServerHostnameFormat: overcloud-objectstorage-%index%
+ CephStorageDeployedServerHostnameFormat: overcloud-cephstorage-%index%
+ BlockStorageDeployedServerHostnameFormat: overcloud-blockstorage-%index%
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::OctaviaApi: ../../docker/services/octavia-api.yaml
+ OS::TripleO::Services::OctaviaHousekeeping: ../../docker/services/octavia-housekeeping.yaml
+ OS::TripleO::Services::OctaviaHealthManager: ../../docker/services/octavia-health-manager.yaml
+ OS::TripleO::Services::OctaviaWorker: ../../docker/services/octavia-worker.yaml
OS::TripleO::Services::IronicApi: ../../puppet/services/ironic-api.yaml
OS::TripleO::Services::IronicConductor: ../../puppet/services/ironic-conductor.yaml
OS::TripleO::Services::NovaIronic: ../../puppet/services/nova-ironic.yaml
+parameter_defaults:
+ NovaSchedulerDiscoverHostsInCellsInterval: 15
description: |
When enabled, the system will perform a yum update after performing the
RHEL Registration process.
- deployment_actions:
- default: ['CREATE', 'UPDATE']
- type: comma_delimited_list
- description: >
- List of stack actions that will trigger any deployments in this
- templates. The actions will be an empty list of the server is in the
- toplevel DeploymentServerBlacklist parameter's value.
-
-conditions:
- deployment_actions_empty:
- equals:
- - {get_param: deployment_actions}
- - []
resources:
name: RHELUnregistrationDeployment
server: {get_param: server}
config: {get_resource: RHELUnregistration}
- actions:
- if:
- - deployment_actions_empty
- - []
- - ['DELETE'] # Only do this on DELETE
+ actions: ['DELETE'] # Only do this on DELETE
input_values:
REG_METHOD: {get_param: rhel_reg_method}
name: UpdateDeploymentAfterRHELRegistration
config: {get_resource: YumUpdateConfigurationAfterRHELRegistration}
server: {get_param: server}
- actions:
- if:
- - deployment_actions_empty
- - []
- - ['CREATE'] # Only do this on CREATE
+ actions: ['CREATE'] # Only do this on CREATE
outputs:
deploy_stdout:
line: 'isolated_cores={{ _TUNED_CORES_ }}'
when: _TUNED_CORES_|default("") != ""
- - name: Tune-d provile activation
+ - name: Tune-d profile activation
shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }}
become: true
when: _TUNED_PROFILE_NAME_|default("") != ""
parameters:
server:
type: string
- deployment_actions:
- default: ['CREATE', 'UPDATE']
- type: comma_delimited_list
- description: >
- List of stack actions that will trigger any deployments in this
- templates. The actions will be an empty list of the server is in the
- toplevel DeploymentServerBlacklist parameter's value.
-
-conditions:
- deployment_actions_empty:
- equals:
- - {get_param: deployment_actions}
- - []
resources:
name: SomeDeployment
server: {get_param: server}
config: {get_resource: SomeConfig}
- actions:
- if:
- - deployment_actions_empty
- - []
- - ['CREATE'] # Only do this on CREATE
actions: ['CREATE'] # Only do this on CREATE
RebootConfig:
name: RebootDeployment
server: {get_param: server}
config: {get_resource: RebootConfig}
- actions:
- if:
- - deployment_actions_empty
- - []
- - ['CREATE'] # Only do this on CREATE
+ actions: ['CREATE'] # Only do this on CREATE
signal_transport: NO_SIGNAL
{{role}}HostCpusList:
type: string
default: ""
- deployment_actions:
- default: ['CREATE', 'UPDATE']
- type: comma_delimited_list
- description: >
- List of stack actions that will trigger any deployments in this
- templates. The actions will be an empty list of the server is in the
- toplevel DeploymentServerBlacklist parameter's value.
parameter_group:
- label: deprecated
equals:
- get_param: {{role}}TunedProfileName
- ""
- deployment_actions_empty:
- equals:
- - {get_param: deployment_actions}
- - []
resources:
name: HostParametersDeployment
server: {get_param: server}
config: {get_resource: HostParametersConfig}
- actions:
- if:
- - deployment_actions_empty
- - []
- - ['CREATE'] # Only do this on CREATE
+ actions: ['CREATE'] # Only do this on CREATE
input_values:
_KERNEL_ARGS_: {get_param: {{role}}KernelArgs}
_TUNED_PROFILE_NAME_: {get_param: {{role}}TunedProfileName}
name: RebootDeployment
server: {get_param: server}
config: {get_resource: RebootConfig}
- actions:
- if:
- - deployment_actions_empty
- - []
- - ['CREATE'] # Only do this on CREATE
+ actions: ['CREATE'] # Only do this on CREATE
signal_transport: NO_SIGNAL
outputs:
type: json
description: Role Specific parameters
default: {}
- deployment_actions:
- default: ['CREATE', 'UPDATE']
+ ServiceNames:
type: comma_delimited_list
+ default: []
+ IsolCpusList:
+ default: "0"
+ description: List of cores to be isolated by tuned
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]+"
+ OvsEnableDpdk:
+ default: false
+ description: Whether or not to enable DPDK in OVS
+ type: boolean
+ OvsDpdkCoreList:
+ description: >
+ List of cores to be used for DPDK lcore threads. Note, these threads
+ are used by the OVS control path for validation and handling functions.
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ""
+ OvsDpdkMemoryChannels:
+ description: Number of memory channels per socket to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ""
+ OvsDpdkSocketMemory:
+ default: ""
+ description: >
+ Sets the amount of hugepage memory to assign per NUMA node. It is
+ recommended to use the socket closest to the PCIe slot used for the
+ desired DPDK NIC. The format should be in "<socket 0 mem>, <socket 1
+ mem>, <socket n mem>", where the value is specified in MB. For example:
+ "1024,0".
+ type: string
+ OvsDpdkDriverType:
+ default: "vfio-pci"
+ description: >
+ DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+ this UIO/PMD driver.
+ type: string
+ OvsPmdCoreList:
description: >
- List of stack actions that will trigger any deployments in this
- templates. The actions will be an empty list of the server is in the
- toplevel DeploymentServerBlacklist parameter's value.
+ A list or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ location to cores on socket, number of hyper-threaded logical cores, and
+ desired number of PMD threads can all play a role in configuring this
+ setting. These cores should be on the same socket where
+ OvsDpdkSocketMemory is assigned. If using hyperthreading then specify
+ both logical cores that would equal the physical core. Also, specifying
+ more than one core will trigger multiple PMD threads to be spawned which
+ may improve dataplane performance.
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ type: string
+ default: ""
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Queens cycle.
+ HostCpusList:
+ description: List of cores to be used for host processes
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]+"
+ default: '0'
+ NeutronDpdkCoreList:
+ description: List of cores to be used for DPDK Poll Mode Driver
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkMemoryChannels:
+ description: Number of memory channels to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ''
+ NeutronDpdkSocketMemory:
+ default: ''
+ description: Memory allocated for each socket
+ type: string
+ NeutronDpdkDriverType:
+ default: "vfio-pci"
+ description: DPDK Driver type
+ type: string
conditions:
is_host_config_required: {not: {equals: [{get_param: [RoleParameters, KernelArgs]}, ""]}}
- deployment_actions_empty:
- equals:
- - {get_param: deployment_actions}
- - []
+ # YAQL is enabled in conditions with https://review.openstack.org/#/c/467506/
+ is_dpdk_config_required:
+ or:
+ - yaql:
+ expression: $.data.service_names.contains('neutron_ovs_dpdk_agent')
+ data:
+ service_names: {get_param: ServiceNames}
+ - {get_param: OvsEnableDpdk}
+ - {get_param: [RoleParameters, OvsEnableDpdk]}
+ is_reboot_config_required:
+ or:
+ - is_host_config_required
+ - is_dpdk_config_required
+ l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+ pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+ mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+ socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+ driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+ isol_cpus_empty: {equals: [{get_param: IsolCpusList}, '0']}
resources:
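+  # Resolve the effective DPDK/host tuning values for this role: the inner
+  # map_replace applies any overrides supplied via RoleParameters, and the
+  # outer one fills the remaining values from the top-level parameters,
+  # falling back to the deprecated HostCpusList/NeutronDpdk* parameters when
+  # the newer Ovs* parameters are left at their empty defaults.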
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - map_replace:
+ - IsolCpusList: IsolCpusList
+ OvsDpdkCoreList: OvsDpdkCoreList
+ OvsDpdkMemoryChannels: OvsDpdkMemoryChannels
+ OvsDpdkSocketMemory: OvsDpdkSocketMemory
+ OvsDpdkDriverType: OvsDpdkDriverType
+ OvsPmdCoreList: OvsPmdCoreList
+ - values: {get_param: [RoleParameters]}
+ - values:
+ IsolCpusList: {if: [isol_cpus_empty, {get_param: HostCpusList}, {get_param: IsolCpusList}]}
+ OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+ OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+ OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+ OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+ OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
HostParametersConfig:
type: OS::Heat::SoftwareConfig
condition: is_host_config_required
name: HostParametersDeployment
server: {get_param: server}
config: {get_resource: HostParametersConfig}
- actions:
- if:
- - deployment_actions_empty
- - []
- - ['CREATE'] # Only do this on CREATE
+ actions: ['CREATE'] # Only do this on CREATE
input_values:
_KERNEL_ARGS_: {get_param: [RoleParameters, KernelArgs]}
_TUNED_PROFILE_NAME_: {get_param: [RoleParameters, TunedProfileName]}
- _TUNED_CORES_: {get_param: [RoleParameters, HostIsolatedCoreList]}
+ _TUNED_CORES_: {get_param: [RoleParameters, IsolCpusList]}
+
+ EnableDpdkConfig:
+ type: OS::Heat::SoftwareConfig
+ condition: is_dpdk_config_required
+ properties:
+ group: script
+ config:
+ str_replace:
+ template: |
+ #!/bin/bash
+ set -x
+ # DO NOT use --detailed-exitcodes
+ puppet apply --logdest console \
+ --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+ -e '
+ class {"vswitch::dpdk":
+ host_core_list => "$HOST_CORES",
+ pmd_core_list => "$PMD_CORES",
+ memory_channels => "$MEMORY_CHANNELS",
+ socket_mem => "$SOCKET_MEMORY",
+ }
+ '
+ params:
+ $HOST_CORES: {get_attr: [RoleParametersValue, value, OvsDpdkCoreList]}
+ $PMD_CORES: {get_attr: [RoleParametersValue, value, OvsPmdCoreList]}
+ $MEMORY_CHANNELS: {get_attr: [RoleParametersValue, value, OvsDpdkMemoryChannels]}
+ $SOCKET_MEMORY: {get_attr: [RoleParametersValue, value, OvsDpdkSocketMemory]}
+
+ EnableDpdkDeployment:
+ type: OS::Heat::SoftwareDeployment
+ condition: is_dpdk_config_required
+ properties:
+ name: EnableDpdkDeployment
+ server: {get_param: server}
+ config: {get_resource: EnableDpdkConfig}
+ actions: ['CREATE'] # Only do this on CREATE
RebootConfig:
type: OS::Heat::SoftwareConfig
- condition: is_host_config_required
+ condition: is_reboot_config_required
properties:
group: script
config: |
RebootDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: HostParametersDeployment
- condition: is_host_config_required
+ condition: is_reboot_config_required
properties:
name: RebootDeployment
server: {get_param: server}
config: {get_resource: RebootConfig}
- actions:
- if:
- - deployment_actions_empty
- - []
- - ['CREATE'] # Only do this on CREATE
+ actions: ['CREATE'] # Only do this on CREATE
signal_transport: NO_SIGNAL
outputs:
server:
description: ID of the node to apply this config to
type: string
- deployment_actions:
- default: ['CREATE', 'UPDATE']
- type: comma_delimited_list
- description: >
- List of stack actions that will trigger any deployments in this
- templates. The actions will be an empty list of the server is in the
- toplevel DeploymentServerBlacklist parameter's value.
resources:
SshHostPubKeyConfig:
properties:
config: {get_resource: SshHostPubKeyConfig}
server: {get_param: server}
- actions: {get_param: deployment_actions}
outputs:
parameters:
BondInterfaceOvsOptions:
default: ''
- description: 'The ovs_options string for the bond interface. Set things like
-
- lacp=active and/or bond_mode=balance-slb using this option.
-
- '
+ description: The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
- description: 'The balance-tcp bond mode is known to cause packet loss and
-
+ description: The balance-tcp bond mode is known to cause packet loss and
should not be used in BondInterfaceOvsOptions.
-
- '
ControlPlaneIp:
default: ''
description: IP address/subnet on the ctlplane network
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
ExternalNetworkVlanID:
default: 10
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: bond_mode=active-backup
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: bond_mode=active-backup
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: 'The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.'
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
description: 'The balance-tcp bond mode is known to cause packet loss and
-
- should not be used in BondInterfaceOvsOptions.
-
- '
+ should not be used in BondInterfaceOvsOptions.'
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: string
BondInterfaceOvsOptions:
default: ''
- description: The ovs_options string for the bond interface. Set things like lacp=active and/or bond_mode=balance-slb using
- this option.
+ description: The ovs_options or bonding_options string for the bond
+ interface. Set things like lacp=active and/or bond_mode=balance-slb
+ for OVS bonds or like mode=4 for Linux bonds using this option.
type: string
constraints:
- allowed_pattern: ^((?!balance.tcp).)*$
- description: 'The balance-tcp bond mode is known to cause packet loss and
-
+ description: The balance-tcp bond mode is known to cause packet loss and
should not be used in BondInterfaceOvsOptions.
-
- '
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
resources:
VipPort:
- type: OS::Neutron::Port
+ type: OS::TripleO::Network::Ports::ControlPlaneVipPort
properties:
network: {get_param: ControlPlaneNetwork}
name: {get_param: PortName}
SERVICE: {get_attr: [EnabledServicesValue, value]}
- values: {get_param: ServiceNetMap}
- values: {get_attr: [NetIpMapValue, value]}
+ ctlplane_service_ips:
+ description: >
+ Map of enabled services to a list of their ctlplane IP addresses
+ value:
+ yaql:
+ expression: dict($.data.map.items().where(len($[1]) > 0))
+ data:
+ map:
+ map_merge:
+ repeat:
+ template:
+ SERVICE_ctlplane_node_ips: {get_param: ControlPlaneIpList}
+ for_each:
+ SERVICE: {get_attr: [EnabledServicesValue, value]}
service_hostnames:
description: >
Map of enabled services to a list of hostnames where they're running
}
if [ -n '$network_config' ]; then
- if [ -z "${disable_configure_safe_defaults:-''}" ]; then
+ if [ -z "${disable_configure_safe_defaults:-}" ]; then
trap configure_safe_defaults EXIT
fi
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
CongressApiNetwork: internal_api
- GlanceApiNetwork: storage
+ GlanceApiNetwork: internal_api
IronicApiNetwork: ctlplane
IronicNetwork: ctlplane
IronicInspectorNetwork: ctlplane
OS::TripleO::PostDeploySteps: puppet/post.yaml
OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
+ OS::TripleO::AllNodesDeployment: OS::Heat::StructuredDeployments
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
+ OS::TripleO::RandomString: OS::Heat::RandomString
# Tasks (for internal TripleO usage)
OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
HOST: {get_param: CloudNameStorageManagement}
HeatAuthEncryptionKey:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
PcsdPassword:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 16
HorizonSecret:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 10
servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}AllNodesDeployment:
- type: OS::Heat::StructuredDeployments
+ type: OS::TripleO::AllNodesDeployment
depends_on:
{% for role_inner in roles %}
- {{role_inner.name}}HostsDeployment
UpdateIdentifier: {get_param: UpdateIdentifier}
MysqlRootPassword:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 10
RabbitCookie:
- type: OS::Heat::RandomString
+ type: OS::TripleO::RandomString
properties:
length: 20
salt: {get_param: RabbitCookieSalt}
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
+ stack_name: {get_param: 'OS::stack_name'}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
+ ctlplane_service_ips:
+ # Note (shardy) this somewhat complex yaql may be replaced
+ # with a map_deep_merge function in ocata. It merges the
+ # list of maps, but appends to colliding lists when a service
+ # is deployed on more than one role
+ yaql:
+ expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+ data:
+ l:
+{% for role in roles %}
+ - {get_attr: [{{role.name}}IpListMap, ctlplane_service_ips]}
+{% endfor %}
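As the note above says, this yaql merges the per-role ctlplane_service_ips maps and concatenates the IP lists when the same service is deployed on more than one role. A rough plain-Python equivalent, with a hypothetical helper, made-up service keys following the SERVICE_ctlplane_node_ips pattern, and documentation-range addresses, would be::

    # Illustrative only: what the yaql merge does with the per-role maps.
    def merge_service_ips(role_maps):
        merged = {}
        for role_map in role_maps:
            if not role_map:                      # roles without services contribute nothing
                continue
            for service, ips in role_map.items():
                merged.setdefault(service, []).extend(ips)
        return merged

    controller = {'mysql_ctlplane_node_ips': ['192.0.2.10']}
    compute = {'nova_compute_ctlplane_node_ips': ['192.0.2.20', '192.0.2.21']}
    print(merge_service_ips([controller, compute, None]))
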
role_data:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{% endfor %}
+ ServerOsCollectConfigData:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+
outputs:
ManagedEndpoints:
description: Asserts that the keystone endpoints have been provisioned.
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{% endfor %}
+ ServerOsCollectConfigData:
+ description: The os-collect-config configuration associated with each server resource
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, attributes, os_collect_config]}
+{% endfor %}
+ VipMap:
+ description: Mapping of each network to VIP addresses. Also includes the Redis VIP.
+ value:
+ map_merge:
+ - {get_attr: [VipMap, net_ip_map]}
+ - redis: {get_attr: [RedisVirtualIP, ip_address]}
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
BlockStorage:
- {get_param: BlockStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: BlockStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
properties:
server: {get_resource: BlockStorage}
RoleParameters: {get_param: RoleParameters}
- deployment_actions: {get_attr: [DeploymentActions, value]}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
NodeExtraConfig:
depends_on: NodeTLSCAData
type: OS::TripleO::NodeExtraConfig
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: BlockStorage}
- ['CREATE', 'UPDATE']
- []
- DeploymentActions:
- type: OS::Heat::Value
- properties:
- value:
- if:
- - server_not_blacklisted
- - ['CREATE', 'UPDATE']
- - []
-
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
depends_on: BlockStorageDeployment
properties:
server: {get_resource: BlockStorage}
- deployment_actions: {get_attr: [DeploymentActions, value]}
outputs:
ip_address:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [BlockStorage, os_collect_config]}
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
CephStorage:
- {get_param: CephStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: CephStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
properties:
server: {get_resource: CephStorage}
RoleParameters: {get_param: RoleParameters}
- deployment_actions: {get_attr: [DeploymentActions, value]}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
CephStorageExtraConfigPre:
depends_on: CephStorageDeployment
type: OS::TripleO::CephStorageExtraConfigPre
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: CephStorage}
NodeExtraConfig:
depends_on: [CephStorageExtraConfigPre, NodeTLSCAData]
type: OS::TripleO::NodeExtraConfig
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: CephStorage}
- ['CREATE', 'UPDATE']
- []
- DeploymentActions:
- type: OS::Heat::Value
- properties:
- value:
- if:
- - server_not_blacklisted
- - ['CREATE', 'UPDATE']
- - []
-
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
depends_on: CephStorageDeployment
properties:
server: {get_resource: CephStorage}
- deployment_actions: {get_attr: [DeploymentActions, value]}
outputs:
ip_address:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [CephStorage, os_collect_config]}
type: string
NeutronPublicInterface:
default: nic1
- description: A port to add to the NeutronPhysicalBridge.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
NodeIndex:
type: number
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
server_not_blacklisted:
not:
equals:
- {get_param: NovaComputeServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: NovaComputeSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
properties:
server: {get_resource: NovaCompute}
RoleParameters: {get_param: RoleParameters}
- deployment_actions: {get_attr: [DeploymentActions, value]}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Compute::Net::SoftwareConfig
ComputeExtraConfigPre:
depends_on: NovaComputeDeployment
type: OS::TripleO::ComputeExtraConfigPre
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: NovaCompute}
NodeExtraConfig:
depends_on: [ComputeExtraConfigPre, NodeTLSCAData]
type: OS::TripleO::NodeExtraConfig
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: NovaCompute}
update_identifier:
get_param: UpdateIdentifier
- DeploymentActions:
- type: OS::Heat::Value
- properties:
- value:
- if:
- - server_not_blacklisted
- - ['CREATE', 'UPDATE']
- - []
-
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
depends_on: NovaComputeDeployment
properties:
server: {get_resource: NovaCompute}
- deployment_actions: {get_attr: [DeploymentActions, value]}
outputs:
ip_address:
value:
{get_resource: NovaCompute}
condition: server_not_blacklisted
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [NovaCompute, os_collect_config]}
type: string
constraints:
- custom_constraint: nova.keypair
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing external networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
parameter_groups:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
-
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
- {get_param: ControllerServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ControllerSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
properties:
server: {get_resource: Controller}
RoleParameters: {get_param: RoleParameters}
- deployment_actions: {get_attr: [DeploymentActions, value]}
+ ServiceNames: {get_param: ServiceNames}
NetworkConfig:
type: OS::TripleO::Controller::Net::SoftwareConfig
- {get_param: NetworkDeploymentActions}
- []
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
# Resource for site-specific injection of root certificate
ControllerExtraConfigPre:
depends_on: ControllerDeployment
type: OS::TripleO::ControllerExtraConfigPre
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: Controller}
NodeExtraConfig:
depends_on: [ControllerExtraConfigPre, NodeTLSData]
type: OS::TripleO::NodeExtraConfig
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: Controller}
update_identifier:
get_param: UpdateIdentifier
- DeploymentActions:
- type: OS::Heat::Value
- properties:
- value:
- if:
- - server_not_blacklisted
- - ['CREATE', 'UPDATE']
- - []
-
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
depends_on: ControllerDeployment
properties:
server: {get_resource: Controller}
- deployment_actions: {get_attr: [DeploymentActions, value]}
outputs:
ip_address:
tls_cert_modulus_md5:
description: MD5 checksum of the TLS Certificate Modulus
value: {get_attr: [NodeTLSData, cert_modulus_md5]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [Controller, os_collect_config]}
for URL in $(echo $artifact_urls | sed -e "s| |\n|g" | sort -u); do
curl --globoff -o $TMP_DATA/file_data "$URL"
if file -b $TMP_DATA/file_data | grep RPM &>/dev/null; then
- yum install -y $TMP_DATA/file_data
+ mv $TMP_DATA/file_data $TMP_DATA/file_data.rpm
+ yum install -y $TMP_DATA/file_data.rpm
+ rm $TMP_DATA/file_data.rpm
elif file -b $TMP_DATA/file_data | grep 'gzip compressed data' &>/dev/null; then
pushd /
tar xvzf $TMP_DATA/file_data
popd
else
- echo "ERROR: Unsupported file format."
+ echo "ERROR: Unsupported file format: $URL"
exit 1
fi
- rm $TMP_DATA/file_data
+ if [ -f $TMP_DATA/file_data ]; then
+ rm $TMP_DATA/file_data
+ fi
done
else
echo "No artifact_urls was set. Skipping..."
parameters:
servers:
type: json
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
-
+ ctlplane_service_ips:
+ type: json
UpdateIdentifier:
type: string
description: >
{%- endfor %}
properties:
servers: {get_param: servers}
+ stack_name: {get_param: stack_name}
role_data: {get_param: role_data}
+ ctlplane_service_ips: {get_param: ctlplane_service_ips}
outputs:
# Output the config for each role, just use Step1 as the config should be
type: json
description: Role Specific Parameters
default: {}
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
+ default: {}
conditions:
server_not_blacklisted:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
- {get_param: SwiftStorageServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: ObjectStorageSchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
properties:
server: {get_resource: SwiftStorage}
RoleParameters: {get_param: RoleParameters}
- deployment_actions: {get_attr: [DeploymentActions, value]}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
NodeExtraConfig:
depends_on: NodeTLSCAData
type: OS::TripleO::NodeExtraConfig
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: SwiftStorage}
- ['CREATE', 'UPDATE']
- []
- DeploymentActions:
- type: OS::Heat::Value
- properties:
- value:
- if:
- - server_not_blacklisted
- - ['CREATE', 'UPDATE']
- - []
-
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
depends_on: SwiftStorageHieraDeploy
properties:
server: {get_resource: SwiftStorage}
- deployment_actions: {get_attr: [DeploymentActions, value]}
outputs:
ip_address:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [SwiftStorage, os_collect_config]}
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
+ ctlplane_service_ips:
+ type: json
-resources:
{% include 'puppet-steps.j2' %}
+{% set deploy_steps_max = 6 %}
+
+conditions:
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}_Enabled:
+ or:
+ {% for role in roles %}
+ - not:
+ equals:
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks, step{{step}}]
+ - ''
+ - False
+ {% endfor %}
+{% endfor %}
+
+resources:
# Post deployment steps for all roles
# A single config is re-applied with an incrementing step number
{% for role in roles %}
StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
# Step through a series of configuration steps
-{% for step in range(1, 6) %}
+{% for step in range(1, deploy_steps_max) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
- {% if step == 1 %}
- depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
- {% else %}
depends_on:
+ - WorkflowTasks_Step{{step}}_Execution
+ # TODO(gfidente): the following if/else condition
+ # replicates what is already defined for the
+      # WorkflowTasks_StepX resource and can be removed
+ # if https://bugs.launchpad.net/heat/+bug/1700569
+ # is fixed.
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
{% for dep in roles %}
- {{dep.name}}Deployment_Step{{step -1}}
{% endfor %}
- {% endif %}
+ {% endif %}
properties:
name: {{role.name}}Deployment_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
{% endfor %}
+
+# BEGIN service_workflow_tasks handling
+{% for step in range(1, deploy_steps_max) %}
+ WorkflowTasks_Step{{step}}:
+ type: OS::Mistral::Workflow
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on:
+ {% if step == 1 %}
+ {% for dep in roles %}
+ - {{dep.name}}PreConfig
+ - {{dep.name}}ArtifactsDeploy
+ {% endfor %}
+ {% else %}
+ {% for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step -1}}
+ {% endfor %}
+ {% endif %}
+ properties:
+ name: {list_join: [".", ["tripleo", {get_param: stack_name}, "workflowtasks", "step{{step}}"]]}
+ type: direct
+ tasks:
+ yaql:
+ expression: $.data.where($ != '').select($.get('step{{step}}')).where($ != null).flatten()
+ data:
+ {% for role in roles %}
+ - get_param: [role_data, {{role.name}}, service_workflow_tasks]
+ {% endfor %}
+
+ WorkflowTasks_Step{{step}}_Execution:
+ type: OS::Mistral::ExternalResource
+ condition: WorkflowTasks_Step{{step}}_Enabled
+ depends_on: WorkflowTasks_Step{{step}}
+ properties:
+ actions:
+ CREATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ UPDATE:
+ workflow: { get_resource: WorkflowTasks_Step{{step}} }
+ params:
+ env:
+ service_ips: { get_param: ctlplane_service_ips }
+ always_update: true
+{% endfor %}
+# END service_workflow_tasks handling
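Each WorkflowTasks_Step{{step}} workflow simply gathers the service_workflow_tasks entries that every role defines for that step; the yaql above skips roles that define nothing and flattens the rest into one task list. A plain-Python sketch of that collection, using a hypothetical helper and the std.echo task from the documentation example, looks like::

    # Illustrative only: plain-Python view of the per-step task collection.
    def collect_step_tasks(role_workflow_tasks, step):
        tasks = []
        for role_tasks in role_workflow_tasks:
            if not role_tasks:                    # role has no service_workflow_tasks
                continue
            step_tasks = role_tasks.get('step%d' % step)
            if step_tasks:                        # only roles defining this step contribute
                tasks.extend(step_tasks)
        return tasks

    controller = {'step2': [{'name': 'echo', 'action': 'std.echo output=Hello'}]}
    print(collect_step_tasks([controller, ''], 2))
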
constraints:
- custom_constraint: nova.keypair
{% endif %}
+ NeutronPhysicalBridge:
+ default: 'br-ex'
+ description: An OVS bridge to create for accessing tenant networks.
+ type: string
NeutronPublicInterface:
default: nic1
- description: What interface to bridge onto br-ex for network nodes.
+ description: Which interface to add to the NeutronPhysicalBridge.
type: string
ServiceNetMap:
default: {}
RoleParameters:
type: json
description: Role Specific Parameters
+ DeploymentSwiftDataMap:
+ type: json
+ description: |
+ Map of servers to Swift container and object for storing deployment data.
+ The keys are the Heat assigned hostnames, and the value is a map of the
+ container/object name in Swift. Example value:
+ overcloud-controller-0:
+ container: overcloud-controller
+ object: 0
+ overcloud-controller-1:
+ container: overcloud-controller
+ object: 1
+ overcloud-controller-2:
+ container: overcloud-controller
+ object: 2
+ overcloud-novacompute-0:
+ container: overcloud-compute
+ object: 0
default: {}
conditions:
equals:
- {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
- 1
+ deployment_swift_data_map_unset:
+ equals:
+ - get_param:
+ - DeploymentSwiftDataMap
+ - {get_param: Hostname}
+ - ""
resources:
{{role}}:
- type: OS::TripleO::{{role.name}}Server
+ type: OS::TripleO::{{role}}Server
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
- {get_param: {{role}}ServerMetadata}
- {get_param: ServiceMetadataSettings}
scheduler_hints: {get_param: {{role}}SchedulerHints}
+ deployment_swift_data:
+ if:
+ - deployment_swift_data_map_unset
+ - {}
+ - {get_param: [DeploymentSwiftDataMap,
+ {get_param: Hostname}]}
# Combine the NodeAdminUserData and NodeUserData mime archives
UserData:
properties:
server: {get_resource: {{role}}}
RoleParameters: {get_param: RoleParameters}
- deployment_actions: {get_attr: [DeploymentActions, value]}
+ ServiceNames: {get_param: ServiceNames}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
server: {get_resource: {{role}}}
actions: {get_param: NetworkDeploymentActions}
input_values:
- bridge_name: br-ex
+ bridge_name: {get_param: NeutronPhysicalBridge}
interface_name: {get_param: NeutronPublicInterface}
actions:
if:
{{role}}ExtraConfigPre:
depends_on: {{role}}Deployment
type: OS::TripleO::{{role}}ExtraConfigPre
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: {{role}}}
NodeExtraConfig:
depends_on: [{{role}}ExtraConfigPre, NodeTLSCAData]
type: OS::TripleO::NodeExtraConfig
- # We have to use conditions here so that we don't break backwards
- # compatibility with templates everywhere
- condition: server_not_blacklisted
properties:
server: {get_resource: {{role}}}
- ['CREATE', 'UPDATE']
- []
- DeploymentActions:
- type: OS::Heat::Value
- properties:
- value:
- if:
- - server_not_blacklisted
- - ['CREATE', 'UPDATE']
- - []
-
SshHostPubKey:
type: OS::TripleO::Ssh::HostPubKey
depends_on: {{role}}Deployment
properties:
server: {get_resource: {{role}}}
- deployment_actions: {get_attr: [DeploymentActions, value]}
outputs:
ip_address:
management_ip_address:
description: IP address of the server in the management network
value: {get_attr: [ManagementPort, ip_address]}
+ os_collect_config:
+ description: The os-collect-config configuration associated with this server resource
+ value: {get_attr: [{{role}}, os_collect_config]}
5) Service activation (Pacemaker)
+It is also possible to use Mistral actions or workflows together with
+a deployment step; these are executed before the main configuration run.
+To describe actions or workflows from within a service use:
+
+ * service_workflow_tasks: One or more workflow task properties
+
+which expects a map where the key is the step and the value is a list of
+dictionaries, each describing a workflow task, for example::
+
+ service_workflow_tasks:
+ step2:
+ - name: echo
+ action: std.echo output=Hello
+ step3:
+ - name: external
+ workflow: my-pre-existing-workflow-name
+ input:
+ workflow_param1: value
+ workflow_param2: value
+
+The Heat guide for the `OS::Mistral::Workflow task property
+<https://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Mistral::Workflow-prop-tasks>`_
+has more details about the expected dictionary.
+
Batch Upgrade Steps
-------------------
template: "%{hiera('cloud_name_NETWORK')}"
params:
NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ dnsnames:
+ - str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ - str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
principal:
str_replace:
template: "mysql/%{hiera('cloud_name_NETWORK')}"
- service: mysql
network: {get_param: [ServiceNetMap, MysqlNetwork]}
type: vip
+ - service: mysql
+ network: {get_param: [ServiceNetMap, MysqlNetwork]}
+ type: node
- null
upgrade_tasks:
- name: Check for galera root password
- 26379
step_config: |
include ::tripleo::profile::base::database::redis
+ upgrade_tasks:
+ - name: Check if redis is deployed
+ command: systemctl is-enabled redis
+ tags: common
+ ignore_errors: True
+ register: redis_enabled
+ - name: "PreUpgrade step0,validation: Check if redis is running"
+ shell: >
+ /usr/bin/systemctl show 'redis' --property ActiveState |
+ grep '\bactive\b'
+ when: redis_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop redis service
+ tags: step1
+ when: redis_enabled.rc == 0
+ service: name=redis state=stopped
+ - name: Install redis package if it was disabled
+ tags: step3
+ yum: name=redis state=latest
+ when: redis_enabled.rc != 0
default: /dev/log
description: Syslog address where HAproxy will send its log
type: string
+ HAProxyStatsEnabled:
+ default: true
+ description: Whether or not to enable the HAProxy stats interface.
+ type: boolean
RedisPassword:
description: The password for Redis
type: string
tripleo::haproxy::redis_password: {get_param: RedisPassword}
tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
tripleo::haproxy::crl_file: {get_param: InternalTLSCRLPEMFile}
+ tripleo::haproxy::haproxy_stats: {get_param: HAProxyStatsEnabled}
tripleo::profile::base::haproxy::certificates_specs:
map_merge:
- get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
horizon::cache_backend: django.core.cache.backends.memcached.MemcachedCache
horizon::django_session_engine: 'django.contrib.sessions.backends.cache'
horizon::vhost_extra_params:
- add_listen: false
priority: 10
access_log_format: '%a %l %u %t \"%r\" %>s %b \"%%{}{Referer}i\" \"%%{}{User-Agent}i\"'
options: ['FollowSymLinks','MultiViews']
e.g. { ironic-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
default: {}
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
+
IronicBase:
type: ./ironic-base.yaml
properties:
config_settings:
map_merge:
- get_attr: [IronicBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- ironic::api::authtoken::password: {get_param: IronicPassword}
ironic::api::authtoken::project_name: 'service'
ironic::api::authtoken::user_domain_name: 'Default'
ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
# This is used to build links in responses
ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+ ironic::api::service_name: 'httpd'
ironic::policy::policies: {get_param: IronicApiPolicies}
+ ironic::wsgi::apache::bind_host: {get_param: [ServiceNetMap, IronicApiNetwork]}
+ ironic::wsgi::apache::port: {get_param: [EndpointMap, IronicInternal, port]}
+ ironic::wsgi::apache::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, IronicApiNetwork]}
+ ironic::wsgi::apache::ssl: {get_param: EnableInternalTLS}
tripleo.ironic_api.firewall_rules:
'133 ironic api':
dport:
- '%'
- "%{hiera('mysql_bind_host')}"
upgrade_tasks:
- - name: Stop ironic_api service
+ - name: Stop ironic_api service (before httpd support)
+ tags: step1
+ service: name=openstack-ironic-api state=stopped enabled=no
+ - name: Stop ironic_api service (running under httpd)
tags: step1
- service: name=openstack-ironic-api state=stopped
+ service: name=httpd state=stopped
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
outputs:
role_data:
expression: $.data.ovs_upgrade + $.data.neutron_ovs_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
neutron_ovs_upgrade:
- name: Check if neutron_ovs_agent is deployed
command: systemctl is-enabled neutron-openvswitch-agent
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- HostCpusList:
- default: "0"
- description: List of cores to be used for host process
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]+"
- NeutronDpdkCoreList:
- default: ""
- description: List of cores to be used for DPDK Poll Mode Driver
- type: string
- constraints:
- - allowed_pattern: "[0-9,-]*"
- NeutronDpdkMemoryChannels:
- default: ""
- description: Number of memory channels to be used for DPDK
- type: string
- constraints:
- - allowed_pattern: "[0-9]*"
- NeutronDpdkSocketMemory:
- default: ""
- description: Memory allocated for each socket
- type: string
- NeutronDpdkDriverType:
- default: "vfio-pci"
- description: DPDK Driver type
- type: string
# below parameters has to be set in neutron agent only for compute nodes.
# as of now there is no other usecase for these parameters except dpdk.
# should be moved to compute only ovs agent in case of any other usecases.
RoleName: {get_param: RoleName}
RoleParameters: {get_param: RoleParameters}
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
-
# Merging role-specific parameters (RoleParameters) with the default parameters.
# RoleParameters will have the precedence over the default parameters.
RoleParametersValue:
- map_replace:
- neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
- vswitch::dpdk::driver_type: NeutronDpdkDriverType
- vswitch::dpdk::host_core_list: HostCpusList
- vswitch::dpdk::pmd_core_list: NeutronDpdkCoreList
- vswitch::dpdk::memory_channels: NeutronDpdkMemoryChannels
- vswitch::dpdk::socket_mem: NeutronDpdkSocketMemory
- values: {get_param: [RoleParameters]}
- values:
NeutronDatapathType: {get_param: NeutronDatapathType}
NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
- NeutronDpdkDriverType: {get_param: NeutronDpdkDriverType}
- HostCpusList: {get_param: HostCpusList}
- NeutronDpdkCoreList: {get_param: NeutronDpdkCoreList}
- NeutronDpdkMemoryChannels: {get_param: NeutronDpdkMemoryChannels}
- NeutronDpdkSocketMemory: {get_param: NeutronDpdkSocketMemory}
+
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- keys:
tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
+ - get_attr: [Ovs, role_data, config_settings]
- get_attr: [RoleParametersValue, value]
step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
upgrade_tasks:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
resources:
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # ApacheServiceBase:
- # type: ./apache.yaml
- # properties:
- # ServiceNetMap: {get_param: ServiceNetMap}
- # DefaultPasswords: {get_param: DefaultPasswords}
- # EndpointMap: {get_param: EndpointMap}
- # RoleName: {get_param: RoleName}
- # RoleParameters: {get_param: RoleParameters}
- # EnableInternalTLS: {get_param: EnableInternalTLS}
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
type: ./nova-base.yaml
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- nova::cron::archive_deleted_rows::hour: '*/12'
nova::cron::archive_deleted_rows::destination: '/dev/null'
tripleo.nova_api.firewall_rules:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- nova_wsgi_enabled: false
- # nova::api::service_name: 'httpd'
- # nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+ nova_wsgi_enabled: true
+ nova::api::service_name: 'httpd'
+ nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- # nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
- # nova::wsgi::apache_api::servername:
- # str_replace:
- # template:
- # "%{hiera('fqdn_$NETWORK')}"
- # params:
- # $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache_api::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
nova::api::instance_name_template: {get_param: InstanceNameTemplate}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
- nova_workers_zero
- {}
- nova::api::osapi_compute_workers: {get_param: NovaWorkers}
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
+ nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
step_config: |
include tripleo::profile::base::nova::api
service_config_settings:
nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
nova::keystone::auth::password: {get_param: NovaPassword}
nova::keystone::auth::region: {get_param: KeystoneRegion}
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # metadata_settings:
- # get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: get bootstrap nodeid
- tags: common
- command: hiera bootstrap_nodeid
- register: bootstrap_node
- - name: set is_bootstrap_node fact
- tags: common
- set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- - name: Extra migration for nova tripleo/+bug/1656791
- tags: step0,pre-upgrade
- when: is_bootstrap_node
- command: nova-manage db online_data_migrations
- - name: Stop and disable nova_api service (pre-upgrade not under httpd)
- tags: step2
- service: name=openstack-nova-api state=stopped enabled=no
- - name: Create puppet manifest to set transport_url in nova.conf
- tags: step5
- when: is_bootstrap_node
- copy:
- dest: /root/nova-api_upgrade_manifest.pp
- mode: 0600
- content: >
- $transport_url = os_transport_url({
- 'transport' => hiera('messaging_service_name', 'rabbit'),
- 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),
- 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
- 'username' => hiera('nova::rabbit_userid', 'guest'),
- 'password' => hiera('nova::rabbit_password'),
- 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
- })
- oslo::messaging::default { 'nova_config':
- transport_url => $transport_url
- }
- - name: Run puppet apply to set tranport_url in nova.conf
- tags: step5
- when: is_bootstrap_node
- command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
- register: puppet_apply_nova_api_upgrade
- failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
- changed_when: puppet_apply_nova_api_upgrade.rc == 2
- - name: Setup cell_v2 (map cell0)
- tags: step5
- when: is_bootstrap_node
- shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
- - name: Setup cell_v2 (create default cell)
- tags: step5
- when: is_bootstrap_node
- # (owalsh) puppet-nova expects the cell name 'default'
- # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
- shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
- register: nova_api_create_cell
- failed_when: nova_api_create_cell.rc not in [0,2]
- changed_when: nova_api_create_cell.rc == 0
- - name: Setup cell_v2 (sync nova/cell DB)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage db sync
- async: {get_param: NovaDbSyncTimeout}
- poll: 10
- - name: Setup cell_v2 (get cell uuid)
- tags: step5
- when: is_bootstrap_node
- shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
- register: nova_api_cell_uuid
- - name: Setup cell_v2 (migrate hosts)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
- - name: Setup cell_v2 (migrate instances)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
- - name: Sync nova_api DB
- tags: step5
- command: nova-manage api_db sync
- when: is_bootstrap_node
- - name: Online data migration for nova
- tags: step5
- when: is_bootstrap_node
- command: nova-manage db online_data_migrations
+ yaql:
+ expression: $.data.apache_upgrade + $.data.nova_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ nova_api_upgrade:
+ - name: get bootstrap nodeid
+ tags: common
+ command: hiera bootstrap_nodeid
+ register: bootstrap_node
+ - name: set is_bootstrap_node fact
+ tags: common
+ set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+ - name: Extra migration for nova tripleo/+bug/1656791
+ tags: step0,pre-upgrade
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
+ - name: Stop and disable nova_api service (pre-upgrade not under httpd)
+ tags: step2
+ service: name=openstack-nova-api state=stopped enabled=no
+ - name: Create puppet manifest to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ copy:
+ dest: /root/nova-api_upgrade_manifest.pp
+ mode: 0600
+ content: >
+ $transport_url = os_transport_url({
+ 'transport' => hiera('messaging_service_name', 'rabbit'),
+ 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),
+ 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
+ 'username' => hiera('nova::rabbit_userid', 'guest'),
+ 'password' => hiera('nova::rabbit_password'),
+ 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
+ })
+ oslo::messaging::default { 'nova_config':
+ transport_url => $transport_url
+ }
+            - name: Run puppet apply to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+ register: puppet_apply_nova_api_upgrade
+ failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
+ changed_when: puppet_apply_nova_api_upgrade.rc == 2
+ - name: Setup cell_v2 (map cell0)
+ tags: step5
+ when: is_bootstrap_node
+ shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
+ - name: Setup cell_v2 (create default cell)
+ tags: step5
+ when: is_bootstrap_node
+ # (owalsh) puppet-nova expects the cell name 'default'
+ # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
+ shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
+ register: nova_api_create_cell
+ failed_when: nova_api_create_cell.rc not in [0,2]
+ changed_when: nova_api_create_cell.rc == 0
+ - name: Setup cell_v2 (sync nova/cell DB)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db sync
+ async: {get_param: NovaDbSyncTimeout}
+ poll: 10
+ - name: Setup cell_v2 (get cell uuid)
+ tags: step5
+ when: is_bootstrap_node
+ shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
+ register: nova_api_cell_uuid
+ - name: Setup cell_v2 (migrate hosts)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
+ - name: Setup cell_v2 (migrate instances)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
+ - name: Sync nova_api DB
+ tags: step5
+ command: nova-manage api_db sync
+ when: is_bootstrap_node
+ - name: Online data migration for nova
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
default:
tag: openstack.nova.scheduler
path: /var/log/nova/nova-scheduler.log
+ NovaSchedulerDiscoverHostsInCellsInterval:
+ type: number
+ default: -1
+ description: >
+ This value controls how often (in seconds) the scheduler should
+ attempt to discover new hosts that have been added to cells.
+ The default value of -1 disables the periodic task completely.
+ It is recommended to set this parameter for deployments using Ironic.
resources:
NovaBase:
- nova::ram_allocation_ratio: '1.0'
nova::scheduler::filter::scheduler_available_filters: {get_param: NovaSchedulerAvailableFilters}
nova::scheduler::filter::scheduler_default_filters: {get_param: NovaSchedulerDefaultFilters}
+ nova::scheduler::discover_hosts_in_cells_interval: {get_param: NovaSchedulerDiscoverHostsInCellsInterval}
step_config: |
include tripleo::profile::base::nova::scheduler
upgrade_tasks:
type: json
resources:
- OpenVswitchUpgrade:
- type: ./openvswitch-upgrade.yaml
+ Ovs:
+ type: ./openvswitch.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
value:
service_name: opendaylight_ovs
config_settings:
- opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
- opendaylight::username: {get_param: OpenDaylightUsername}
- opendaylight::password: {get_param: OpenDaylightPassword}
- opendaylight_check_url: {get_param: OpenDaylightCheckURL}
- opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
- neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
- neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
- tripleo.opendaylight_ovs.firewall_rules:
- '118 neutron vxlan networks':
- proto: 'udp'
- dport: 4789
- '136 neutron gre networks':
- proto: 'gre'
+ map_merge:
+ - opendaylight::odl_rest_port: {get_param: OpenDaylightPort}
+ opendaylight::username: {get_param: OpenDaylightUsername}
+ opendaylight::password: {get_param: OpenDaylightPassword}
+ opendaylight_check_url: {get_param: OpenDaylightCheckURL}
+ opendaylight::nb_connection_protocol: {get_param: OpenDaylightConnectionProtocol}
+ neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+ neutron::plugins::ovs::opendaylight::provider_mappings: {get_param: OpenDaylightProviderMappings}
+ tripleo.opendaylight_ovs.firewall_rules:
+ '118 neutron vxlan networks':
+ proto: 'udp'
+ dport: 4789
+ '136 neutron gre networks':
+ proto: 'gre'
+ - get_attr: [Ovs, role_data, config_settings]
step_config: |
include tripleo::profile::base::neutron::plugins::ovs::opendaylight
upgrade_tasks:
expression: $.data.ovs_upgrade + $.data.opendaylight_upgrade
data:
ovs_upgrade:
- get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
+ get_attr: [Ovs, role_data, upgrade_tasks]
opendaylight_upgrade:
- name: Check if openvswitch is deployed
command: systemctl is-enabled openvswitch
+++ /dev/null
-heat_template_version: pike
-
-description: >
- Openvswitch package special handling for upgrade.
-
-outputs:
- role_data:
- description: Upgrade task for special handling of Openvswitch (OVS) upgrade.
- value:
- service_name: openvswitch_upgrade
- upgrade_tasks:
- - name: Check openvswitch version.
- tags: step2
- register: ovs_version
- ignore_errors: true
- shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
- - name: Check openvswitch packaging.
- tags: step2
- shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
- register: ovs_packaging_issue
- ignore_errors: true
- - block:
- - name: "Ensure empty directory: emptying."
- file:
- state: absent
- path: /root/OVS_UPGRADE
- - name: "Ensure empty directory: creating."
- file:
- state: directory
- path: /root/OVS_UPGRADE
- owner: root
- group: root
- mode: 0750
- - name: Download OVS packages.
- command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
- - name: Get rpm list for manual upgrade of OVS.
- shell: ls -1 /root/OVS_UPGRADE/*.rpm
- register: ovs_list_of_rpms
- - name: Manual upgrade of OVS
- shell: |
- rpm -U --test {{item}} 2>&1 | grep "already installed" || \
- rpm -U --replacepkgs --notriggerun --nopostun {{item}};
- args:
- chdir: /root/OVS_UPGRADE
- with_items:
- - "{{ovs_list_of_rpms.stdout_lines}}"
- tags: step2
- when: "'2.5.0-14' in '{{ovs_version.stdout}}'
- or
- ovs_packaging_issue|succeeded"
--- /dev/null
+heat_template_version: pike
+
+description: >
+ Open vSwitch Configuration
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ OvsDpdkCoreList:
+ description: >
+ List of cores to be used for DPDK lcore threads. Note, these threads
+ are used by the OVS control path for validation and handling functions.
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ""
+ OvsDpdkMemoryChannels:
+ description: Number of memory channels per socket to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ""
+ OvsDpdkSocketMemory:
+ default: ""
+ description: >
+ Sets the amount of hugepage memory to assign per NUMA node. It is
+ recommended to use the socket closest to the PCIe slot used for the
+ desired DPDK NIC. The format should be in "<socket 0 mem>, <socket 1
+ mem>, <socket n mem>", where the value is specified in MB. For example:
+ "1024,0".
+ type: string
+ OvsDpdkDriverType:
+ default: "vfio-pci"
+ description: >
+ DPDK Driver type. Ensure the Overcloud NIC to be used for DPDK supports
+ this UIO/PMD driver.
+ type: string
+ OvsPmdCoreList:
+ description: >
+ A list or range of CPU cores for PMD threads to be pinned to. Note, NIC
+ location to cores on socket, number of hyper-threaded logical cores, and
+ desired number of PMD threads can all play a role in configuring this
+ setting. These cores should be on the same socket where
+ OvsDpdkSocketMemory is assigned. If using hyper-threading, specify
+ both logical cores that make up each physical core. Also, specifying
+ more than one core will trigger multiple PMD threads to be spawned which
+ may improve dataplane performance.
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ type: string
+ default: ""
+ # DEPRECATED: the following options are deprecated and are currently maintained
+ # for backwards compatibility. They will be removed in the Queens cycle.
+ HostCpusList:
+ description: List of cores to be used for host process
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkCoreList:
+ description: List of cores to be used for DPDK Poll Mode Driver
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9,-]*"
+ default: ''
+ NeutronDpdkMemoryChannels:
+ description: Number of memory channels to be used for DPDK
+ type: string
+ constraints:
+ - allowed_pattern: "[0-9]*"
+ default: ''
+ NeutronDpdkSocketMemory:
+ default: ''
+ description: Memory allocated for each socket
+ type: string
+ NeutronDpdkDriverType:
+ default: "vfio-pci"
+ description: DPDK Driver type
+ type: string
+
+parameter_groups:
+- label: deprecated
+ description: Do not use deprecated params; they will be removed.
+ parameters:
+ - HostCpusList
+ - NeutronDpdkCoreList
+ - NeutronDpdkMemoryChannels
+ - NeutronDpdkSocketMemory
+ - NeutronDpdkDriverType
+
+conditions:
+ l_cores_empty: {equals: [{get_param: OvsDpdkCoreList}, '']}
+ pmd_cores_empty: {equals: [{get_param: OvsPmdCoreList}, '']}
+ mem_channels_empty: {equals: [{get_param: OvsDpdkMemoryChannels}, '']}
+ socket_mem_empty: {equals: [{get_param: OvsDpdkSocketMemory}, '']}
+ driver_not_set: {equals: [{get_param: OvsDpdkDriverType}, 'vfio-pci']}
+
+outputs:
+ role_data:
+ description: Role data for the Open vSwitch service.
+ value:
+ service_name: openvswitch
+ config_settings:
+ map_replace:
+ - map_replace:
+ - vswitch::dpdk::driver_type: OvsDpdkDriverType
+ vswitch::dpdk::host_core_list: OvsDpdkCoreList
+ vswitch::dpdk::pmd_core_list: OvsPmdCoreList
+ vswitch::dpdk::memory_channels: OvsDpdkMemoryChannels
+ vswitch::dpdk::socket_mem: OvsDpdkSocketMemory
+ - values: {get_param: [RoleParameters]}
+ - values:
+ OvsDpdkCoreList: {if: [l_cores_empty, {get_param: HostCpusList}, {get_param: OvsDpdkCoreList}]}
+ OvsDpdkMemoryChannels: {if: [mem_channels_empty, {get_param: NeutronDpdkMemoryChannels}, {get_param: OvsDpdkMemoryChannels}]}
+ OvsDpdkSocketMemory: {if: [socket_mem_empty, {get_param: NeutronDpdkSocketMemory}, {get_param: OvsDpdkSocketMemory}]}
+ OvsDpdkDriverType: {if: [driver_not_set, {get_param: NeutronDpdkDriverType}, {get_param: OvsDpdkDriverType}]}
+ OvsPmdCoreList: {if: [pmd_cores_empty, {get_param: NeutronDpdkCoreList}, {get_param: OvsPmdCoreList}]}
+
+ upgrade_tasks:
+ - name: Check openvswitch version.
+ tags: step2
+ register: ovs_version
+ ignore_errors: true
+ shell: rpm -qa | awk -F- '/^openvswitch-2/{print $2 "-" $3}'
+ - name: Check openvswitch packaging.
+ tags: step2
+ shell: rpm -q --scripts openvswitch | awk '/postuninstall/,/*/' | grep -q "systemctl.*try-restart"
+ register: ovs_packaging_issue
+ ignore_errors: true
+ - block:
+ - name: "Ensure empty directory: emptying."
+ file:
+ state: absent
+ path: /root/OVS_UPGRADE
+ - name: "Ensure empty directory: creating."
+ file:
+ state: directory
+ path: /root/OVS_UPGRADE
+ owner: root
+ group: root
+ mode: 0750
+ - name: Download OVS packages.
+ command: yumdownloader --destdir /root/OVS_UPGRADE --resolve openvswitch
+ - name: Get rpm list for manual upgrade of OVS.
+ shell: ls -1 /root/OVS_UPGRADE/*.rpm
+ register: ovs_list_of_rpms
+ - name: Manual upgrade of OVS
+ shell: |
+ rpm -U --test {{item}} 2>&1 | grep "already installed" || \
+ rpm -U --replacepkgs --notriggerun --nopostun {{item}};
+ args:
+ chdir: /root/OVS_UPGRADE
+ with_items:
+ - "{{ovs_list_of_rpms.stdout_lines}}"
+ tags: step2
+ when: "'2.5.0-14' in '{{ovs_version.stdout}}'
+ or
+ ovs_packaging_issue|succeeded"
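For reference, the new Ovs* parameters above are normally provided through parameter_defaults in an environment file; when they are left at their empty defaults, the template's `if` conditions fall back to the deprecated HostCpusList/NeutronDpdk* values. A minimal sketch, with placeholder core and memory values that must be tuned to the host's NUMA layout:

parameter_defaults:
  OvsDpdkCoreList: "0,16"
  OvsPmdCoreList: "8,9,24,25"
  OvsDpdkMemoryChannels: "4"
  OvsDpdkSocketMemory: "1024,1024"
  OvsDpdkDriverType: "vfio-pci"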
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
resources:
# internal_api_subnet - > IP/CIDR
tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr:
get_param: [ServiceNetMap, MysqlNetwork]
+ tripleo::profile::pacemaker::database::mysql::ca_file:
+ get_param: InternalTLSCAFile
step_config: |
include ::tripleo::profile::pacemaker::database::mysql
metadata_settings:
type: string
SwiftCeilometerPipelineEnabled:
description: Set to False to disable the swift proxy ceilometer pipeline.
- default: True
+ default: false
type: boolean
SwiftCeilometerIgnoreProjects:
- default: ['services']
+ default: ['service']
description: Comma-separated list of project names to ignore.
type: comma_delimited_list
RabbitClientPort:
conditions:
- ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
+ ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, true]}
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
resources:
swift::proxy::authtoken::project_name: 'service'
swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
- swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
- swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
- swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- swift::proxy::ceilometer::password: {get_param: SwiftPassword}
- swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
- swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
- swift::proxy::ceilometer::nonblocking_notify: true
+ -
+ if:
+ - ceilometer_pipeline_enabled
+ -
+ swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
+ swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+ swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ swift::proxy::ceilometer::password: {get_param: SwiftPassword}
+ swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
+ swift::proxy::ceilometer::nonblocking_notify: true
+ swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ - {}
+ - swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL}
tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
- ''
- 'proxy-logging'
- 'proxy-server'
- swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
swift::proxy::account_autocreate: true
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
--- /dev/null
+---
+features:
+ - The HAProxy stats interface can now be enabled/disabled with the
+ HAProxyStatsEnabled flag. Note that it's still enabled by default.
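For example, the stats interface can be turned off with a small environment snippet:

parameter_defaults:
  HAProxyStatsEnabled: false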
--- /dev/null
+---
+features:
+ - Added new DeploymentSwiftDataMap parameter, which is used to set the
+ deployment_swift_data property on the Server resources. The parameter is a
+ map where the keys are the Heat-assigned hostnames and the values are maps
+ of the container and object names in Swift.
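A minimal sketch of the parameter, assuming the container/object key names implied above (the hostname and values are placeholders):

parameter_defaults:
  DeploymentSwiftDataMap:
    overcloud-controller-0:
      container: overcloud-controller-0
      object: deployed-server-data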
--- /dev/null
+---
+features:
+ - Adds a new output, ServerOsCollectConfigData, which is the
+ os-collect-config configuration associated with each server resource.
+ This can be used to [pre]configure the os-collect-config agents on
+ deployed-servers.
--- /dev/null
+---
+fixes:
+ - |
+ When ``environments/services/ironic.yaml`` is used, enable the periodic
+ task in nova-scheduler that automatically discovers new nodes. Otherwise
+ a user has to run the nova management command on the controllers each
+ time a node is added.
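For example, the interval can be set explicitly through the NovaSchedulerDiscoverHostsInCellsInterval parameter added by this change (120 seconds is only an illustrative value):

parameter_defaults:
  NovaSchedulerDiscoverHostsInCellsInterval: 120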
--- /dev/null
+---
+fixes:
+ - Disable ceilometer in the swift proxy middleware pipeline by default.
+ With the gnocchi and swift backends it generates a large number of events
+ and causes heavy load. It can be re-enabled with the
+ SwiftCeilometerPipelineEnabled parameter if needed.
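For example, it can be re-enabled with:

parameter_defaults:
  SwiftCeilometerPipelineEnabled: true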
--- /dev/null
+---
+features:
+ - DPDK is enabled in OvS before the NetworkDeployment to ensure DPDK
+ is ready to handle new port additions.
+upgrade:
+ - A new parameter ServiceNames is added to the PreNetworkConfig resource.
+ All templates associated with PreNetworkConfig should add this new
+ parameter during the upgrade.
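A sketch of the parameter definition such templates would add, assuming the usual ServiceNames type and default used elsewhere in the templates:

parameters:
  ServiceNames:
    type: comma_delimited_list
    default: []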
--- /dev/null
+---
+fixes:
+ - Fix an incorrect network being used for the Glance API service.
--- /dev/null
+---
+fixes:
+ - |
+ Fix support for RPMs to be installed via DeployArtifactURLs. LP#1697102
--- /dev/null
+---
+features:
+ - Adds a common openvswitch service template to be
+ inherited by other services.
+ - Adds an environment file to be used for deploying
+ OpenDaylight + OVS DPDK.
+ - Adds first-boot and OVS configuration scripts.
+deprecations:
+ - The ``HostCpusList`` parameter is deprecated in
+ favor of ``OvsDpdkCoreList`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkCoreList`` parameter is deprecated in
+ favor of ``OvsPmdCoreList`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkMemoryChannels`` parameter is deprecated in
+ favor of ``OvsDpdkMemoryChannels`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkSocketMemory`` parameter is deprecated in
+ favor of ``OvsDpdkSocketMemory`` and will be removed
+ in a future release.
+ - The ``NeutronDpdkDriverType`` parameter is deprecated in
+ favor of ``OvsDpdkDriverType`` and will be removed
+ in a future release.
--- /dev/null
+---
+features:
+ - |
+ It is now possible to trigger Mistral workflows or workflow actions
+ before a deployment step is applied. This can be defined within the
+ scope of a service template and is described as a task property
+ for the Heat OS::Mistral::Workflow resource; for more details see
+ the puppet/services/README.rst file.
\ No newline at end of file
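As an illustrative sketch only (the authoritative format is described in puppet/services/README.rst), a service template's role_data might expose a per-step workflow task like:

  service_workflow_tasks:
    step2:
      - name: echo
        action: std.echo output=Hello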
--- /dev/null
+---
+features:
+ - Add two new example environments to facilitate deploying split-stack:
+ environments/overcloud-baremetal.j2.yaml and
+ environments/overcloud-services.yaml. They are used to deploy two separate
+ Heat stacks, one for the baremetal+network configuration and one for the
+ service configuration.
--- /dev/null
+---
+features:
+ - Add a VipMap output to the top-level stack outputs. VipMap is a mapping
+ from each network to the VIP address on that network; it also includes
+ the Redis VIP.
yaql:
expression: $.data.role_data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
data: {role_data: {get_attr: [ServiceChain, role_data]}}
+ service_workflow_tasks:
+ yaql:
+ expression: $.data.role_data.where($ != null).select($.get('service_workflow_tasks')).where($ != null).reduce($1.mergeWith($2), {})
+ data: {role_data: {get_attr: [ServiceChain, role_data]}}
step_config: {get_attr: [ServiceChain, role_data, step_config]}
upgrade_tasks:
yaql:
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
'config_image']
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = [ 'puppet_tags' ]
+# Mapping of parameter names to a list of the fields we should _not_ enforce
+# consistency across files on. This should only contain parameters whose
+# definition we cannot change for backwards compatibility reasons. New
+# parameters to the templates should not be added to this list.
+PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
+ 'ManagementAllocationPools': ['default'],
+ 'ExternalNetCidr': ['default'],
+ 'ExternalAllocationPools': ['default'],
+ 'StorageNetCidr': ['default'],
+ 'StorageAllocationPools': ['default'],
+ 'StorageMgmtNetCidr': ['default'],
+ 'StorageMgmtAllocationPools': ['default'],
+ }
+
+PREFERRED_CAMEL_CASE = {
+ 'ec2api': 'Ec2Api',
+ 'haproxy': 'HAProxy',
+}
def exit_usage():
sys.exit(1)
+def to_camel_case(string):
+ return PREFERRED_CAMEL_CASE.get(string, ''.join(s.capitalize() or '_' for
+ s in string.split('_')))
+
+
def get_base_endpoint_map(filename):
try:
tpl = yaml.load(open(filename).read())
% (key, filename))
return 1
+ config_volume = puppet_config.get('config_volume')
+ expected_config_image_parameter = "Docker%sConfigImage" % to_camel_case(config_volume)
+ if config_volume and not expected_config_image_parameter in tpl.get('parameters', []):
+ print('ERROR: Missing %s heat parameter for %s config_volume.'
+ % (expected_config_image_parameter, config_volume))
+ return 1
+
if 'parameters' in tpl:
for param in required_params:
if param not in tpl['parameters']:
return 0
-def validate(filename):
+def validate(filename, param_map):
+ """Validate a Heat template
+
+ :param filename: The path to the file to validate
+ :param param_map: A dict which will be populated with the details of the
+ parameters in the template. The dict will have the
+ following structure:
+
+ {'ParameterName': [
+ {'filename': ./file1.yaml,
+ 'data': {'description': '',
+ 'type': string,
+ 'default': '',
+ ...}
+ },
+ {'filename': ./file2.yaml,
+ 'data': {'description': '',
+ 'type': string,
+ 'default': '',
+ ...}
+ },
+ ...
+ ]}
+ """
print('Validating %s' % filename)
retval = 0
try:
return 1
# yaml is OK, now walk the parameters and output a warning for unused ones
if 'heat_template_version' in tpl:
- for p in tpl.get('parameters', {}):
+ for p, data in tpl.get('parameters', {}).items():
+ definition = {'data': data, 'filename': filename}
+ param_map.setdefault(p, []).append(definition)
if p in required_params:
continue
str_p = '\'%s\'' % p
failed_files = []
base_endpoint_map = None
env_endpoint_maps = list()
+param_map = {}
for base_path in path_args:
if os.path.isdir(base_path):
for f in files:
if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
file_path = os.path.join(subdir, f)
- failed = validate(file_path)
+ failed = validate(file_path, param_map)
if failed:
failed_files.append(file_path)
exit_val |= failed
if env_endpoint_map:
env_endpoint_maps.append(env_endpoint_map)
elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
- failed = validate(base_path)
+ failed = validate(base_path, param_map)
if failed:
failed_files.append(base_path)
exit_val |= failed
failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
exit_val |= 1
+# Validate that duplicate parameters defined in multiple files all have the
+# same definition.
+mismatch_count = 0
+for p, defs in param_map.items():
+ # Nothing to validate if the parameter is only defined once
+ if len(defs) == 1:
+ continue
+ check_data = [d['data'] for d in defs]
+ # Override excluded fields so they don't affect the result
+ exclusions = PARAMETER_DEFINITION_EXCLUSIONS.get(p, [])
+ ex_dict = {}
+ for field in exclusions:
+ ex_dict[field] = 'IGNORED'
+ for d in check_data:
+ d.update(ex_dict)
+ # If all items in the list are not == the first, then the check fails
+ if check_data.count(check_data[0]) != len(check_data):
+ mismatch_count += 1
+ # TODO(bnemec): Make this a hard failure once all the templates have
+ # been fixed.
+ #exit_val |= 1
+ #failed_files.extend([d['filename'] for d in defs])
+ print('Mismatched parameter definitions found for "%s"' % p)
+ print('Definitions found:')
+ for d in defs:
+ print(' %s:\n %s' % (d['filename'], d['data']))
+print('Mismatched parameter definitions: %d' % mismatch_count)
+
if failed_files:
print('Validation failed on:')
for f in failed_files: