+----------------+-------------+-------------+-------------+-------------+-----------------+
| neutron-bgpvpn | | | | X | |
+----------------+-------------+-------------+-------------+-------------+-----------------+
+| neutron-l2gw | | | | X | |
++----------------+-------------+-------------+-------------+-------------+-----------------+
| rabbitmq | X | X | X | X | X |
+----------------+-------------+-------------+-------------+-------------+-----------------+
| mongodb | X | X | | | |
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive validations that occur on all nodes.
default: false
description: Optional validation to ensure FQDN as set by Nova matches the name set in /etc/hosts.
type: boolean
+ ValidateNtp:
+ default: true
+ description: Validation to ensure at least one time source is accessible.
+ type: boolean
resources:
AllNodesValidationsImpl:
default: {get_param: PingTestIps}
- name: validate_fqdn
default: {get_param: ValidateFqdn}
+ - name: validate_ntp
+ default: {get_param: ValidateNtp}
config: {get_file: ./validation-scripts/all-nodes.sh}
outputs:
-heat_template_version: ocata
+heat_template_version: pike
description: 'Bootstrap Config'
parameters:
# repository for deployment using puppet. It groups configuration by topic,
# describes possible combinations of environments and resource capabilities.
-# root_template: identifies repository's root template
-# root_environment: identifies root_environment, this one is special in terms of
-# order in which the environments are merged before deploying. This one serves as
-# a base and it's parameters/resource_registry gets overridden by other environments
-# if used.
-
# topics:
# High Level grouping by purpose of environments
# Attributes:
# only when that given environment is used. (resource_type of that environment can
# be implemented using multiple templates).
-root_template: overcloud.yaml
-root_environment: overcloud-resource-registry-puppet.yaml
topics:
- title: Base Resources Configuration
description:
description: Enable FOS in the overcloud
requires:
- overcloud-resource-registry-puppet.yaml
+ - file: environments/neutron-nsx.yaml
+ title: Deploy NSX Services
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- file: environments/neutron-l2gw.yaml
title: Neutron L2 gateway Service Plugin
- description: Enables Neutron L2 gateway Service Plugin
+ description: Enables Neutron L2 gateway Service Plugin and Agent
requires:
- overcloud-resource-registry-puppet.yaml
description: >
Enable various Cinder backends
environments:
+ - file: environments/cinder-pure-config.yaml
+ title: Cinder Pure Storage FlashArray backend
+ description:
+ requires:
+ - overcloud-resource-registry-puppet.yaml
- file: environments/cinder-netapp-config.yaml
title: Cinder NetApp backend
description:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::Sshd
- name: Controller
CountDefault: 1
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ # NOTE: This is needed because of upgrades from Ocata to Pike. We
+ # deploy the initial environment with Ocata templates, and
+ # overcloud-resource-registry.yaml there doesn't have this Docker
+ # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
+ # remove this.
+ OS::TripleO::Services::Docker: OS::Heat::None
+
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
--- /dev/null
+# NOTE: This is an environment specific for containers upgrade
+# CI. Mainly we deploy non-pacemakerized overcloud, as at the time
+# being containerization of services managed by pacemaker is not
+# complete, so we deploy and upgrade the non-HA services for now.
+
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+
+ # NOTE: This is needed because of upgrades from Ocata to Pike. We
+ # deploy the initial environment with Ocata templates, and
+ # overcloud-resource-registry.yaml there doesn't have this Docker
+ # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
+ # remove this.
+ OS::TripleO::Services::Docker: OS::Heat::None
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Docker
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::TripleoPackages
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Sshd
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+  # Required for CentOS 7.3 and QEMU 2.6.0
+ nova::compute::libvirt::libvirt_cpu_mode: 'none'
+ #NOTE(gfidente): not great but we need this to deploy on ext4
+ #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+ ceph::profile::params::osd_max_object_name_len: 256
+ ceph::profile::params::osd_max_object_namespace_len: 64
+ SwiftCeilometerPipelineEnabled: False
+ Debug: True
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Core Service
OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::Horizon
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
OS::TripleO::Services::CephClient: ../../puppet/services/ceph-client.yaml
+ OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
OS::TripleO::Services::Collectd: ../../puppet/services/metrics/collectd.yaml
OS::TripleO::Services::Tacker: ../../puppet/services/tacker.yaml
OS::TripleO::Services::Congress: ../../puppet/services/congress.yaml
OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
- OS::TripleO::Services::FluentdClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/logging/fluentd-client.yaml
- OS::TripleO::Services::SensuClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/monitoring/sensu-client.yaml
+ OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
+ OS::TripleO::Services::SensuClient: ../../puppet/services/monitoring/sensu-client.yaml
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::AodhEvaluator
- OS::TripleO::Services::AodhNotifier
- OS::TripleO::Services::AodhListener
- - OS::TripleO::Services::CeilometerApi
- - OS::TripleO::Services::CeilometerCollector
- - OS::TripleO::Services::CeilometerExpirer
- OS::TripleO::Services::CeilometerAgentCentral
- OS::TripleO::Services::CeilometerAgentIpmi
- OS::TripleO::Services::CeilometerAgentNotification
CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+ CephPoolDefaultSize: 1
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
CinderBackupBackend: ceph
OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
+ OS::TripleO::Services::MongoDb: ../../puppet/services/database/mongodb.yaml
OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
parameter_defaults:
- OS::TripleO::Services::MistralExecutor
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
OS::TripleO::Services::ManilaShare: ../../puppet/services/pacemaker/manila-share.yaml
OS::TripleO::Services::ManilaBackendCephFs: ../../puppet/services/manila-backend-cephfs.yaml
OS::TripleO::Services::NeutronBgpVpnApi: ../../puppet/services/neutron-bgpvpn-api.yaml
+ OS::TripleO::Services::NeutronL2gwApi: ../../puppet/services/neutron-l2gw-api.yaml
+ OS::TripleO::Services::NeutronL2gwAgent: ../../puppet/services/neutron-l2gw-agent.yaml
# These enable Pacemaker
- OS::TripleO::Tasks::ControllerPrePuppet: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
OS::TripleO::Services::RabbitMQ: ../../puppet/services/pacemaker/rabbitmq.yaml
OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
- OS::TripleO::Services::NeutronBgpVpnApi
- OS::TripleO::Services::NeutronDhcpAgent
- OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronL2gwApi
+ - OS::TripleO::Services::NeutronL2gwAgent
- OS::TripleO::Services::NeutronMetadataAgent
- OS::TripleO::Services::NeutronServer
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
+ - OS::TripleO::Services::Sshd
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
+ CephPoolDefaultSize: 1
SwiftCeilometerPipelineEnabled: false
- NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+ NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin, networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin'
BgpvpnServiceProvider: 'BGPVPN:Dummy:networking_bgpvpn.neutron.services.service_drivers.driver_api.BGPVPNDriver:default'
+ L2gwServiceProvider: ['L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default']
-heat_template_version: ocata
+heat_template_version: pike
description: >
HOT template to create the resources deployed by scenario001.
ram: 512
vcpus: 1
-# Disabling this resource now
-# https://bugs.launchpad.net/tripleo/+bug/1646506
-# gnocchi_res_alarm:
-# type: OS::Aodh::GnocchiResourcesAlarm
-# properties:
-# description: Do stuff with gnocchi
-# metric: cpu_util
-# aggregation_method: mean
-# granularity: 60
-# evaluation_periods: 1
-# threshold: 50
-# alarm_actions: []
-# resource_type: instance
-# resource_id: { get_resource: server1 }
-# comparison_operator: gt
+ gnocchi_res_alarm:
+ type: OS::Aodh::GnocchiResourcesAlarm
+ properties:
+ description: Do stuff with gnocchi
+ metric: cpu_util
+ aggregation_method: mean
+ granularity: 60
+ evaluation_periods: 1
+ threshold: 50
+ alarm_actions: []
+ resource_type: instance
+ resource_id: { get_resource: server1 }
+ comparison_operator: gt
asg:
type: OS::Heat::AutoScalingGroup
-heat_template_version: ocata
+heat_template_version: pike
description: >
HOT template to create the resources deployed by scenario002.
-heat_template_version: ocata
+heat_template_version: pike
description: >
HOT template to create the resources deployed by scenario003.
-heat_template_version: ocata
+heat_template_version: pike
description: >
HOT template to create the resources deployed by scenario004.
properties:
name: default
driver_handles_share_servers: false
+ snapshot_support: false
manila_share:
type: OS::Manila::Share
properties:
+ name: pingtest
+ share_type: { get_resource: manila_share_type }
share_protocol: CEPHFS
size: 1
-heat_template_version: ocata
+heat_template_version: pike
description: >
This template resides in tripleo-ci for Mitaka CI jobs only.
-heat_template_version: ocata
+heat_template_version: pike
description: Passwords we manage at the top level
-heat_template_version: ocata
+heat_template_version: pike
parameters:
network:
-heat_template_version: ocata
+heat_template_version: pike
description: "
A fake OS::Neutron::Port stack which outputs fixed_ips and subnets based on
-heat_template_version: ocata
+heat_template_version: pike
description: 'Deployed Server Bootstrap Config'
-heat_template_version: ocata
+heat_template_version: pike
description: 'Deployed Server Bootstrap Config'
-heat_template_version: ocata
+heat_template_version: pike
parameters:
image:
type: string
SUBNODES_SSH_KEY=${SUBNODES_SSH_KEY:-"~/.ssh/id_rsa"}
SSH_OPTIONS="-tt -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=Verbose -o PasswordAuthentication=no -o ConnectionAttempts=32"
OVERCLOUD_ROLES=${OVERCLOUD_ROLES:-"Controller Compute BlockStorage ObjectStorage CephStorage"}
+STACK_NAME=${STACK_NAME:-"overcloud"}
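+# STACK_NAME lets the wait loop poll a stack deployed under a non-default
+# name, e.g. by exporting STACK_NAME=myovercloud (hypothetical stack name)
+# before running this script.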
# Set the _hosts vars for the default roles based on the old var names that
# were all caps for backwards compatibility.
for role in $OVERCLOUD_ROLES; do
- while ! check_stack overcloud; do
+ while ! check_stack $STACK_NAME; do
sleep $SLEEP_TIME
done
- rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
+ rg_stack=$(openstack stack resource show $STACK_NAME $role -c physical_resource_id -f value)
while ! check_stack $rg_stack; do
sleep $SLEEP_TIME
- rg_stack=$(openstack stack resource show overcloud $role -c physical_resource_id -f value)
+ rg_stack=$(openstack stack resource show $STACK_NAME $role -c physical_resource_id -f value)
done
stacks=$(openstack stack resource list $rg_stack -c resource_name -c physical_resource_id -f json | jq -r "sort_by(.resource_name) | .[] | .physical_resource_id")
+++ /dev/null
-#!/bin/bash
-# This is where we stack puppet configuration (for now)...
-mkdir -p /var/lib/config-data
-
-# This is the docker-puppet configs end in
-mkdir -p /var/lib/docker-puppet
# inside of a container.
import json
+import logging
import os
import subprocess
import sys
import tempfile
import multiprocessing
+log = logging.getLogger()
+log.setLevel(logging.DEBUG)
+ch = logging.StreamHandler(sys.stdout)
+ch.setLevel(logging.DEBUG)
+formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
+ch.setFormatter(formatter)
+log.addHandler(ch)
# this is to match what we do in deployed-server
def short_hostname():
def pull_image(name):
- print('Pulling image: %s' % name)
+ log.info('Pulling image: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
- print(cmd_stdout)
- print(cmd_stderr)
+ if cmd_stdout:
+ log.debug(cmd_stdout)
+ if cmd_stderr:
+ log.debug(cmd_stderr)
def rm_container(name):
if os.environ.get('SHOW_DIFF', None):
- print('Diffing container: %s' % name)
+ log.info('Diffing container: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
- print(cmd_stdout)
- print(cmd_stderr)
+ if cmd_stdout:
+ log.debug(cmd_stdout)
+ if cmd_stderr:
+ log.debug(cmd_stderr)
- print('Removing container: %s' % name)
+ log.info('Removing container: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
- print(cmd_stdout)
+ if cmd_stdout:
+ log.debug(cmd_stdout)
if cmd_stderr and \
- cmd_stderr != 'Error response from daemon: ' \
- 'No such container: {}\n'.format(name):
- print(cmd_stderr)
+ cmd_stderr != 'Error response from daemon: ' \
+ 'No such container: {}\n'.format(name):
+ log.debug(cmd_stderr)
process_count = int(os.environ.get('PROCESS_COUNT',
multiprocessing.cpu_count()))
+log.info('Running docker-puppet')
config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
-print('docker-puppet')
-print('CONFIG: %s' % config_file)
+log.debug('CONFIG: %s' % config_file)
with open(config_file) as f:
json_data = json.load(f)
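+# Each entry consumed below is expected to be a list matching the tuple
+# taken by mp_puppet_config: [config_volume, puppet_tags, manifest,
+# config_image, volumes]. Illustrative entry (not a real service):
+#   ["aodh", "aodh_config", "include ::tripleo::profile::base::aodh",
+#    "tripleoupstream/centos-binary-aodh-api:latest", []]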
if not manifest or not config_image:
continue
- print('---------')
- print('config_volume %s' % config_volume)
- print('puppet_tags %s' % puppet_tags)
- print('manifest %s' % manifest)
- print('config_image %s' % config_image)
- print('volumes %s' % volumes)
+ log.debug('config_volume %s' % config_volume)
+ log.debug('puppet_tags %s' % puppet_tags)
+ log.debug('manifest %s' % manifest)
+ log.debug('config_image %s' % config_image)
+ log.debug('volumes %s' % volumes)
# We key off of config volume for all configs.
if config_volume in configs:
# Append puppet tags and manifest.
- print("Existing service, appending puppet tags and manifest\n")
+ log.info("Existing service, appending puppet tags and manifest")
if puppet_tags:
configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
puppet_tags)
configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
manifest)
if configs[config_volume][3] != config_image:
- print("WARNING: Config containers do not match even though"
- " shared volumes are the same!\n")
+            log.warning("Config containers do not match even though"
+                        " shared volumes are the same!")
else:
- print("Adding new service\n")
+ log.info("Adding new service")
configs[config_volume] = service
-print('Service compilation completed.\n')
+log.info('Service compilation completed.')
def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
- print('---------')
- print('config_volume %s' % config_volume)
- print('puppet_tags %s' % puppet_tags)
- print('manifest %s' % manifest)
- print('config_image %s' % config_image)
- print('volumes %s' % volumes)
- hostname = short_hostname()
- sh_script = '/var/lib/docker-puppet/docker-puppet-%s.sh' % config_volume
+ log.debug('config_volume %s' % config_volume)
+ log.debug('puppet_tags %s' % puppet_tags)
+ log.debug('manifest %s' % manifest)
+ log.debug('config_image %s' % config_image)
+ log.debug('volumes %s' % volumes)
+ sh_script = '/var/lib/docker-puppet/docker-puppet.sh'
with open(sh_script, 'w') as script_file:
os.chmod(script_file.name, 0755)
mkdir -p /etc/puppet
cp -a /tmp/puppet-etc/* /etc/puppet
rm -Rf /etc/puppet/ssl # not in use and causes permission errors
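+    # make the current deploy step visible to puppet as hiera('step')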
- echo '{"step": %(step)s}' > /etc/puppet/hieradata/docker.json
+ echo "{\\"step\\": $STEP}" > /etc/puppet/hieradata/docker.json
TAGS=""
- if [ -n "%(puppet_tags)s" ]; then
- TAGS='--tags "%(puppet_tags)s"'
+ if [ -n "$PUPPET_TAGS" ]; then
+ TAGS="--tags \"$PUPPET_TAGS\""
fi
- FACTER_hostname=%(hostname)s FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
+ FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
# Disables archiving
- if [ -z "%(no_archive)s" ]; then
- rm -Rf /var/lib/config-data/%(name)s
+ if [ -z "$NO_ARCHIVE" ]; then
+ rm -Rf /var/lib/config-data/${NAME}
# copying etc should be enough for most services
- mkdir -p /var/lib/config-data/%(name)s/etc
- cp -a /etc/* /var/lib/config-data/%(name)s/etc/
+ mkdir -p /var/lib/config-data/${NAME}/etc
+ cp -a /etc/* /var/lib/config-data/${NAME}/etc/
if [ -d /root/ ]; then
- cp -a /root/ /var/lib/config-data/%(name)s/root/
+ cp -a /root/ /var/lib/config-data/${NAME}/root/
fi
if [ -d /var/lib/ironic/tftpboot/ ]; then
- mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
- cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/tftpboot/
+ mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
+ cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/tftpboot/
fi
if [ -d /var/lib/ironic/httpboot/ ]; then
- mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
- cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/httpboot/
+ mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
+ cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/httpboot/
fi
# apache services may have files placed in /var/www/
if [ -d /var/www/ ]; then
- mkdir -p /var/lib/config-data/%(name)s/var/www
- cp -a /var/www/* /var/lib/config-data/%(name)s/var/www/
+ mkdir -p /var/lib/config-data/${NAME}/var/www
+ cp -a /var/www/* /var/lib/config-data/${NAME}/var/www/
fi
fi
- """ % {'puppet_tags': puppet_tags, 'name': config_volume,
- 'hostname': hostname,
- 'no_archive': os.environ.get('NO_ARCHIVE', ''),
- 'step': os.environ.get('STEP', '6')})
+ """)
with tempfile.NamedTemporaryFile() as tmp_man:
with open(tmp_man.name, 'w') as man_file:
dcmd = ['/usr/bin/docker', 'run',
'--user', 'root',
'--name', 'docker-puppet-%s' % config_volume,
+ '--env', 'PUPPET_TAGS=%s' % puppet_tags,
+ '--env', 'NAME=%s' % config_volume,
+ '--env', 'HOSTNAME=%s' % short_hostname(),
+ '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
+ '--env', 'STEP=%s' % os.environ.get('STEP', '6'),
'--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
'--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
env[k] = os.environ.get(k)
if os.environ.get('NET_HOST', 'false') == 'true':
- print('NET_HOST enabled')
+ log.debug('NET_HOST enabled')
dcmd.extend(['--net', 'host', '--volume',
'/etc/hosts:/etc/hosts:ro'])
dcmd.append(config_image)
+ log.debug('Running docker command: %s' % ' '.join(dcmd))
subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
cmd_stdout, cmd_stderr = subproc.communicate()
- print(cmd_stdout)
- print(cmd_stderr)
+ if cmd_stdout:
+ log.debug(cmd_stdout)
+ if cmd_stderr:
+ log.debug(cmd_stderr)
if subproc.returncode != 0:
- print('Failed running docker-puppet.py for %s' % config_volume)
- rm_container('docker-puppet-%s' % config_volume)
+ log.error('Failed running docker-puppet.py for %s' % config_volume)
+ else:
+ # only delete successful runs, for debugging
+ rm_container('docker-puppet-%s' % config_volume)
return subproc.returncode
# Holds all the information for each process to consume.
process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
for p in process_map:
- print '--\n%s' % p
+ log.debug('- %s' % p)
# Fire off processes to perform each configuration. Defaults
# to the number of CPUs on the system.
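+# (Roughly: multiprocessing.Pool(process_count).map(mp_puppet_config,
+# process_map) yields one return code per config volume, consumed by the
+# zip() below.)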
success = True
for returncode, config_volume in zip(returncodes, config_volumes):
if returncode != 0:
- print('ERROR configuring %s' % config_volume)
+ log.error('ERROR configuring %s' % config_volume)
success = False
if not success:
{%- endfor -%}
{%- set primary_role_name = primary_role[0].name -%}
# primary role is: {{primary_role_name}}
-heat_template_version: ocata
+{% set deploy_steps_max = 6 -%}
+
+heat_template_version: pike
description: >
Post-deploy configuration steps via puppet for all roles,
value:
yaql:
expression:
- dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
+ $.data.default_tasks + dict($.data.docker_puppet_tasks.where($1 != null).selectMany($.items()).groupBy($[0], $[1]))
data:
docker_puppet_tasks: {get_param: [role_data, {{primary_role_name}}, docker_puppet_tasks]}
+ default_tasks:
+{%- for step in range(1, deploy_steps_max) %}
+ step_{{step}}: {}
+{%- endfor %}
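+          # default_tasks pre-seeds an empty mapping per step so the
+          # per-step lookups below always find a key, even when a role
+          # defines no docker_puppet_tasks.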
# BEGIN primary_role_name docker-puppet-tasks (run only on a single node)
-{% for step in range(1, 6) %}
-
- {{primary_role_name}}DockerPuppetJsonConfig{{step}}:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- /var/lib/docker-puppet/docker-puppet-tasks{{step}}.json:
- {get_attr: [{{primary_role_name}}DockerPuppetTasks, value, 'step_{{step}}']}
-
- {{primary_role_name}}DockerPuppetJsonDeployment{{step}}:
- type: OS::Heat::SoftwareDeployment
- properties:
- server: {get_param: [servers, {{primary_role_name}}, '0']}
- config: {get_resource: {{primary_role_name}}DockerPuppetJsonConfig{{step}}}
+{% for step in range(1, deploy_steps_max) %}
{{primary_role_name}}DockerPuppetTasksConfig{{step}}:
type: OS::Heat::SoftwareConfig
- {{dep.name}}Deployment_Step{{step}}
- {{dep.name}}ContainersDeployment_Step{{step}}
{% endfor %}
- - {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
properties:
- name: {{primary_role_name}}DockerPuppetJsonDeployment{{step}}
+ name: {{primary_role_name}}DockerPuppetTasksDeployment{{step}}
server: {get_param: [servers, {{primary_role_name}}, '0']}
config: {get_resource: {{primary_role_name}}DockerPuppetTasksConfig{{step}}}
input_values:
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}ArtifactsConfig}
- {{role.name}}PreConfig:
- type: OS::TripleO::Tasks::{{role.name}}PreConfig
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
- {{role.name}}CreateConfigDir:
- type: OS::Heat::SoftwareConfig
- properties:
- group: script
- config: {get_file: create-config-dir.sh}
-
- {{role.name}}CreateConfigDirDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}CreateConfigDir}
-
- {{role.name}}HostPrepAnsible:
- type: OS::Heat::Value
- properties:
- value:
- str_replace:
- template: CONFIG
- params:
- CONFIG:
- - hosts: localhost
- connection: local
- tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
-
{{role.name}}HostPrepConfig:
type: OS::Heat::SoftwareConfig
properties:
group: ansible
options:
modulepath: /usr/share/ansible-modules
- config: {get_attr: [{{role.name}}HostPrepAnsible, value]}
+ config:
+ str_replace:
+ template: _PLAYBOOK
+ params:
+ _PLAYBOOK:
+ - hosts: localhost
+ connection: local
+ vars:
+ puppet_config: {get_param: [role_data, {{role.name}}, puppet_config]}
+ docker_puppet_script: {get_file: docker-puppet.py}
+ docker_puppet_tasks: {get_attr: [{{primary_role_name}}DockerPuppetTasks, value]}
+ docker_startup_configs: {get_attr: [{{role.name}}DockerConfig, value]}
+ kolla_config: {get_param: [role_data, {{role.name}}, kolla_config]}
+ bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
+ tasks:
+ # Join host_prep_tasks with the other per-host configuration
+ yaql:
+ expression: $.data.host_prep_tasks + $.data.template_tasks
+ data:
+ host_prep_tasks: {get_param: [role_data, {{role.name}}, host_prep_tasks]}
+ template_tasks:
+{%- raw %}
+ # This is where we stack puppet configuration (for now)...
+ - name: Create /var/lib/config-data
+ file: path=/var/lib/config-data state=directory
+      # This is where the docker-puppet configs end up
+ - name: Create /var/lib/docker-puppet
+ file: path=/var/lib/docker-puppet state=directory
+ # this creates a JSON config file for our docker-puppet.py script
+      - name: Write docker-puppet.json config file
+ copy: content="{{puppet_config | to_json}}" dest=/var/lib/docker-puppet/docker-puppet.json force=yes
+ # FIXME: can we move docker-puppet somewhere so it's installed via a package?
+ - name: Write docker-puppet.py
+ copy: content="{{docker_puppet_script}}" dest=/var/lib/docker-puppet/docker-puppet.py force=yes
+ # Here we are dumping all the docker container startup configuration data
+ # so that we can have access to how they are started outside of heat
+ # and docker-cmd. This lets us create command line tools to test containers.
+ - name: Write docker-container-startup-configs
+ copy: content="{{docker_startup_configs | to_json}}" dest=/var/lib/docker-container-startup-configs.json force=yes
+ - name: Create /var/lib/kolla/config_files directory
+ file: path=/var/lib/kolla/config_files state=directory
+ - name: Write kolla config json files
+ copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
+ with_dict: "{{kolla_config}}"
+ ########################################################
+ # Bootstrap tasks, only performed on bootstrap_server_id
+ ########################################################
+ - name: Write docker-puppet-tasks json files
+ copy: content="{{item.value|to_json}}" dest=/var/lib/docker-puppet/docker-puppet-tasks{{item.key.replace("step_", "")}}.json force=yes
+ with_dict: "{{docker_puppet_tasks}}"
+ when: deploy_server_id == bootstrap_server_id
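+      # deploy_server_id is a standard Heat input on each software
+      # deployment; matching it against bootstrap_server_id restricts the
+      # task above to the bootstrap node.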
+{%- endraw %}
{{role.name}}HostPrepDeployment:
type: OS::Heat::SoftwareDeploymentGroup
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}HostPrepConfig}
- # this creates a JSON config file for our docker-puppet.py script
- {{role.name}}GenPuppetConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- /var/lib/docker-puppet/docker-puppet.json:
- {get_param: [role_data, {{role.name}}, puppet_config]}
-
- {{role.name}}GenPuppetDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- config: {get_resource: {{role.name}}GenPuppetConfig}
-
{{role.name}}GenerateConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config: {get_file: docker-puppet.py}
+ inputs:
+ - name: NET_HOST
{{role.name}}GenerateConfigDeployment:
type: OS::Heat::SoftwareDeploymentGroup
- depends_on: [{{role.name}}GenPuppetDeployment, {{role.name}}ArtifactsDeploy, {{role.name}}CreateConfigDirDeployment, {{role.name}}HostPrepDeployment]
+ depends_on: [{{role.name}}ArtifactsDeploy, {{role.name}}HostPrepDeployment]
properties:
name: {{role.name}}GenerateConfigDeployment
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}GenerateConfig}
+ input_values:
+ NET_HOST: 'true'
{{role.name}}PuppetStepConfig:
type: OS::Heat::Value
service_names: {get_param: [role_data, {{role.name}}, service_names]}
docker_config: {get_param: [role_data, {{role.name}}, docker_config]}
- # Here we are dumping all the docker container startup configuration data
- # so that we can have access to how they are started outside of heat
- # and docker-cmd. This lets us create command line tools to start and
- # test these containers.
- {{role.name}}DockerConfigJsonStartupData:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- /var/lib/docker-container-startup-configs.json:
- {get_attr: [{{role.name}}DockerConfig, value]}
-
- {{role.name}}DockerConfigJsonStartupDataDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- config: {get_resource: {{role.name}}DockerConfigJsonStartupData}
- servers: {get_param: [servers, {{role.name}}]}
-
- {{role.name}}KollaJsonConfig:
- type: OS::Heat::StructuredConfig
- properties:
- group: json-file
- config:
- {get_param: [role_data, {{role.name}}, kolla_config]}
-
- {{role.name}}KollaJsonDeployment:
- type: OS::Heat::SoftwareDeploymentGroup
- properties:
- name: {{role.name}}KollaJsonDeployment
- config: {get_resource: {{role.name}}KollaJsonConfig}
- servers: {get_param: [servers, {{role.name}}]}
-
# BEGIN BAREMETAL CONFIG STEPS
- {% if role.name == 'Controller' %}
- ControllerPrePuppet:
- type: OS::TripleO::Tasks::ControllerPrePuppet
+ {{role.name}}PreConfig:
+ type: OS::TripleO::Tasks::{{role.name}}PreConfig
properties:
- servers: {get_param: [servers, Controller]}
+ servers: {get_param: [servers, {{role.name}}]}
input_values:
update_identifier: {get_param: DeployIdentifier}
- {% endif %}
{{role.name}}Config:
type: OS::TripleO::{{role.name}}Config
properties:
StepConfig: {get_attr: [{{role.name}}PuppetStepConfig, value]}
- {% for step in range(1, 6) %}
+ {% for step in range(1, deploy_steps_max) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
# END BAREMETAL CONFIG STEPS
# BEGIN CONTAINER CONFIG STEPS
- {% for step in range(1, 6) %}
+ {% for step in range(1, deploy_steps_max) %}
{{role.name}}ContainersConfig_Step{{step}}:
type: OS::Heat::StructuredConfig
type: OS::Heat::StructuredDeploymentGroup
{% if step == 1 %}
depends_on:
+ {%- for dep in roles %}
+ - {{dep.name}}Deployment_Step{{step}} # baremetal steps of the same level run first
+ {%- endfor %}
- {{role.name}}PreConfig
- - {{role.name}}KollaJsonDeployment
- - {{role.name}}GenPuppetDeployment
+ - {{role.name}}HostPrepDeployment
- {{role.name}}GenerateConfigDeployment
{% else %}
depends_on:
properties:
servers: {get_param: [servers, {{role.name}}]}
- {% if role.name == 'Controller' %}
- ControllerPostPuppet:
- depends_on:
- - ControllerExtraConfigPost
- type: OS::TripleO::Tasks::ControllerPostPuppet
- properties:
- servers: {get_param: [servers, Controller]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
- {% endif %}
-
{% endfor %}
-heat_template_version: ocata
+heat_template_version: pike
parameters:
DockerNamespace:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized aodh service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/aodh-api.json:
command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/aodh
+ owner: aodh:aodh
+ recurse: true
docker_config:
+      # db sync runs before permissions are set by kolla_config
step_3:
aodh_init_log:
start_order: 0
image: *aodh_image
user: root
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/aodh && chown aodh:aodh /var/log/aodh']
volumes:
- - logs:/var/log
+ - /var/log/containers/aodh:/var/log/aodh
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
aodh_db_sync:
start_order: 1
image: *aodh_image
net: host
privileged: false
detach: false
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
- - logs:/var/log
- command: /usr/bin/aodh-dbsync
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/log/containers/aodh:/var/log/aodh
+ command: "/usr/bin/bootstrap_host_exec aodh_api su aodh -s /bin/bash -c /usr/bin/aodh-dbsync"
step_4:
aodh_api:
image: *aodh_image
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
- - /var/lib/config-data/aodh/etc/httpd/:/etc/httpd/:ro
- - /var/lib/config-data/aodh/var/www/:/var/www/:ro
- - logs:/var/log
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/aodh-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/lib/config-data/aodh/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/aodh/var/www/:/var/www/:ro
+ - /var/log/containers/aodh:/var/log/aodh
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
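+              # with internal TLS disabled, the two conditionals above
+              # resolve to '' placeholders instead of cert/key bind mounts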
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/aodh
+ state: directory
upgrade_tasks:
- name: Stop and disable aodh service (running under httpd)
tags: step2
service: name=httpd state=stopped enabled=no
+ metadata_settings:
+ get_attr: [AodhApiPuppetBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Aodh Evaluator service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/aodh-evaluator.json:
command: /usr/bin/aodh-evaluator
+ permissions:
+ - path: /var/log/aodh
+ owner: aodh:aodh
+ recurse: true
docker_config:
step_4:
aodh_evaluator:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/aodh-evaluator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/log/containers/aodh:/var/log/aodh
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/aodh
+ state: directory
upgrade_tasks:
- name: Stop and disable openstack-aodh-evaluator service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Aodh Listener service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/aodh-listener.json:
command: /usr/bin/aodh-listener
+ permissions:
+ - path: /var/log/aodh
+ owner: aodh:aodh
+ recurse: true
docker_config:
step_4:
aodh_listener:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/aodh-listener.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/aodh-listener.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/log/containers/aodh:/var/log/aodh
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/aodh
+ state: directory
upgrade_tasks:
- name: Stop and disable openstack-aodh-listener service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Aodh Notifier service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/aodh-notifier.json:
command: /usr/bin/aodh-notifier
+ permissions:
+ - path: /var/log/aodh
+ owner: aodh:aodh
+ recurse: true
docker_config:
step_4:
aodh_notifier:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/aodh-notifier.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/aodh-notifier.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/aodh/etc/aodh/:/etc/aodh/:ro
+ - /var/log/containers/aodh:/var/log/aodh
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/aodh
+ state: directory
upgrade_tasks:
- name: Stop and disable openstack-aodh-notifier service
tags: step2
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Ceilometer Agent Central service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCeilometerCentralImage:
+ description: image
+ default: 'centos-binary-ceilometer-central:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CeilometerAgentCentralBase:
+ type: ../../puppet/services/ceilometer-agent-central.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Agent Central role.
+ value:
+ service_name: {get_attr: [CeilometerAgentCentralBase, role_data, service_name]}
+ config_settings: {get_attr: [CeilometerAgentCentralBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CeilometerAgentCentralBase, role_data, step_config]
+ service_config_settings: {get_attr: [CeilometerAgentCentralBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ceilometer
+ puppet_tags: ceilometer_config
+ step_config: *step_config
+ config_image: &ceilometer_agent_central_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerCentralImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ceilometer-agent-central.json:
+ command: /usr/bin/ceilometer-polling --polling-namespaces central
+ docker_config:
+ step_3:
+ ceilometer_init_log:
+ start_order: 0
+ image: *ceilometer_agent_central_image
+ user: root
+ command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+ volumes:
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ step_4:
+ ceilometer_agent_central:
+ image: *ceilometer_agent_central_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ceilometer-agent-central.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_5:
+ ceilometer_gnocchi_upgrade:
+ start_order: 1
+ image: *ceilometer_agent_central_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
+ upgrade_tasks:
+ - name: Stop and disable ceilometer agent central service
+ tags: step2
+ service: name=openstack-ceilometer-agent-central state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Ceilometer Agent Compute service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCeilometerComputeImage:
+ description: image
+ default: 'centos-binary-ceilometer-compute:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CeilometerAgentComputeBase:
+ type: ../../puppet/services/ceilometer-agent-compute.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Agent Compute role.
+ value:
+ service_name: {get_attr: [CeilometerAgentComputeBase, role_data, service_name]}
+ config_settings: {get_attr: [CeilometerAgentComputeBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CeilometerAgentComputeBase, role_data, step_config]
+ service_config_settings: {get_attr: [CeilometerAgentComputeBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ceilometer
+ puppet_tags: ceilometer_config
+ step_config: *step_config
+ config_image: &ceilometer_agent_compute_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerComputeImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ceilometer-agent-compute.json:
+ command: /usr/bin/ceilometer-polling --polling-namespaces compute
+ docker_config:
+ step_4:
+      ceilometer_agent_compute:
+ image: *ceilometer_agent_compute_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ceilometer-agent-compute.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ upgrade_tasks:
+ - name: Stop and disable ceilometer-agent-compute service
+ tags: step2
+ service: name=openstack-ceilometer-agent-compute state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Ceilometer Agent Notification service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerCeilometerNotificationImage:
+ description: image
+ default: 'centos-binary-ceilometer-notification:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ CeilometerAgentNotificationBase:
+ type: ../../puppet/services/ceilometer-agent-notification.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Ceilometer Agent Notification role.
+ value:
+ service_name: {get_attr: [CeilometerAgentNotificationBase, role_data, service_name]}
+ config_settings: {get_attr: [CeilometerAgentNotificationBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [CeilometerAgentNotificationBase, role_data, step_config]
+ service_config_settings: {get_attr: [CeilometerAgentNotificationBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: ceilometer
+ puppet_tags: ceilometer_config
+ step_config: *step_config
+ config_image: &ceilometer_agent_notification_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerNotificationImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/ceilometer-agent-notification.json:
+ command: /usr/bin/ceilometer-agent-notification
+ docker_config:
+ step_3:
+ ceilometer_init_log:
+ start_order: 0
+ image: *ceilometer_agent_notification_image
+ user: root
+ command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+ volumes:
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ step_4:
+      ceilometer_agent_notification:
+ image: *ceilometer_agent_notification_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ceilometer-agent-notification.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ step_5:
+ ceilometer_gnocchi_upgrade:
+ start_order: 1
+ image: *ceilometer_agent_notification_image
+ net: host
+ detach: false
+ privileged: false
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+ - /var/log/containers/ceilometer:/var/log/ceilometer
+ command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
+ upgrade_tasks:
+ - name: Stop and disable ceilometer agent notification service
+ tags: step2
+ service: name=openstack-ceilometer-notification state=stopped enabled=no
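Each kolla_config entry above is serialized to JSON and bind-mounted into the container as /var/lib/kolla/config_files/config.json, which kolla_start reads at startup. A minimal sketch of the file the ceilometer entry would produce, assuming only the keys shown above:

  {
    "command": "/usr/bin/ceilometer-agent-notification"
  }
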
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contains a static list of common things necessary for containers
value:
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
+ # required for bootstrap_host_exec
+ - /etc/puppet:/etc/puppet:ro
# OpenSSL trusted CAs
- /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
- /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
- /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
- /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
+ # Syslog socket
+ - /dev/log:/dev/log
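Service templates consume this shared list via the pike list_concat function, appending their own bind mounts after the common ones. A minimal sketch, with a hypothetical "example" service standing in for the templates below:

  volumes:
    list_concat:
      - {get_attr: [ContainersCommon, volumes]}
      -
        - /var/lib/kolla/config_files/example.json:/var/lib/kolla/config_files/config.json:ro
        - /var/lib/config-data/example/etc/example/:/etc/example/:ro
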
-heat_template_version: ocata
+heat_template_version: pike
description: >
MongoDB service deployment using puppet and docker
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- path: /var/lib/mongodb
owner: mongodb:mongodb
recurse: true
+ - path: /var/log/mongodb
+ owner: mongodb:mongodb
+ recurse: true
docker_config:
step_2:
mongodb:
- /var/lib/kolla/config_files/mongodb.json:/var/lib/kolla/config_files/config.json
- /var/lib/config-data/mongodb/etc/:/etc/:ro
- /etc/localtime:/etc/localtime:ro
- - logs:/var/log/kolla
+ - /var/log/containers/mongodb:/var/log/mongodb
- /var/lib/mongodb:/var/lib/mongodb
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
step_config: 'include ::tripleo::profile::base::database::mongodb'
config_image: *mongodb_image
volumes:
- - /var/lib/mongodb:/var/lib/mongodb
- - logs:/var/log/kolla:ro
+ - /var/lib/mongodb:/var/lib/mongodb
+ - /var/log/containers/mongodb:/var/log/mongodb
host_prep_tasks:
- - name: create /var/lib/mongodb
+ - name: create persistent directories
file:
- path: /var/lib/mongodb
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/mongodb
+ - /var/lib/mongodb
upgrade_tasks:
- name: Stop and disable mongodb service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
MySQL service deployment using puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
MysqlRootPassword:
type: string
hidden: true
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
owner: mysql:mysql
recurse: true
docker_config:
+ # Kolla_bootstrap runs before permissions are set by kolla_config
step_2:
- mysql_bootstrap:
+ mysql_init_logs:
start_order: 0
+ image: *mysql_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/mysql:/var/log/mariadb
+ command: ['/bin/bash', '-c', 'chown -R mysql:mysql /var/log/mariadb']
+ mysql_bootstrap:
+ start_order: 1
detach: false
image: *mysql_image
net: host
- /etc/localtime:/etc/localtime:ro
- /etc/hosts:/etc/hosts:ro
- /var/lib/mysql:/var/lib/mysql
+ - /var/log/containers/mysql:/var/log/mariadb
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- KOLLA_BOOTSTRAP=True
# NOTE(mandre) skip wsrep cluster status check
- KOLLA_KUBERNETES=True
- -
+ -
list_join:
- '='
- - 'DB_ROOT_PASSWORD'
- {get_param: MysqlRootPassword}
- {get_param: [DefaultPasswords, mysql_root_password]}
mysql:
- start_order: 1
+ start_order: 2
image: *mysql_image
restart: always
net: host
config_image: *mysql_image
volumes:
- /var/lib/mysql:/var/lib/mysql/:ro
+ - /var/log/containers/mysql:/var/log/mariadb
- /var/lib/config-data/mysql/root:/root:ro #provides .my.cnf
host_prep_tasks:
- - name: create /var/lib/mysql
+ - name: create persistent directories
file:
- path: /var/lib/mysql
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/mysql
+ - /var/lib/mysql
upgrade_tasks:
- name: Stop and disable mysql service
tags: step2
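host_prep_tasks entries are ordinary Ansible tasks executed on the bare host before any container starts. Pulled out into a standalone playbook, the mysql block above would read roughly as follows (the hosts pattern is only illustrative):

  - hosts: Controller
    tasks:
      - name: create persistent directories
        file:
          path: "{{ item }}"
          state: directory
        with_items:
          - /var/log/containers/mysql
          - /var/lib/mysql
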
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Redis services
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerRedisImage:
+ description: image
+ default: 'centos-binary-redis:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ RedisBase:
+ type: ../../../puppet/services/database/redis.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+ role_data:
+ description: Role data for the Redis API role.
+ value:
+ service_name: {get_attr: [RedisBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - {get_attr: [RedisBase, role_data, config_settings]}
+ - redis::daemonize: false
+ step_config: &step_config
+ get_attr: [RedisBase, role_data, step_config]
+ service_config_settings: {get_attr: [RedisBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: 'redis'
+ # NOTE: we need the exec tag to copy /etc/redis.conf.puppet to
+ # /etc/redis.conf
+ # https://github.com/arioch/puppet-redis/commit/1c004143223e660cbd433422ff8194508aab9763
+ puppet_tags: 'exec'
+ step_config: *step_config
+ config_image: &redis_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerRedisImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/redis.json:
+ command: /usr/bin/redis-server /etc/redis.conf
+ permissions:
+ - path: /var/run/redis
+ owner: redis:redis
+ recurse: true
+ docker_config:
+ step_1:
+ redis:
+ image: *redis_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /run:/run
+ - /var/lib/kolla/config_files/redis.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/redis/etc/:/etc/:ro
+ - /etc/localtime:/etc/localtime:ro
+ - logs:/var/log/kolla
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create /var/run/redis
+ file:
+ path: /var/run/redis
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable redis service
+ tags: step2
+ service: name=redis state=stopped enabled=no
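The config_image anchors are assembled with list_join, joining the namespace and image parameters with '/'. With the defaults above, &redis_image resolves to:

  tripleoupstream/centos-binary-redis:latest

Both halves are overridable per deployment, e.g. to pull from a local registry (the registry address is purely illustrative):

  parameter_defaults:
    DockerNamespace: 192.168.24.1:8787/tripleoupstream
    DockerRedisImage: centos-binary-redis:latest
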
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized etcd services
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerEtcdImage:
+ description: image
+ default: 'centos-binary-etcd:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EtcdInitialClusterToken:
+ description: Initial cluster token for the etcd cluster during bootstrap.
+ type: string
+ hidden: true
+
+resources:
+
+ EtcdPuppetBase:
+ type: ../../puppet/services/etcd.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EtcdInitialClusterToken: {get_param: EtcdInitialClusterToken}
+
+outputs:
+ role_data:
+ description: Role data for the etcd role.
+ value:
+ service_name: {get_attr: [EtcdPuppetBase, role_data, service_name]}
+ step_config: &step_config
+ list_join:
+ - "\n"
+ - - "['Etcd_key'].each |String $val| { noop_resource($val) }"
+ - get_attr: [EtcdPuppetBase, role_data, step_config]
+ config_settings:
+ map_merge:
+ - {get_attr: [EtcdPuppetBase, role_data, config_settings]}
+ - etcd::manage_service: false
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: etcd
+ step_config: *step_config
+ config_image: &etcd_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerEtcdImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/etcd.json:
+ command: /usr/bin/etcd --config-file /etc/etcd/etcd.yml
+ permissions:
+ - path: /var/lib/etcd
+ owner: etcd:etcd
+ recurse: true
+ docker_config:
+ step_2:
+ etcd:
+ image: *etcd_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ - /var/lib/etcd:/var/lib/etcd
+ - /etc/localtime:/etc/localtime:ro
+ - /var/lib/kolla/config_files/etcd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/etcd/etc/etcd/etcd.yml:/etc/etcd/etcd.yml:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ docker_puppet_tasks:
+ # Etcd key initialization occurs only on a single node
+ step_2:
+ config_volume: 'etcd_init_tasks'
+ puppet_tags: 'etcd_key'
+ step_config: 'include ::tripleo::profile::base::etcd'
+ config_image: *etcd_image
+ volumes:
+ - /var/lib/config-data/etcd/etc/:/etc
+ - /var/lib/etcd:/var/lib/etcd:ro
+ host_prep_tasks:
+ - name: create /var/lib/etcd
+ file:
+ path: /var/lib/etcd
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable etcd service
+ tags: step2
+ service: name=etcd state=stopped enabled=no
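The list_join in step_config prepends a line that turns the Etcd_key resource type into a no-op for the regular config run; the keys themselves are applied once, by the docker_puppet_tasks entry above. Assuming the base template's step_config is the usual include line (as the docker_puppet_tasks entry suggests), the joined manifest handed to puppet is:

  ['Etcd_key'].each |String $val| { noop_resource($val) }
  include ::tripleo::profile::base::etcd
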
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Glance service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/glance-api.json:
command: /usr/bin/glance-api --config-file /usr/share/glance/glance-api-dist.conf --config-file /etc/glance/glance-api.conf
+ /var/lib/kolla/config_files/glance_api_tls_proxy.json:
+ command: /usr/sbin/httpd -DFOREGROUND
docker_config:
+ # Kolla_bootstrap/db_sync runs before permissions are set by kolla_config
step_3:
+ glance_init_logs:
+ start_order: 0
+ image: *glance_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/glance:/var/log/glance
+ command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
glance_api_db_sync:
+ start_order: 1
image: *glance_image
net: host
privileged: false
detach: false
+ user: root
volumes: &glance_volumes
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
- - /var/lib/config-data/glance_api/etc/glance/:/etc/glance/:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/glance-api.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/glance_api/etc/glance/:/etc/glance/:ro
+ - /var/log/containers/glance:/var/log/glance
environment:
- KOLLA_BOOTSTRAP=True
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ command: "/usr/bin/bootstrap_host_exec glance_api su glance -s /bin/bash -c '/usr/local/bin/kolla_start'"
step_4:
- glance_api:
- image: *glance_image
- net: host
- privileged: false
- restart: always
- volumes: *glance_volumes
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ map_merge:
+ - glance_api:
+ start_order: 2
+ image: *glance_image
+ net: host
+ privileged: false
+ restart: always
+ volumes: *glance_volumes
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - if:
+ - internal_tls_enabled
+ - glance_api_tls_proxy:
+ start_order: 2
+ image: *glance_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/glance_api_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/glance_api/etc/httpd/:/etc/httpd/:ro
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - {}
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/glance
+ state: directory
upgrade_tasks:
- name: Stop and disable glance_api service
tags: step2
service: name=openstack-glance-api state=stopped enabled=no
+ metadata_settings:
+ get_attr: [GlanceApiPuppetBase, role_data, metadata_settings]
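The internal_tls_enabled condition drives both halves of the pattern above: the map_merge/if adds the glance_api_tls_proxy container when TLS is enabled and merges an empty map ({}) otherwise, while the volume-level if entries contribute the certificate mounts only when enabled. Flipping the switch is a one-line override; a minimal sketch:

  parameter_defaults:
    EnableInternalTLS: true
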
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized gnocchi service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/gnocchi-api.json:
command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/gnocchi
+ owner: gnocchi:gnocchi
+ recurse: true
docker_config:
+ # db sync runs before permissions are set by kolla_config
step_3:
gnocchi_init_log:
start_order: 0
image: *gnocchi_image
user: root
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/gnocchi && chown gnocchi:gnocchi /var/log/gnocchi']
volumes:
- - logs:/var/log
+ - /var/log/containers/gnocchi:/var/log/gnocchi
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
gnocchi_db_sync:
start_order: 1
image: *gnocchi_image
net: host
detach: false
privileged: false
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
- - logs:/var/log
- command: ["/usr/bin/gnocchi-upgrade", "--skip-storage"]
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - /var/log/containers/gnocchi:/var/log/gnocchi
+ command: "/usr/bin/bootstrap_host_exec gnocchi_api su gnocchi -s /bin/bash -c '/usr/bin/gnocchi-upgrade --skip-storage'"
step_4:
gnocchi_api:
image: *gnocchi_image
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/gnocchi-api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
- - /var/lib/config-data/gnocchi/etc/httpd/:/etc/httpd/:ro
- - /var/lib/config-data/gnocchi/var/www/:/var/www/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/gnocchi-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - /var/lib/config-data/gnocchi/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/gnocchi/var/www/:/var/www/:ro
+ - /var/log/containers/gnocchi:/var/log/gnocchi
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/gnocchi
+ state: directory
upgrade_tasks:
- name: Stop and disable httpd service
tags: step2
service: name=httpd state=stopped enabled=no
+ metadata_settings:
+ get_attr: [GnocchiApiPuppetBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Gnocchi Metricd service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/gnocchi-metricd.json:
command: /usr/bin/gnocchi-metricd
+ permissions:
+ - path: /var/log/gnocchi
+ owner: gnocchi:gnocchi
+ recurse: true
docker_config:
step_4:
gnocchi_metricd:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/gnocchi-metricd.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/gnocchi-metricd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - /var/log/containers/gnocchi:/var/log/gnocchi
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/gnocchi
+ state: directory
upgrade_tasks:
- name: Stop and disable openstack-gnocchi-metricd service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Gnocchi Statsd service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/gnocchi-statsd.json:
command: /usr/bin/gnocchi-statsd
+ permissions:
+ - path: /var/log/gnocchi
+ owner: gnocchi:gnocchi
+ recurse: true
docker_config:
step_4:
gnocchi_statsd:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/gnocchi-statsd.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/gnocchi-statsd.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/gnocchi/etc/gnocchi/:/etc/gnocchi/:ro
+ - /var/log/containers/gnocchi:/var/log/gnocchi
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/gnocchi
+ state: directory
upgrade_tasks:
- name: Stop and disable openstack-gnocchi-statsd service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Heat API CFN service
description: image
default: 'centos-binary-heat-api-cfn:latest'
type: string
- # we configure all heat services in the same heat engine container
+ # puppet needs the heat-wsgi-api-cfn binary from centos-binary-heat-api-cfn
DockerHeatConfigImage:
description: image
- default: 'centos-binary-heat-engine:latest'
+ default: 'centos-binary-heat-api-cfn:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
puppet_config:
- config_volume: heat
+ config_volume: heat_api_cfn
puppet_tags: heat_config,file,concat,file_line
step_config: *step_config
config_image:
- [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/heat_api_cfn.json:
- command: /usr/bin/heat-api-cfn --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/heat
+ owner: heat:heat
+ recurse: true
docker_config:
step_4:
heat_api_cfn:
net: host
privileged: false
restart: always
+ # NOTE(mandre) the kolla image changes the user to 'heat'; it needs
+ # to be root to run httpd
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
- - /dev:/dev
- - /run:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/heat_api_cfn.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat_api_cfn/etc/heat/:/etc/heat/:ro
+ - /var/lib/config-data/heat_api_cfn/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/heat_api_cfn/var/www/:/var/www/:ro
+ - /var/log/containers/heat:/var/log/heat
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/heat
+ state: directory
upgrade_tasks:
- name: Stop and disable heat_api_cfn service
tags: step2
- service: name=openstack-heat-api-cfn state=stopped enabled=no
+ service: name=httpd state=stopped enabled=no
+ metadata_settings:
+ get_attr: [HeatBase, role_data, metadata_settings]
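Renaming config_volume from heat to heat_api_cfn gives each heat service its own puppet-generated tree under /var/lib/config-data/, which is why the bind mounts above move from /var/lib/config-data/heat/ to /var/lib/config-data/heat_api_cfn/. Judging from the mounts, the host ends up with roughly:

  /var/lib/config-data/heat_api_cfn/etc/heat/    # heat configuration for the CFN API
  /var/lib/config-data/heat_api_cfn/etc/httpd/   # httpd vhost that now fronts the service
  /var/lib/config-data/heat_api_cfn/var/www/     # WSGI scripts served by httpd
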
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Heat API service
description: image
default: 'centos-binary-heat-api:latest'
type: string
- # we configure all heat services in the same heat engine container
+ # puppet needs the heat-wsgi-api binary from centos-binary-heat-api
DockerHeatConfigImage:
description: image
- default: 'centos-binary-heat-engine:latest'
+ default: 'centos-binary-heat-api:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
service_config_settings: {get_attr: [HeatBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
puppet_config:
- config_volume: heat
+ config_volume: heat_api
puppet_tags: heat_config,file,concat,file_line
step_config: *step_config
config_image:
- [ {get_param: DockerNamespace}, {get_param: DockerHeatConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/heat_api.json:
- command: /usr/bin/heat-api --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/heat
+ owner: heat:heat
+ recurse: true
docker_config:
step_4:
heat_api:
net: host
privileged: false
restart: always
+ # NOTE(mandre) the kolla image changes the user to 'heat'; it needs
+ # to be root to run httpd
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
- - /dev:/dev
- - /run:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/heat_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat_api/etc/heat/:/etc/heat/:ro
+ - /var/lib/config-data/heat_api/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/heat_api/var/www/:/var/www/:ro
+ - /var/log/containers/heat:/var/log/heat
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/heat
+ state: directory
upgrade_tasks:
- name: Stop and disable heat_api service
tags: step2
- service: name=openstack-heat-api state=stopped enabled=no
+ service: name=httpd state=stopped enabled=no
+ metadata_settings:
+ get_attr: [HeatBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Heat Engine service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/heat_engine.json:
command: /usr/bin/heat-engine --config-file /usr/share/heat/heat-dist.conf --config-file /etc/heat/heat.conf
+ permissions:
+ - path: /var/log/heat
+ owner: heat:heat
+ recurse: true
docker_config:
+ # db sync runs before permissions are set by kolla_config
step_3:
+ heat_init_log:
+ start_order: 0
+ image: *heat_engine_image
+ user: root
+ volumes:
+ - /var/log/containers/heat:/var/log/heat
+ command: ['/bin/bash', '-c', 'chown -R heat:heat /var/log/heat']
heat_engine_db_sync:
+ start_order: 1
image: *heat_engine_image
net: host
privileged: false
detach: false
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
- command: ['heat-manage', 'db_sync']
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
+ - /var/log/containers/heat:/var/log/heat
+ command: "/usr/bin/bootstrap_host_exec heat_engine su heat -s /bin/bash -c 'heat-manage db_sync'"
step_4:
heat_engine:
image: *heat_engine_image
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
- - /run:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/heat_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/heat/etc/heat/:/etc/heat/:ro
+ - /var/log/containers/heat:/var/log/heat
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/heat
+ state: directory
upgrade_tasks:
- name: Stop and disable heat_engine service
tags: step2
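The db-sync commands above are wrapped in bootstrap_host_exec, which runs its payload only on the per-service bootstrap node and exits immediately everywhere else, so schema migrations execute exactly once per deployment. The general shape, with placeholders:

  /usr/bin/bootstrap_host_exec <service_name> <command> [args...]
  # e.g. the heat_engine_db_sync container above runs:
  /usr/bin/bootstrap_host_exec heat_engine su heat -s /bin/bash -c 'heat-manage db_sync'
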
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Ironic API service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/ironic_api.json:
command: /usr/bin/ironic-api
+ permissions:
+ - path: /var/log/ironic
+ owner: ironic:ironic
+ recurse: true
docker_config:
+ # db sync runs before permissions are set by kolla_config
step_3:
- ironic_db_sync:
+ ironic_init_logs:
+ start_order: 0
image: &ironic_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerIronicApiImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/ironic:/var/log/ironic
+ command: ['/bin/bash', '-c', 'chown -R ironic:ironic /var/log/ironic']
+ ironic_db_sync:
+ start_order: 1
+ image: *ironic_image
net: host
privileged: false
detach: false
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/config-data/ironic/etc/:/etc/:ro
- command: ['ironic-dbsync', '--config-file', '/etc/ironic/ironic.conf']
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/ironic/etc/:/etc/:ro
+ - /var/log/containers/ironic:/var/log/ironic
+ command: "/usr/bin/bootstrap_host_exec ironic_api su ironic -s /bin/bash -c 'ironic-dbsync --config-file /etc/ironic/ironic.conf'"
step_4:
ironic_api:
start_order: 10
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/etc/:/etc/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ironic_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/:/etc/:ro
+ - /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/ironic
+ state: directory
upgrade_tasks:
- name: Stop and disable ironic_api service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Ironic Conductor service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- path: /var/lib/ironic
owner: ironic:ironic
recurse: true
+ - path: /var/log/ironic
+ owner: ironic:ironic
+ recurse: true
docker_config:
step_4:
ironic_conductor:
privileged: true
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
- - /lib/modules:/lib/modules:ro
- - /sys:/sys
- - /dev:/dev
- - /run:/run #shared?
- - /var/lib/ironic:/var/lib/ironic
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ironic_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+ - /lib/modules:/lib/modules:ro
+ - /sys:/sys
+ - /dev:/dev
+ - /run:/run #shared?
+ - /var/lib/ironic:/var/lib/ironic
+ - /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
- - name: create ironic persistent data directory
+ - name: create persistent directories
file:
- path: /var/lib/ironic
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/ironic
+ - /var/lib/ironic
- name: stat /httpboot
stat: path=/httpboot
register: stat_httpboot
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Ironic PXE service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
command: /usr/sbin/httpd -DFOREGROUND
/var/lib/kolla/config_files/ironic_pxe_tftp.json:
command: /usr/sbin/in.tftpd --foreground --user root --address 0.0.0.0:69 --map-file /var/lib/ironic/tftpboot/map-file /var/lib/ironic/tftpboot
+ permissions:
+ - path: /var/log/ironic
+ owner: ironic:ironic
+ recurse: true
docker_config:
step_4:
ironic_pxe_tftp:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
- # TODO(mandre) check how docker like mounting in a bind-mounted tree
- # This directory may contain migrated data from BM
- - /var/lib/ironic:/var/lib/ironic/
- # These files were generated by puppet inside the config container
- # TODO(mandre) check the mount permission (ro/rw)
- - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/chain.c32:/var/lib/ironic/tftpboot/chain.c32
- - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/pxelinux.0:/var/lib/ironic/tftpboot/pxelinux.0
- - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/ipxe.efi:/var/lib/ironic/tftpboot/ipxe.efi
- - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/undionly.kpxe:/var/lib/ironic/tftpboot/undionly.kpxe
- - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/map-file:/var/lib/ironic/tftpboot/map-file
- - /dev/log:/dev/log
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ironic_pxe_tftp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+ # TODO(mandre) check how docker like mounting in a bind-mounted tree
+ # This directory may contain migrated data from BM
+ - /var/lib/ironic:/var/lib/ironic/
+ # These files were generated by puppet inside the config container
+ # TODO(mandre) check the mount permission (ro/rw)
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/chain.c32:/var/lib/ironic/tftpboot/chain.c32
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/pxelinux.0:/var/lib/ironic/tftpboot/pxelinux.0
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/ipxe.efi:/var/lib/ironic/tftpboot/ipxe.efi
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/undionly.kpxe:/var/lib/ironic/tftpboot/undionly.kpxe
+ - /var/lib/config-data/ironic/var/lib/ironic/tftpboot/map-file:/var/lib/ironic/tftpboot/map-file
+ - /dev/log:/dev/log
+ - /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
ironic_pxe_http:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
- - /var/lib/config-data/ironic/etc/httpd/:/etc/httpd/:ro
- - /var/lib/ironic:/var/lib/ironic/
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/ironic_pxe_http.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/ironic/etc/ironic/:/etc/ironic/:ro
+ - /var/lib/config-data/ironic/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/ironic/var/www/:/var/www/:ro
+ - /var/lib/ironic:/var/lib/ironic/
+ - /var/log/containers/ironic:/var/log/ironic
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
- - name: create ironic persistent data directory
+ - name: create persistent directories
file:
- path: /var/lib/ironic
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/lib/ironic
+ - /var/log/containers/ironic
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Keystone service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
AdminPassword:
description: The password for the keystone admin account, used for monitoring, querying neutron etc.
type: string
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
conditions:
/var/lib/kolla/config_files/keystone.json:
command: /usr/sbin/httpd -DFOREGROUND
docker_config:
+ # Kolla_bootstrap/db sync runs before permissions are set by kolla_config
step_3:
- keystone-init-log:
+ keystone_init_log:
start_order: 0
image: *keystone_image
user: root
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/keystone && chown keystone:keystone /var/log/keystone']
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
volumes:
- - logs:/var/log
+ - /var/log/containers/keystone:/var/log/keystone
keystone_db_sync:
start_order: 1
image: *keystone_image
privileged: false
detach: false
volumes: &keystone_volumes
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/keystone/var/www/:/var/www/:ro
- - /var/lib/config-data/keystone/etc/keystone/:/etc/keystone/:ro
- - /var/lib/config-data/keystone/etc/httpd/:/etc/httpd/:ro
- - logs:/var/log
- -
- if:
- - internal_tls_enabled
- - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
- - ''
- -
- if:
- - internal_tls_enabled
- - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
- - ''
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/keystone.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/keystone/var/www/:/var/www/:ro
+ - /var/lib/config-data/keystone/etc/keystone/:/etc/keystone/:ro
+ - /var/lib/config-data/keystone/etc/httpd/:/etc/httpd/:ro
+ - /var/log/containers/keystone:/var/log/keystone
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
environment:
- KOLLA_BOOTSTRAP=True
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ command: ['/usr/bin/bootstrap_host_exec', 'keystone', '/usr/local/bin/kolla_start']
keystone:
- start_order: 1
+ start_order: 2
image: *keystone_image
net: host
privileged: false
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
keystone_bootstrap:
- start_order: 2
+ start_order: 3
action: exec
command:
- [ 'keystone', 'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ]
+ [ 'keystone', '/usr/bin/bootstrap_host_exec', 'keystone', 'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ]
docker_puppet_tasks:
# Keystone endpoint creation occurs only on a single node
step_3:
puppet_tags: 'keystone_config,keystone_domain_config,keystone_endpoint,keystone_identity_provider,keystone_paste_ini,keystone_role,keystone_service,keystone_tenant,keystone_user,keystone_user_role,keystone_domain'
step_config: 'include ::tripleo::profile::base::keystone'
config_image: *keystone_image
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/keystone
+ state: directory
upgrade_tasks:
- name: Stop and disable keystone service (running under httpd)
tags: step2
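keystone_bootstrap uses action: exec rather than starting a fresh container: the first element of the command list names the already-running container to exec into, and the remainder is the command line. Sketched generically:

  some_exec_task:
    action: exec
    command: [ '<running_container>', '<command>', '<arg>', ... ]
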
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Memcached services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config: {}
docker_config:
step_1:
+ memcached_init_logs:
+ start_order: 0
+ image: *memcached_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
+ - /var/log/memcached.log:/var/log/memcached.log
+ command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; chown ${USER} /var/log/memcached.log']
memcached:
+ start_order: 1
image: *memcached_image
net: host
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
+ # TODO(bogdando) capture memcached syslog logs from a container
command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS']
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
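Rather than hardcoding flags, the memcached command sources the puppet-generated sysconfig file and expands its variables at startup. With stock CentOS defaults (PORT=11211, USER=memcached, CACHESIZE=64, MAXCONN=1024; values shown only for illustration), the effective command line would be roughly:

  /usr/bin/memcached -p 11211 -u memcached -m 64 -c 1024
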
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Mistral API service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/mistral_api.json:
command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/api.log --server=api
+ permissions:
+ - path: /var/log/mistral
+ owner: mistral:mistral
+ recurse: true
docker_config:
+ # db sync runs before permissions are set by kolla_config
step_3:
- mistral_db_sync:
- start_order: 1
+ mistral_init_logs:
+ start_order: 0
image: &mistral_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerMistralApiImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/mistral:/var/log/mistral
+ command: ['/bin/bash', '-c', 'chown -R mistral:mistral /var/log/mistral']
+ mistral_db_sync:
+ start_order: 1
+ image: *mistral_image
net: host
privileged: false
detach: false
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/config-data/mistral/etc/:/etc/:ro
- command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'upgrade', 'head']
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
+ - /var/log/containers/mistral:/var/log/mistral
+ command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head'"
mistral_db_populate:
start_order: 2
image: *mistral_image
net: host
privileged: false
detach: false
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/config-data/mistral/etc/:/etc/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/mistral/etc/:/etc/:ro
+ - /var/log/containers/mistral:/var/log/mistral
# NOTE(dprince): this requires that we install openstack-tripleo-common into
# the Mistral API image so that we get tripleo* actions
- command: ['mistral-db-manage', '--config-file', '/etc/mistral/mistral.conf', 'populate']
+ command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf populate'"
step_4:
mistral_api:
start_order: 15
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/mistral_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+ - /var/log/containers/mistral:/var/log/mistral
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/mistral
+ state: directory
upgrade_tasks:
- name: Stop and disable mistral_api service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Mistral Engine service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/mistral_engine.json:
command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/engine.log --server=engine
+ permissions:
+ - path: /var/log/mistral
+ owner: mistral:mistral
+ recurse: true
docker_config:
step_4:
mistral_engine:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /run:/run
- - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /run:/run
+ - /var/lib/kolla/config_files/mistral_engine.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+ - /var/log/containers/mistral:/var/log/mistral
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/mistral
+ state: directory
upgrade_tasks:
- name: Stop and disable mistral_engine service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Mistral Executor service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/mistral_executor.json:
command: /usr/bin/mistral-server --config-file=/etc/mistral/mistral.conf --log-file=/var/log/mistral/executor.log --server=executor
+ permissions:
+ - path: /var/log/mistral
+ owner: mistral:mistral
+ recurse: true
docker_config:
step_4:
mistral_executor:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
- - /run:/run
- # FIXME: this is required in order for Nova cells
- # initialization workflows on the Undercloud. Need to
- # exclude this on the overcloud for security reasons.
- - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/mistral_executor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/mistral/etc/mistral/:/etc/mistral/:ro
+ - /run:/run
+ # FIXME: this is required in order for Nova cells
+ # initialization workflows on the Undercloud. Need to
+ # exclude this on the overcloud for security reasons.
+ - /var/lib/config-data/nova/etc/nova:/etc/nova:ro
+ - /var/log/containers/mistral:/var/log/mistral
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/mistral
+ state: directory
upgrade_tasks:
- name: Stop and disable mistral_executor service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Neutron API service
# we configure all neutron services in the same neutron
DockerNeutronConfigImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent:latest'
+ default: 'centos-binary-neutron-server:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/neutron_api.json:
command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
+ /var/lib/kolla/config_files/neutron_server_tls_proxy.json:
+ command: /usr/sbin/httpd -DFOREGROUND
docker_config:
+ # db sync runs before permissions are set by kolla_config
step_3:
- neutron_db_sync:
+ neutron_init_logs:
+ start_order: 0
image: &neutron_api_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerNeutronApiImage} ]
- net: host
privileged: false
- detach: false
- # FIXME: we should make config file permissions right
- # and run as neutron user
user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
- - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
- command: ['neutron-db-manage', 'upgrade', 'heads']
- step_4:
- neutron_api:
+ - /var/log/containers/neutron:/var/log/neutron
+ command: ['/bin/bash', '-c', 'chown -R neutron:neutron /var/log/neutron']
+ neutron_db_sync:
+ start_order: 1
image: *neutron_api_image
net: host
privileged: false
- restart: always
+ detach: false
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/neutron/etc/neutron:/etc/neutron:ro
+ - /var/lib/config-data/neutron/usr/share/neutron:/usr/share/neutron:ro
+ - /var/log/containers/neutron:/var/log/neutron
+ command: ['/usr/bin/bootstrap_host_exec', 'neutron_api', 'neutron-db-manage', 'upgrade', 'heads']
+ # FIXME: we should make config file permissions right
+ # and run as neutron user
+ #command: "/usr/bin/bootstrap_host_exec neutron_api su neutron -s /bin/bash -c 'neutron-db-manage upgrade heads'"
+ step_4:
+ map_merge:
+ - neutron_api:
+ image: *neutron_api_image
+ net: host
+ privileged: false
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /var/log/containers/neutron:/var/log/neutron
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - if:
+ - internal_tls_enabled
+ - neutron_server_tls_proxy:
+ image: *neutron_api_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron_server_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/httpd/:/etc/httpd/:ro
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - {}
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
upgrade_tasks:
- name: Stop and disable neutron_api service
tags: step2
service: name=neutron-server state=stopped enabled=no
+ metadata_settings:
+ get_attr: [NeutronBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Neutron DHCP service
# we configure all neutron services in the same neutron
DockerNeutronConfigImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent:latest'
+ default: 'centos-binary-neutron-server:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/neutron_dhcp.json:
command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
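+ # The permissions block is applied by kolla_start inside the
+ # container at startup; conceptually it expands to a config.json
+ # fragment like:
+ #   {"permissions": [{"path": "/var/log/neutron",
+ #                     "owner": "neutron:neutron", "recurse": true}]}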
docker_config:
step_4:
neutron_dhcp:
privileged: true
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
- - /lib/modules:/lib/modules:ro
- - /run/:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron_dhcp.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run/:/run
+ - /var/log/containers/neutron:/var/log/neutron
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
upgrade_tasks:
- name: Stop and disable neutron_dhcp service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Neutron L3 agent
# we configure all neutron services in the same neutron config volume
DockerNeutronConfigImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent:latest'
+ default: 'centos-binary-neutron-server:latest'
type: string
ServiceNetMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/neutron-l3-agent.json:
command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
docker_config:
step_4:
neutronl3agent:
privileged: true
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron-l3-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/log/containers/neutron:/var/log/neutron
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Neutron Metadata agent
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerNeutronMetadataImage:
+ description: image
+ default: 'centos-binary-neutron-metadata-agent:latest'
+ type: string
+ # we configure all neutron services in the same neutron config volume
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-server:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ./containers-common.yaml
+
+ NeutronMetadataBase:
+ type: ../../puppet/services/neutron-metadata.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for Neutron Metadata agent
+ value:
+ service_name: {get_attr: [NeutronMetadataBase, role_data, service_name]}
+ config_settings: {get_attr: [NeutronMetadataBase, role_data, config_settings]}
+ step_config: &step_config
+ get_attr: [NeutronMetadataBase, role_data, step_config]
+ puppet_config:
+ puppet_tags: neutron_config,neutron_metadata_agent_config
+ config_volume: neutron
+ step_config: *step_config
+ config_image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/neutron-metadata-agent.json:
+ command: /usr/bin/neutron-metadata-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-metadata-agent
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
+ docker_config:
+ step_4:
+ neutron_metadata_agent:
+ image:
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronMetadataImage} ]
+ net: host
+ pid: host
+ privileged: true
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron-metadata-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/log/containers/neutron:/var/log/neutron
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable neutron_metadata service
+ tags: step2
+ service: name=neutron-metadata-agent state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron openvswitch service
description: image
default: 'centos-binary-neutron-openvswitch-agent:latest'
type: string
+ DockerNeutronConfigImage:
+ description: image
+ default: 'centos-binary-neutron-server:latest'
+ type: string
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
config_volume: neutron
puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
step_config: *step_config
- config_image: &neutron_ovs_agent_image
+ config_image:
list_join:
- '/'
- - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
+ - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/neutron-openvswitch-agent.json:
- command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+ command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+ permissions:
+ - path: /var/log/neutron
+ owner: neutron:neutron
+ recurse: true
docker_config:
step_4:
neutronovsagent:
- image: *neutron_ovs_agent_image
+ image: &neutron_ovs_agent_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerOpenvswitchImage} ]
net: host
pid: host
privileged: true
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
- - /lib/modules:/lib/modules:ro
- - /run:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/neutron-openvswitch-agent.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/neutron/etc/neutron/:/etc/neutron/:ro
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/log/containers/neutron:/var/log/neutron
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/neutron
+ state: directory
upgrade_tasks:
- name: Stop and disable neutron_ovs_agent service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Neutron ML2 Plugin configured with Puppet
type: string
DockerNeutronConfigImage:
description: image
- default: 'centos-binary-neutron-openvswitch-agent:latest'
+ default: 'centos-binary-neutron-server:latest'
type: string
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova API service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/nova_api.json:
command: /usr/bin/nova-api
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
+ # db sync runs before the permissions set by kolla_config are applied
step_3:
- nova_api_db_sync:
- start_order: 1
+ nova_init_logs:
+ start_order: 0
image: &nova_api_image
list_join:
- '/'
- [ {get_param: DockerNamespace}, {get_param: DockerNovaApiImage} ]
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/nova:/var/log/nova
+ command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
+ nova_api_db_sync:
+ start_order: 1
+ image: *nova_api_image
net: host
detach: false
+ user: root
volumes: &nova_api_volumes
- - /var/lib/config-data/nova/etc/:/etc/:ro
- - /etc/hosts:/etc/hosts:ro
- - /etc/localtime:/etc/localtime:ro
- command: ['/usr/bin/nova-manage', 'api_db', 'sync']
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /var/log/containers/nova:/var/log/nova
+ command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage api_db sync'"
# FIXME: we probably want to wait on the 'cell_v2 update' in order for this
# to be capable of upgrading a baremetal setup. This is to ensure the name
# of the cell is 'default'
image: *nova_api_image
net: host
detach: false
+ user: root
volumes: *nova_api_volumes
- command:
- - '/usr/bin/nova-manage'
- - 'cell_v2'
- - 'map_cell0'
+ command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 map_cell0'"
nova_api_create_default_cell:
start_order: 3
image: *nova_api_image
# this is idempotent (if the resource already exists, a conflict
# is raised)
exit_codes: [0,2]
- command:
- - '/usr/bin/nova-manage'
- - 'cell_v2'
- - 'create_cell'
- - '--name="default"'
+ user: root
+ command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 create_cell --name=default'"
nova_db_sync:
start_order: 4
image: *nova_api_image
net: host
detach: false
volumes: *nova_api_volumes
- command: ['/usr/bin/nova-manage', 'db', 'sync']
+ user: root
+ command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage db sync'"
step_4:
nova_api:
start_order: 2
user: nova
privileged: true
restart: always
- volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/nova_api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ volumes: *nova_api_volumes
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
step_5:
net: host
detach: false
volumes: *nova_api_volumes
- command:
- - '/usr/bin/nova-manage'
- - 'cell_v2'
- - 'discover_hosts'
+ user: root
+ command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 discover_hosts'"
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
upgrade_tasks:
- name: Stop and disable nova_api service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Compute service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Nova Compute service.
value:
service_name: {get_attr: [NovaComputeBase, role_data, service_name]}
- config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - get_attr: [NovaComputeBase, role_data, config_settings]
+ # FIXME: we need to disable migration for now as the
+ # hieradata is common for all services, and this means nova
+ # and nova_placement puppet runs also try to configure
+ # libvirt, and they fail. We can remove this override when
+ # we have hieradata separation between containers.
+ - tripleo::profile::base::nova::manage_migration: false
step_config: &step_config
get_attr: [NovaComputeBase, role_data, step_config]
puppet_config:
kolla_config:
/var/lib/kolla/config_files/nova-compute.json:
command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
+ - path: /var/lib/nova
+ owner: nova:nova
+ recurse: true
docker_config:
# FIXME: run discover hosts here
step_4:
user: root
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova_libvirt/etc/nova/:/etc/nova/:ro
- - /dev:/dev
- - /etc/iscsi:/etc/iscsi
- - /lib/modules:/lib/modules:ro
- - /run:/run
- - /var/lib/nova:/var/lib/nova
- - /var/lib/libvirt:/var/lib/libvirt
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova-compute.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt/etc/nova/:/etc/nova/:ro
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - /lib/modules:/lib/modules:ro
+ - /run:/run
+ - /var/lib/nova:/var/lib/nova
+ - /var/lib/libvirt:/var/lib/libvirt
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
- - name: create /var/lib/libvirt
+ - name: create persistent directories
file:
- path: /var/lib/libvirt
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/nova
+ - /var/lib/nova
+ - /var/lib/libvirt
upgrade_tasks:
- name: Stop and disable nova-compute service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Conductor service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/nova_conductor.json:
command: /usr/bin/nova-conductor
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
step_4:
nova_conductor:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
- - /run:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_conductor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
upgrade_tasks:
- name: Stop and disable nova_conductor service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Ironic Compute service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
NovaIronicBase:
type: ../../puppet/services/nova-ironic.yaml
properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/nova_ironic.json:
- command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+ command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
+ - path: /var/lib/nova
+ owner: nova:nova
+ recurse: true
docker_config:
step_5:
novacompute:
user: root
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
- - /run:/run
- - /dev:/dev
- - /etc/iscsi:/etc/iscsi
- - /var/lib/nova/:/var/lib/nova
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_ironic.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /run:/run
+ - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - /var/lib/nova/:/var/lib/nova
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - /var/log/containers/nova
+ - /var/lib/nova
upgrade_tasks:
- name: Stop and disable nova-compute service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Libvirt Service
description: image
default: 'centos-binary-nova-compute:latest'
type: string
+ EnablePackageInstall:
+ default: 'false'
+ description: Set to true to enable package installation
+ type: boolean
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Libvirt service.
value:
service_name: {get_attr: [NovaLibvirtBase, role_data, service_name]}
- config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]}
+ config_settings:
+ map_merge:
+ - get_attr: [NovaLibvirtBase, role_data, config_settings]
+ # FIXME: we need to disable migration for now as the
+ # hieradata is common for all services, and this means nova
+ # and nova_placement puppet runs also try to configure
+ # libvirt, and they fail. We can remove this override when
+ # we have hieradata separation between containers.
+ - tripleo::profile::base::nova::manage_migration: false
step_config: &step_config
get_attr: [NovaLibvirtBase, role_data, step_config]
puppet_config:
kolla_config:
/var/lib/kolla/config_files/nova-libvirt.json:
command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
step_3:
nova_libvirt:
privileged: true
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova_libvirt/etc/libvirt/:/etc/libvirt/:ro
- - /lib/modules:/lib/modules:ro
- - /dev:/dev
- - /run:/run
- - /sys/fs/cgroup:/sys/fs/cgroup
- - /var/lib/nova:/var/lib/nova
- # Needed to use host's virtlogd
- - /var/run/libvirt:/var/run/libvirt
- - /var/lib/libvirt:/var/lib/libvirt
- - /etc/libvirt/qemu:/etc/libvirt/qemu
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova-libvirt.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_libvirt/etc/libvirt/:/etc/libvirt/:ro
+ - /lib/modules:/lib/modules:ro
+ - /dev:/dev
+ - /run:/run
+ - /sys/fs/cgroup:/sys/fs/cgroup
+ - /var/lib/nova:/var/lib/nova
+ # Needed to use host's virtlogd
+ - /var/run/libvirt:/var/run/libvirt
+ - /var/lib/libvirt:/var/lib/libvirt
+ - /etc/libvirt/qemu:/etc/libvirt/qemu
+ - /var/log/libvirt/qemu:/var/log/libvirt/qemu:ro
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
host_prep_tasks:
with_items:
- /etc/libvirt/qemu
- /var/lib/libvirt
+ - /var/log/containers/nova
+ - name: set enable_package_install fact
+ set_fact:
+ enable_package_install: {get_param: EnablePackageInstall}
+ # We use virtlogd on the host, so when using the Deployed Server
+ # feature we need to ensure libvirt is installed.
+ - name: install libvirt-daemon
+ package:
+ name: libvirt-daemon
+ state: present
+ when: enable_package_install
+ - name: start virtlogd socket
+ service:
+ name: virtlogd.socket
+ state: started
+ enabled: yes
+ when: enable_package_install
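+ # set_fact surfaces the EnablePackageInstall Heat parameter as an
+ # Ansible fact so the two tasks above can be gated with a plain
+ # `when:`; both are skipped unless package installation is enabled.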
upgrade_tasks:
- name: Stop and disable libvirtd service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Metadata service
DefaultPasswords:
default: {}
type: json
-
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Placement API service
type: string
DockerNovaPlacementImage:
description: image
- default: 'centos-binary-nova-placement-api'
+ default: 'centos-binary-nova-placement-api:latest'
type: string
EndpointMap:
default: {}
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/nova_placement.json:
command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
# start this early so it is up before computes start reporting
step_3:
user: root
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova_placement/etc/nova/:/etc/nova/:ro
- - /var/lib/config-data/nova_placement/etc/httpd/:/etc/httpd/:ro
- - /var/lib/config-data/nova_placement/var/www/:/var/www/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_placement.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova_placement/etc/nova/:/etc/nova/:ro
+ - /var/lib/config-data/nova_placement/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/nova_placement/var/www/:/var/www/:ro
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
upgrade_tasks:
- name: Stop and disable nova_placement service (running under httpd)
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Nova Scheduler service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/nova_scheduler.json:
command: /usr/bin/nova-scheduler
+ permissions:
+ - path: /var/log/nova
+ owner: nova:nova
+ recurse: true
docker_config:
step_4:
nova_scheduler:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
- - /run:/run
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/nova_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+ - /run:/run
+ - /var/log/containers/nova:/var/log/nova
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/nova
+ state: directory
upgrade_tasks:
- name: Stop and disable nova_scheduler service
tags: step2
--- /dev/null
+heat_template_version: pike
+
+description: >
+ MySQL HA clustercheck service deployment using puppet
+ This service is used by HAProxy in an HA scenario to report whether
+ the local galera node is synced.
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerClustercheckImage:
+ description: image
+ default: 'centos-binary-mariadb:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ../containers-common.yaml
+
+ MysqlPuppetBase:
+ type: ../../../puppet/services/pacemaker/database/mysql.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Containerized service clustercheck using composable services.
+ value:
+ service_name: clustercheck
+ config_settings: {get_attr: [MysqlPuppetBase, role_data, config_settings]}
+ step_config: "include ::tripleo::profile::pacemaker::clustercheck"
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: clustercheck
+ puppet_tags: file # set this even though file is the default
+ step_config: "include ::tripleo::profile::pacemaker::clustercheck"
+ config_image: &clustercheck_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerClustercheckImage} ]
+ kolla_config:
+ /var/lib/kolla/config_files/clustercheck.json:
+ command: /usr/sbin/xinetd -dontfork
+ config_files:
+ - dest: /etc/xinetd.conf
+ source: /var/lib/kolla/config_files/src/etc/xinetd.conf
+ owner: mysql
+ perm: '0644'
+ - dest: /etc/xinetd.d/galera-monitor
+ source: /var/lib/kolla/config_files/src/etc/xinetd.d/galera-monitor
+ owner: mysql
+ perm: '0644'
+ - dest: /etc/sysconfig/clustercheck
+ source: /var/lib/kolla/config_files/src/etc/sysconfig/clustercheck
+ owner: mysql
+ perm: '0600'
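+ # Each config_files entry is copied from the bind-mounted src tree to
+ # its dest by kolla at container start (COPY_ALWAYS), applying the
+ # given owner and mode, so the puppet-generated xinetd and
+ # clustercheck config land inside the container with the right
+ # permissions.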
+ docker_config:
+ step_2:
+ clustercheck:
+ start_order: 1
+ image: *clustercheck_image
+ restart: always
+ net: host
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/clustercheck.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/clustercheck/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/mysql:/var/lib/mysql
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ upgrade_tasks:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ MySQL service deployment with pacemaker bundle
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerMysqlImage:
+ description: image
+ default: 'centos-binary-mariadb:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ MysqlRootPassword:
+ type: string
+ hidden: true
+ default: ''
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ ContainersCommon:
+ type: ../../containers-common.yaml
+
+ MysqlPuppetBase:
+ type: ../../../../puppet/services/pacemaker/database/mysql.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Containerized service MySQL using composable services.
+ value:
+ service_name: {get_attr: [MysqlPuppetBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - {get_attr: [MysqlPuppetBase, role_data, config_settings]}
+ - tripleo::profile::pacemaker::database::mysql_bundle::mysql_docker_image: &mysql_image
+ list_join:
+ - '/'
+ - - {get_param: DockerNamespace}
+ - {get_param: DockerMysqlImage}
+ step_config: ""
+ # BEGIN DOCKER SETTINGS #
+ puppet_config:
+ config_volume: mysql
+ puppet_tags: file # set this even though file is the default
+ step_config:
+ list_join:
+ - "\n"
+ - - "['Mysql_datadir', 'Mysql_user', 'Mysql_database', 'Mysql_grant', 'Mysql_plugin'].each |String $val| { noop_resource($val) }"
+ - "exec {'wait-for-settle': command => '/bin/true' }"
+ - "include ::tripleo::profile::pacemaker::database::mysql_bundle"
+ config_image: *mysql_image
+ kolla_config:
+ /var/lib/kolla/config_files/mysql.json:
+ command: /usr/sbin/pacemaker_remoted
+ config_files:
+ - dest: /etc/libqb/force-filesystem-sockets
+ source: /dev/null
+ owner: root
+ perm: '0644'
+ - dest: /etc/my.cnf
+ source: /var/lib/kolla/config_files/src/etc/my.cnf
+ owner: mysql
+ perm: '0644'
+ - dest: /etc/my.cnf.d/galera.cnf
+ source: /var/lib/kolla/config_files/src/etc/my.cnf.d/galera.cnf
+ owner: mysql
+ perm: '0644'
+ - dest: /etc/sysconfig/clustercheck
+ source: /var/lib/kolla/config_files/src/etc/sysconfig/clustercheck
+ owner: root
+ perm: '0600'
+ docker_config:
+ step_1:
+ mysql_data_ownership:
+ start_order: 0
+ detach: false
+ image: *mysql_image
+ net: host
+ user: root
+ # Kolla does only non-recursive chown
+ command: ['chown', '-R', 'mysql:', '/var/lib/mysql']
+ volumes:
+ - /var/lib/mysql:/var/lib/mysql
+ mysql_bootstrap:
+ start_order: 1
+ detach: false
+ image: *mysql_image
+ net: host
+ # Kolla bootstraps aren't idempotent, so explicitly check whether the bootstrap was already done
+ command: ['bash', '-c', 'test -e /var/lib/mysql/mysql || kolla_start']
+ volumes: &mysql_volumes
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/mysql.json:/var/lib/kolla/config_files/config.json
+ - /var/lib/config-data/mysql/:/var/lib/kolla/config_files/src:ro
+ - /var/lib/mysql:/var/lib/mysql
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - KOLLA_BOOTSTRAP=True
+ # NOTE(mandre) skip wsrep cluster status check
+ - KOLLA_KUBERNETES=True
+ -
+ list_join:
+ - '='
+ - - 'DB_ROOT_PASSWORD'
+ -
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: MysqlRootPassword}
+ - {get_param: [DefaultPasswords, mysql_root_password]}
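+ # The yaql expression selects the first non-empty password, so an
+ # operator-supplied MysqlRootPassword takes precedence and the
+ # Heat-generated DefaultPasswords.mysql_root_password is only a
+ # fallback; e.g. with MysqlRootPassword='' the second entry is used.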
+ step_2:
+ mysql_init_bundle:
+ start_order: 1
+ detach: false
+ net: host
+ user: root
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation,galera_ready,mysql_database,mysql_grant,mysql_user'
+ CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::mysql_bundle'
+ image: *mysql_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ - /var/lib/config-data/mysql/etc/my.cnf:/etc/my.cnf:ro
+ - /var/lib/config-data/mysql/etc/my.cnf.d:/etc/my.cnf.d:ro
+ - /var/lib/mysql:/var/lib/mysql:rw
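+ # After str_replace substitutes TAGS and CONFIG, the container runs
+ # roughly:
+ #   cp -a /tmp/puppet-etc/* /etc/puppet
+ #   echo '{"step": 2}' > /etc/puppet/hieradata/docker.json
+ #   FACTER_uuid=docker puppet apply \
+ #     --tags file,file_line,concat,augeas,pacemaker::resource::bundle,... \
+ #     -v -e 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::mysql_bundle'
+ # i.e. a one-shot puppet run, restricted to the pacemaker tags, that
+ # defines the bundle resource which pacemaker then starts.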
+ host_prep_tasks:
+ - name: create /var/lib/mysql
+ file:
+ path: /var/lib/mysql
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable mysql service
+ tags: step2
+ service: name=mariadb state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Redis services
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerRedisImage:
+ description: image
+ default: 'centos-binary-redis:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ RedisBase:
+ type: ../../../../puppet/services/database/redis.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Redis API role.
+ value:
+ service_name: {get_attr: [RedisBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - {get_attr: [RedisBase, role_data, config_settings]}
+ - redis::service_manage: false
+ redis::notify_service: false
+ redis::managed_by_cluster_manager: true
+ tripleo::profile::pacemaker::database::redis_bundle::redis_docker_image: &redis_image
+ list_join:
+ - '/'
+ - - {get_param: DockerNamespace}
+ - {get_param: DockerRedisImage}
+
+ step_config: ""
+ service_config_settings: {get_attr: [RedisBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: 'redis'
+ # NOTE: we need the exec tag to copy /etc/redis.conf.puppet to
+ # /etc/redis.conf
+ # https://github.com/arioch/puppet-redis/commit/1c004143223e660cbd433422ff8194508aab9763
+ puppet_tags: 'exec'
+ step_config:
+ get_attr: [RedisBase, role_data, step_config]
+ config_image: *redis_image
+ kolla_config:
+ /var/lib/kolla/config_files/redis.json:
+ command: /usr/sbin/pacemaker_remoted
+ config_files:
+ - dest: /etc/libqb/force-filesystem-sockets
+ source: /dev/null
+ owner: root
+ perm: '0644'
+ permissions:
+ - path: /var/run/redis
+ owner: redis:redis
+ recurse: true
+ - path: /var/lib/redis
+ owner: redis:redis
+ recurse: true
+ - path: /var/log/redis
+ owner: redis:redis
+ recurse: true
+ docker_config:
+ step_2:
+ redis_init_bundle:
+ start_order: 2
+ detach: false
+ net: host
+ user: root
+ config_volume: 'redis_init_bundle'
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+ CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::database::redis_bundle'
+ image: *redis_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ host_prep_tasks:
+ - name: create /var/run/redis
+ file:
+ path: /var/run/redis
+ state: directory
+ - name: create /var/log/redis
+ file:
+ path: /var/log/redis
+ state: directory
+ - name: create /var/lib/redis
+ file:
+ path: /var/lib/redis
+ state: directory
+ upgrade_tasks:
+ - name: Stop and disable redis service
+ tags: step2
+ service: name=redis state=stopped enabled=no
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized HAProxy service for pacemaker
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerHAProxyImage:
+ description: image
+ default: 'centos-binary-haproxy:latest'
+ type: string
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ HAProxyBase:
+ type: ../../../puppet/services/pacemaker/haproxy.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the HAProxy role.
+ value:
+ service_name: {get_attr: [HAProxyBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - get_attr: [HAProxyBase, role_data, config_settings]
+ - tripleo::haproxy::haproxy_daemon: false
+ haproxy_docker: true
+ tripleo::profile::pacemaker::haproxy_bundle::haproxy_docker_image: &haproxy_image
+ list_join:
+ - '/'
+ - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
+ step_config:
+ list_join:
+ - "\n"
+ - - &noop_pcmk "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
+ - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
+ service_config_settings: {get_attr: [HAProxyBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: haproxy
+ puppet_tags: haproxy_config
+ step_config:
+ list_join:
+ - "\n"
+ - - "exec {'wait-for-settle': command => '/bin/true' }"
+ - &noop_firewall "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
+ - *noop_pcmk
+ - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
+ config_image: *haproxy_image
+ kolla_config:
+ /var/lib/kolla/config_files/haproxy.json:
+ command: haproxy -f /etc/haproxy/haproxy.cfg
+ docker_config:
+ step_2:
+ haproxy_init_bundle:
+ start_order: 3
+ detach: false
+ net: host
+ user: root
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+ CONFIG:
+ list_join:
+ - ';'
+ - - *noop_firewall
+ - 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::haproxy_bundle'
+ image: *haproxy_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ metadata_settings:
+ get_attr: [HAProxyBase, role_data, metadata_settings]
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack containerized Rabbitmq service
+
+parameters:
+ DockerNamespace:
+ description: namespace
+ default: 'tripleoupstream'
+ type: string
+ DockerRabbitmqImage:
+ description: image
+ default: 'centos-binary-rabbitmq:latest'
+ type: string
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RabbitCookie:
+ type: string
+ default: ''
+ hidden: true
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+resources:
+
+ RabbitmqBase:
+ type: ../../../puppet/services/rabbitmq.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+ description: Role data for the Rabbitmq API role.
+ value:
+ service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
+ config_settings:
+ map_merge:
+ - {get_attr: [RabbitmqBase, role_data, config_settings]}
+ - rabbitmq::service_manage: false
+ tripleo::profile::pacemaker::rabbitmq_bundle::rabbitmq_docker_image: &rabbitmq_image
+ list_join:
+ - '/'
+ - - {get_param: DockerNamespace}
+ - {get_param: DockerRabbitmqImage}
+ step_config: &step_config
+ get_attr: [RabbitmqBase, role_data, step_config]
+ service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
+ # BEGIN DOCKER SETTINGS
+ puppet_config:
+ config_volume: rabbitmq
+ puppet_tags: file
+ step_config: *step_config
+ config_image: *rabbitmq_image
+ kolla_config:
+ /var/lib/kolla/config_files/rabbitmq.json:
+ command: /usr/sbin/pacemaker_remoted
+ config_files:
+ - dest: /etc/libqb/force-filesystem-sockets
+ source: /dev/null
+ owner: root
+ perm: '0644'
+ permissions:
+ - path: /var/lib/rabbitmq
+ owner: rabbitmq:rabbitmq
+ recurse: true
+ - path: /var/log/rabbitmq
+ owner: rabbitmq:rabbitmq
+ recurse: true
+ # When using pacemaker we don't launch the container here; pacemaker
+ # itself does that.
+ docker_config:
+ step_1:
+ rabbitmq_bootstrap:
+ start_order: 0
+ image: *rabbitmq_image
+ net: host
+ privileged: false
+ volumes:
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/etc/rabbitmq:/etc/rabbitmq:ro
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - KOLLA_BOOTSTRAP=True
+ -
+ list_join:
+ - '='
+ - - 'RABBITMQ_CLUSTER_COOKIE'
+ -
+ yaql:
+ expression: $.data.passwords.where($ != '').first()
+ data:
+ passwords:
+ - {get_param: RabbitCookie}
+ - {get_param: [DefaultPasswords, rabbit_cookie]}
+ step_2:
+ rabbitmq_init_bundle:
+ start_order: 0
+ detach: false
+ net: host
+ user: root
+ command:
+ - '/bin/bash'
+ - '-c'
+ - str_replace:
+ template:
+ list_join:
+ - '; '
+ - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
+ - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
+ params:
+ TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+ CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::rabbitmq_bundle'
+ image: *rabbitmq_image
+ volumes:
+ - /etc/hosts:/etc/hosts:ro
+ - /etc/localtime:/etc/localtime:ro
+ - /etc/puppet:/tmp/puppet-etc:ro
+ - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+ - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+ - /dev/shm:/dev/shm:rw
+ host_prep_tasks:
+ - name: create /var/lib/rabbitmq
+ file:
+ path: /var/lib/rabbitmq
+ state: directory
+ - name: stop the Erlang port mapper on the host and make sure it cannot bind to the port used by the container
+ shell: |
+ echo 'export ERL_EPMD_ADDRESS=127.0.0.1' > /etc/rabbitmq/rabbitmq-env.conf
+ echo 'export ERL_EPMD_PORT=4370' >> /etc/rabbitmq/rabbitmq-env.conf
+ for pid in $(pgrep epmd); do if [ "$(lsns -o NS -p $pid)" == "$(lsns -o NS -p 1)" ]; then kill $pid; break; fi; done
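+ # The loop compares each epmd's namespaces (lsns -o NS) with PID 1's:
+ # only an epmd running directly in the host namespaces is killed,
+ # while containerized epmd processes are left untouched.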
+ upgrade_tasks:
+ - name: Stop and disable rabbitmq service
+ tags: step2
+ service: name=rabbitmq-server state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
- OpenStack Panko service configured with docker
+ OpenStack Panko service configured with docker.
+ Note, this service is deprecated in the Pike release and
+ will be disabled in future releases.
parameters:
DockerNamespace:
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/panko-api.json:
command: /usr/sbin/httpd -DFOREGROUND
+ permissions:
+ - path: /var/log/panko
+ owner: panko:panko
+ recurse: true
docker_config:
step_3:
panko-init-log:
start_order: 0
image: *panko_image
user: root
- command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd && mkdir -p /var/log/panko && chown panko:panko /var/log/panko']
volumes:
- - logs:/var/log
+ - /var/log/containers/panko:/var/log/panko
+ command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
panko_db_sync:
start_order: 1
image: *panko_image
net: host
detach: false
privileged: false
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/config-data/panko/etc/panko:/etc/panko:ro
- - logs:/var/log
- command: /usr/bin/panko-dbsync
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/config-data/panko/etc/panko:/etc/panko:ro
+ - /var/log/containers/panko:/var/log/panko
+ command: "/usr/bin/bootstrap_host_exec panko_api su panko -s /bin/bash -c '/usr/bin/panko-dbsync'"
step_4:
panko_api:
start_order: 2
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/panko-api.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/panko/etc/panko/:/etc/panko/:ro
- - /var/lib/config-data/panko/etc/httpd/:/etc/httpd/:ro
- - /var/lib/config-data/panko/var/www/:/var/www/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/panko-api.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/panko/etc/panko/:/etc/panko/:ro
+ - /var/lib/config-data/panko/etc/httpd/:/etc/httpd/:ro
+ - /var/lib/config-data/panko/var/www/:/var/www/:ro
+ - /var/log/containers/panko:/var/log/panko
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - ''
+ -
+ if:
+ - internal_tls_enabled
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ - ''
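+ # The '' else-branch yields an empty volume entry, which is expected
+ # to be filtered out when the container is launched, so the TLS cert
+ # and key mounts only appear when internal_tls_enabled is true.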
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/panko
+ state: directory
+ metadata_settings:
+ get_attr: [PankoApiPuppetBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Rabbitmq service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
RabbitCookie:
type: string
default: ''
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
description: Role data for the Rabbitmq API role.
value:
service_name: {get_attr: [RabbitmqBase, role_data, service_name]}
- config_settings: {get_attr: [RabbitmqBase, role_data, config_settings]}
+ # RabbitMQ plugin initialization occurs on every node
+ config_settings:
+ map_merge:
+ - {get_attr: [RabbitmqBase, role_data, config_settings]}
+ - rabbitmq::admin_enable: false
step_config: &step_config
- get_attr: [RabbitmqBase, role_data, step_config]
+ list_join:
+ - "\n"
+ - - "['Rabbitmq_policy', 'Rabbitmq_user'].each |String $val| { noop_resource($val) }"
+ - get_attr: [RabbitmqBase, role_data, step_config]
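+ # Rabbitmq_policy and Rabbitmq_user are no-op'd in the per-node run
+ # because they touch cluster-wide state; they are applied once via
+ # the docker_puppet_tasks section below.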
service_config_settings: {get_attr: [RabbitmqBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
puppet_config:
config_volume: rabbitmq
- puppet_tags: file
step_config: *step_config
config_image: &rabbitmq_image
list_join:
kolla_config:
/var/lib/kolla/config_files/rabbitmq.json:
command: /usr/lib/rabbitmq/bin/rabbitmq-server
+ permissions:
+ - path: /var/lib/rabbitmq
+ owner: rabbitmq:rabbitmq
+ recurse: true
docker_config:
+ # Kolla_bootstrap runs before the permissions set by kolla_config are applied
step_1:
- rabbitmq_bootstrap:
+ rabbitmq_init_logs:
start_order: 0
image: *rabbitmq_image
+ privileged: false
+ user: root
+ volumes:
+ - /var/log/containers/rabbitmq:/var/log/rabbitmq
+ command: ['/bin/bash', '-c', 'chown -R rabbitmq:rabbitmq /var/log/rabbitmq']
+ rabbitmq_bootstrap:
+ start_order: 1
+ image: *rabbitmq_image
net: host
privileged: false
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
- - /var/lib/rabbitmq:/var/lib/rabbitmq
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
+ - /var/log/containers/rabbitmq:/var/log/rabbitmq
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- KOLLA_BOOTSTRAP=True
- -
+ -
list_join:
- '='
- - 'RABBITMQ_CLUSTER_COOKIE'
- {get_param: RabbitCookie}
- {get_param: [DefaultPasswords, rabbit_cookie]}
rabbitmq:
- start_order: 1
+ start_order: 2
image: *rabbitmq_image
net: host
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
- - /var/lib/rabbitmq:/var/lib/rabbitmq
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/rabbitmq.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/rabbitmq/etc/rabbitmq/:/etc/rabbitmq/:ro
+ - /var/lib/rabbitmq:/var/lib/rabbitmq
+ - /var/log/containers/rabbitmq:/var/log/rabbitmq
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ docker_puppet_tasks:
+        # RabbitMQ user and policy initialization runs on a single node only
+ step_1:
+ config_volume: 'rabbit_init_tasks'
+ puppet_tags: 'rabbitmq_policy,rabbitmq_user'
+ step_config: 'include ::tripleo::profile::base::rabbitmq'
+ config_image: *rabbitmq_image
+ volumes:
+ - /var/lib/config-data/rabbitmq/etc/:/etc/
+ - /var/lib/rabbitmq:/var/lib/rabbitmq:ro
host_prep_tasks:
- - name: create /var/lib/rabbitmq
+ - name: create persistent directories
file:
- path: /var/lib/rabbitmq
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/rabbitmq
+ - /var/lib/rabbitmq
upgrade_tasks:
- name: Stop and disable rabbitmq service
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
Utility stack to convert an array of services into a set of combined
description: Mapping of service -> default password. Used to help
pass top level passwords managed by Heat into services.
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ServiceChain:
type: OS::Heat::ResourceChain
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized swift proxy service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
kolla_config:
/var/lib/kolla/config_files/swift_proxy.json:
command: /usr/bin/swift-proxy-server /etc/swift/proxy-server.conf
+ permissions:
+ - path: /var/log/swift
+ owner: swift:swift
+ recurse: true
+ /var/lib/kolla/config_files/swift_proxy_tls_proxy.json:
+ command: /usr/sbin/httpd -DFOREGROUND
docker_config:
step_4:
- swift_proxy:
- image: *swift_proxy_image
- net: host
- user: swift
- restart: always
- volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
- # FIXME I'm mounting /etc/swift as rw. Are the rings written to
- # at all during runtime?
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
- environment:
- - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ map_merge:
+ - swift_proxy:
+ image: *swift_proxy_image
+ net: host
+ user: swift
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ # FIXME I'm mounting /etc/swift as rw. Are the rings written to
+ # at all during runtime?
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - if:
+ - internal_tls_enabled
+ - swift_proxy_tls_proxy:
+ image: *swift_proxy_image
+ net: host
+ user: root
+ restart: always
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_proxy_tls_proxy.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/httpd/:/etc/httpd/:ro
+ - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+ - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+ environment:
+ - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ - {}
host_prep_tasks:
- - name: create /srv/node
+ - name: create persistent directories
file:
- path: /srv/node
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/swift
+ - /srv/node
upgrade_tasks:
- name: Stop and disable swift_proxy service
tags: step2
service: name=openstack-swift-proxy state=stopped enabled=no
+ metadata_settings:
+ get_attr: [SwiftProxyBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Ringbuilder
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Swift Storage services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
command: /usr/bin/swift-object-updater /etc/swift/object-server.conf
/var/lib/kolla/config_files/swift_object_server.json:
command: /usr/bin/swift-object-server /etc/swift/object-server.conf
+ permissions:
+ - path: /var/log/swift
+ owner: swift:swift
+ recurse: true
docker_config:
step_3:
# The puppet config sets this up but we don't have a way to mount the named
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_account_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: &kolla_env
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
swift_account_reaper:
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_account_reaper.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_account_replicator:
image: *swift_account_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_account_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_account_server:
image: *swift_account_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_account_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_container_auditor:
image: &swift_container_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_container_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_container_replicator:
image: *swift_container_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_container_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_container_updater:
image: *swift_container_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_container_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_container_server:
image: *swift_container_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_container_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_auditor:
image: &swift_object_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_auditor.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_expirer:
image: *swift_proxy_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_expirer.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_replicator:
image: *swift_object_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_replicator.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_updater:
image: *swift_object_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_updater.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
swift_object_server:
image: *swift_object_image
user: swift
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
- - /run:/run
- - /srv/node:/srv/node
- - /dev:/dev
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_object_server.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc/swift:/etc/swift:rw
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
environment: *kolla_env
host_prep_tasks:
- - name: create /srv/node
+ - name: create persistent directories
file:
- path: /srv/node
+ path: "{{ item }}"
state: directory
+ with_items:
+ - /var/log/containers/swift
+ - /srv/node
upgrade_tasks:
- name: Stop and disable swift storage services
tags: step2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack containerized Zaqar services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
EndpointMap: {get_param: EndpointMap}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
kolla_config:
/var/lib/kolla/config_files/zaqar.json:
- command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf
+ command: /usr/sbin/httpd -DFOREGROUND
/var/lib/kolla/config_files/zaqar_websocket.json:
command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf
+ permissions:
+ - path: /var/log/zaqar
+ owner: zaqar:zaqar
+ recurse: true
docker_config:
step_4:
zaqar:
net: host
privileged: false
restart: always
+              # NOTE(mandre) the kolla image changes the user to 'zaqar'; we
+              # need it to be root to run httpd
+ user: root
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
+ - /var/lib/config-data/zaqar/var/www/:/var/www/:ro
+ - /var/lib/config-data/zaqar/etc/httpd/:/etc/httpd/:ro
+ - /var/log/containers/zaqar:/var/log/zaqar
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
zaqar_websocket:
privileged: false
restart: always
volumes:
- yaql:
- expression: $.data.common.concat($.data.service)
- data:
- common: {get_attr: [ContainersCommon, volumes]}
- service:
- - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
- - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/zaqar_websocket.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
+ - /var/lib/config-data/zaqar/var/www/:/var/www/:ro
+ - /var/lib/config-data/zaqar/etc/httpd/:/etc/httpd/:ro
+ - /var/log/containers/zaqar:/var/log/zaqar
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+ host_prep_tasks:
+ - name: create persistent logs directory
+ file:
+ path: /var/log/containers/zaqar
+ state: directory
upgrade_tasks:
- name: Stop and disable zaqar service
tags: step2
- service: name=openstack-zaqar.service state=stopped enabled=no
-
+ service: name=httpd state=stopped enabled=no
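+    # NOTE: zaqar now runs under httpd (see the kolla_config command above),
+    # so the httpd unit is the one stopped on upgrade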
CinderDellScSanIp: ''
CinderDellScSanLogin: 'Admin'
CinderDellScSanPassword: ''
- CinderDellScSsn: '64702'
+ CinderDellScSsn: 64702
CinderDellScIscsiIpAddress: ''
- CinderDellScIscsiPort: '3260'
- CinderDellScApiPort: '3033'
+ CinderDellScIscsiPort: 3260
+ CinderDellScApiPort: 3033
CinderDellScServerFolder: 'dellsc_server'
CinderDellScVolumeFolder: 'dellsc_volume'
+ CinderDellScSecondarySanIp: ''
+ CinderDellScSecondarySanLogin: 'Admin'
+ CinderDellScSecondarySanPassword: ''
+ CinderDellScSecondaryScApiPort: 3033
--- /dev/null
+# A Heat environment file which can be used to enable a
+# Cinder Pure Storage FlashArray iSCSI backend, configured via puppet
+resource_registry:
+ OS::TripleO::Services::CinderBackendPure: ../puppet/services/cinder-backend-pure.yaml
+
+parameter_defaults:
+ CinderEnablePureBackend: true
+ CinderPureBackendName: 'tripleo_pure'
+ CinderPureStorageProtocol: 'iSCSI'
+ CinderPureSanIp: ''
+ CinderPureAPIToken: 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'
+ CinderPureUseChap: false
+ CinderPureMultipathXfer: true
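+
+# Example usage, a minimal sketch assuming this file is installed as
+# environments/cinder-pure-config.yaml:
+#   openstack overcloud deploy --templates \
+#     -e environments/cinder-pure-config.yaml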
# parameter_defaults:
#
-## You can specify additional plugins to load using the
-## CollectdExtraPlugins key:
+## Collectd server configuration
+# CollectdServer: collectd0.example.com
+#
+################
+#### Other config parameters; the values shown here are the defaults
+################
+#
+# CollectdServerPort: 25826
+# CollectdSecurityLevel: None
+#
+################
+#### If CollectdSecurityLevel is set to Encrypt or Sign
+#### the following parameters are also needed
+################
+#
+# CollectdUsername: user
+# CollectdPassword: password
+#
+## CollectdDefaultPlugins: the default plugins enabled by collectd
+#
+# CollectdDefaultPlugins:
+# - disk
+# - interface
+# - load
+# - memory
+# - processes
+# - tcpconns
+#
+## Extra plugins can be enabled via the CollectdExtraPlugins parameter.
+## All the available plugins are:
#
# CollectdExtraPlugins:
# - disk
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces
resource_registry:
- OS::TripleO::Tasks::ControllerDeployedServerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerDeployedServerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPreConfig: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerDeployedServerPostConfig: ../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerDeployedServerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
--- /dev/null
+# This heat environment can be used to disable all of the telemetry services.
+# It is most useful in a resource-constrained environment or one in which
+# telemetry is not needed.
+
+resource_registry:
+ OS::TripleO::Services::CeilometerApi: OS::Heat::None
+ OS::TripleO::Services::CeilometerCollector: OS::Heat::None
+ OS::TripleO::Services::CeilometerExpirer: OS::Heat::None
+ OS::TripleO::Services::CeilometerAgentCentral: OS::Heat::None
+ OS::TripleO::Services::CeilometerAgentNotification: OS::Heat::None
+ OS::TripleO::Services::CeilometerAgentIpmi: OS::Heat::None
+ OS::TripleO::Services::ComputeCeilometerAgent: OS::Heat::None
+ OS::TripleO::Services::GnocchiApi: OS::Heat::None
+ OS::TripleO::Services::GnocchiMetricd: OS::Heat::None
+ OS::TripleO::Services::GnocchiStatsd: OS::Heat::None
+ OS::TripleO::Services::AodhApi: OS::Heat::None
+ OS::TripleO::Services::AodhEvaluator: OS::Heat::None
+ OS::TripleO::Services::AodhNotifier: OS::Heat::None
+ OS::TripleO::Services::AodhListener: OS::Heat::None
+ OS::TripleO::Services::PankoApi: OS::Heat::None
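+
+# Example usage (the environment path is illustrative):
+#   openstack overcloud deploy --templates -e environments/disable-telemetry.yaml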
OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
# NOTE: add roles to be docker-enabled as we support them.
+ OS::TripleO::Services::AodhApi: ../docker/services/aodh-api.yaml
+ OS::TripleO::Services::AodhEvaluator: ../docker/services/aodh-evaluator.yaml
+ OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
+ OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
+ OS::TripleO::Services::GlanceApi: ../docker/services/glance-api.yaml
+ OS::TripleO::Services::GnocchiApi: ../docker/services/gnocchi-api.yaml
+ OS::TripleO::Services::GnocchiMetricd: ../docker/services/gnocchi-metricd.yaml
+ OS::TripleO::Services::GnocchiStatsd: ../docker/services/gnocchi-statsd.yaml
+ OS::TripleO::Services::HeatApi: ../docker/services/heat-api.yaml
+ OS::TripleO::Services::HeatApiCfn: ../docker/services/heat-api-cfn.yaml
+ OS::TripleO::Services::HeatEngine: ../docker/services/heat-engine.yaml
OS::TripleO::Services::Keystone: ../docker/services/keystone.yaml
+ OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
+ OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
+ OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
+ OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
+ OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
+ OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
+ OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
+ OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
+ OS::TripleO::Services::SwiftRingBuilder: ../docker/services/swift-ringbuilder.yaml
+ OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
OS::TripleO::PostDeploySteps: ../docker/post.yaml
OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
DockerNamespaceIsRegistry: false
ComputeServices:
+ - OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::ComputeNeutronOvsAgent
- OS::TripleO::Services::Docker
+ - OS::TripleO::Services::Sshd
OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
+ OS::TripleO::Services::NeutronMetadataAgent: ../docker/services/neutron-metadata.yaml
OS::TripleO::Services::NeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
OS::TripleO::Services::HAProxy: ../docker/services/haproxy.yaml
OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
+ OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
OS::TripleO::Services::Memcached: ../docker/services/memcached.yaml
OS::TripleO::Services::SwiftProxy: ../docker/services/swift-proxy.yaml
OS::TripleO::Services::SwiftStorage: ../docker/services/swift-storage.yaml
OS::TripleO::Services::AodhNotifier: ../docker/services/aodh-notifier.yaml
OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
+ OS::TripleO::Services::CeilometerAgentCentral: ../docker/services/ceilometer-agent-central.yaml
+ OS::TripleO::Services::CeilometerAgentCompute: ../docker/services/ceilometer-agent-compute.yaml
+ OS::TripleO::Services::CeilometerAgentNotification: ../docker/services/ceilometer-agent-notification.yaml
OS::TripleO::PostDeploySteps: ../docker/post.yaml
OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
OS::TripleO::Services: ../docker/services/services.yaml
parameter_defaults:
- # Defaults to 'tripleoupstream'. Specify a local docker registry
- # Example: 192.168.24.1:8787/tripleoupstream
- DockerNamespace: tripleoupstream
- DockerNamespaceIsRegistry: false
+  # To use a local docker registry, uncomment these settings,
+  # where 192.168.24.1 is the host running docker-distribution
+ #DockerNamespace: 192.168.24.1:8787/tripleoupstream
+ #DockerNamespaceIsRegistry: true
ComputeServices:
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::ComputeNeutronOvsAgent
- OS::TripleO::Services::Docker
+ - OS::TripleO::Services::CeilometerAgentCompute
+ - OS::TripleO::Services::Sshd
resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip_v6.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool_v6.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool_v6.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool_v6.yaml
resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external_from_pool.yaml
OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api_from_pool.yaml
OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage_from_pool.yaml
--- /dev/null
+# This template allows the IPs to be preselected for each VIP. Note that
+# this template should be included after other templates which affect the
+# network, such as network-isolation.yaml.
+
+resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external_v6.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api_v6.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage_v6.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt_v6.yaml
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip_v6.yaml
+
+parameter_defaults:
+ # Set the IP addresses of the VIPs here.
+ # NOTE: we will eventually move to one VIP per service
+ #
+ ControlFixedIPs: [{'ip_address':'192.168.24.240'}]
+  PublicVirtualFixedIPs: [{'ip_address':'2001:db8:fd00:1000:0000:0000:0000:0005'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0005'}]
+  StorageVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:3000:0000:0000:0000:0005'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:4000:0000:0000:0000:0005'}]
+ RedisVirtualFixedIPs: [{'ip_address':'fd00:fd00:fd00:2000:0000:0000:0000:0006'}]
--- /dev/null
+# This template allows the IPs to be preselected for each VIP. Note that
+# this template should be included after other templates which affect the
+# network, such as network-isolation.yaml.
+
+resource_registry:
+ OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
+ OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
+ OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
+ OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
+ OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+parameter_defaults:
+ # Set the IP addresses of the VIPs here.
+ # NOTE: we will eventually move to one VIP per service
+ #
+ ControlFixedIPs: [{'ip_address':'192.168.24.240'}]
+  PublicVirtualFixedIPs: [{'ip_address':'10.0.0.240'}]
+ InternalApiVirtualFixedIPs: [{'ip_address':'172.16.2.240'}]
+ StorageVirtualFixedIPs: [{'ip_address':'172.16.1.240'}]
+ StorageMgmtVirtualFixedIPs: [{'ip_address':'172.16.3.240'}]
+ RedisVirtualFixedIPs: [{'ip_address':'172.16.2.241'}]
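+
+  # NOTE: choose addresses outside the allocation pools of the corresponding
+  # networks so the preselected VIPs cannot collide with pool-assigned IPs.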
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::ComputeNeutronCorePlugin
- OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::NeutronLinuxbridgeAgent
- OS::TripleO::Services::ComputeCeilometerAgent
- OS::TripleO::Services::ComputeNeutronL3Agent
- OS::TripleO::Services::ComputeNeutronMetadataAgent
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::Docker
ControllerExtraConfig:
'nova::network::neutron::neutron_url_timeout': '60'
+
+ DatabaseSyncTimeout: 900
+++ /dev/null
-resource_registry:
- # aodh data migration
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
-
- # no-op the rest
- OS::TripleO::PostDeploySteps: OS::Heat::None
+++ /dev/null
-resource_registry:
-
- # This initiates the upgrades for ceilometer api to run under apache wsgi
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_ceilometer_wsgi_mitaka_newton.yaml
-
- # no-op the rest
- OS::TripleO::PostDeploySteps: OS::Heat::None
OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
parameter_defaults:
EnableConfigPurge: false
+ StackUpdateType: UPGRADE
UpgradeLevelNovaCompute: auto
UpgradeInitCommonCommand: |
#!/bin/bash
OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
parameter_defaults:
EnableConfigPurge: true
+ StackUpdateType: UPGRADE
UpgradeLevelNovaCompute: auto
UpgradeInitCommonCommand: |
#!/bin/bash
OS::TripleO::PostDeploySteps: ../docker/post.yaml
parameter_defaults:
EnableConfigPurge: false
+ StackUpdateType: ''
UpgradeLevelNovaCompute: ''
UpgradeInitCommonCommand: ''
+ UpgradeInitCommand: ''
OS::TripleO::PostDeploySteps: ../puppet/post.yaml
parameter_defaults:
EnableConfigPurge: false
+ StackUpdateType: ''
UpgradeLevelNovaCompute: ''
UpgradeInitCommonCommand: ''
+ UpgradeInitCommand: ''
+++ /dev/null
-parameter_defaults:
- UpgradeLevelNovaCompute: ''
-
-resource_registry:
- OS::TripleO::Services::SaharaApi: ../puppet/services/sahara-api.yaml
- OS::TripleO::Services::SaharaEngine: ../puppet/services/sahara-engine.yaml
+++ /dev/null
-parameter_defaults:
- UpgradeLevelNovaCompute: mitaka
-
-resource_registry:
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
- OS::TripleO::PostDeploySteps: OS::Heat::None
+++ /dev/null
-parameter_defaults:
- UpgradeLevelNovaCompute: mitaka
-
-resource_registry:
- OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml
- OS::TripleO::PostDeploySteps: OS::Heat::None
+++ /dev/null
-parameter_defaults:
- KeepSaharaServicesOnUpgrade: false
-resource_registry:
- OS::TripleO::Services::SaharaApi: OS::Heat::None
- OS::TripleO::Services::SaharaEngine: OS::Heat::None
-
ManilaCephFSNativeCephFSConfPath: '/etc/ceph/ceph.conf'
ManilaCephFSNativeCephFSAuthId: 'manila'
ManilaCephFSNativeCephFSClusterName: 'ceph'
- ManilaCephFSNativeCephFSEnableSnapshots: true
+ ManilaCephFSNativeCephFSEnableSnapshots: false
--- /dev/null
+# A Heat environment file that can be used to deploy the Neutron BGPVPN service
+#
+# This environment file deploys the Neutron BGPVPN service and configures
+# OpenDaylight as its service provider.
+#
+# - OpenDaylight: BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default
+resource_registry:
+ OS::TripleO::Services::NeutronBgpVpnApi: ../puppet/services/neutron-bgpvpn-api.yaml
+
+parameter_defaults:
+ NeutronServicePlugins: 'router, networking_bgpvpn.neutron.services.plugin.BGPVPNPlugin'
+ BgpvpnServiceProvider: 'BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default'
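+
+# NOTE: with OpenDaylight as the service provider, this environment is meant to
+# be combined with an OpenDaylight deployment environment (e.g. the
+# neutron-opendaylight environment, if present in your templates).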
--- /dev/null
+# A Heat environment file that can be used to deploy the Neutron L2 Gateway service
+#
+# Currently there are only two service providers for Neutron L2 Gateway.
+# This file enables the L2GW service with OpenDaylight as the driver.
+#
+# - OpenDaylight: L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default
+resource_registry:
+ OS::TripleO::Services::NeutronL2gwApi: ../puppet/services/neutron-l2gw-api.yaml
+
+parameter_defaults:
+ NeutronServicePlugins: "networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin"
+ L2gwServiceProvider: ['L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default']
+
+ # Optional
+ # L2gwServiceDefaultInterfaceName: "FortyGigE1/0/1"
+ # L2gwServiceDefaultDeviceName: "Switch1"
+ # L2gwServiceQuotaL2Gateway: 10
+ # L2gwServicePeriodicMonitoringInterval: 5
# - OpenDaylight: L2GW:OpenDaylight:networking_odl.l2gateway.driver.OpenDaylightL2gwDriver:default
resource_registry:
OS::TripleO::Services::NeutronL2gwApi: ../puppet/services/neutron-l2gw-api.yaml
+ OS::TripleO::Services::NeutronL2gwAgent: ../puppet/services/neutron-l2gw-agent.yaml
parameter_defaults:
NeutronServicePlugins: "networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin"
- L2gwServiceProvider: ["L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default"]
+ L2gwServiceProvider: ['L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.L2gwDriver:default']
# Optional
- # L2gwServiceDefaultInterfaceName:
- # L2gwServiceDefaultDeviceName:
- # L2gwServiceQuotaL2Gateway:
- # L2gwServicePeriodicMonitoringInterval:
+ # L2gwServiceDefaultInterfaceName: "FortyGigE1/0/1"
+ # L2gwServiceDefaultDeviceName: "Switch1"
+ # L2gwServiceQuotaL2Gateway: 10
+ # L2gwServicePeriodicMonitoringInterval: 5
+ # L2gwAgentOvsdbHosts: ["ovsdb1:127.0.0.1:6632"]
+ # L2gwAgentEnableManager: False
+ # L2gwAgentManagerTableListeningPort: "6633"
+ # L2gwAgentPeriodicInterval: 20
+ # L2gwAgentMaxConnectionRetries: 10
+ # L2gwAgentSocketTimeout: 30
--- /dev/null
+## A Heat environment that can be used to deploy the Neutron linuxbridge agent
+resource_registry:
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronLinuxbridgeAgent: ../puppet/services/neutron-linuxbridge-agent.yaml
+
+parameter_defaults:
+ NeutronMechanismDrivers: ['linuxbridge']
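+
+# NOTE: this replaces the default openvswitch mechanism driver and disables the
+# OVS agents on all roles via the OS::Heat::None mappings above.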
# a Cisco Neutron plugin.
resource_registry:
OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
parameter_defaults:
NetworkUCSMIp: '127.0.0.1'
--- /dev/null
+# Environment file used to enable the networking-vpp ML2 mechanism driver
+
+resource_registry:
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronVppAgent: ../puppet/services/neutron-vpp-agent.yaml
+ OS::TripleO::Services::Etcd: ../puppet/services/etcd.yaml
+ OS::TripleO::Services::Vpp: ../puppet/services/vpp.yaml
+
+parameter_defaults:
+  # Comma-delimited list of <physical_network>:<VPP Interface>.
+  # Example: "datacentre:GigabitEthernet2/2/0"
+ #NeutronVPPAgentPhysnets: ""
+
+ NeutronMechanismDrivers: vpp
+ NeutronNetworkType: vlan
+ NeutronServicePlugins: router
+ NeutronTypeDrivers: vlan,flat
+ ExtraConfig:
+    # Use the Linux Bridge interface driver for the DHCP and L3 agents.
+ neutron::agents::dhcp::interface_driver: "neutron.agent.linux.interface.BridgeInterfaceDriver"
+ neutron::agents::l3::interface_driver: "neutron.agent.linux.interface.BridgeInterfaceDriver"
--- /dev/null
+# A Heat environment that can be used to deploy NSX services,
+# configured via puppet
+resource_registry:
+  # NSX does not require the dhcp, l3, metadata, or ovs agents
+ OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+ OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+ OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+ OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+ # Override the Neutron core plugin to use NSX
+ OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginNSX
+ OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
+
+parameter_defaults:
+ NeutronCorePlugin: vmware_nsx.plugin.NsxV3Plugin
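+
+  # NOTE: OS::TripleO::Services::NeutronCorePluginNSX is an alias expected to
+  # be resolved by the base resource registry; the vmware_nsx package must be
+  # available for the NsxV3Plugin to load.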
#NeutronDpdkMemoryChannels: ""
NeutronDatapathType: "netdev"
- NeutronVhostuserSocketDir: "/var/run/openvswitch"
+ NeutronVhostuserSocketDir: "/var/lib/vhost_sockets"
#NeutronDpdkSocketMemory: ""
#NeutronDpdkDriverType: "vfio-pci"
GlanceBackend: rbd
GnocchiBackend: rbd
CinderEnableIscsiBackend: false
+  CephPoolDefaultSize: 1
+
# An environment which enables configuration of an
# Overcloud controller with Pacemaker.
resource_registry:
- OS::TripleO::Tasks::ControllerPrePuppet: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
- OS::TripleO::Tasks::ControllerPostPuppet: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPreConfig: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../extraconfig/tasks/post_puppet_pacemaker.yaml
OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
# custom pacemaker services
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::Etcd: ../../docker/services/etcd.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::UndercloudAodhApi: ../../docker/services/aodh-api.yaml
+ OS::TripleO::Services::UndercloudAodhEvaluator: ../../docker/services/aodh-evaluator.yaml
+ OS::TripleO::Services::UndercloudAodhNotifier: ../../docker/services/aodh-notifier.yaml
+ OS::TripleO::Services::UndercloudAodhListener: ../../docker/services/aodh-listener.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::UndercloudCeilometerAgentCentral: ../../docker/services/ceilometer-agent-central.yaml
+ OS::TripleO::Services::UndercloudCeilometerAgentNotification: ../../docker/services/ceilometer-agent-notification.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::UndercloudGnocchiApi: ../../docker/services/gnocchi-api.yaml
+ OS::TripleO::Services::UndercloudGnocchiMetricd: ../../docker/services/gnocchi-metricd.yaml
+ OS::TripleO::Services::UndercloudGnocchiStatsd: ../../docker/services/gnocchi-statsd.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::UndercloudPankoApi: ../../docker/services/panko-api.yaml
resource_registry:
OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
+ OS::TripleO::Services::MongoDb: ../../docker/services/database/mongodb.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::CeilometerCollector: ../../puppet/services/ceilometer-collector.yaml
+ OS::TripleO::Services::MongoDb: ../../puppet/services/database/mongodb.yaml
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::CeilometerExpirer: ../../puppet/services/ceilometer-expirer.yaml
resource_registry:
OS::TripleO::Services::IronicApi: ../../puppet/services/ironic-api.yaml
OS::TripleO::Services::IronicConductor: ../../puppet/services/ironic-conductor.yaml
+ OS::TripleO::Services::IronicPxe: ../../puppet/services/ironic-pxe.yaml
OS::TripleO::Services::NovaIronic: ../../puppet/services/nova-ironic.yaml
resource_registry:
OS::TripleO::Services::Zaqar: ../../puppet/services/zaqar.yaml
+ OS::TripleO::Services::MongoDb: ../../puppet/services/database/mongodb.yaml
-resource_registry:
- OS::TripleO::Services::Sshd: ../puppet/services/sshd.yaml
-
parameter_defaults:
BannerText: |
******************************************************************
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
+ MessageOfTheDay: |
+ ALERT! You are entering into a secured area!
+ This service is restricted to authorized users only.
IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
IronicAdmin: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
IronicInternal: {protocol: 'http', port: '6385', host: 'IP_ADDRESS'}
IronicPublic: {protocol: 'https', port: '13385', host: 'IP_ADDRESS'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'IP_ADDRESS'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'IP_ADDRESS'}
KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
KeystonePublic: {protocol: 'https', port: '13000', host: 'IP_ADDRESS'}
IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
+ IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+ IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
NeutronBridgeMappings: ctlplane:br-ctlplane
NeutronAgentExtensions: []
NeutronFlatNetworks: '*'
+ NeutronDnsDomain: ''
NovaSchedulerAvailableFilters: 'tripleo_common.filters.list.tripleo_filters'
NovaSchedulerDefaultFilters: ['RetryFilter', 'TripleOCapabilitiesFilter', 'ComputeCapabilitiesFilter', 'AvailabilityZoneFilter', 'RamFilter', 'DiskFilter', 'ComputeFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']
NeutronDhcpAgentsPerNetwork: 2
-heat_template_version: ocata
+heat_template_version: pike
description: >
Example extra config for cluster config
-heat_template_version: ocata
+heat_template_version: pike
description: >
Example extra config for cluster config
-heat_template_version: ocata
+heat_template_version: pike
description: Template file to add a swap partition to a node.
-heat_template_version: ocata
+heat_template_version: pike
description: Template file to add a swap file to a node.
-heat_template_version: ocata
+heat_template_version: pike
description: 'Generates the relevant service principals for a server'
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: 'Extra Post Deployment Config'
parameters:
servers:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Example extra config for post-deployment
-heat_template_version: ocata
+heat_template_version: pike
description: >
Example extra config for post-deployment, this re-runs every update
-heat_template_version: ocata
+heat_template_version: pike
description: >
Post-deployment for the TripleO undercloud
auth_url:
if:
- ssl_disabled
- - list_join:
- - ''
- - - 'http://'
- - {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
- - ':5000/v2.0'
- - list_join:
- - ''
- - - 'https://'
- - {get_param: [DeployedServerPortMap, 'public_virtual_ip', fixed_ips, 0, ip_address]}
- - ':13000/v2.0'
+ - make_url:
+ scheme: http
+ host: {get_param: [DeployedServerPortMap, 'control_virtual_ip', fixed_ips, 0, ip_address]}
+ port: 5000
+ path: /v2.0
+ - make_url:
+ scheme: https
+ host: {get_param: [DeployedServerPortMap, 'public_virtual_ip', fixed_ips, 0, ip_address]}
+ port: 13000
+ path: /v2.0
-heat_template_version: ocata
+heat_template_version: pike
description: >
RHEL Registration and unregistration software deployments.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Do some configuration, then reboot - sometimes needed for early-boot
-heat_template_version: ocata
+heat_template_version: pike
description: >
Do some configuration, then reboot - sometimes needed for early-boot
}
+# This code is meant to fix https://bugs.launchpad.net/tripleo/+bug/1686357 on
+# existing setups via a minor update workflow, and to be idempotent. We need to
+# run this before the yum update because the script exits early when there are
+# no packages to update, and the fix must be applied even in that case.
+# This code must be called with set +eu (due to the ocf scripts being sourced).
+function fixup_wrong_ipv6_vip {
+  # This XPath query identifies all the VIPs in pacemaker with a /64 netmask. Those are IPv6-only resources that have the wrong netmask.
+ # This gives the address of the resource in the CIB, one address per line. For example:
+ # /cib/configuration/resources/primitive[@id='ip-2001.db8.ca2.4..10']/instance_attributes[@id='ip-2001.db8.ca2.4..10-instance_attributes']\
+ # /nvpair[@id='ip-2001.db8.ca2.4..10-instance_attributes-cidr_netmask']
+ vip_xpath_query="//resources/primitive[@type='IPaddr2']/instance_attributes/nvpair[@name='cidr_netmask' and @value='64']"
+ vip_xpath_xml_addresses=$(cibadmin --query --xpath "$vip_xpath_query" -e 2>/dev/null)
+ # The following extracts the @id value of the resource
+ vip_resources_to_fix=$(echo -e "$vip_xpath_xml_addresses" | sed -n "s/.*primitive\[@id='\([^']*\)'.*/\1/p")
+  # Running this in a subshell so that sourcing files cannot possibly affect the running script
+ (
+ OCF_PATH="/usr/lib/ocf/lib/heartbeat"
+ if [ -n "$vip_resources_to_fix" -a -f $OCF_PATH/ocf-shellfuncs -a -f $OCF_PATH/findif.sh ]; then
+ source $OCF_PATH/ocf-shellfuncs
+ source $OCF_PATH/findif.sh
+ for resource in $vip_resources_to_fix; do
+ echo "Updating IPv6 VIP $resource with a /128 and a correct addrlabel"
+ # The following will give us something like:
+ # <nvpair id="ip-2001.db8.ca2.4..10-instance_attributes-ip" name="ip" value="2001:db8:ca2:4::10"/>
+ ip_cib_nvpair=$(cibadmin --query --xpath "//resources/primitive[@type='IPaddr2' and @id='$resource']/instance_attributes/nvpair[@name='ip']")
+ # Let's filter out the value of the nvpair to get the ip address
+ ip_address=$(echo $ip_cib_nvpair | xmllint --xpath 'string(//nvpair/@value)' -)
+ OCF_RESKEY_cidr_netmask="64"
+ OCF_RESKEY_ip="$ip_address"
+ # Unfortunately due to https://bugzilla.redhat.com/show_bug.cgi?id=1445628
+      # we need to find out the appropriate nic given the ip address.
+ nic=$(findif $ip_address | awk '{ print $1 }')
+ ret=$?
+ if [ -z "$nic" -o $ret -ne 0 ]; then
+ echo "NIC autodetection failed for VIP $ip_address, not updating VIPs"
+ # Only exits the subshell
+ exit 1
+ fi
+ ocf_run -info pcs resource update --wait "$resource" ip="$ip_address" cidr_netmask=128 nic="$nic" lvs_ipv6_addrlabel=true lvs_ipv6_addrlabel_value=99
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ echo "pcs resource update for VIP $resource failed, not updating VIPs"
+ # Only exits the subshell
+ exit 1
+ fi
+ done
+ fi
+ )
+}
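+# Typical invocation, as wired into the update flow later in this change:
+#   set +eu
+#   fixup_wrong_ipv6_vip
+#   ret=$?
+#   set -eu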
-heat_template_version: ocata
+heat_template_version: pike
description: 'Post-Puppet Config for Pacemaker deployments'
parameters:
resources:
- ControllerPostPuppetMaintenanceModeConfig:
+{%- for role in roles -%}
+{% if "controller" in role.tags %}
+ {{role.name}}PostPuppetMaintenanceModeConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
pcs property set maintenance-mode=false
fi
- ControllerPostPuppetMaintenanceModeDeployment:
+ {{role.name}}PostPuppetMaintenanceModeDeployment:
type: OS::Heat::SoftwareDeployments
properties:
- servers: {get_param: servers}
- config: {get_resource: ControllerPostPuppetMaintenanceModeConfig}
+ servers: {get_param: [servers, {{role.name}}]}
+ config: {get_resource: {{role.name}}PostPuppetMaintenanceModeConfig}
input_values: {get_param: input_values}
- ControllerPostPuppetRestart:
- type: OS::TripleO::Tasks::ControllerPostPuppetRestart
- depends_on: ControllerPostPuppetMaintenanceModeDeployment
+ {{role.name}}PostPuppetRestart:
+ type: OS::TripleO::Tasks::{{role.name}}PostPuppetRestart
+ depends_on: {{role.name}}PostPuppetMaintenanceModeDeployment
properties:
- servers: {get_param: servers}
+ servers: {get_param: [servers, {{role.name}}]}
input_values: {get_param: input_values}
+{%- endif -%}
+{% endfor %}
+
-heat_template_version: ocata
+heat_template_version: pike
description: 'Post-Puppet restart config for Pacemaker deployments'
parameters:
ControllerPostPuppetRestartDeployment:
type: OS::Heat::SoftwareDeployments
properties:
- servers: {get_param: servers}
+ servers: {get_param: servers}
config: {get_resource: ControllerPostPuppetRestartConfig}
input_values: {get_param: input_values}
-heat_template_version: ocata
+heat_template_version: pike
description: 'Pre-Puppet Config for Pacemaker deployments'
parameters:
ControllerPrePuppetMaintenanceModeDeployment:
type: OS::Heat::SoftwareDeployments
properties:
- servers: {get_param: servers}
+ servers: {get_param: servers}
config: {get_resource: ControllerPrePuppetMaintenanceModeConfig}
input_values: {get_param: input_values}
export FACTER_deploy_config_name="${role}Deployment_Step${step}"
if [ -e "/etc/puppet/hieradata/heat_config_${FACTER_deploy_config_name}.json" ]; then
set +e
- puppet apply --detailed-exitcodes "${manifest}"
+ puppet apply --detailed-exitcodes \
+ --modulepath \
+ /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules \
+ "${manifest}"
rc=$?
echo "puppet apply exited with exit code $rc"
else
-heat_template_version: ocata
+heat_template_version: pike
description: >
This is a template which will fetch the ssh host public key.
-heat_template_version: ocata
+heat_template_version: pike
description: 'SSH Known Hosts Config'
parameters:
if [[ -n \$NOVA_COMPUTE ]]; then
log_debug "Restarting openstack ceilometer agent compute"
systemctl restart openstack-ceilometer-compute
+ yum install -y openstack-nova-migration
fi
# Apply puppet manifest to converge just right after the ${ROLE} upgrade
fi
touch "$timestamp_file"
+pacemaker_status=""
+# We include word boundaries in order to not match pacemaker_remote
+if hiera -c /etc/puppet/hiera.yaml service_names | grep -q '\bpacemaker\b'; then
+ pacemaker_status=$(systemctl is-active pacemaker)
+fi
+
+# (NB: when backporting this, s/pacemaker_short_bootstrap_node_name/bootstrap_nodeid/)
+# This runs before the yum_update so we are guaranteed to run it even in the absence
+# of packages to update (the check for -z "$update_identifier" guarantees that this
+# is run only on overcloud stack update -i)
+if [[ "$pacemaker_status" == "active" && \
+ "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name)" == "$(facter hostname)" ]] ; then \
+ # OCF scripts don't cope with -eu
+ echo "Verifying if we need to fix up any IPv6 VIPs"
+ set +eu
+ fixup_wrong_ipv6_vip
+ ret=$?
+ set -eu
+ if [ $ret -ne 0 ]; then
+ echo "Fixing up IPv6 VIPs failed. Stopping here. (See https://bugs.launchpad.net/tripleo/+bug/1686357 for more info)"
+ exit 1
+ fi
+fi
+
command_arguments=${command_arguments:-}
# yum check-update exits 100 if updates are available
exit 0
fi
-pacemaker_status=""
-if hiera -c /etc/puppet/hiera.yaml service_names | grep -q pacemaker; then
- pacemaker_status=$(systemctl is-active pacemaker)
-fi
-
-# Fix the redis/rabbit resource start/stop timeouts. See https://bugs.launchpad.net/tripleo/+bug/1633455
-# and https://bugs.launchpad.net/tripleo/+bug/1634851
-if [[ "$pacemaker_status" == "active" && \
- "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]] ; then
- if pcs resource show rabbitmq | grep -E "start.*timeout=100"; then
- pcs resource update rabbitmq op start timeout=200s
- fi
- if pcs resource show rabbitmq | grep -E "stop.*timeout=90"; then
- pcs resource update rabbitmq op stop timeout=200s
- fi
- if pcs resource show redis | grep -E "start.*timeout=120"; then
- pcs resource update redis op start timeout=200s
- fi
- if pcs resource show redis | grep -E "stop.*timeout=120"; then
- pcs resource update redis op stop timeout=200s
- fi
-fi
-
# special case https://bugs.launchpad.net/tripleo/+bug/1635205 +bug/1669714
special_case_ovs_upgrade_if_needed
pcs status
fi
-echo "Finished yum_update.sh on server $deploy_server_id at `date`"
+
+echo "Finished yum_update.sh on server $deploy_server_id at `date` with return code: $return_code"
exit $return_code
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software-config for performing package updates using yum
-heat_template_version: ocata
+heat_template_version: pike
description: 'No-op yum update task'
resources:
-heat_template_version: ocata
+heat_template_version: pike
parameters:
ContrailRepo:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configure os-net-config mappings for specific nodes
-heat_template_version: ocata
+heat_template_version: pike
description: >
This is a default no-op template which provides empty user-data
-heat_template_version: ocata
+heat_template_version: pike
description: >
This is first boot configuration for development purposes. It allows
-heat_template_version: ocata
+heat_template_version: pike
# NOTE: You don't need to pass the parameter explicitly from the
# parent template, it can be specified via the parameter_defaults
-heat_template_version: ocata
+heat_template_version: pike
parameters:
# Can be overridden via parameter_defaults in the environment
-heat_template_version: ocata
+heat_template_version: pike
description: >
Uses cloud-init to enable root logins and set the root password.
-heat_template_version: ocata
+heat_template_version: pike
description: 'All Hosts Config'
parameters:
The content that should be appended to your /etc/hosts if you want to get
hostname-based access to the deployed nodes (useful for testing without
setting up a DNS).
- value: {get_attr: [hostsConfigImpl, config, hosts]}
+ value: {get_param: hosts}
OS::stack_id:
description: The ID of the hostsConfigImpl resource.
value: {get_resource: hostsConfigImpl}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to no-op for os-net-config. Using this will allow you
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config for a simple bridge configured with a static IP address for the ctlplane network.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the ceph storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the cinder storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role with IPv6
on the External network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge with VLANs attached for the swift storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the ceph storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the cinder storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the
compute role with external bridge for DVR.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the controller role with IPv6 on the External
network. The IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure multiple interfaces for the swift storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the ceph storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the cinder storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the swift storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the ceph storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the cinder storage role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the compute role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role. No external IP is configured.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role with IPv6 on the External network. The
IPv6 default route is on the External network, and the IPv4 default route is on the Control Plane.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the controller role.
parameters:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Software Config to drive os-net-config to configure VLANs for the swift storage role.
parameters:
def generate_endpoint_map_template(config):
return collections.OrderedDict([
- ('heat_template_version', 'ocata'),
+ ('heat_template_version', 'pike'),
('description', 'A map of OpenStack endpoints. Since the endpoints '
'are URLs, we need to have brackets around IPv6 IP addresses. The '
'inputs to these parameters come from net_ip_uri_map, which will '
'': /v1
port: 6385
+IronicInspector:
+ Internal:
+ net_param: IronicInspector
+ Public:
+ net_param: Public
+ Admin:
+ net_param: IronicInspector
+ port: 5050
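+# (each entry above is expanded by build_endpoint_map.py into Admin/
+# Internal/Public endpoints carrying host, host_nobrackets, port, protocol,
+# uri and uri_no_suffix keys, as seen in the generated map below)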
+
Zaqar:
Internal:
net_param: ZaqarApi
### This file is automatically generated from endpoint_data.yaml
### by the script build_endpoint_map.py
-heat_template_version: ocata
+heat_template_version: pike
description: A map of OpenStack endpoints. Since the endpoints are URLs,
we need to have brackets around IPv6 IP addresses. The inputs to these
parameters come from net_ip_uri_map, which will include these brackets
IronicAdmin: {protocol: http, port: '6385', host: IP_ADDRESS}
IronicInternal: {protocol: http, port: '6385', host: IP_ADDRESS}
IronicPublic: {protocol: http, port: '6385', host: IP_ADDRESS}
+ IronicInspectorAdmin: {protocol: http, port: '5050', host: IP_ADDRESS}
+ IronicInspectorInternal: {protocol: http, port: '5050', host: IP_ADDRESS}
+ IronicInspectorPublic: {protocol: http, port: '5050', host: IP_ADDRESS}
KeystoneAdmin: {protocol: http, port: '35357', host: IP_ADDRESS}
KeystoneInternal: {protocol: http, port: '5000', host: IP_ADDRESS}
KeystonePublic: {protocol: http, port: '5000', host: IP_ADDRESS}
template: NETWORK_uri
- ':'
- get_param: [EndpointMap, IronicPublic, port]
+ IronicInspectorAdmin:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, IronicInspectorNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ port:
+ get_param: [EndpointMap, IronicInspectorAdmin, port]
+ protocol:
+ get_param: [EndpointMap, IronicInspectorAdmin, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, IronicInspectorAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, IronicInspectorNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, IronicInspectorAdmin, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, IronicInspectorAdmin, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorAdmin, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, IronicInspectorNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, IronicInspectorAdmin, port]
+ IronicInspectorInternal:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, IronicInspectorNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ port:
+ get_param: [EndpointMap, IronicInspectorInternal, port]
+ protocol:
+ get_param: [EndpointMap, IronicInspectorInternal, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, IronicInspectorInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, IronicInspectorNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, IronicInspectorInternal, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, IronicInspectorInternal, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorInternal, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, IronicInspectorNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, IronicInspectorNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, IronicInspectorInternal, port]
+ IronicInspectorPublic:
+ host:
+ str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ host_nobrackets:
+ str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - get_param: [ServiceNetMap, PublicNetwork]
+ port:
+ get_param: [EndpointMap, IronicInspectorPublic, port]
+ protocol:
+ get_param: [EndpointMap, IronicInspectorPublic, protocol]
+ uri:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, IronicInspectorPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, IronicInspectorPublic, port]
+ uri_no_suffix:
+ list_join:
+ - ''
+ - - get_param: [EndpointMap, IronicInspectorPublic, protocol]
+ - ://
+ - str_replace:
+ template:
+ get_param: [EndpointMap, IronicInspectorPublic, host]
+ params:
+ CLOUDNAME:
+ get_param:
+ - CloudEndpoints
+ - get_param: [ServiceNetMap, PublicNetwork]
+ IP_ADDRESS:
+ get_param:
+ - NetIpMap
+ - str_replace:
+ params:
+ NETWORK:
+ get_param: [ServiceNetMap, PublicNetwork]
+ template: NETWORK_uri
+ - ':'
+ - get_param: [EndpointMap, IronicInspectorPublic, port]
KeystoneAdmin:
host:
str_replace:
-heat_template_version: ocata
+heat_template_version: pike
description: >
External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
-heat_template_version: ocata
+heat_template_version: pike
description: >
External network. Public traffic, Neutron l3router for floating IPs/SNAT, etc.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Internal API network. Used for most APIs, Database, RPC.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Internal API network. Used for most APIs, Database, RPC.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Management network. System administration, SSH, DNS, NTP, etc. This network
-heat_template_version: ocata
+heat_template_version: pike
description: >
Management network. System administration, SSH, DNS, NTP, etc. This network
--- /dev/null
+heat_template_version: pike
+
+description: Create networks to split out Overcloud traffic
+
+resources:
+
+ {%- for network in networks %}
+ {%- if network.name != 'InternalApi' %}
+ {{network.name}}Network:
+ {%- else %}
+ InternalNetwork:
+ {%- endif %}
+ type: OS::TripleO::Network::{{network.name}}
+ {%- endfor %}
+
+ NetworkExtraConfig:
+ type: OS::TripleO::Network::ExtraConfig
+++ /dev/null
-heat_template_version: ocata
-
-description: Create networks to split out Overcloud traffic
-
-resources:
-
- ExternalNetwork:
- type: OS::TripleO::Network::External
-
- InternalNetwork:
- type: OS::TripleO::Network::InternalApi
-
- StorageMgmtNetwork:
- type: OS::TripleO::Network::StorageMgmt
-
- StorageNetwork:
- type: OS::TripleO::Network::Storage
-
- TenantNetwork:
- type: OS::TripleO::Network::Tenant
-
- ManagementNetwork:
- type: OS::TripleO::Network::Management
-
- NetworkExtraConfig:
- type: OS::TripleO::Network::ExtraConfig
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port for a VIP on the undercloud ctlplane network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the external network. The IP address will be chosen
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the external network. The IP address will be chosen
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a service mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a service mapped list of IPv6 IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the internal_api network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the internal_api network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the management network. The IP address will be chosen
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the management network. The IP address will be chosen
-heat_template_version: ocata
+heat_template_version: pike
parameters:
ControlPlaneIpList:
-heat_template_version: ocata
+heat_template_version: pike
parameters:
ControlPlaneIp:
-heat_template_version: ocata
+heat_template_version: pike
parameters:
# Set these via parameter defaults to configure external VIPs
-heat_template_version: ocata
+heat_template_version: pike
parameters:
# Set these via parameter defaults to configure external VIPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns the control plane port (provisioning network) as the ip_address.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the storage network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the storage_mgmt API network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
  Returns an IP from a network mapped list of IPs. This version is for IPv6
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the storage_mgmt API network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the storage network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the tenant network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Returns an IP from a network mapped list of IPs
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port on the tenant network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port for a VIP on the isolated network NetworkName.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Creates a port for a VIP on the isolated network NetworkName.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Mapping of service_name_network -> network name
GlanceApiNetwork: storage
IronicApiNetwork: ctlplane
IronicNetwork: ctlplane
+ IronicInspectorNetwork: ctlplane
KeystoneAdminApiNetwork: ctlplane # allows undercloud to config endpoints
KeystonePublicApiNetwork: internal_api
ManilaApiNetwork: internal_api
HeatApiCfnNetwork: internal_api
HeatApiCloudwatchNetwork: internal_api
NovaApiNetwork: internal_api
+ NovaColdMigrationNetwork: ctlplane
NovaPlacementNetwork: internal_api
NovaMetadataNetwork: internal_api
NovaVncProxyNetwork: internal_api
-heat_template_version: ocata
+heat_template_version: pike
description: >
Storage network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Storage management network. Storage replication, etc.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Storage management network. Storage replication, etc.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Storage network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Tenant network.
-heat_template_version: ocata
+heat_template_version: pike
description: >
Tenant IPv6 network.
--- /dev/null
+# List of networks, used for j2 templating of enabled networks
+#
+# Supported values:
+#
+# name: Name of the network (mandatory)
+# name_lower: lowercase version of name used for filenames
+# (optional, defaults to name.lower())
+# vlan: vlan for the network (optional)
+# gateway: gateway for the network (optional)
+# enabled: Is the network enabled (optional, defaults to true)
+# vip: Enable creation of a virtual IP on this network
+# [TODO] (dsneddon@redhat.com) - Enable dynamic creation of VIP ports, to support
+# VIPs on non-default networks. See https://bugs.launchpad.net/tripleo/+bug/1667104
+#
+- name: External
+ vip: true
+- name: InternalApi
+ name_lower: internal_api
+ vip: true
+- name: Storage
+ vip: true
+- name: StorageMgmt
+ name_lower: storage_mgmt
+ vip: true
+- name: Tenant
+ vip: false # Tenant network does not use VIPs
+- name: Management
+ # Management network is disabled by default
+ enabled: false
+ vip: false # Management network does not use VIPs
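+#
+# Illustrative only (hypothetical network, not a shipped default): an extra
+# entry added here is picked up by the j2 loops in the templates, e.g.
+# - name: StorageNFS
+#   name_lower: storage_nfs
+#   vip: true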
OS::TripleO::Tasks::{{role.name}}PostConfig: OS::Heat::None
OS::TripleO::{{role.name}}ExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
# Port assignments for the {{role.name}} role
+ {%- if role.name != 'ObjectStorage' %}
+ {%- for network in networks %}
+ OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: network/ports/noop.yaml
+ {%- endfor %}
+ {%- else %}
# Note we have to special-case ObjectStorage for backwards compatibility
- {% if role.name != 'ObjectStorage' %}
- OS::TripleO::{{role.name}}::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::TenantPort: network/ports/noop.yaml
- OS::TripleO::{{role.name}}::Ports::ManagementPort: network/ports/noop.yaml
- {% else %}
- OS::TripleO::SwiftStorage::Ports::ExternalPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::TenantPort: network/ports/noop.yaml
- OS::TripleO::SwiftStorage::Ports::ManagementPort: network/ports/noop.yaml
- {% endif %}
+ {%- for network in networks %}
+ OS::TripleO::SwiftStorage::Ports::{{network.name}}Port: network/ports/noop.yaml
+ {%- endfor %}
+ {%- endif %}
OS::TripleO::{{role.name}}::Net::SoftwareConfig: net-config-noop.yaml
{% endfor %}
OS::TripleO::ServiceServerMetadataHook: OS::Heat::None
OS::TripleO::Server: OS::Nova::Server
+{% for role in roles %}
+ OS::TripleO::{{role.name}}Server: OS::TripleO::Server
+{% endfor %}
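+# These per-role aliases let an environment override the server resource for
+# a single role; the role templates below reference them (e.g.
+# OS::TripleO::ComputeServer, OS::TripleO::ControllerServer).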
# This creates the "heat-admin" user for all OS images by default
# To disable, replace with firstboot/userdata_default.yaml
OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
-{% for role in roles %}
- OS::TripleO::Tasks::{{role.name}}PrePuppet: OS::Heat::None
- OS::TripleO::Tasks::{{role.name}}PostPuppet: OS::Heat::None
-{% endfor %}
-
# "AllNodes" Extra cluster config, runs on all nodes prior to the post_deploy
# phase, e.g when puppet is applied, but after the pre_deploy phase. Useful when
# configuration with knowledge of all nodes in the cluster is required vs single
# TripleO overcloud networks
OS::TripleO::Network: network/networks.yaml
- OS::TripleO::Network::External: OS::Heat::None
- OS::TripleO::Network::InternalApi: OS::Heat::None
- OS::TripleO::Network::StorageMgmt: OS::Heat::None
- OS::TripleO::Network::Storage: OS::Heat::None
- OS::TripleO::Network::Tenant: OS::Heat::None
- OS::TripleO::Network::Management: OS::Heat::None
+ {%- for network in networks %}
+ OS::TripleO::Network::{{network.name}}: OS::Heat::None
+ {%- endfor %}
OS::TripleO::Network::ExtraConfig: OS::Heat::None
OS::TripleO::Network::Ports::NetIpListMap: network/ports/net_ip_list_map.yaml
# Port assignments for the VIPs
- OS::TripleO::Network::Ports::ExternalVipPort: network/ports/noop.yaml
- OS::TripleO::Network::Ports::InternalApiVipPort: network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageVipPort: network/ports/noop.yaml
- OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
+ {%- for network in networks if network.vip|default(false) %}
+ OS::TripleO::Network::Ports::{{network.name}}VipPort: network/ports/noop.yaml
+ {%- endfor %}
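+  # With the default network_data.yaml above, this renders VipPort mappings
+  # only for External, InternalApi, Storage and StorageMgmt, since Tenant
+  # and Management set vip: false.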
+
OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
OS::TripleO::Network::Ports::ControlPlaneVipPort: OS::Neutron::Port
OS::TripleO::Services::Congress: OS::Heat::None
OS::TripleO::Services::Keystone: puppet/services/keystone.yaml
OS::TripleO::Services::GlanceApi: puppet/services/glance-api.yaml
- OS::TripleO::Services::GlanceRegistry: puppet/services/disabled/glance-registry.yaml
+ OS::TripleO::Services::GlanceRegistry: puppet/services/disabled/glance-registry-disabled.yaml
OS::TripleO::Services::HeatApi: puppet/services/heat-api.yaml
OS::TripleO::Services::HeatApiCfn: puppet/services/heat-api-cfn.yaml
OS::TripleO::Services::HeatApiCloudwatch: puppet/services/heat-api-cloudwatch.yaml
OS::TripleO::Services::NeutronDhcpAgent: puppet/services/neutron-dhcp.yaml
OS::TripleO::Services::NeutronL2gwApi: OS::Heat::None
OS::TripleO::Services::NeutronL3Agent: puppet/services/neutron-l3.yaml
+ OS::TripleO::Services::NeutronL2gwAgent: OS::Heat::None
OS::TripleO::Services::NeutronMetadataAgent: puppet/services/neutron-metadata.yaml
# FIXME(shardy) the duplicate NeutronServer line can be removed when we've updated
# the multinode job ControllerServices after this patch merges
OS::TripleO::Services::NeutronCorePluginML2OVN: puppet/services/neutron-plugin-ml2-ovn.yaml
OS::TripleO::Services::NeutronCorePluginPlumgrid: puppet/services/neutron-plugin-plumgrid.yaml
OS::TripleO::Services::NeutronCorePluginNuage: puppet/services/neutron-plugin-nuage.yaml
+ OS::TripleO::Services::NeutronCorePluginNSX: puppet/services/neutron-plugin-nsx.yaml
OS::TripleO::Services::OVNDBs: OS::Heat::None
OS::TripleO::Services::NeutronCorePluginMidonet: puppet/services/neutron-midonet.yaml
OS::TripleO::Services::NeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
+ OS::TripleO::Services::NeutronLinuxbridgeAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronOvsAgent: puppet/services/neutron-ovs-agent.yaml
OS::TripleO::Services::Pacemaker: OS::Heat::None
OS::TripleO::Services::PacemakerRemote: OS::Heat::None
OS::TripleO::Services::Memcached: puppet/services/memcached.yaml
OS::TripleO::Services::SaharaApi: OS::Heat::None
OS::TripleO::Services::SaharaEngine: OS::Heat::None
- OS::TripleO::Services::Sshd: OS::Heat::None
OS::TripleO::Services::Securetty: OS::Heat::None
+ OS::TripleO::Services::Sshd: puppet/services/sshd.yaml
OS::TripleO::Services::Redis: puppet/services/database/redis.yaml
OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
- OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
+ OS::TripleO::Services::MongoDb: puppet/services/disabled/mongodb-disabled.yaml
OS::TripleO::Services::NovaApi: puppet/services/nova-api.yaml
OS::TripleO::Services::NovaPlacement: puppet/services/nova-placement.yaml
OS::TripleO::Services::NovaMetadata: puppet/services/nova-metadata.yaml
OS::TripleO::Services::Tacker: OS::Heat::None
OS::TripleO::Services::Timezone: puppet/services/time/timezone.yaml
OS::TripleO::Services::CeilometerApi: puppet/services/ceilometer-api.yaml
- OS::TripleO::Services::CeilometerCollector: puppet/services/ceilometer-collector.yaml
- OS::TripleO::Services::CeilometerExpirer: puppet/services/ceilometer-expirer.yaml
+ OS::TripleO::Services::CeilometerCollector: puppet/services/disabled/ceilometer-collector-disabled.yaml
+ OS::TripleO::Services::CeilometerExpirer: puppet/services/disabled/ceilometer-expirer-disabled.yaml
OS::TripleO::Services::CeilometerAgentCentral: puppet/services/ceilometer-agent-central.yaml
OS::TripleO::Services::CeilometerAgentNotification: puppet/services/ceilometer-agent-notification.yaml
OS::TripleO::Services::ComputeCeilometerAgent: puppet/services/ceilometer-agent-compute.yaml
OS::TripleO::Services::CeilometerAgentIpmi: puppet/services/ceilometer-agent-ipmi.yaml
OS::TripleO::Services::Horizon: puppet/services/horizon.yaml
+ # Undercloud Telemetry services
+ OS::TripleO::Services::UndercloudCeilometerAgentCentral: OS::Heat::None
+ OS::TripleO::Services::UndercloudCeilometerAgentNotification: OS::Heat::None
+
#Gnocchi services
OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
OS::TripleO::Services::GnocchiMetricd: puppet/services/gnocchi-metricd.yaml
OS::TripleO::Services::GnocchiStatsd: puppet/services/gnocchi-statsd.yaml
+ OS::TripleO::Services::UndercloudGnocchiApi: OS::Heat::None
+ OS::TripleO::Services::UndercloudGnocchiMetricd: OS::Heat::None
+ OS::TripleO::Services::UndercloudGnocchiStatsd: OS::Heat::None
# Services that are disabled by default (use relevant environment files):
OS::TripleO::Services::FluentdClient: OS::Heat::None
OS::TripleO::Services::Collectd: OS::Heat::None
OS::TripleO::Services::AodhEvaluator: puppet/services/aodh-evaluator.yaml
OS::TripleO::Services::AodhNotifier: puppet/services/aodh-notifier.yaml
OS::TripleO::Services::AodhListener: puppet/services/aodh-listener.yaml
+ OS::TripleO::Services::UndercloudAodhApi: OS::Heat::None
+ OS::TripleO::Services::UndercloudAodhEvaluator: OS::Heat::None
+ OS::TripleO::Services::UndercloudAodhNotifier: OS::Heat::None
+ OS::TripleO::Services::UndercloudAodhListener: OS::Heat::None
OS::TripleO::Services::PankoApi: puppet/services/panko-api.yaml
+ OS::TripleO::Services::UndercloudPankoApi: OS::Heat::None
OS::TripleO::Services::MistralEngine: OS::Heat::None
OS::TripleO::Services::MistralApi: OS::Heat::None
OS::TripleO::Services::MistralExecutor: OS::Heat::None
OS::TripleO::Services::OctaviaWorker: OS::Heat::None
OS::TripleO::Services::MySQLClient: puppet/services/database/mysql-client.yaml
OS::TripleO::Services::Vpp: OS::Heat::None
+ OS::TripleO::Services::NeutronVppAgent: OS::Heat::None
OS::TripleO::Services::Docker: OS::Heat::None
OS::TripleO::Services::CertmongerUser: OS::Heat::None
{%- endfor -%}
{%- set primary_role_name = primary_role[0].name -%}
# primary role is: {{primary_role_name}}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Deploy an OpenStack environment, consisting of several node types (roles),
type: string
ControlFixedIPs:
default: []
- description: Should be used for arbitrary ips.
+ description: >
+ Control the IP allocation for the ControlVirtualIP port. E.g.
+ [{'ip_address':'1.2.3.4'}]
type: json
InternalApiVirtualFixedIPs:
default: []
type: json
description: Optional scheduler hints to pass to nova
default: {}
+
+ {{role.name}}Parameters:
+ type: json
+    description: Optional role-specific parameters to be provided to the service
+ default: {}
{% endfor %}
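+  # Illustrative: per-role values are supplied via an environment file, e.g.
+  #   parameter_defaults:
+  #     ComputeParameters: {...}
+  # where "Compute" is any role name rendered by the loop above.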
# Identifiers to trigger tasks on nodes
description: >
Set to true to append per network Vips to /etc/hosts on each node.
+ DeploymentServerBlacklist:
+ default: []
+ type: comma_delimited_list
+ description: >
+ List of server hostnames to blacklist from any triggered deployments.
+
conditions:
add_vips_to_etc_hosts: {equals : [{get_param: AddVipsToEtcHosts}, True]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map]}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
DefaultPasswords: {get_attr: [DefaultPasswords, passwords]}
+ RoleName: {{role.name}}
+ RoleParameters: {get_param: {{role.name}}Parameters}
+
+ # Lookup of role_data via heat outputs is slow, so workaround this by caching
+ # the value in an OS::Heat::Value resource
+ {{role.name}}ServiceChainRoleData:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value: {get_attr: [{{role.name}}ServiceChain, role_data]}
# Filter any null/None service_names which may be present due to mapping
# of services to OS::Heat::None
value:
yaql:
expression: coalesce($.data, []).where($ != null)
- data: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+ data: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_names]}
{{role.name}}HostsDeployment:
type: OS::Heat::StructuredDeployments
properties:
name: {{role.name}}HostsDeployment
config: {get_attr: [hostsConfig, config_id]}
- servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}SshKnownHostsDeployment:
type: OS::Heat::StructuredDeployments
properties:
name: {{role.name}}SshKnownHostsDeployment
config: {get_resource: SshKnownHostsConfig}
- servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}AllNodesDeployment:
type: OS::Heat::StructuredDeployments
properties:
name: {{role.name}}AllNodesDeployment
config: {get_attr: [allNodesConfig, config_id]}
- servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ servers: {get_attr: [{{role.name}}Servers, value]}
input_values:
# Note we have to use yaql to look up the first hostname/ip in the
# list because heat path based attributes operate on the attribute
properties:
name: {{role.name}}AllNodesValidationDeployment
config: {get_resource: AllNodesValidationConfig}
- servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ servers: {get_attr: [{{role.name}}Servers, value]}
{{role.name}}IpListMap:
type: OS::TripleO::Network::Ports::NetIpListMap
EnabledServices: {get_attr: [{{role.name}}ServiceNames, value]}
ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
- NetworkHostnameMap:
+ NetworkHostnameMap: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
+
+ {{role.name}}NetworkHostnameMap:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
# Note (shardy) this somewhat complex yaql may be replaced
# with a map_deep_merge function in ocata. It merges the
# list of maps, but appends to colliding lists so we can
{% endif %}
ServiceConfigSettings:
map_merge:
- - get_attr: [{{role.name}}ServiceChain, role_data, config_settings]
+ - get_attr: [{{role.name}}ServiceChainRoleData, value, config_settings]
{% for r in roles %}
- get_attr: [{{r.name}}ServiceChain, role_data, global_config_settings]
{% endfor %}
{% endfor %}
services: {get_attr: [{{role.name}}ServiceNames, value]}
ServiceNames: {get_attr: [{{role.name}}ServiceNames, value]}
- MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]}
- ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChain, role_data, service_metadata_settings]}
+ MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
+ ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
+ DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]}
{% endfor %}
+{% for role in roles %}
+ {{role.name}}Servers:
+ type: OS::Heat::Value
+ depends_on: {{role.name}}
+ properties:
+ type: json
+ value:
+ yaql:
+ expression: let(servers=>switch(isDict($.data.servers) => $.data.servers, true => {})) -> $servers.deleteAll($servers.keys().where($servers[$] = null))
+ data:
+ servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
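+      # The yaql above filters out entries whose value is null, so e.g.
+      # {'0': 'uuid-a', '1': null} becomes {'0': 'uuid-a'} (values are
+      # illustrative) and downstream deployments only receive valid servers.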
+{% endfor %}
+
+ # This resource just creates a dict out of the DeploymentServerBlacklist,
+ # which is a list. The dict is used in the role templates to set a condition
+ # on whether to create the deployment resources. We can't use the list
+ # directly because there is no way to ask Heat if a list contains a specific
+ # value.
+ DeploymentServerBlacklistDict:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_merge:
+ repeat:
+ template:
+ hostname: 1
+ for_each:
+ hostname: {get_param: DeploymentServerBlacklist}
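+  # Illustrative (assumed input): with
+  #   DeploymentServerBlacklist: [overcloud-compute-0]
+  # the repeat above yields {"overcloud-compute-0": 1}, which the role
+  # templates compare against their Hostname parameter.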
+
hostsConfig:
type: OS::TripleO::Hosts::SoftwareConfig
properties:
data:
groups:
{% for role in roles %}
- - {get_attr: [{{role.name}}ServiceChain, role_data, logging_groups]}
+ - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_groups]}
{% endfor %}
logging_sources:
yaql:
data:
sources:
{% for role in roles %}
- - {get_attr: [{{role.name}}ServiceChain, role_data, logging_sources]}
+ - {get_attr: [{{role.name}}ServiceChainRoleData, value, logging_sources]}
{% endfor %}
controller_ips: {get_attr: [{{primary_role_name}}, ip_address]}
controller_names: {get_attr: [{{primary_role_name}}, hostname]}
properties:
servers:
{% for role in roles %}
- {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ {{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
input_values:
deploy_identifier: {get_param: DeployIdentifier}
properties:
servers:
{% for role in roles %}
- {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ {{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
# Post deployment steps for all roles
AllNodesDeploySteps:
type: OS::TripleO::PostDeploySteps
depends_on:
+ - AllNodesExtraConfig
{% for role in roles %}
- {{role.name}}AllNodesDeployment
{% endfor %}
properties:
servers:
{% for role in roles %}
- {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ {{role.name}}: {get_attr: [{{role.name}}Servers, value]}
{% endfor %}
EndpointMap: {get_attr: [EndpointMap, endpoint_map]}
role_data:
{% for role in roles %}
- {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+ {{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{% endfor %}
outputs:
description: The configuration data associated with each role
value:
{% for role in roles %}
- {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+ {{role.name}}: {get_attr: [{{role.name}}ServiceChainRoleData, value]}
{% endfor %}
RoleNetIpMap:
description: Mapping of each network to a list of IPs for each role
value:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]}
+{% endfor %}
+ RoleNetHostnameMap:
+ description: Mapping of each network to a list of hostnames for each role
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}NetworkHostnameMap, value]}
{% endfor %}
-heat_template_version: ocata
+heat_template_version: pike
description: 'All Nodes Config for Puppet'
parameters:
StackAction:
type: string
description: >
- Heat action on performed top-level stack.
+      Heat action performed on the top-level stack. Note that StackUpdateType
+      is set to UPGRADE when a major-version upgrade is in progress.
constraints:
- allowed_values: ['CREATE', 'UPDATE']
+ StackUpdateType:
+ type: string
+ description: >
+ Type of update, to differentiate between UPGRADE and UPDATE cases
+ when StackAction is UPDATE (both are the same stack action).
+ constraints:
+ - allowed_values: ['', 'UPGRADE']
+ default: ''
# NOTE(jaosorior): This is being set as IPA as it's the first
# CA we'll actually be testing out. But we can change this if
# people request it.
deploy_identifier: {get_param: DeployIdentifier}
update_identifier: {get_param: UpdateIdentifier}
stack_action: {get_param: StackAction}
+ stack_update_type: {get_param: StackUpdateType}
vip_data:
map_merge:
# Dynamically generate per-service VIP data based on enabled_services
-heat_template_version: ocata
+heat_template_version: pike
description: 'OpenStack cinder storage configured by Puppet'
parameters:
BlockStorageImage:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+    Maximum amount of time by which to delay configuration collection
+    polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
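+  # Illustrative usage from an environment file:
+  #   parameter_defaults:
+  #     ConfigCollectSplay: 0  # disable the random polling delay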
UpgradeInitCommand:
type: string
description: |
major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
environment files.
default: ''
+ DeploymentServerBlacklistDict:
+ default: {}
+ type: json
+ description: >
+ Map of server hostnames to blacklist from any triggered
+ deployments. If the value is 1, the server will be blacklisted. This
+ parameter is generated from the parent template.
+
+conditions:
+ server_not_blacklisted:
+ not:
+ equals:
+ - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+ - 1
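+  # Illustrative (hostname is hypothetical): if DeploymentServerBlacklistDict
+  # contains {"overcloud-blockstorage-0": 1} and Hostname is
+  # overcloud-blockstorage-0, the lookup returns 1, the condition is false,
+  # and the guarded deployments below are skipped for that node.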
resources:
BlockStorage:
- type: OS::TripleO::Server
+ type: OS::TripleO::BlockStorageServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image:
{get_param: BlockStorageImage}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
+ condition: server_not_blacklisted
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
BlockStorageUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
name: BlockStorageUpgradeInitDeployment
server: {get_resource: BlockStorage}
BlockStorageDeployment:
type: OS::Heat::StructuredDeployment
depends_on: BlockStorageUpgradeInitDeployment
+ condition: server_not_blacklisted
properties:
name: BlockStorageDeployment
server: {get_resource: BlockStorage}
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
name: UpdateDeployment
config: {get_resource: UpdateConfig}
description: Heat resource handle for the block storage server
value:
{get_resource: BlockStorage}
+ condition: server_not_blacklisted
external_ip_address:
description: IP address of the server in the external network
value: {get_attr: [ExternalPort, ip_address]}
-heat_template_version: ocata
+heat_template_version: pike
description: 'OpenStack ceph storage node configured by Puppet'
parameters:
OvercloudCephStorageFlavor:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+    Maximum amount of time by which to delay configuration collection
+    polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
UpgradeInitCommand:
type: string
description: |
major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
environment files.
default: ''
+ DeploymentServerBlacklistDict:
+ default: {}
+ type: json
+ description: >
+ Map of server hostnames to blacklist from any triggered
+ deployments. If the value is 1, the server will be blacklisted. This
+ parameter is generated from the parent template.
+
+conditions:
+ server_not_blacklisted:
+ not:
+ equals:
+ - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+ - 1
resources:
CephStorage:
- type: OS::TripleO::Server
+ type: OS::TripleO::CephStorageServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: CephStorageImage}
image_update_policy: {get_param: ImageUpdatePolicy}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
+ condition: server_not_blacklisted
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
CephStorageUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
name: CephStorageUpgradeInitDeployment
server: {get_resource: CephStorage}
CephStorageDeployment:
type: OS::Heat::StructuredDeployment
depends_on: CephStorageUpgradeInitDeployment
+ condition: server_not_blacklisted
properties:
name: CephStorageDeployment
config: {get_resource: CephStorageConfig}
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
config: {get_resource: UpdateConfig}
server: {get_resource: CephStorage}
description: Heat resource handle for the ceph storage server
value:
{get_resource: CephStorage}
+ condition: server_not_blacklisted
external_ip_address:
description: IP address of the server in the external network
value: {get_attr: [ExternalPort, ip_address]}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack hypervisor node configured via Puppet.
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+    Maximum amount of time by which to delay configuration collection
+    polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
UpgradeInitCommand:
type: string
description: |
major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
environment files.
default: ''
+ DeploymentServerBlacklistDict:
+ default: {}
+ type: json
+ description: >
+ Map of server hostnames to blacklist from any triggered
+ deployments. If the value is 1, the server will be blacklisted. This
+ parameter is generated from the parent template.
+
+conditions:
+ server_not_blacklisted:
+ not:
+ equals:
+ - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+ - 1
resources:
NovaCompute:
- type: OS::TripleO::Server
+ type: OS::TripleO::ComputeServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: NovaImage}
image_update_policy:
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
+ condition: server_not_blacklisted
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
NovaComputeUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
name: NovaComputeUpgradeInitDeployment
server: {get_resource: NovaCompute}
NovaComputeDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: NovaComputeUpgradeInitDeployment
+ condition: server_not_blacklisted
properties:
name: NovaComputeDeployment
config: {get_resource: NovaComputeConfig}
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
name: UpdateDeployment
config: {get_resource: UpdateConfig}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
- {get_resource: NovaCompute}
\ No newline at end of file
+ {get_resource: NovaCompute}
+ condition: server_not_blacklisted
-heat_template_version: ocata
+heat_template_version: pike
description: >
A software config which runs puppet on the {{role}} role
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack controller node configured by Puppet.
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+    Maximum amount of time by which to delay configuration collection
+    polling. Defaults to 30 seconds. Set to 0 to disable it, which will cause
+ the configuration collection to occur as soon as the collection process
+ starts. This setting is used to prevent the configuration collection
+ processes from polling all at the exact same time.
UpgradeInitCommand:
type: string
description: |
major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
environment files.
default: ''
+ DeploymentServerBlacklistDict:
+ default: {}
+ type: json
+ description: >
+ Map of server hostnames to blacklist from any triggered
+ deployments. If the value is 1, the server will be blacklisted. This
+ parameter is generated from the parent template.
parameter_groups:
- label: deprecated
parameters:
- controllerExtraConfig
+conditions:
+ server_not_blacklisted:
+ not:
+ equals:
+ - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+ - 1
+
resources:
Controller:
- type: OS::TripleO::Server
+ type: OS::TripleO::ControllerServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: controllerImage}
image_update_policy: {get_param: ImageUpdatePolicy}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
+ condition: server_not_blacklisted
depends_on: PreNetworkConfig
properties:
name: NetworkDeployment
# but https://bugs.launchpad.net/heat/+bug/1649900 needs fixing first
ControllerUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
+ condition: server_not_blacklisted
depends_on: NetworkDeployment
properties:
name: ControllerUpgradeInitDeployment
ControllerDeployment:
type: OS::TripleO::SoftwareDeployment
+ condition: server_not_blacklisted
depends_on: ControllerUpgradeInitDeployment
properties:
name: ControllerDeployment
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
+ condition: server_not_blacklisted
depends_on: NetworkDeployment
properties:
name: UpdateDeployment
description: Heat resource handle for the Nova compute server
value:
{get_resource: Controller}
+ condition: server_not_blacklisted
tls_key_modulus_md5:
description: MD5 checksum of the TLS Key Modulus
value: {get_attr: [NodeTLSData, key_modulus_md5]}
-heat_template_version: ocata
+heat_template_version: pike
description: >
  Software Config to install deployment artifacts (tarballs and/or
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for all MidoNet nodes
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Network Cisco configuration
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Big Switch agents on compute node
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Nuage configuration on the Compute
-heat_template_version: ocata
+heat_template_version: pike
description: 'Extra Pre-Deployment Config, multiple'
parameters:
server:
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Neutron Big Switch configuration
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Cisco N1KV configuration
-heat_template_version: ocata
+heat_template_version: pike
description: 'Noop Extra Pre-Deployment Config'
parameters:
server:
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata overrides for specific nodes
-heat_template_version: ocata
+heat_template_version: pike
description: >
This is a template which will inject the trusted anchor.
-heat_template_version: ocata
+heat_template_version: pike
description: Enroll nodes to FreeIPA
-heat_template_version: ocata
+heat_template_version: pike
description: >
This is a template which will build the TLS Certificates necessary
{% set batch_upgrade_steps_max = 3 -%}
{% set upgrade_steps_max = 6 -%}
{% set deliver_script = {'deliver': False} -%}
-heat_template_version: ocata
+heat_template_version: pike
description: 'Upgrade steps for all roles'
parameters:
type: string
hidden: true
-
resources:
{% for role in roles if role.disable_upgrade_deployment|default(false) %}
- ''
- - "#!/bin/bash\n\n"
- "set -eu\n\n"
- - "if hiera -c /etc/puppet/hiera.yaml service_names | grep nova_compute ; then\n\n"
- - " crudini --set /etc/nova/nova.conf placement auth_type password\n\n"
- - " crudini --set /etc/nova/nova.conf placement username placement\n\n"
- - " crudini --set /etc/nova/nova.conf placement project_domain_name Default\n\n"
- - " crudini --set /etc/nova/nova.conf placement user_domain_name Default\n\n"
- - " crudini --set /etc/nova/nova.conf placement project_name service\n\n"
- - str_replace:
- template: |
- crudini --set /etc/nova/nova.conf placement password 'SERVICE_PASSWORD'
- crudini --set /etc/nova/nova.conf placement region_name 'REGION_NAME'
- crudini --set /etc/nova/nova.conf placement auth_url 'AUTH_URL'
- params:
- SERVICE_PASSWORD: { get_param: NovaPassword }
- REGION_NAME: { get_param: KeystoneRegion }
- AUTH_URL: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
- - " systemctl restart openstack-nova-compute\n\n"
- - "fi\n\n"
- str_replace:
template: |
ROLE='ROLE_NAME'
{%- for role in roles %}
{{role.name}}UpgradeBatchConfig_Step{{step}}:
type: OS::TripleO::UpgradeConfig
- {%- if step > 0 %}
- {%- if role in enabled_roles %}
+ {%- if step > 0 %}
depends_on:
- - {{role.name}}UpgradeBatch_Step{{step -1}}
- {%- endif %}
- {%- else %}
+ {%- for role_inside in enabled_roles %}
+ - {{role_inside.name}}UpgradeBatch_Step{{step -1}}
+ {%- endfor %}
+ {% else %}
{% for role in roles if role.disable_upgrade_deployment|default(false) %}
{% if deliver_script.update({'deliver': True}) %} {% endif %}
{% endfor %}
{% if deliver_script.deliver %}
depends_on:
- {% endif %}
{% for dep in roles if dep.disable_upgrade_deployment|default(false) %}
- {{dep.name}}DeliverUpgradeScriptDeployment
{% endfor %}
- {% endif %}
+ {% endif %}
+ {% endif %}
properties:
UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
step: {{step}}
{%- for role_inside in enabled_roles %}
- {{role_inside.name}}UpgradeBatch_Step{{step -1}}
{%- endfor %}
- {%- endif %}
+ {% else %}
+ {% for role in roles if role.disable_upgrade_deployment|default(false) %}
+ {% if deliver_script.update({'deliver': True}) %} {% endif %}
+ {% endfor %}
+ {% if deliver_script.deliver %}
+ depends_on:
+ {% for dep in roles if dep.disable_upgrade_deployment|default(false) %}
+ - {{dep.name}}DeliverUpgradeScriptDeployment
+ {% endfor %}
+ {% endif %}
+ {% endif %}
update_policy:
batch_create:
max_batch_size: {{role.upgrade_batch_size|default(1)}}
rolling_update:
max_batch_size: {{role.upgrade_batch_size|default(1)}}
properties:
- name: {{role.name}}UpgradeBatch_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}UpgradeBatchConfig_Step{{step}}}
input_values:
{%- for role in roles %}
{{role.name}}UpgradeConfig_Step{{step}}:
type: OS::TripleO::UpgradeConfig
- # The UpgradeConfig resources could actually be created without
- # serialization, but the event output is easier to follow if we
- # do, and there should be minimal performance hit (creating the
- # config is cheap compared to the time to apply the deployment).
- {%- if step > 0 %}
- {%- if role in enabled_roles %}
+ # The UpgradeConfig resources could actually be created without
+ # serialization, but the event output is easier to follow if we
+ # do, and there should be minimal performance hit (creating the
+ # config is cheap compared to the time to apply the deployment).
depends_on:
- - {{role.name}}Upgrade_Step{{step -1}}
- {%- endif %}
- {%- endif %}
+ {%- for role_inside in enabled_roles %}
+ {%- if step > 0 %}
+ - {{role_inside.name}}Upgrade_Step{{step -1}}
+ {%- else %}
+ - {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+ {%- endif %}
+ {%- endfor %}
properties:
UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
step: {{step}}
{%- for role in enabled_roles %}
{{role.name}}Upgrade_Step{{step}}:
type: OS::Heat::SoftwareDeploymentGroup
- {%- if step > 0 %}
- # Make sure we wait that all roles have finished their own
- # previous step before going to the next, so we can guarantee
- # state for each steps.
depends_on:
{%- for role_inside in enabled_roles %}
+ {%- if step > 0 %}
- {{role_inside.name}}Upgrade_Step{{step -1}}
- {%- endfor %}
- {%- else %}
- depends_on:
- {%- for role_inside in enabled_roles %}
+ {%- else %}
- {{role_inside.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+ {%- endif %}
{%- endfor %}
- {%- endif %}
properties:
- name: {{role.name}}Upgrade_Step{{step}}
servers: {get_param: [servers, {{role.name}}]}
config: {get_resource: {{role.name}}UpgradeConfig_Step{{step}}}
input_values:
-heat_template_version: ocata
+heat_template_version: pike
description: 'OpenStack swift storage node configured by Puppet'
parameters:
OvercloudSwiftStorageFlavor:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+ Maximum amount of time by which to possibly delay configuration
+ collection polling. Defaults to 30 seconds. Set to 0 to disable it,
+ which will cause the configuration collection to occur as soon as the
+ collection process starts. This setting is used to prevent the
+ configuration collection processes from polling all at the exact same time.
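+ # e.g. with the default of 30, each node sleeps a random interval between
+ # 0 and 30 seconds before its first collection poll.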
UpgradeInitCommand:
type: string
description: |
major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
environment files.
default: ''
+ DeploymentServerBlacklistDict:
+ default: {}
+ type: json
+ description: >
+ Map of server hostnames to blacklist from any triggered
+ deployments. If the value is 1, the server will be blacklisted. This
+ parameter is generated from the parent template.
+
+conditions:
+ server_not_blacklisted:
+ not:
+ equals:
+ - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+ - 1
resources:
SwiftStorage:
- type: OS::Nova::Server
+ type: OS::TripleO::ObjectStorageServer
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: SwiftStorageImage}
flavor: {get_param: OvercloudSwiftStorageFlavor}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
+ condition: server_not_blacklisted
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
SwiftStorageUpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
name: SwiftStorageUpgradeInitDeployment
server: {get_resource: SwiftStorage}
SwiftStorageHieraDeploy:
type: OS::Heat::StructuredDeployment
depends_on: SwiftStorageUpgradeInitDeployment
+ condition: server_not_blacklisted
properties:
name: SwiftStorageHieraDeploy
server: {get_resource: SwiftStorage}
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
config: {get_resource: UpdateConfig}
server: {get_resource: SwiftStorage}
description: Heat resource handle for the swift storage server
value:
{get_resource: SwiftStorage}
+ condition: server_not_blacklisted
external_ip_address:
description: IP address of the server in the external network
value: {get_attr: [ExternalPort, ip_address]}
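+# Usage sketch (assuming the parent template derives DeploymentServerBlacklistDict
+# from an operator-facing DeploymentServerBlacklist list of hostnames):
+#
+# parameter_defaults:
+# DeploymentServerBlacklist:
+# - overcloud-objectstorage-0 # hypothetical hostname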
-heat_template_version: ocata
+heat_template_version: pike
description: >
Post-upgrade configuration steps via puppet for all roles
-heat_template_version: ocata
+heat_template_version: pike
description: >
Post-deploy configuration steps via puppet for all roles,
properties:
StepConfig: {list_join: ["\n", {get_param: [role_data, {{role.name}}, step_config]}]}
- {{role.name}}PrePuppet:
- type: OS::TripleO::Tasks::{{role.name}}PrePuppet
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
-
# Step through a series of configuration steps
{% for step in range(1, 6) %}
{{role.name}}Deployment_Step{{step}}:
type: OS::Heat::StructuredDeploymentGroup
{% if step == 1 %}
- depends_on: [{{role.name}}PrePuppet, {{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+ depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
{% else %}
depends_on:
{% for dep in roles %}
- {{dep.name}}Deployment_Step5
{% endfor %}
properties:
- servers: {get_param: servers}
+ servers: {get_param: servers}
input_values:
update_identifier: {get_param: DeployIdentifier}
properties:
servers: {get_param: [servers, {{role.name}}]}
- {{role.name}}PostPuppet:
- depends_on:
- - {{role.name}}ExtraConfigPost
- type: OS::TripleO::Tasks::{{role.name}}PostPuppet
- properties:
- servers: {get_param: [servers, {{role.name}}]}
- input_values:
- update_identifier: {get_param: DeployIdentifier}
{% endfor %}
-heat_template_version: ocata
+heat_template_version: pike
description: 'OpenStack {{role}} node configured by Puppet'
parameters:
Overcloud{{role}}Flavor:
type: string
description: Command which will be run whenever configuration data changes
default: os-refresh-config --timeout 14400
+ ConfigCollectSplay:
+ type: number
+ default: 30
+ description: |
+ Maximum amount of time by which to possibly delay configuration
+ collection polling. Defaults to 30 seconds. Set to 0 to disable it,
+ which will cause the configuration collection to occur as soon as the
+ collection process starts. This setting is used to prevent the
+ configuration collection processes from polling all at the exact same time.
LoggingSources:
type: json
default: []
major-upgrade-composable-steps.yaml and major-upgrade-converge.yaml
environment files.
default: ''
+ DeploymentServerBlacklistDict:
+ default: {}
+ type: json
+ description: >
+ Map of server hostnames to blacklist from any triggered
+ deployments. If the value is 1, the server will be blacklisted. This
+ parameter is generated from the parent template.
+
+conditions:
+ server_not_blacklisted:
+ not:
+ equals:
+ - {get_param: [DeploymentServerBlacklistDict, {get_param: Hostname}]}
+ - 1
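+ # i.e. deployments target a server unless its hostname maps to 1 above.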
resources:
{{role}}:
- type: OS::TripleO::Server
+ type: OS::TripleO::{{role.name}}Server
metadata:
os-collect-config:
command: {get_param: ConfigCommand}
+ splay: {get_param: ConfigCollectSplay}
properties:
image: {get_param: {{role}}Image}
image_update_policy: {get_param: ImageUpdatePolicy}
NetworkDeployment:
type: OS::TripleO::SoftwareDeployment
depends_on: PreNetworkConfig
+ condition: server_not_blacklisted
properties:
name: NetworkDeployment
config: {get_resource: NetworkConfig}
{{role}}UpgradeInitDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
name: {{role}}UpgradeInitDeployment
server: {get_resource: {{role}}}
{{role}}Deployment:
type: OS::Heat::StructuredDeployment
depends_on: {{role}}UpgradeInitDeployment
+ condition: server_not_blacklisted
properties:
name: {{role}}Deployment
config: {get_resource: {{role}}Config}
UpdateDeployment:
type: OS::Heat::SoftwareDeployment
depends_on: NetworkDeployment
+ condition: server_not_blacklisted
properties:
name: UpdateDeployment
config: {get_resource: UpdateConfig}
description: Heat resource handle for {{role}} server
value:
{get_resource: {{role}}}
+ condition: server_not_blacklisted
external_ip_address:
description: IP address of the server in the external network
value: {get_attr: [ExternalPort, ip_address]}
Operators will use the parameter_defaults section of any Heat
environment to set per service parameters.
+Apart from service-specific inputs, there are a few default parameters for all
+the services. The default parameters are listed below:
+
+ * ServiceNetMap: Mapping of service_name -> network name. Default mappings
+ for service to network names are defined in
+ ../network/service_net_map.j2.yaml, which may be overridden via
+ ServiceNetMap values added to a user environment file via
+ parameter_defaults.
+
+ * EndpointMap: Mapping of service endpoint -> protocol. Contains a mapping of
+ endpoint data generated for all services, based on the data included in
+ ../network/endpoints/endpoint_data.yaml.
+
+ * DefaultPasswords: Mapping of service -> default password. Used to pass some
+ passwords from the parent templates, this is a legacy interface and should
+ not be used by new services.
+
+ * RoleName: Name of the role on which this service is deployed. A service can
+ be deployed in multiple roles. This is an internal parameter (should not be
+ set via an environment file), which is fetched from the name attribute of the
+ roles_data.yaml template.
+
+ * RoleParameters: Parameters specific to the role on which the service is
+ applied. Using the format "<RoleName>Parameters" in the parameter_defaults
+ of a user environment file, parameters can be provided for a specific role.
+ For example, to provide a parameter specific to the "Compute" role, the
+ format is as below::
+
+ parameter_defaults:
+ ComputeParameters:
+ Param1: value
+
+
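+Similarly, a ServiceNetMap override follows the same parameter_defaults
+pattern (a sketch; the network name must exist in the deployment)::
+
+ parameter_defaults:
+ ServiceNetMap:
+ AodhApiNetwork: internal_api
+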
Config Settings
---------------
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ApacheServiceBase:
type: ./apache.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Stop aodh_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ yaql:
+ expression: $.data.apache_upgrade + $.data.aodh_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ aodh_api_upgrade:
+ - name: Stop aodh_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
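+ # In yaql, "+" on lists concatenates, so the expression above merges the
+ # Apache base upgrade tasks with the aodh-specific tasks into one list.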
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_settings:
aodh_redis_password: {get_param: RedisPassword}
aodh::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://aodh:'
- - {get_param: AodhPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/aodh'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: aodh
+ password: {get_param: AodhPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /aodh
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
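+ # For reference, make_url assembles the parts above into a DSN such as
+ # mysql+pymysql://aodh:<password>@<host>/aodh?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo
+ # (the scheme shown is an example value taken from the EndpointMap).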
aodh::debug: {get_param: Debug}
aodh::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::rabbit_userid: {get_param: RabbitUserName}
aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
aodh::auth::auth_password: {get_param: AodhPassword}
- aodh::auth::auth_region: 'regionOne'
+ aodh::auth::auth_region: {get_param: KeystoneRegion}
aodh::auth::auth_tenant_name: 'service'
service_config_settings:
keystone:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh Evaluator service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh Listener service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Aodh Notifier service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Apache service configured with Puppet. Note this is typically included
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
conditions:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
apache::ip: {get_param: [ServiceNetMap, ApacheNetwork]}
+ apache::default_vhost: false
apache::server_signature: 'Off'
apache::server_tokens: 'Prod'
apache_remote_proxy_ips_network:
apache::mod::prefork::serverlimit: { get_param: ApacheServerLimit }
apache::mod::remoteip::proxy_ips:
- "%{hiera('apache_remote_proxy_ips_network')}"
- -
- generate_service_certificates: true
- tripleo::certmonger::apache_dirs::certificate_dir: '/etc/pki/tls/certs/httpd'
- tripleo::certmonger::apache_dirs::key_dir: '/etc/pki/tls/private/httpd'
- apache_certificates_specs:
- map_merge:
- repeat:
- template:
- httpd-NETWORK:
- service_certificate: '/etc/pki/tls/certs/httpd/httpd-NETWORK.crt'
- service_key: '/etc/pki/tls/private/httpd/httpd-NETWORK.key'
- hostname: "%{hiera('fqdn_NETWORK')}"
- principal: "HTTP/%{hiera('fqdn_NETWORK')}"
- for_each:
- NETWORK: {get_attr: [ApacheNetworks, value]}
+ - if:
+ - internal_tls_enabled
+ -
+ generate_service_certificates: true
+ apache::mod::ssl::ssl_ca: {get_param: InternalTLSCAFile}
+ tripleo::certmonger::apache_dirs::certificate_dir: '/etc/pki/tls/certs/httpd'
+ tripleo::certmonger::apache_dirs::key_dir: '/etc/pki/tls/private/httpd'
+ apache_certificates_specs:
+ map_merge:
+ repeat:
+ template:
+ httpd-NETWORK:
+ service_certificate: '/etc/pki/tls/certs/httpd/httpd-NETWORK.crt'
+ service_key: '/etc/pki/tls/private/httpd/httpd-NETWORK.key'
+ hostname: "%{hiera('fqdn_NETWORK')}"
+ principal: "HTTP/%{hiera('fqdn_NETWORK')}"
+ for_each:
+ NETWORK: {get_attr: [ApacheNetworks, value]}
+ - {}
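+ # When internal_tls_enabled is false, the else branch contributes an empty
+ # map, so map_merge adds no certificate hieradata.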
metadata_settings:
if:
- internal_tls_enabled
shell: /usr/bin/systemctl show 'httpd' --property ActiveState | grep '\bactive\b'
when: httpd_enabled.rc == 0
tags: step0,validation
+ - name: Ensure mod_ssl package is installed
+ tags: step3
+ yum: name=mod_ssl state=latest
-heat_template_version: ocata
+heat_template_version: pike
description: >
AuditD configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Barbican API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
params:
$NETWORK: {get_param: [ServiceNetMap, BarbicanApiNetwork]}
barbican::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://barbican:'
- - {get_param: BarbicanPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/barbican'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: barbican
+ password: {get_param: BarbicanPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /barbican
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
tripleo.barbican_api.firewall_rules:
'117 barbican':
dport:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Check if barbican_api is deployed
- command: systemctl is-enabled openstack-barbican-api
- tags: common
- ignore_errors: True
- register: barbican_api_enabled
- - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
- shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
- when: barbican_api_enabled.rc == 0
- tags: step0,validation
- - name: Install openstack-barbican-api package if it was disabled
- tags: step3
- yum: name=openstack-barbican-api state=latest
- when: barbican_api_enabled.rc != 0
+ yaql:
+ expression: $.data.apache_upgrade + $.data.barbican_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ barbican_api_upgrade:
+ - name: Check if barbican_api is deployed
+ command: systemctl is-enabled openstack-barbican-api
+ tags: common
+ ignore_errors: True
+ register: barbican_api_enabled
+ - name: "PreUpgrade step0,validation: Check service openstack-barbican-api is running"
+ shell: /usr/bin/systemctl show 'openstack-barbican-api' --property ActiveState | grep '\bactive\b'
+ when: barbican_api_enabled.rc == 0
+ tags: step0,validation
+ - name: Install openstack-barbican-api package if it was disabled
+ tags: step3
+ yum: name=openstack-barbican-api state=latest
+ when: barbican_api_enabled.rc != 0
-heat_template_version: ocata
+heat_template_version: pike
description: >
HAproxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Central Agent service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
- ceilometer_redis_password: {get_param: RedisPassword}
central_namespace: true
+ service_config_settings:
+ get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::agent::polling
upgrade_tasks:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Compute Agent service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [CeilometerServiceBase, role_data, config_settings]
- ceilometer::agent::compute::instance_discovery_method: {get_param: InstanceDiscoveryMethod}
compute_namespace: true
+ service_config_settings:
+ get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::agent::polling
upgrade_tasks:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Ipmi Agent service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Notification Agent service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- ceilometer
config_settings:
get_attr: [CeilometerServiceBase, role_data, config_settings]
+ service_config_settings:
+ get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
include ::tripleo::profile::base::ceilometer::agent::notification
upgrade_tasks:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ApacheServiceBase:
type: ./apache.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Stop ceilometer_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ yaql:
+ expression: $.data.apache_upgrade + $.data.ceilometer_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ ceilometer_api_upgrade:
+ - name: Stop ceilometer_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
- CeilometerBackend:
- default: 'mongodb'
- description: The ceilometer backend type.
- type: string
CeilometerMeteringSecret:
description: Secret shared by the ceilometer services.
type: string
description: The password for the ceilometer service account.
type: string
hidden: true
- CeilometerMeterDispatcher:
- default: ['gnocchi']
- description: Comma-seperated list of Dispatcher to process meter data
- type: comma_delimited_list
- constraints:
- - allowed_values: ['gnocchi', 'database']
- CeilometerEventDispatcher:
- default: ['panko', 'gnocchi']
- description: Comma-separated list of Dispatchers to process events data
- type: comma_delimited_list
- constraints:
- - allowed_values: ['panko', 'gnocchi', 'database']
CeilometerWorkers:
default: 0
description: Number of workers for Ceilometer service.
type: number
+ ManageEventPipeline:
+ default: false
+ description: Whether to manage event_pipeline.yaml.
+ type: boolean
EventPipelinePublishers:
- default: ['notifier://?topic=alarm.all']
- description: A list of publishers to put in event_pipeline.yaml.
+ default: ['gnocchi://']
+ description: >
+ A list of publishers to put in event_pipeline.yaml. When the
+ collector is used, override this with the notifier:// publisher.
+ Set ManageEventPipeline to true for the override to take effect.
+ type: comma_delimited_list
+ ManagePipeline:
+ default: false
+ description: Whether to manage pipeline.yaml.
+ type: boolean
+ PipelinePublishers:
+ default: ['gnocchi://']
+ description: >
+ A list of publishers to put in pipeline.yaml. When the
+ collector is used, override this with the notifier:// publisher.
+ Set ManagePipeline to true for the override to take effect.
type: comma_delimited_list
Debug:
default: ''
description: Whether to create or skip the API endpoint. Set this to
false if you choose to disable the Ceilometer API service.
type: boolean
+ SnmpdReadonlyUserName:
+ default: ro_snmp_user
+ description: The user name for SNMPd with readonly rights running on all Overcloud nodes
+ type: string
+ SnmpdReadonlyUserPassword:
+ description: The user password for SNMPd with readonly rights running on all Overcloud nodes
+ type: string
+ hidden: true
outputs:
role_data:
value:
service_name: ceilometer_base
config_settings:
- ceilometer_auth_enabled: true
ceilometer::debug: {get_param: Debug}
- ceilometer::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - - '://ceilometer:'
- - {get_param: CeilometerPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ceilometer'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
- ceilometer_backend: {get_param: CeilometerBackend}
- # we include db_sync class in puppet-tripleo
- ceilometer::db::sync_db: false
ceilometer::keystone::authtoken::project_name: 'service'
ceilometer::keystone::authtoken::user_domain_name: 'Default'
ceilometer::keystone::authtoken::project_domain_name: 'Default'
ceilometer::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
ceilometer::agent::auth::auth_password: {get_param: CeilometerPassword}
ceilometer::agent::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
+ ceilometer::agent::notification::manage_event_pipeline: {get_param: ManageEventPipeline}
ceilometer::agent::notification::event_pipeline_publishers: {get_param: EventPipelinePublishers}
+ ceilometer::agent::notification::manage_pipeline: {get_param: ManagePipeline}
+ ceilometer::agent::notification::pipeline_publishers: {get_param: PipelinePublishers}
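+ # Usage sketch: to keep routing data through the deprecated collector, set
+ # ManagePipeline/ManageEventPipeline to true and override the publishers
+ # with notifier:// via parameter_defaults.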
ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
ceilometer::agent::auth::auth_tenant_name: 'service'
ceilometer::agent::auth::auth_user_domain_name: 'Default'
ceilometer::agent::auth::auth_project_domain_name: 'Default'
ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
- ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
- ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]}
ceilometer::dispatcher::gnocchi::filter_project: 'service'
ceilometer::dispatcher::gnocchi::archive_policy: 'low'
ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
ceilometer::rabbit_port: {get_param: RabbitClientPort}
ceilometer::rabbit_heartbeat_timeout_threshold: 60
- ceilometer::db::database_db_max_retries: -1
- ceilometer::db::database_max_retries: -1
ceilometer::telemetry_secret: {get_param: CeilometerMeteringSecret}
+ ceilometer::snmpd_readonly_username: {get_param: SnmpdReadonlyUserName}
+ ceilometer::snmpd_readonly_user_password: {get_param: SnmpdReadonlyUserPassword}
service_config_settings:
keystone:
+ ceilometer_auth_enabled: true
ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Collector service configured with Puppet
+ This service is deprecated and will be removed in future releases.
parameters:
ServiceNetMap:
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ CeilometerBackend:
+ default: 'mongodb'
+ description: The ceilometer backend type.
+ type: string
+ CeilometerPassword:
+ description: The password for the ceilometer service account.
+ type: string
+ hidden: true
MonitoringSubscriptionCeilometerCollector:
default: 'overcloud-ceilometer-collector'
type: string
default:
tag: openstack.ceilometer.collector
path: /var/log/ceilometer/collector.log
-
+ CeilometerMeterDispatcher:
+ default: ['gnocchi']
+ description: Comma-separated list of dispatchers to process meter data.
+ Note that the database option is deprecated and will not be
+ supported in the future.
+ type: comma_delimited_list
+ constraints:
+ - allowed_values: ['gnocchi', 'database']
+ CeilometerEventDispatcher:
+ default: ['panko', 'gnocchi']
+ description: Comma-separated list of dispatchers to process events data.
+ Note that the database option is deprecated and will not be
+ supported in the future.
+ type: comma_delimited_list
+ constraints:
+ - allowed_values: ['panko', 'gnocchi', 'database']
+ CeilometerEventTTL:
+ default: '86400'
+ description: Number of seconds that events are kept in the database for
+ (<= 0 means forever)
+ type: string
+ CeilometerMeteringTTL:
+ default: '86400'
+ description: Number of seconds that samples are kept in the database for
+ (<= 0 means forever)
+ type: string
resources:
CeilometerServiceBase:
type: ./ceilometer-base.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
MongoDbBase:
type: ./database/mongodb-base.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [MongoDbBase, role_data, config_settings]
- get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::db::database_connection:
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: ceilometer
+ password: {get_param: CeilometerPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ceilometer
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
+ ceilometer_backend: {get_param: CeilometerBackend}
+ ceilometer::event_time_to_live: {get_param: CeilometerEventTTL}
+ ceilometer::metering_time_to_live: {get_param: CeilometerMeteringTTL}
+ # we include db_sync class in puppet-tripleo
+ ceilometer::db::sync_db: false
+ ceilometer::db::database_db_max_retries: -1
+ ceilometer::db::database_max_retries: -1
+ ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
+ ceilometer::collector::event_dispatcher: {get_param: CeilometerEventDispatcher}
service_config_settings:
get_attr: [CeilometerServiceBase, role_data, service_config_settings]
step_config: |
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ceilometer Expirer service configured with Puppet
+ Note: this service is deprecated and will be removed in
+ future releases.
parameters:
ServiceNetMap:
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph base service. Shared by all Ceph services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: ceph_base
config_settings:
tripleo::profile::base::ceph::enable_ceph_storage: {get_param: ControllerEnableCephStorage}
- ceph::profile::params::osd_pool_default_min_size: 1
ceph::profile::params::osds: {/srv/data: {}}
ceph::profile::params::manage_repo: false
ceph::profile::params::authentication_type: cephx
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph Client service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph External service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph MDS service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph Monitor service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
MonitoringSubscriptionCephMon:
default: 'overcloud-ceph-mon'
type: string
+ CephPoolDefaultSize:
+ description: Default replica count for RBD pools (osd_pool_default_size)
+ type: number
+ default: 3
resources:
CephBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
ceph::profile::params::mon_key: {get_param: CephMonKey}
ceph::profile::params::osd_pool_default_pg_num: 32
ceph::profile::params::osd_pool_default_pgp_num: 32
- ceph::profile::params::osd_pool_default_size: 3
+ ceph::profile::params::osd_pool_default_size: {get_param: CephPoolDefaultSize}
# repeat returns items in a list, so we need to map_merge twice
tripleo::profile::base::ceph::mon::ceph_pools:
map_merge:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph OSD service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph RadosGW service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Requests certificates using certmonger through Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
CinderBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Check if cinder_api is deployed
- command: systemctl is-enabled openstack-cinder-api
- tags: common
- ignore_errors: True
- register: cinder_api_enabled
- - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
- shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
- when: cinder_api_enabled.rc == 0
- tags: step0,validation
- - name: check for cinder running under apache (post upgrade)
- tags: step1
- shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder"
- register: cinder_apache
- ignore_errors: true
- - name: Stop cinder_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
- when: cinder_apache.rc == 0
- - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
- tags: step1
- when: cinder_api_enabled.rc == 0
- service: name=openstack-cinder-api state=stopped enabled=no
+ yaql:
+ expression: $.data.apache_upgrade + $.data.cinder_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ cinder_api_upgrade:
+ - name: Check if cinder_api is deployed
+ command: systemctl is-enabled openstack-cinder-api
+ tags: common
+ ignore_errors: True
+ register: cinder_api_enabled
+ - name: "PreUpgrade step0,validation: Check service openstack-cinder-api is running"
+ shell: /usr/bin/systemctl show 'openstack-cinder-api' --property ActiveState | grep '\bactive\b'
+ when: cinder_api_enabled.rc == 0
+ tags: step0,validation
+ - name: check for cinder running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q cinder"
+ register: cinder_apache
+ ignore_errors: true
+ - name: Stop cinder_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: cinder_apache.rc == 0
+ - name: Stop and disable cinder_api service (pre-upgrade not under httpd)
+ tags: step1
+ when: cinder_api_enabled.rc == 0
+ service: name=openstack-cinder-api state=stopped enabled=no
# See the License for the specific language governing permissions and
# limitations under the License.
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Dell EMC PS Series backend
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Dell EMC Storage Center backend
CinderDellScVolumeFolder:
type: string
default: 'dellsc_volume'
+ CinderDellScSecondarySanIp:
+ type: string
+ default: ''
+ CinderDellScSecondarySanLogin:
+ type: string
+ default: 'Admin'
+ CinderDellScSecondarySanPassword:
+ type: string
+ hidden: true
+ CinderDellScSecondaryScApiPort:
+ type: number
+ default: 3033
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
cinder::backend::dellsc_iscsi::dell_sc_api_port: {get_param: CinderDellScApiPort}
cinder::backend::dellsc_iscsi::dell_sc_server_folder: {get_param: CinderDellScServerFolder}
cinder::backend::dellsc_iscsi::dell_sc_volume_folder: {get_param: CinderDellScVolumeFolder}
+ cinder::backend::dellsc_iscsi::secondary_san_ip: {get_param: CinderDellScSecondarySanIp}
+ cinder::backend::dellsc_iscsi::secondary_san_login: {get_param: CinderDellScSecondarySanLogin}
+ cinder::backend::dellsc_iscsi::secondary_san_password: {get_param: CinderDellScSecondarySanPassword}
+ cinder::backend::dellsc_iscsi::secondary_sc_api_port: {get_param: CinderDellScSecondaryScApiPort}
step_config: |
include ::tripleo::profile::base::cinder::volume
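+# Usage sketch (hypothetical values) for enabling the secondary Storage
+# Center via parameter_defaults:
+#
+# parameter_defaults:
+# CinderDellScSecondarySanIp: 192.0.2.11
+# CinderDellScSecondarySanPassword: <secret>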
-heat_template_version: ocata
+heat_template_version: pike
description: OpenStack Cinder NetApp backend
parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
CinderEnableNetappBackend:
type: boolean
default: true
CinderNetappEseriesHostType:
type: string
default: 'linux_dm_mp'
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- type: json
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
parameter_groups:
- label: deprecated
--- /dev/null
+# Copyright (c) 2017 Pure Storage Inc, or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+heat_template_version: pike
+
+description: >
+ OpenStack Cinder Pure Storage FlashArray backend
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ type: json
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ CinderEnablePureBackend:
+ type: boolean
+ default: true
+ CinderPureBackendName:
+ type: string
+ default: 'tripleo_pure'
+ CinderPureStorageProtocol:
+ type: string
+ default: 'iSCSI'
+ CinderPureSanIp:
+ type: string
+ CinderPureAPIToken:
+ type: string
+ CinderPureUseChap:
+ type: boolean
+ default: false
+ CinderPureMultipathXfer:
+ type: boolean
+ default: true
+
+outputs:
+ role_data:
+ description: Role data for the Cinder Pure Storage FlashArray backend.
+ value:
+ service_name: cinder_backend_pure
+ config_settings:
+ tripleo::profile::base::cinder::volume::cinder_enable_pure_backend: {get_param: CinderEnablePureBackend}
+ cinder::backend::pure::volume_backend_name: {get_param: CinderPureBackendName}
+ cinder::backend::pure::pure_storage_protocol: {get_param: CinderPureStorageProtocol}
+ cinder::backend::pure::san_ip: {get_param: CinderPureSanIp}
+ cinder::backend::pure::pure_api_token: {get_param: CinderPureAPIToken}
+ cinder::backend::pure::use_chap_auth: {get_param: CinderPureUseChap}
+ cinder::backend::pure::use_multipath_for_image_xfer: {get_param: CinderPureMultipathXfer}
+ step_config: |
+ include ::tripleo::profile::base::cinder::volume
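+# Usage sketch (hypothetical values) for an operator environment file:
+#
+# parameter_defaults:
+# CinderPureSanIp: 192.0.2.20
+# CinderPureAPIToken: <API token generated on the FlashArray>
+# CinderPureStorageProtocol: 'FC' # or keep the default 'iSCSI'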
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Dell EMC ScaleIO backend
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Backup service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder base service. Shared by all Cinder services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: cinder_base
config_settings:
cinder::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://cinder:'
- - {get_param: CinderPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/cinder'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: cinder
+ password: {get_param: CinderPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /cinder
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
cinder::debug: {get_param: Debug}
cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
cinder::rabbit_userid: {get_param: RabbitUserName}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configure Cinder HPELeftHandISCSIDriver
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Scheduler service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Volume service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Congress service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_settings:
congress_password: {get_param: CongressPassword}
congress::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://congress:'
- - {get_param: CongressPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/congress'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: congress
+ password: {get_param: CongressPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /congress
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
congress::debug: {get_param: Debug}
congress::rpc_backend: rabbit
congress::rabbit_userid: {get_param: RabbitUserName}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configuration details for MongoDB service using composable roles
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
  MongoDB service deployment using Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
format: >-
/(?<time>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+\+\d{4})
(?<message>.*)$/
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
resources:
MongoDbBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
mongodb::server::bind_ip: {get_param: [ServiceNetMap, MongodbNetwork]}
+ -
+ if:
+ - internal_tls_enabled
+ -
+ generate_service_certificates: true
+ mongodb::server::ssl: true
+ mongodb::server::ssl_key: '/etc/pki/tls/certs/mongodb.pem'
+ mongodb_certificate_specs:
+ service_pem: '/etc/pki/tls/certs/mongodb.pem'
+ service_certificate: '/etc/pki/tls/certs/mongodb.crt'
+ service_key: '/etc/pki/tls/private/mongodb.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MongodbNetwork]}
+ principal:
+ str_replace:
+ template: "mongodb/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MongodbNetwork]}
+ - {}
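+        # The pattern used above, schematically: config_settings is a map_merge
+        # of the unconditional settings with an if/else pair, where the else
+        # branch contributes an empty map so the merge is a no-op when
+        # internal_tls_enabled is false:
+        # config_settings:
+        #   map_merge:
+        #     - {...unconditional settings...}
+        #     - if: [internal_tls_enabled, {...TLS settings...}, {}]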
step_config: |
include ::tripleo::profile::base::database::mongodb
upgrade_tasks:
- name: Start mongodb service
tags: step4
service: name=mongod state=started
+ metadata_settings:
+ if:
+ - internal_tls_enabled
+ -
+ - service: mongodb
+ network: {get_param: [ServiceNetMap, MongodbNetwork]}
+ type: node
+ - null
-heat_template_version: ocata
+heat_template_version: pike
description: >
Mysql client settings
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
EnableInternalTLS:
type: boolean
default: false
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
outputs:
role_data:
config_settings:
tripleo::profile::base::database::mysql::client::mysql_client_bind_address: {get_param: [ServiceNetMap, MysqlNetwork]}
tripleo::profile::base::database::mysql::client::enable_ssl: {get_param: EnableInternalTLS}
+ tripleo::profile::base::database::mysql::client::ssl_ca: {get_param: InternalTLSCAFile}
step_config: |
include ::tripleo::profile::base::database::mysql::client
-heat_template_version: ocata
+heat_template_version: pike
description: >
MySQL service deployment using puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Redis service configured with Puppet
description: The password for Redis
type: string
hidden: true
+ RedisFDLimit:
+    description: Configure the Redis file descriptor (ulimit) limit.
+ type: string
+ default: 10240
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
redis::sentinel::redis_host: "%{hiera('bootstrap_nodeid_ip')}"
redis::sentinel::notification_script: '/usr/local/bin/redis-notifications.sh'
redis::sentinel::sentinel_bind: {get_param: [ServiceNetMap, RedisNetwork]}
+ redis::ulimit: {get_param: RedisFDLimit}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Redis service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Ceilometer Collector service, disabled since pike
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the disabled Ceilometer Collector role.
+ value:
+ service_name: ceilometer_collector_disabled
+ upgrade_tasks:
+ - name: Stop and disable ceilometer_collector service on upgrade
+ tags: step1
+ service: name=openstack-ceilometer-collector state=stopped enabled=no
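+# This is the convention for retiring a service: the template emits only
+# a service_name suffixed with _disabled plus an upgrade task that stops
+# and disables the old unit, so pointing the service at this file in the
+# resource registry cleans the node up on upgrade.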
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Ceilometer Expirer service, disabled since pike
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+ CeilometerServiceBase:
+ type: ../ceilometer-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
+outputs:
+ role_data:
+    description: Role data for the disabled Ceilometer Expirer role.
+ value:
+ service_name: ceilometer_expirer_disabled
+ config_settings:
+ map_merge:
+ - get_attr: [CeilometerServiceBase, role_data, config_settings]
+ - ceilometer::expirer::enable_cron: false
+ step_config: |
+ include ::tripleo::profile::base::ceilometer::expirer
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Glance Registry service, disabled since ocata
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
role_data:
description: Role data for the disabled Glance Registry role.
value:
- service_name: glance_registry
+ service_name: glance_registry_disabled
upgrade_tasks:
- name: Stop and disable glance_registry service on upgrade
tags: step1
--- /dev/null
+heat_template_version: pike
+
+description: >
+  MongoDB service, disabled by default since pike
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+
+outputs:
+ role_data:
+ description: Role data for the disabled MongoDB role.
+ value:
+ service_name: mongodb_disabled
+ upgrade_tasks:
+ - name: Stop and disable mongodb service on upgrade
+ tags: step1
+ service: name=mongod state=stopped enabled=no
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configures docker on the host
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack EC2-API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
type: string
default: 'regionOne'
description: Keystone region for endpoint
+ Ec2ApiExternalNetwork:
+ type: string
+ default: ''
+    description: Name of the external network, which is used to connect VPCs
+                 to the Internet and to allocate Elastic IPs.
+ NovaDefaultFloatingPool:
+ default: 'public'
+ description: Default pool for floating IP addresses
+ type: string
MonitoringSubscriptionEc2Api:
default: 'overcloud-ec2-api'
type: string
conditions:
nova_workers_zero: {equals : [{get_param: Ec2ApiWorkers}, 0]}
+ external_network_unset: {equals : [{get_param: Ec2ApiExternalNetwork}, '']}
outputs:
role_data:
params:
$NETWORK: {get_param: [ServiceNetMap, Ec2ApiMetadataNetwork]}
ec2api::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://ec2_api:'
- - {get_param: Ec2ApiPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ec2_api'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: ec2_api
+ password: {get_param: Ec2ApiPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ec2_api
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
ec2api::api::keystone_ec2_tokens_url:
list_join:
- ''
- {}
- ec2api::api::ec2api_workers: {get_param: Ec2ApiWorkers}
ec2api::metadata::metadata_workers: {get_param: Ec2ApiWorkers}
+ -
+ if:
+ - external_network_unset
+ - ec2api::api::external_network: {get_param: NovaDefaultFloatingPool}
+ - ec2api::api::external_network: {get_param: Ec2ApiExternalNetwork}
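+        # When Ec2ApiExternalNetwork is left at its '' default, the EC2 API
+        # falls back to NovaDefaultFloatingPool ('public'), so Elastic IP
+        # allocation keeps working without extra configuration.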
step_config: |
include tripleo::profile::base::nova::ec2api
service_config_settings:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Etcd service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
MonitoringSubscriptionEtcd:
default: 'overcloud-etcd'
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+
+ internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
outputs:
role_data:
service_name: etcd
monitoring_subscription: {get_param: MonitoringSubscriptionEtcd}
config_settings:
- etcd::etcd_name:
- str_replace:
- template:
- "%{hiera('fqdn_$NETWORK')}"
- params:
- $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
- # NOTE: bind IP is found in Heat replacing the network name with the local node IP
- # for the given network; replacement examples (eg. for internal_api):
- # internal_api -> IP
- # internal_api_uri -> [IP]
- # internal_api_subnet - > IP/CIDR
- tripleo::profile::base::etcd::bind_ip: {get_param: [ServiceNetMap, EtcdNetwork]}
- tripleo::profile::base::etcd::client_port: '2379'
- tripleo::profile::base::etcd::peer_port: '2380'
- etcd::initial_cluster_token: {get_param: EtcdInitialClusterToken}
- etcd::manage_package: false
- tripleo.etcd.firewall_rules:
- '141 etcd':
- dport:
- - 2379
- - 2380
+ map_merge:
+ -
+ etcd::etcd_name:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+ # NOTE: bind IP is found in Heat replacing the network name with the local node IP
+ # for the given network; replacement examples (eg. for internal_api):
+ # internal_api -> IP
+ # internal_api_uri -> [IP]
+ # internal_api_subnet - > IP/CIDR
+ tripleo::profile::base::etcd::bind_ip: {get_param: [ServiceNetMap, EtcdNetwork]}
+ tripleo::profile::base::etcd::client_port: '2379'
+ tripleo::profile::base::etcd::peer_port: '2380'
+ etcd::initial_cluster_token: {get_param: EtcdInitialClusterToken}
+ etcd::manage_package: false
+ tripleo.etcd.firewall_rules:
+ '141 etcd':
+ dport:
+ - 2379
+ - 2380
+ -
+ if:
+ - internal_tls_enabled
+ - generate_service_certificates: true
+ tripleo::profile::base::etcd::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/etcd.crt'
+ service_key: '/etc/pki/tls/private/etcd.key'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+ principal:
+ str_replace:
+ template: "etcd/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, EtcdNetwork]}
+ - {}
step_config: |
include ::tripleo::profile::base::etcd
upgrade_tasks:
- name: Stop etcd service
tags: step2
service: name=etcd state=stopped
+ metadata_settings:
+ if:
+ - internal_tls_enabled
+ -
+ - service: etcd
+ network: {get_param: [ServiceNetMap, EtcdNetwork]}
+ type: node
+ - null
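+      # metadata_settings advertises the per-network service principals this
+      # node needs (consumed by the TLS-everywhere enrollment flow, e.g.
+      # novajoin/FreeIPA); emitting null keeps the key present but inert when
+      # TLS is disabled.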
-heat_template_version: ocata
+heat_template_version: pike
description: >
External Swift Proxy endpoint configured with Puppet
parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
ExternalPublicUrl:
description: Public endpoint url for the external swift proxy
type: string
type: string
default: 'regionOne'
description: Keystone region for endpoint
- ServiceNetMap:
- default: {}
- description: Mapping of service_name -> network name. Typically set
- via parameter_defaults in the resource registry. This
- mapping overrides those in ServiceNetMapDefaults.
- type: json
- DefaultPasswords:
- default: {}
- type: json
- EndpointMap:
- default: {}
- description: Mapping of service endpoint -> protocol. Typically set
- via parameter_defaults in the resource registry.
- type: json
resources:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Glance API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
conditions:
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
+ glance_workers_unset: {equals : [{get_param: GlanceWorkers}, '']}
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
map_merge:
- get_attr: [TLSProxyBase, role_data, config_settings]
- glance::api::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://glance:'
- - {get_param: GlancePassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/glance'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: glance
+ password: {get_param: GlancePassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /glance
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
glance::api::bind_port: {get_param: [EndpointMap, GlanceInternal, port]}
- glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ glance::api::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
glance::api::authtoken::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
glance::api::enable_v1_api: false
glance::api::enable_v2_api: true
glance::api::authtoken::password: {get_param: GlancePassword}
glance::api::enable_proxy_headers_parsing: true
glance::api::debug: {get_param: Debug}
- glance::api::workers: {get_param: GlanceWorkers}
glance::policy::policies: {get_param: GlanceApiPolicies}
tripleo.glance_api.firewall_rules:
'112 glance_api':
- 9292
- 13292
glance::api::authtoken::project_name: 'service'
+ glance::keystone::authtoken::user_domain_name: 'Default'
+ glance::keystone::authtoken::project_domain_name: 'Default'
glance::api::pipeline: 'keystone'
glance::api::show_image_direct_url: true
# NOTE: bind IP is found in Heat replacing the network name with the
- {get_param: [ServiceNetMap, GlanceApiNetwork]}
glance_notifier_strategy: {get_param: GlanceNotifierStrategy}
glance_log_file: {get_param: GlanceLogFile}
- glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneInternal, uri] }
+ glance::backend::swift::swift_store_auth_address: {get_param: [EndpointMap, KeystoneV3Internal, uri] }
glance::backend::swift::swift_store_user: service:glance
glance::backend::swift::swift_store_key: {get_param: GlancePassword}
glance::backend::swift::swift_store_create_container_on_put: true
+ glance::backend::swift::swift_store_auth_version: 3
glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
glance_backend: {get_param: GlanceBackend}
tripleo::profile::base::glance::api::glance_nfs_enabled: {get_param: GlanceNfsEnabled}
tripleo::glance::nfs_mount::share: {get_param: GlanceNfsShare}
tripleo::glance::nfs_mount::options: {get_param: GlanceNfsOptions}
+ -
+ if:
+ - glance_workers_unset
+ - {}
+ - glance::api::workers: {get_param: GlanceWorkers}
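+          # Leaving GlanceWorkers at its '' default now omits the setting
+          # entirely, so the service's own default worker count (typically
+          # derived from the node's processor count) applies instead of a
+          # forced fixed value.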
service_config_settings:
keystone:
glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Gnocchi service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ApacheServiceBase:
type: ./apache.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Stop gnocchi_api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
+ yaql:
+ expression: $.data.apache_upgrade + $.data.gnocchi_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ gnocchi_api_upgrade:
+ - name: Stop gnocchi_api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
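+      # upgrade_tasks are plain lists of Ansible tasks, so this yaql
+      # expression simply concatenates the Apache base tasks with the
+      # service-specific one; $.data refers to the 'data' map passed
+      # alongside the expression.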
-heat_template_version: ocata
+heat_template_version: pike
description: >
Gnocchi service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 'mysql'
description: The short name of the Gnocchi indexer backend to use.
type: string
+ MetricProcessingDelay:
+ default: 30
+    description: Delay, in seconds, between processing metrics.
+ type: number
GnocchiPassword:
description: The password for the gnocchi service and db account.
type: string
gnocchi_redis_password: {get_param: RedisPassword}
gnocchi::debug: {get_param: Debug}
gnocchi::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://gnocchi:'
- - {get_param: GnocchiPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/gnocchi'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
- gnocchi::db::sync::extra_opts: '--skip-storage'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: gnocchi
+ password: {get_param: GnocchiPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /gnocchi
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
+ gnocchi::db::sync::extra_opts: ''
+ gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
gnocchi::storage::swift::swift_user: 'service:gnocchi'
gnocchi::storage::swift::swift_auth_version: 3
gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Gnocchi service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Gnocchi service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
HAProxy deployment with TLS enabled, powered by certmonger
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
HAProxy deployment with TLS enabled, powered by certmonger
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
  HAProxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
MonitoringSubscriptionHaproxy:
default: 'overcloud-haproxy'
type: string
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
HAProxyInternalTLS:
type: OS::TripleO::Services::HAProxyInternalTLS
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
tripleo::haproxy::haproxy_stats_user: {get_param: HAProxyStatsUser}
tripleo::haproxy::haproxy_stats_password: {get_param: HAProxyStatsPassword}
tripleo::haproxy::redis_password: {get_param: RedisPassword}
+ tripleo::haproxy::ca_bundle: {get_param: InternalTLSCAFile}
tripleo::profile::base::haproxy::certificates_specs:
map_merge:
- get_attr: [HAProxyPublicTLS, role_data, certificates_specs]
when: haproxy_enabled.rc == 0
service: name=haproxy state=started
metadata_settings:
- yaql:
- expression: '[].concat(coalesce($.data.internal, []), coalesce($.data.public, []))'
- data:
- public: {get_attr: [HAProxyPublicTLS, role_data, metadata_settings]}
- internal: {get_attr: [HAProxyInternalTLS, role_data, metadata_settings]}
+ list_concat:
+ - {get_attr: [HAProxyPublicTLS, role_data, metadata_settings]}
+ - {get_attr: [HAProxyInternalTLS, role_data, metadata_settings]}
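+      # list_concat (new with the pike template version) joins the two lists
+      # directly and tolerates null members, which is what the yaql
+      # coalesce() calls above were working around.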
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Heat CloudFormation API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
HeatBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Heat CloudWatch API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
HeatBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Heat API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
HeatBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Heat base service. Shared by all Heat services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Heat Engine service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
heat::engine::max_nested_stack_depth: 6
heat::engine::max_resources_per_stack: {get_param: HeatMaxResourcesPerStack}
heat::engine::heat_metadata_server_url:
- list_join:
- - ''
- - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
- - '://'
- - {get_param: [EndpointMap, HeatCfnPublic, host]}
- - ':'
- - {get_param: [EndpointMap, HeatCfnPublic, port]}
+ make_url:
+ scheme: {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+ host: {get_param: [EndpointMap, HeatCfnPublic, host]}
+ port: {get_param: [EndpointMap, HeatCfnPublic, port]}
heat::engine::heat_waitcondition_server_url:
- list_join:
- - ''
- - - {get_param: [EndpointMap, HeatCfnPublic, protocol]}
- - '://'
- - {get_param: [EndpointMap, HeatCfnPublic, host]}
- - ':'
- - {get_param: [EndpointMap, HeatCfnPublic, port]}
- - '/v1/waitcondition'
+ make_url:
+ scheme: {get_param: [EndpointMap, HeatCfnPublic, protocol]}
+ host: {get_param: [EndpointMap, HeatCfnPublic, host]}
+ port: {get_param: [EndpointMap, HeatCfnPublic, port]}
+ path: /v1/waitcondition
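+      # With hypothetical values (public VIP 203.0.113.5, port 8000), the two
+      # make_url blocks above render as:
+      #   http://203.0.113.5:8000
+      #   http://203.0.113.5:8000/v1/waitcondition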
heat::engine::convergence_engine: {get_param: HeatConvergenceEngine}
tripleo::profile::base::heat::manage_db_purge: {get_param: HeatEnableDBPurge}
heat::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://heat:'
- - {get_param: HeatPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/heat'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: heat
+ password: {get_param: HeatPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /heat
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
heat::keystone_ec2_uri:
list_join:
- ''
-heat_template_version: ocata
+heat_template_version: pike
description: >
Horizon service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
tags: step1
when: httpd_enabled.rc == 0
service: name=httpd state=stopped
+ service_config_settings:
+ haproxy:
+ tripleo.horizon.firewall_rules:
+ '127 horizon':
+ dport:
+ - 80
+ - 443
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ironic API configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ironic services configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: ironic_base
config_settings:
ironic::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://ironic:'
- - {get_param: IronicPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ironic'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: ironic
+ password: {get_param: IronicPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ironic
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
ironic::debug: {get_param: Debug}
ironic::rabbit_userid: {get_param: RabbitUserName}
ironic::rabbit_password: {get_param: RabbitPassword}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Ironic conductor configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
created yet) and should be changed to an actual UUID in
a post-deployment stack update.
type: string
+ IronicDefaultBootOption:
+ default: 'local'
+    description: How to boot the bare metal instances. Set to 'local' (the
+                 default) to use the local bootloader (requires grub2 for
+                 partition images). Set to 'netboot' to make the instances
+                 boot from the controllers using PXE/iPXE.
+ type: string
IronicDefaultNetworkInterface:
default: 'flat'
description: Network interface implementation to use by default.
default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo']
description: Enabled Ironic drivers
type: comma_delimited_list
+ IronicEnabledHardwareTypes:
+ default: ['ipmi', 'redfish']
+ description: Enabled Ironic hardware types
+ type: comma_delimited_list
+ IronicEnabledManagementInterfaces:
+ default: ['ipmitool', 'redfish']
+ description: Enabled management interface implementations. Each hardware
+ type must have at least one valid implementation enabled.
+ type: comma_delimited_list
+ IronicEnabledPowerInterfaces:
+ default: ['ipmitool', 'redfish']
+ description: Enabled power interface implementations. Each hardware
+ type must have at least one valid implementation enabled.
+ type: comma_delimited_list
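+  # 'ipmi' and 'redfish' are new-style Ironic hardware types, which
+  # gradually replace the classic drivers in IronicEnabledDrivers; as the
+  # descriptions note, each enabled hardware type needs at least one
+  # matching implementation in the management and power interface lists.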
IronicIPXEEnabled:
default: true
description: Whether to use iPXE instead of PXE for deployment.
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
ironic::conductor::provisioning_network: {get_param: IronicProvisioningNetwork}
+ ironic::conductor::default_boot_option: {get_param: IronicDefaultBootOption}
ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
+ ironic::conductor::enabled_hardware_types: {get_param: IronicEnabledHardwareTypes}
# We need an endpoint containing a real IP, not a VIP here
ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]}
ironic::conductor::http_url:
# NOTE(dtantsur): UEFI only works with iPXE currently for us
ironic::drivers::pxe::uefi_pxe_config_template: '$pybasedir/drivers/modules/ipxe_config.template'
ironic::drivers::pxe::uefi_pxe_bootfile_name: 'ipxe.efi'
+ ironic::drivers::interfaces::enabled_console_interfaces: ['ipmitool-socat', 'no-console']
+ ironic::drivers::interfaces::enabled_management_interfaces: {get_param: IronicEnabledManagementInterfaces}
ironic::drivers::interfaces::enabled_network_interfaces: ['flat', 'neutron']
+ ironic::drivers::interfaces::enabled_power_interfaces: {get_param: IronicEnabledPowerInterfaces}
ironic::drivers::interfaces::default_network_interface: {get_param: IronicDefaultNetworkInterface}
tripleo.ironic_conductor.firewall_rules:
'134 ironic conductor TFTP':
-heat_template_version: ocata
+heat_template_version: pike
description: >
Keepalived service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
- tripleo.keepalived.firewall_rules:
'106 keepalived vrrp':
proto: vrrp
- -
+ -
if:
- control_iface_empty
- {}
- tripleo::keepalived::control_virtual_interface: {get_param: ControlVirtualInterface}
- -
+ -
if:
- public_iface_empty
- {}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Load kernel modules with kmod and configure kernel options with sysctl.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 0
description: Configures sysctl net.ipv6.{default/all}.disable_ipv6 keys
type: number
+ NeighbourGcThreshold1:
+ default: 1024
+ description: Configures sysctl net.ipv4.neigh.default.gc_thresh1 value.
+ This is the minimum number of entries to keep in the ARP
+ cache. The garbage collector will not run if there are
+ fewer than this number of entries in the cache.
+ type: number
+ NeighbourGcThreshold2:
+ default: 2048
+ description: Configures sysctl net.ipv4.neigh.default.gc_thresh2 value.
+ This is the soft maximum number of entries to keep in the
+ ARP cache. The garbage collector will allow the number of
+                 entries to exceed this for 5 seconds before collection is
+                 performed.
+ type: number
+ NeighbourGcThreshold3:
+ default: 4096
+ description: Configures sysctl net.ipv4.neigh.default.gc_thresh3 value.
+ This is the hard maximum number of entries to keep in the
+ ARP cache. The garbage collector will always run if there
+ are more than this number of entries in the cache.
+ type: number
outputs:
role_data:
value: 0
net.ipv4.conf.all.send_redirects:
value: 0
+ net.ipv4.conf.all.arp_accept:
+ value: 1
net.ipv4.conf.default.accept_redirects:
value: 0
net.ipv4.conf.default.secure_redirects:
value: 1
fs.suid_dumpable:
value: 0
+        # Avoid neighbour table overflow on large deployments.
+ net.ipv4.neigh.default.gc_thresh1:
+ value: {get_param: NeighbourGcThreshold1}
+ net.ipv4.neigh.default.gc_thresh2:
+ value: {get_param: NeighbourGcThreshold2}
+ net.ipv4.neigh.default.gc_thresh3:
+ value: {get_param: NeighbourGcThreshold3}
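+        # With the defaults above, the rendered sysctl configuration amounts to:
+        #   net.ipv4.neigh.default.gc_thresh1 = 1024
+        #   net.ipv4.neigh.default.gc_thresh2 = 2048
+        #   net.ipv4.neigh.default.gc_thresh3 = 4096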
step_config: |
include ::tripleo::profile::base::kernel
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Keystone service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
conditions:
map_merge:
- get_attr: [ApacheServiceBase, role_data, config_settings]
- keystone::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://keystone:'
- - {get_param: AdminToken}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/keystone'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: keystone
+ password: {get_param: AdminToken}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /keystone
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
keystone::admin_token: {get_param: AdminToken}
keystone::admin_password: {get_param: AdminPassword}
keystone::roles::admin::password: {get_param: AdminPassword}
content: {get_param: KeystoneFernetKey0}
'/etc/keystone/fernet-keys/1':
content: {get_param: KeystoneFernetKey1}
+ keystone::fernet_replace_keys: false
keystone::debug: {get_param: Debug}
keystone::rabbit_userid: {get_param: RabbitUserName}
keystone::rabbit_password: {get_param: RabbitPassword}
horizon::keystone_multidomain_support: true
horizon::keystone_default_domain: 'Default'
- {}
- # Ansible tasks to handle upgrade
- upgrade_tasks:
- - name: Stop keystone service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ upgrade_tasks:
+ yaql:
+ expression: $.data.apache_upgrade + $.data.keystone_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ keystone_upgrade:
+ - name: Stop keystone service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
-heat_template_version: ocata
+heat_template_version: pike
description: Fluentd base service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: >
-heat_template_version: ocata
+heat_template_version: pike
description: Fluentd client configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: >
type: ./fluentd-base.yaml
properties:
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
-heat_template_version: ocata
+heat_template_version: pike
description: Fluentd logging configuration
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: >
-heat_template_version: ocata
+heat_template_version: pike
description: >
Manila-api service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Manila CephFS backend
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: 'ceph'
ManilaCephFSNativeCephFSEnableSnapshots:
type: boolean
- default: true
+ default: false
ManilaCephFSDataPoolName:
default: manila_data
type: string
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Manila generic backend.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Manila NetApp backend.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
type: json
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Manila base service. Shared by manila-api/scheduler/share services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
manila::db::database_db_max_retries: -1
manila::db::database_max_retries: -1
manila::sql_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://manila:'
- - {get_param: ManilaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/manila'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: manila
+ password: {get_param: ManilaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /manila
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
service_config_settings:
mysql:
manila::db::mysql::password: {get_param: ManilaPassword}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Manila-scheduler service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Manila-share service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Memcached service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: Collectd client service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
CollectdDefaultPlugins:
default:
- disk
CollectdSecurityLevel:
type: string
description: >
- Security level setting for remote collectd connection.
+      Security level setting for remote collectd connection. If it is
+      set to Sign or Encrypt, the CollectdPassword and CollectdUsername
+      parameters must also be set.
default: 'None'
constraints:
- allowed_values:
-heat_template_version: ocata
+heat_template_version: pike
description: >
  OpenStack Mistral API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
e.g. { mistral-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
default: {}
type: json
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+ mistral_workers_zero: {equals : [{get_param: MistralWorkers}, 0]}
resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
MistralBase:
type: ./mistral-base.yaml
properties:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [MistralBase, role_data, config_settings]
- mistral::api::api_workers: {get_param: MistralWorkers}
mistral::api::bind_host: {get_param: [ServiceNetMap, MistralApiNetwork]}
+ mistral::wsgi::apache::ssl: {get_param: EnableInternalTLS}
mistral::policy::policies: {get_param: MistralApiPolicies}
tripleo.mistral_api.firewall_rules:
'133 mistral':
dport:
- 8989
- 13989
+ mistral::api::service_name: 'httpd'
+ mistral::wsgi::apache::bind_host: {get_param: [ServiceNetMap, MistralApiNetwork]}
+ mistral::wsgi::apache::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MistralApiNetwork]}
+ - if:
+ - mistral_workers_zero
+ - {}
+ - mistral::wsgi::apache::workers: {get_param: MistralWorkers}
service_config_settings:
get_attr: [MistralBase, role_data, service_config_settings]
step_config: |
grep '\bactive\b'
when: mistral_api_enabled.rc == 0
tags: step0,validation
- - name: Stop mistral_api service
+        - name: Check for mistral_api running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q mistral_api_wsgi"
+ register: mistral_api_apache
+ ignore_errors: true
+ - name: Stop mistral_api service (running under httpd)
tags: step1
- service: name=openstack-mistral-api state=stopped
- - name: Install openstack-mistral-api package if it was disabled
- tags: step3
- yum: name=openstack-mistral-api state=latest
- when: mistral_api_enabled.rc != 0
+ service: name=httpd state=stopped
+ when: mistral_api_apache.rc == 0
+ - name: Stop and disable mistral_api service (pre-upgrade not under httpd)
+ tags: step1
+ when: mistral_api_enabled.rc == 0
+ service: name=openstack-mistral-api state=stopped enabled=no
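+      # The upgrade flow here mirrors the keystone/gnocchi pattern: first
+      # detect whether mistral-api already runs as a WSGI app under httpd and
+      # stop httpd in that case; otherwise stop and disable the pre-pike
+      # standalone openstack-mistral-api service.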
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Mistral base service. Shared for all Mistral services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: mistral_base
config_settings:
mistral::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://mistral:'
- - {get_param: MistralPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/mistral'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: mistral
+ password: {get_param: MistralPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /mistral
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
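For readers comparing the two forms: make_url assembles the same connection string the old list_join concatenation produced. With the MysqlInternal protocol typically resolving to mysql+pymysql, the rendered DSN looks roughly like this (host and password illustrative):

  mysql+pymysql://mistral:<MistralPassword>@<mysql-internal-host>/mistral?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo

The same make_url conversion is applied verbatim to the neutron, plumgrid and nova database connections later in this series.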
mistral::rabbit_userid: {get_param: RabbitUserName}
mistral::rabbit_password: {get_param: RabbitPassword}
mistral::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Mistral Engine service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openstack Mistral API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: Sensu base service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
MonitoringRabbitHost:
description: RabbitMQ host Sensu has to connect to.
type: string
-heat_template_version: ocata
+heat_template_version: pike
description: Sensu client configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: >
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Analytics Database service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Analytics service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Base parameters for all Contrail Services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Config service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Control service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Database service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail Heat plugin adds Contrail specific heat resources enabling heat
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Opencontrail plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Provision Contrail services after deployment
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail TSN Service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute OpenContrail plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Contrail WebUI service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Server configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
NeutronWorkers:
default: ''
description: |
- Sets the number of API and RPC workers for the Neutron service. The
- default value results in the configuration being left unset and a
- system-dependent default will be chosen (usually the number of
- processors). Please note that this can result in a large number of
- processes and memory consumption on systems with a large core count. On
- such systems it is recommended that a non-default value be selected that
- matches the load requirements.
+ Sets the number of API and RPC workers for the Neutron service.
+ The default value results in the configuration being left unset
+ and a system-dependent default being chosen (usually the number
+ of processors). Note that this can result in a large number of
+ processes and high memory consumption on systems with a large
+ core count. On such systems it is recommended to select a
+ non-default value matching the load requirements.
type: string
NeutronPassword:
description: The password for the neutron service and db account, used by neutron agents.
conditions:
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
+ neutron_workers_unset: {equals : [{get_param: NeutronWorkers}, '']}
resources:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
NeutronBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [NeutronBase, role_data, config_settings]
- get_attr: [TLSProxyBase, role_data, config_settings]
- neutron::server::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://neutron:'
- - {get_param: NeutronPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ovs_neutron'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: neutron
+ password: {get_param: NeutronPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ovs_neutron
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
neutron::policy::policies: {get_param: NeutronApiPolicies}
neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- neutron::server::api_workers: {get_param: NeutronWorkers}
- neutron::server::rpc_workers: {get_param: NeutronWorkers}
neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
neutron::server::enable_proxy_headers_parsing: true
neutron::keystone::authtoken::password: {get_param: NeutronPassword}
- 9696
- 13696
neutron::server::router_distributed: {get_param: NeutronEnableDVR}
+ neutron::server::enable_dvr: {get_param: NeutronEnableDVR}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
- 'localhost'
- {get_param: [ServiceNetMap, NeutronApiNetwork]}
tripleo::profile::base::neutron::server::l3_ha_override: {get_param: NeutronL3HA}
+ -
+ if:
+ - neutron_workers_unset
+ - {}
+ - neutron::server::api_workers: {get_param: NeutronWorkers}
+ neutron::server::rpc_workers: {get_param: NeutronWorkers}
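Per the NeutronWorkers guidance above, high core-count systems may want an explicit cap; the new condition means the empty-string default now omits both hiera keys instead of writing empty values. A sketch (value illustrative):

  parameter_defaults:
    NeutronWorkers: 8   # applied to both api_workers and rpc_workers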
step_config: |
include tripleo::profile::base::neutron::server
service_config_settings:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron base service. Shared for all Neutron agents.
type: number
default: 0
description: The number of neutron dhcp agents to schedule per network
+ NeutronDnsDomain:
+ type: string
+ default: openstacklocal
+ description: Domain to use for building the hostnames.
NeutronCorePlugin:
default: 'ml2'
description: |
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
neutron::debug: {get_param: Debug}
neutron::purge_config: {get_param: EnableConfigPurge}
neutron::allow_overlapping_ips: true
+ neutron::dns_domain: {get_param: NeutronDnsDomain}
neutron::rabbit_heartbeat_timeout_threshold: 60
neutron::host: '%{::fqdn}'
neutron::db::database_db_max_retries: -1
-heat_template_version: ocata
+heat_template_version: pike
description: >
BGPVPN API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Installs bigswitch agent and enables the services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute Midonet plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute Nuage plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute OVN agent
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
ServiceNetMap:
default: {}
description: Mapping of service_name -> network name. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Compute Plumgrid plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron DHCP agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ L2 Gateway agent configured with Puppet
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ L2gwAgentOvsdbHosts:
+ default: ''
+ description: L2 gateway agent OVSDB server list.
+ type: comma_delimited_list
+ L2gwAgentEnableManager:
+ default: false
+ description: Enable the agent's OVSDB manager so that connections can be initiated by the OVSDB server.
+ type: boolean
+ L2gwAgentManagerTableListeningPort:
+ default: 6632
+ description: Port on which the L2 gateway agent listens when the OVSDB manager is enabled.
+ type: number
+ L2gwAgentPeriodicInterval:
+ default: 20
+ description: The L2 gateway agent checks connection state with the OVSDB
+ servers. The interval is the number of seconds between attempts.
+ type: number
+ L2gwAgentMaxConnectionRetries:
+ default: 10
+ description: Maximum number of times the L2 gateway agent retries connecting to the OVSDB server.
+ type: number
+ L2gwAgentSocketTimeout:
+ default: 30
+ description: Socket timeout, in seconds, for connections to the OVSDB servers.
+ type: number
+ MonitoringSubscriptionNeutronL2gwAgent:
+ default: 'overcloud-neutron-l2gw-agent'
+ type: string
+ NeutronL2gwAgentLoggingSource:
+ type: json
+ default:
+ tag: openstack.neutron.agent.l2gw
+ path: /var/log/neutron/l2gw-agent.log
+
+conditions:
+ internal_manager_enabled: {equals: [{get_param: L2gwAgentEnableManager}, True]}
+
+outputs:
+ role_data:
+ description: Role data for the L2 Gateway role.
+ value:
+ service_name: neutron_l2gw_agent
+ monitoring_subscription: {get_param: MonitoringSubscriptionNeutronL2gwAgent}
+ logging_source: {get_param: NeutronL2gwAgentLoggingSource}
+ logging_groups:
+ - neutron
+ config_settings:
+ map_merge:
+ - neutron::agents::l2gw::ovsdb_hosts: {get_param: L2gwAgentOvsdbHosts}
+ neutron::agents::l2gw::enable_manager: {get_param: L2gwAgentEnableManager}
+ neutron::agents::l2gw::manager_table_listening_port: {get_param: L2gwAgentManagerTableListeningPort}
+ neutron::agents::l2gw::periodic_interval: {get_param: L2gwAgentPeriodicInterval}
+ neutron::agents::l2gw::max_connection_retries: {get_param: L2gwAgentMaxConnectionRetries}
+ neutron::agents::l2gw::socket_timeout: {get_param: L2gwAgentSocketTimeout}
+ -
+ if:
+ - internal_manager_enabled
+ - tripleo.neutron_l2gw_agent.firewall_rules:
+ '142 neutron l2gw agent input':
+ proto: 'tcp'
+ dport: {get_param: L2gwAgentManagerTableListeningPort}
+ - null
+
+ step_config: |
+ include tripleo::profile::base::neutron::agents::l2gw
+ upgrade_tasks:
+ - name: Check if neutron_l2gw_agent is deployed
+ command: systemctl is-enabled neutron-l2gw-agent
+ tags: common
+ ignore_errors: True
+ register: neutron_l2gw_agent_enabled
+ - name: "PreUpgrade step0,validation: Check service neutron-l2gw-agent is running"
+ shell: /usr/bin/systemctl show 'neutron-l2gw-agent' --property ActiveState | grep '\bactive\b'
+ when: neutron_l2gw_agent_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop neutron_l2gw_agent service
+ tags: step1
+ when: neutron_l2gw_agent_enabled.rc == 0
+ service: name=neutron-l2gw-agent state=stopped
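A hedged usage sketch for the new agent: the ovsdb_hosts entries follow networking-l2gw's name:address:port convention; the name and address below are illustrative, not taken from this diff:

  parameter_defaults:
    L2gwAgentOvsdbHosts: ['ovsdb1:192.0.2.20:6632']
    L2gwAgentEnableManager: false   # keep the listening manager (and its firewall rule) off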
-heat_template_version: ocata
+heat_template_version: pike
description: >
L2 Gateway service plugin configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron L3 agent for DVR enabled compute nodes
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron L3 agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Neutron Linuxbridge agent configured with Puppet.
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ PhysicalInterfaceMapping:
+ description: List of <physical_network>:<physical_interface> tuples
+ mapping physical network names to the agent's node-specific
+ physical network interfaces. Defaults to empty list.
+ type: comma_delimited_list
+ default: ''
+ NeutronLinuxbridgeFirewallDriver:
+ default: ''
+ description: Configure the classname of the firewall driver to use for
+ implementing security groups. Possible values depend on
+ system configuration. The default value of an empty string
+ will result in a default supported configuration.
+ type: string
+ NeutronEnableL2Pop:
+ type: string
+ description: Enable/disable the L2 population feature in the Neutron agents.
+ default: 'False'
+ NeutronTunnelTypes:
+ default: 'vxlan'
+ description: The tunnel types for the Neutron tenant network.
+ type: comma_delimited_list
+
+conditions:
+ no_firewall_driver: {equals : [{get_param: NeutronLinuxbridgeFirewallDriver}, '']}
+
+resources:
+
+ NeutronBase:
+ type: ./neutron-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron Linuxbridge agent service.
+ value:
+ service_name: neutron_linuxbridge_agent
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - neutron::agents::ml2::linuxbridge::physical_interface_mappings: {get_param: PhysicalInterfaceMapping}
+ neutron::agents::ml2::linuxbridge::l2_population: {get_param: NeutronEnableL2Pop}
+ neutron::agents::ml2::linuxbridge::tunnel_types: {get_param: NeutronTunnelTypes}
+ neutron::agents::ml2::linuxbridge::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
+ neutron::agents::dhcp::interface_driver: 'neutron.agent.linux.interface.BridgeInterfaceDriver'
+ neutron::agents::dhcp::dhcp_driver: 'neutron.agent.linux.dhcp.Dnsmasq'
+ -
+ if:
+ - no_firewall_driver
+ - {}
+ - neutron::agents::ml2::linuxbridge::firewall_driver: {get_param: NeutronLinuxbridgeFirewallDriver}
+ step_config: |
+ include ::tripleo::profile::base::neutron::linuxbridge
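A usage sketch, assuming the deployment maps this agent into a role via the resource registry; the registry key below is an illustrative assumption, and only PhysicalInterfaceMapping comes from this template:

  resource_registry:
    # hypothetical mapping; use whichever service key your roles reference
    OS::TripleO::Services::NeutronOvsAgent: ../puppet/services/neutron-linuxbridge-agent.yaml
  parameter_defaults:
    PhysicalInterfaceMapping: 'datacentre:eth1'   # illustrative interface name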
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Metadata agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
tag: openstack.neutron.agent.metadata
path: /var/log/neutron/metadata-agent.log
+conditions:
+ neutron_workers_unset: {equals : [{get_param: NeutronWorkers}, '']}
+
resources:
NeutronBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
map_merge:
- get_attr: [NeutronBase, role_data, config_settings]
- neutron::agents::metadata::shared_secret: {get_param: NeutronMetadataProxySharedSecret}
- neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
neutron::agents::metadata::auth_tenant: 'service'
neutron::agents::metadata::metadata_ip: "%{hiera('nova_metadata_vip')}"
+ -
+ if:
+ - neutron_workers_unset
+ - {}
+ - neutron::agents::metadata::metadata_workers: {get_param: NeutronWorkers}
step_config: |
include tripleo::profile::base::neutron::metadata
upgrade_tasks:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Midonet plugin and services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron OVS agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
OpenVswitchUpgrade:
type: ./openvswitch-upgrade.yaml
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron OVS DPDK configured with Puppet for Compute Role
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
HostCpusList:
+ default: "0"
description: List of cores to be used for host process
type: string
constraints:
- - allowed_pattern: "'[0-9,-]+'"
+ - allowed_pattern: "[0-9,-]+"
NeutronDpdkCoreList:
+ default: ""
description: List of cores to be used for DPDK Poll Mode Driver
type: string
constraints:
- - allowed_pattern: "'[0-9,-]+'"
+ - allowed_pattern: "[0-9,-]*"
NeutronDpdkMemoryChannels:
+ default: ""
description: Number of memory channels to be used for DPDK
type: string
constraints:
- - allowed_pattern: "[0-9]+"
+ - allowed_pattern: "[0-9]*"
NeutronDpdkSocketMemory:
default: ""
description: Memory allocated for each socket
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
OpenVswitchUpgrade:
type: ./openvswitch-upgrade.yaml
+ # Merging role-specific parameters (RoleParameters) with the default parameters.
+ # RoleParameters will have the precedence over the default parameters.
+ RoleParametersValue:
+ type: OS::Heat::Value
+ properties:
+ type: json
+ value:
+ map_replace:
+ - map_replace:
+ - neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
+ neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
+ vswitch::dpdk::driver_type: NeutronDpdkDriverType
+ vswitch::dpdk::host_core_list: HostCpusList
+ vswitch::dpdk::pmd_core_list: NeutronDpdkCoreList
+ vswitch::dpdk::memory_channels: NeutronDpdkMemoryChannels
+ vswitch::dpdk::socket_mem: NeutronDpdkSocketMemory
+ - values: {get_param: [RoleParameters]}
+ - values:
+ NeutronDatapathType: {get_param: NeutronDatapathType}
+ NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
+ NeutronDpdkDriverType: {get_param: NeutronDpdkDriverType}
+ HostCpusList: {get_param: HostCpusList}
+ NeutronDpdkCoreList: {get_param: NeutronDpdkCoreList}
+ NeutronDpdkMemoryChannels: {get_param: NeutronDpdkMemoryChannels}
+ NeutronDpdkSocketMemory: {get_param: NeutronDpdkSocketMemory}
+
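To unpack the nested map_replace above: the inner pass swaps each placeholder value for the matching RoleParameters entry when one exists, and the outer pass resolves whatever placeholders remain against the global parameters, which is how role-specific values take precedence over defaults. A worked sketch (values illustrative):

  # Given RoleParameters = {NeutronDpdkCoreList: "2,3"} and a global NeutronDpdkCoreList of "1":
  #   inner map_replace: vswitch::dpdk::pmd_core_list  -> "2,3"   (role value wins)
  #   outer map_replace: placeholders RoleParameters left untouched, e.g.
  #                      vswitch::dpdk::memory_channels -> the global NeutronDpdkMemoryChannels value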
outputs:
role_data:
description: Role data for the Neutron OVS DPDK Agent service.
- keys:
tripleo.neutron_ovs_agent.firewall_rules: tripleo.neutron_ovs_dpdk_agent.firewall_rules
- neutron::agents::ml2::ovs::enable_dpdk: true
- neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
- neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
- vswitch::dpdk::host_core_list: {get_param: HostCpusList}
- vswitch::dpdk::pmd_core_list: {get_param: NeutronDpdkCoreList}
- vswitch::dpdk::memory_channels: {get_param: NeutronDpdkMemoryChannels}
- vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory}
- vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType}
+ - get_attr: [RoleParametersValue, value]
step_config: {get_attr: [NeutronOvsAgent, role_data, step_config]}
upgrade_tasks:
get_attr: [OpenVswitchUpgrade, role_data, upgrade_tasks]
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configure hieradata for Fujitsu C-Fabric plugin configuration
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: Configure hieradata for Fujitsu fossw plugin configuration
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron ML2/OpenDaylight plugin configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron ML2/OVN plugin configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron ML2 Plugin configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Neutron NSX
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ DefaultOverlayTz:
+ description: UUID of the default NSX overlay transport zone.
+ type: string
+ DefaultTier0Router:
+ description: UUID of the default tier0 router that will be used for connecting to
+ tier1 logical routers and configuring external networks.
+ type: string
+ NsxApiManagers:
+ description: IP address of one or more NSX managers separated by commas.
+ type: string
+ NsxApiUser:
+ description: User name of NSX Manager.
+ type: string
+ NsxApiPassword:
+ description: Password of NSX Manager.
+ type: string
+ NativeDhcpMetadata:
+ default: True
+ description: Flag indicating whether native DHCP/metadata services are used.
+ type: string
+ DhcpProfileUuid:
+ description: This is the UUID of the NSX DHCP Profile that will be used to enable
+ native DHCP service.
+ type: string
+ MetadataProxyUuid:
+ description: This is the UUID of the NSX Metadata Proxy that will be used to enable
+ native metadata service.
+ type: string
+
+outputs:
+ role_data:
+ description: Role data for the Neutron NSX plugin
+ value:
+ service_name: neutron_plugin_nsx
+ config_settings:
+ neutron::plugins::nsx_v3::default_overlay_tz: {get_param: DefaultOverlayTz}
+ neutron::plugins::nsx_v3::default_tier0_router: {get_param: DefaultTier0Router}
+ neutron::plugins::nsx_v3::nsx_api_managers: {get_param: NsxApiManagers}
+ neutron::plugins::nsx_v3::nsx_api_user: {get_param: NsxApiUser}
+ neutron::plugins::nsx_v3::nsx_api_password: {get_param: NsxApiPassword}
+ neutron::plugins::nsx_v3::native_dhcp_metadata: {get_param: NativeDhcpMetadata}
+ neutron::plugins::nsx_v3::dhcp_profile_uuid: {get_param: DhcpProfileUuid}
+ neutron::plugins::nsx_v3::metadata_proxy_uuid: {get_param: MetadataProxyUuid}
+
+ step_config: |
+ include tripleo::profile::base::neutron::plugins::nsx_v3
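A minimal environment sketch for the NSX plugin; every key is a parameter of this template, while all values are illustrative placeholders:

  parameter_defaults:
    NsxApiManagers: '192.0.2.10,192.0.2.11'
    NsxApiUser: admin
    NsxApiPassword: <nsx-manager-password>
    DefaultOverlayTz: <overlay-transport-zone-uuid>
    DefaultTier0Router: <tier0-router-uuid>
    DhcpProfileUuid: <dhcp-profile-uuid>
    MetadataProxyUuid: <metadata-proxy-uuid>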
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Nuage plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron Plumgrid plugin
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: neutron_plugin_plumgrid
config_settings:
neutron::plugins::plumgrid::connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://neutron:'
- - {get_param: NeutronPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/ovs_neutron'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: neutron
+ password: {get_param: NeutronPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /ovs_neutron
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
neutron::plugins::plumgrid::controller_priv_host: {get_param: [EndpointMap, KeystoneInternal, host]}
neutron::plugins::plumgrid::admin_password: {get_param: AdminPassword}
neutron::plugins::plumgrid::metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Neutron SR-IOV nic agent configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
--- /dev/null
+heat_template_version: pike
+
+description: >
+ OpenStack Neutron ML2/VPP agent configured with Puppet
+
+parameters:
+ ServiceNetMap:
+ default: {}
+ description: >
+ Mapping of service_name -> network name. Typically set via
+ parameter_defaults in the resource registry. This mapping overrides those
+ in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+ NeutronVPPAgentPhysnets:
+ description: >
+ List of <physical_network>:<VPP Interface>
+ Example: "physnet1:GigabitEthernet2/2/0,physnet2:GigabitEthernet2/3/0"
+ type: comma_delimited_list
+ default: ""
+
+resources:
+
+ NeutronBase:
+ type: ./neutron-base.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+
+outputs:
+ role_data:
+ description: Role data for the Neutron ML2/VPP agent service.
+ value:
+ service_name: neutron_vpp_agent
+ config_settings:
+ map_merge:
+ - get_attr: [NeutronBase, role_data, config_settings]
+ - tripleo::profile::base::neutron::agents::vpp::physnet_mapping: {get_param: NeutronVPPAgentPhysnets}
+ step_config: |
+ include ::tripleo::profile::base::neutron::agents::vpp
\ No newline at end of file
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
# ServiceNetMap: {get_param: ServiceNetMap}
# DefaultPasswords: {get_param: DefaultPasswords}
# EndpointMap: {get_param: EndpointMap}
+ # RoleName: {get_param: RoleName}
+ # RoleParameters: {get_param: RoleParameters}
# EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- name: Run puppet apply to set transport_url in nova.conf
tags: step5
when: is_bootstrap_node
- command: puppet apply --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+ command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
register: puppet_apply_nova_api_upgrade
failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
changed_when: puppet_apply_nova_api_upgrade.rc == 2
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova base service. Shared for all Nova services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
nova::placement::os_region_name: {get_param: KeystoneRegion}
nova::placement::os_interface: {get_param: NovaPlacementAPIInterface}
nova::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: nova
+ password: {get_param: NovaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /nova
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
nova::cell0_database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova_cell0'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: nova
+ password: {get_param: NovaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /nova_cell0
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
nova::api_database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova_api:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova_api'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: nova_api
+ password: {get_param: NovaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /nova_api
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
nova::placement_database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://nova_placement:'
- - {get_param: NovaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/nova_placement'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: nova_placement
+ password: {get_param: NovaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /nova_placement
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
nova::debug: {get_param: Debug}
nova::purge_config: {get_param: EnableConfigPurge}
nova::network::neutron::neutron_project_name: 'service'
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Compute service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: >
Reserved RAM for host processes.
type: number
- default: 2048
+ default: 4096
constraints:
- range: { min: 512 }
MonitoringSubscriptionNovaCompute:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
nova::compute::libvirt::migration_support: false
tripleo::profile::base::nova::manage_migration: true
tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
+ tripleo::profile::base::nova::migration_ssh_localaddrs:
+ - "%{hiera('cold_migration_ssh_inbound_addr')}"
+ - "%{hiera('live_migration_ssh_inbound_addr')}"
+ live_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ cold_migration_ssh_inbound_addr: {get_param: [ServiceNetMap, NovaColdMigrationNetwork]}
tripleo::profile::base::nova::nova_compute_enabled: true
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
template: "dest=/etc/nova/nova.conf section=upgrade_levels option=compute value=LEVEL"
params:
LEVEL: {get_param: UpgradeLevelNovaCompute}
+ - name: install openstack-nova-migration
+ tags: step3
+ yum: name=openstack-nova-migration state=latest
- name: Start nova-compute service
tags: step6
service: name=openstack-nova-compute state=started
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Conductor service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Consoleauth service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Compute service configured with Puppet and using Ironic
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Libvirt service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
MonitoringSubscriptionNovaLibvirt:
default: 'overcloud-nova-libvirt'
type: string
+ EnableInternalTLS:
+ type: boolean
+ default: false
+ UseTLSTransportForLiveMigration:
+ type: boolean
+ default: true
+ description: If set to true and if EnableInternalTLS is enabled, it will
+ set the libvirt URI's transport to tls and configure the
+ relevant keys for libvirt.
+ InternalTLSCAFile:
+ default: '/etc/ipa/ca.crt'
+ type: string
+ description: Specifies the default CA cert to use if TLS is used for
+ services in the internal network.
+ LibvirtCACert:
+ type: string
+ default: ''
+ description: This specifies the CA certificate to use for TLS in libvirt.
+ This file will be symlinked to the default CA path in libvirt,
+ which is /etc/pki/CA/cacert.pem. Note that due to limitations
+ of GNU TLS, which is the TLS backend for libvirt, the file must
+ be less than 65K (so we can't use the system's CA bundle).
+ This parameter should be used if the default (which comes from
+ the InternalTLSCAFile parameter) is not desired. The current
+ default reflects TripleO's default CA, which is FreeIPA.
+ It will only be used if internal TLS is enabled.
+
+conditions:
+
+ use_tls_for_live_migration:
+ and:
+ - equals:
+ - {get_param: EnableInternalTLS}
+ - true
+ - equals:
+ - {get_param: UseTLSTransportForLiveMigration}
+ - true
+
+ libvirt_specific_ca_unset:
+ equals:
+ - {get_param: LibvirtCACert}
+ - ''
resources:
NovaBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- '49152-49215'
- '5900-5999'
+ -
+ if:
+ - use_tls_for_live_migration
+ -
+ generate_service_certificates: true
+ tripleo::profile::base::nova::libvirt_tls: true
+ nova::migration::libvirt::live_migration_inbound_addr:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ tripleo::certmonger::ca::libvirt::origin_ca_pem:
+ if:
+ - libvirt_specific_ca_unset
+ - get_param: InternalTLSCAFile
+ - get_param: LibvirtCACert
+ tripleo::certmonger::libvirt_dirs::certificate_dir: '/etc/pki/libvirt'
+ tripleo::certmonger::libvirt_dirs::key_dir: '/etc/pki/libvirt/private'
+ libvirt_certificates_specs:
+ libvirt-server-cert:
+ service_certificate: '/etc/pki/libvirt/servercert.pem'
+ service_key: '/etc/pki/libvirt/private/serverkey.pem'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ principal:
+ str_replace:
+ template: "libvirt/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ libvirt-client-cert:
+ service_certificate: '/etc/pki/libvirt/clientcert.pem'
+ service_key: '/etc/pki/libvirt/private/clientkey.pem'
+ hostname:
+ str_replace:
+ template: "%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ principal:
+ str_replace:
+ template: "libvirt/%{hiera('fqdn_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ - {}
step_config: |
include tripleo::profile::base::nova::libvirt
+ metadata_settings:
+ if:
+ - use_tls_for_live_migration
+ -
+ - service: libvirt
+ network: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
+ type: node
+ - null
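
A minimal environment-file sketch tying the new TLS parameters together (the file name and CA path here are hypothetical, not part of this change):

    parameter_defaults:
      EnableInternalTLS: true
      UseTLSTransportForLiveMigration: true
      # Optional: only set when the CA differs from InternalTLSCAFile.
      LibvirtCACert: '/etc/pki/custom-ca/ca.crt'
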
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Placement API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Scheduler service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Nova Vncproxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia API service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- octavia::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
octavia::policy::policies: {get_param: OctaviaApiPolicies}
octavia::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://octavia:'
- - {get_param: OctaviaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/octavia'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: octavia
+ password: {get_param: OctaviaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /octavia
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
octavia::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
octavia::keystone::authtoken::project_name: 'service'
octavia::keystone::authtoken::password: {get_param: OctaviaPassword}
- 9876
- 13876
octavia::api::host: {get_param: [ServiceNetMap, OctaviaApiNetwork]}
- neutron::server::service_providers: ['LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default']
step_config: |
include tripleo::profile::base::octavia::api
service_config_settings:
octavia::db::mysql::allowed_hosts:
- '%'
- "%{hiera('mysql_bind_host')}"
+ neutron_api:
+ neutron::server::service_providers: ['LOADBALANCERV2:Octavia:neutron_lbaas.drivers.octavia.driver.OctaviaDriver:default']
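
Note on the conversion above: make_url is available from heat_template_version pike onward and assembles the same connection string the removed list_join produced. Assuming the usual mysql+pymysql protocol from EndpointMap, it renders to roughly:

    mysql+pymysql://octavia:<password>@<host>/octavia?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo
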
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia base service. Shared for all Octavia services
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia Health Manager service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia Housekeeping service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Octavia Worker service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenDaylight SDN Controller.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenDaylight OVS Configuration.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
resources:
OpenVswitchUpgrade:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Openvswitch package special handling for upgrade.
-heat_template_version: ocata
+heat_template_version: pike
description: >
OVN databases configured with puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Pacemaker service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Ceph RBD mirror service.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Backup service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
CinderBackupBackend: {get_param: CinderBackupBackend}
CinderBackupRbdPoolName: {get_param: CinderBackupRbdPoolName}
CephClientUserName: {get_param: CephClientUserName}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Cinder Volume service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
cinder::host: hostgroup
step_config:
include ::tripleo::profile::pacemaker::cinder::volume
+ upgrade_tasks:
+ - name: Stop cinder_volume service (pacemaker)
+ tags: step1
+ pacemaker_resource:
+ resource: openstack-cinder-volume
+ state: disable
+ wait_for_resource: true
+ - name: Sync cinder DB
+ tags: step5
+ command: cinder-manage db sync
+ - name: Start cinder_volume service (pacemaker)
+ tags: step5
+ pacemaker_resource:
+ resource: openstack-cinder-volume
+ state: enable
-heat_template_version: ocata
+heat_template_version: pike
description: >
MySQL with Pacemaker service deployment using puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Redis service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- get_attr: [RedisBase, role_data, config_settings]
- redis::service_manage: false
redis::notify_service: false
+ redis::managed_by_cluster_manager: true
step_config: |
include ::tripleo::profile::pacemaker::database::redis
-heat_template_version: ocata
+heat_template_version: pike
description: >
HAproxy service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
The manila-share service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
RabbitMQ service with Pacemaker configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
- rabbitmq::service_manage: false
step_config: |
include ::tripleo::profile::pacemaker::rabbitmq
- upgrade_tasks:
- - name: get bootstrap nodeid
- tags: common
- command: hiera bootstrap_nodeid
- register: bootstrap_node
- - name: set is_bootstrap_node fact
- tags: common
- set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
- - name: get rabbitmq policy
- tags: common
- shell: pcs resource show rabbitmq | grep -q -E "Attributes:.*\"ha-mode\":\"all\""
- register: rabbit_ha_mode
- when: is_bootstrap_node
- ignore_errors: true
- - name: set migrate_rabbit_ha_mode fact
- tags: common
- set_fact: migrate_rabbit_ha_mode={{rabbit_ha_mode.rc == 0}}
- when: is_bootstrap_node
- - name: Fixup for rabbitmq ha-queues LP#1668600
- tags: step0,pre-upgrade
- shell: |
- nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
- nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
- if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
- echo "ERROR: The nr. of HA queues during the rabbit upgrade is out of range: $nr_queues"
- exit 1
- fi
- pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
- when: is_bootstrap_node and migrate_rabbit_ha_mode
metadata_settings:
get_attr: [RabbitMQServiceBase, role_data, metadata_settings]
-heat_template_version: ocata
+heat_template_version: pike
description: >
Pacemaker remote service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
- OpenStack Panko API service configured with Puppet
+ OpenStack Panko API service configured with Puppet.
+  Note: this service is deprecated in the Pike release and will
+  be disabled in future releases.
parameters:
ServiceNetMap:
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
ApacheServiceBase:
type: ./apache.yaml
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
metadata_settings:
get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: Check if httpd is deployed
- command: systemctl is-enabled httpd
- tags: common
- ignore_errors: True
- register: httpd_enabled
- - name: "PreUpgrade step0,validation: Check if httpd is running"
- shell: >
- /usr/bin/systemctl show 'httpd' --property ActiveState |
- grep '\bactive\b'
- when: httpd_enabled.rc == 0
- tags: step0,validation
- - name: Stop panko-api service (running under httpd)
- tags: step1
- service: name=httpd state=stopped
- when: httpd_enabled.rc == 0
- - name: Install openstack-panko-api package if it was not installed
- tags: step3
- yum: name=openstack-panko-api state=latest
+ yaql:
+ expression: $.data.apache_upgrade + $.data.panko_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ panko_api_upgrade:
+ - name: Check if httpd is deployed
+ command: systemctl is-enabled httpd
+ tags: common
+ ignore_errors: True
+ register: httpd_enabled
+ - name: "PreUpgrade step0,validation: Check if httpd is running"
+ shell: >
+ /usr/bin/systemctl show 'httpd' --property ActiveState |
+ grep '\bactive\b'
+ when: httpd_enabled.rc == 0
+ tags: step0,validation
+ - name: Stop panko-api service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: httpd_enabled.rc == 0
+ - name: Install openstack-panko-api package if it was not installed
+ tags: step3
+ yum: name=openstack-panko-api state=latest
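
The yaql intrinsic above just concatenates the Apache and Panko task lists. A self-contained sketch of the same pattern (the data values are illustrative):

    yaql:
      expression: $.data.first + $.data.second
      data:
        first: ['task-a', 'task-b']
        second: ['task-c']   # evaluates to ['task-a', 'task-b', 'task-c']
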
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Panko service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: panko_base
config_settings:
panko::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://panko:'
- - {get_param: PankoPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/panko'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: panko
+ password: {get_param: PankoPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /panko
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
panko::debug: {get_param: Debug}
panko::auth::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix] }
panko::keystone::authtoken::project_name: 'service'
-heat_template_version: ocata
+heat_template_version: pike
description: >
Qpid dispatch router service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
RabbitMQ service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
hidden: true
RabbitHAQueues:
description:
- The number of HA queues to be configured in rabbit. The default is 0 which will
- be automatically overridden to CEIL(N/2) where N is the number of nodes running
- rabbitmq.
- default: 0
+    The number of HA queues to be configured in rabbit. The default is -1, which
+ translates to "ha-mode all". The special value 0 will be automatically
+ overridden to CEIL(N/2) where N is the number of nodes running rabbitmq.
+ default: -1
type: number
MonitoringSubscriptionRabbitmq:
default: 'overcloud-rabbitmq'
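
A sketch of overriding the RabbitHAQueues default described above (the environment file name is up to the operator):

    parameter_defaults:
      RabbitHAQueues: 0   # 0 = auto CEIL(N/2); -1 (the default) = ha-mode "all"
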
--- /dev/null
+---
+upgrade:
+  - When a service is deployed in WSGI with Apache, make sure the mod_ssl
+    package is deployed during the upgrade process; it is now required
+    by default so Apache can start properly.
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Sahara API service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Sahara base service. Shared for all Sahara services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
service_name: sahara_base
config_settings:
sahara::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://sahara:'
- - {get_param: SaharaPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/sahara'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: sahara
+ password: {get_param: SaharaPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /sahara
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
sahara::rabbit_password: {get_param: RabbitPassword}
sahara::rabbit_user: {get_param: RabbitUserName}
sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Sahara Engine service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
outputs:
role_data:
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configure securetty values
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Utility stack to convert an array of services into a set of combined
description: Mapping of service -> default password. Used to help
pass top level passwords managed by Heat into services.
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+    description: Role-specific parameters to be provided to the service
+ default: {}
+ type: json
resources:
ServiceNetMap: {get_param: ServiceNetMap}
EndpointMap: {get_param: EndpointMap}
DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
LoggingConfiguration:
type: OS::TripleO::LoggingConfiguration
-heat_template_version: ocata
+heat_template_version: pike
description: >
SNMP client configured with Puppet, to facilitate Ceilometer Hardware
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: The user password for SNMPd with readonly rights running on all Overcloud nodes
type: string
hidden: true
+ SnmpdBindHost:
+    description: An array of bind host addresses on which the SNMP daemon will listen.
+ type: comma_delimited_list
+ default: ['udp:161','udp6:[::1]:161']
+ SnmpdOptions:
+    description: A string containing the command-line options passed to snmpd
+ type: string
+ default: '-LS0-5d'
outputs:
role_data:
config_settings:
tripleo::profile::base::snmp::snmpd_user: {get_param: SnmpdReadonlyUserName}
tripleo::profile::base::snmp::snmpd_password: {get_param: SnmpdReadonlyUserPassword}
+ snmp::agentaddress: {get_param: SnmpdBindHost}
+ snmp::snmpd_options: {get_param: SnmpdOptions}
tripleo.snmp.firewall_rules:
'127 snmp':
dport: 161
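
As an illustration, both new SNMP knobs can be overridden together (the values here are hypothetical):

    parameter_defaults:
      SnmpdBindHost: ['udp:161']   # listen on IPv4 only
      SnmpdOptions: '-LS0-4d'      # log only up to warning priority
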
-heat_template_version: ocata
+heat_template_version: pike
description: >
Configure sshd_config
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
default: ''
description: Configures Banner text in sshd_config
type: string
+ MessageOfTheDay:
+ default: ''
+ description: Configures /etc/motd text
+ type: string
+ SshServerOptions:
+ default:
+ HostKey:
+ - '/etc/ssh/ssh_host_rsa_key'
+ - '/etc/ssh/ssh_host_ecdsa_key'
+ - '/etc/ssh/ssh_host_ed25519_key'
+ SyslogFacility: 'AUTHPRIV'
+ AuthorizedKeysFile: '.ssh/authorized_keys'
+ PasswordAuthentication: 'no'
+ ChallengeResponseAuthentication: 'no'
+ GSSAPIAuthentication: 'yes'
+ GSSAPICleanupCredentials: 'no'
+ UsePAM: 'yes'
+ X11Forwarding: 'yes'
+ UsePrivilegeSeparation: 'sandbox'
+ AcceptEnv:
+ - 'LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES'
+ - 'LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT'
+ - 'LC_IDENTIFICATION LC_ALL LANGUAGE'
+ - 'XMODIFIERS'
+ Subsystem: 'sftp /usr/libexec/openssh/sftp-server'
+ description: Mapping of sshd_config values
+ type: json
outputs:
role_data:
service_name: sshd
config_settings:
tripleo::profile::base::sshd::bannertext: {get_param: BannerText}
+ tripleo::profile::base::sshd::motd: {get_param: MessageOfTheDay}
+ tripleo::profile::base::sshd::options: {get_param: SshServerOptions}
step_config: |
include ::tripleo::profile::base::sshd
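
Because a parameter override replaces the whole SshServerOptions map (Heat json parameters are not deep-merged), an override sketch has to restate every key it still wants (the values here are illustrative):

    parameter_defaults:
      BannerText: 'Authorized use only.'
      MessageOfTheDay: 'Managed by TripleO.'
      SshServerOptions:
        HostKey:
          - '/etc/ssh/ssh_host_rsa_key'
        PasswordAuthentication: 'no'
        X11Forwarding: 'no'
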
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Proxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Proxy service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
description: Set to False to disable the swift proxy ceilometer pipeline.
default: True
type: boolean
+ SwiftCeilometerIgnoreProjects:
+ default: ['services']
+    description: Comma-separated list of project names to ignore.
+ type: comma_delimited_list
RabbitClientPort:
default: 5672
description: Set rabbit subscriber port, change this if using SSL
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
TLSProxyBase:
type: OS::TripleO::Services::TLSProxyBase
swift::proxy::workers: {get_param: SwiftWorkers}
swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+ swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ swift::proxy::ceilometer::password: {get_param: SwiftPassword}
+ swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
swift::proxy::ceilometer::nonblocking_notify: true
tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Ringbuilder
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Swift Storage service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
ServiceNetMap: {get_param: ServiceNetMap}
DefaultPasswords: {get_param: DefaultPasswords}
EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
conditions:
swift_mount_check:
-heat_template_version: ocata
+heat_template_version: pike
description: >
OpenStack Tacker service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
config_settings:
tacker_password: {get_param: TackerPassword}
tacker::db::database_connection:
- list_join:
- - ''
- - - {get_param: [EndpointMap, MysqlInternal, protocol]}
- - '://tacker:'
- - {get_param: TackerPassword}
- - '@'
- - {get_param: [EndpointMap, MysqlInternal, host]}
- - '/tacker'
- - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+ make_url:
+ scheme: {get_param: [EndpointMap, MysqlInternal, protocol]}
+ username: tacker
+ password: {get_param: TackerPassword}
+ host: {get_param: [EndpointMap, MysqlInternal, host]}
+ path: /tacker
+ query:
+ read_default_file: /etc/my.cnf.d/tripleo.cnf
+ read_default_group: tripleo
tacker::debug: {get_param: Debug}
tacker::rpc_backend: rabbit
-heat_template_version: ocata
+heat_template_version: pike
description: >
NTP service deployment using puppet, this YAML file
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Composable Timezone service
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
TripleO Firewall settings
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
TripleO Package installation settings
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
Vpp service configured with Puppet
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
EndpointMap:
default: {}
description: Mapping of service endpoint -> protocol. Typically set
-heat_template_version: ocata
+heat_template_version: pike
description: >
  Openstack Zaqar service. Shared for all Zaqar services.
DefaultPasswords:
default: {}
type: json
+ RoleName:
+ default: ''
+ description: Role name on which the service is applied
+ type: string
+ RoleParameters:
+ default: {}
+ description: Parameters specific to the role
+ type: json
Debug:
default: ''
description: Set to True to enable debugging on all services.
e.g. { zaqar-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
default: {}
type: json
+ ZaqarWorkers:
+ type: string
+ description: Set the number of workers for zaqar::wsgi::apache
+ default: '%{::os_workers}'
+ EnableInternalTLS:
+ type: boolean
+ default: false
+
+conditions:
+  zaqar_workers_zero: {equals: [{get_param: ZaqarWorkers}, 0]}
+
+resources:
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
outputs:
role_data:
value:
service_name: zaqar
config_settings:
- zaqar::policy::policies: {get_param: ZaqarPolicies}
- zaqar::keystone::authtoken::password: {get_param: ZaqarPassword}
- zaqar::keystone::authtoken::project_name: 'service'
- zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- zaqar::debug: {get_param: Debug}
- zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]}
- zaqar::transport::wsgi::bind: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
- zaqar::message_pipeline: 'zaqar.notification.notifier'
- zaqar::unreliable: true
+ map_merge:
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - zaqar::policy::policies: {get_param: ZaqarPolicies}
+ zaqar::keystone::authtoken::password: {get_param: ZaqarPassword}
+ zaqar::keystone::authtoken::project_name: 'service'
+ zaqar::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ zaqar::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ zaqar::debug: {get_param: Debug}
+ zaqar::server::service_name: 'httpd'
+ zaqar::transport::websocket::bind: {get_param: [EndpointMap, ZaqarInternal, host]}
+ zaqar::wsgi::apache::ssl: false
+ zaqar::wsgi::apache::bind_host: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
+ zaqar::message_pipeline: 'zaqar.notification.notifier'
+ zaqar::unreliable: true
+ zaqar::wsgi::apache::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, ZaqarApiNetwork]}
+ -
+ if:
+ - zaqar_workers_zero
+ - {}
+ - zaqar::wsgi::apache::workers: {get_param: ZaqarWorkers}
service_config_settings:
keystone:
zaqar::keystone::auth::password: {get_param: ZaqarPassword}
step_config: |
include ::tripleo::profile::base::zaqar
upgrade_tasks:
- - name: Check if zaqar is deployed
- command: systemctl is-enabled openstack-zaqar
- tags: common
- ignore_errors: True
- register: zaqar_enabled
- - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running"
- shell: >
- /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState |
- grep '\bactive\b'
- when: zaqar_enabled.rc == 0
- tags: step0,validation
- - name: Stop zaqar service
- tags: step1
- when: zaqar_enabled.rc == 0
- service: name=openstack-zaqar state=stopped
- - name: Install openstack-zaqar package if it was disabled
- tags: step3
- yum: name=openstack-zaqar state=latest
- when: zaqar_enabled.rc != 0
+ yaql:
+ expression: $.data.apache_upgrade + $.data.zaqar_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ zaqar_upgrade:
+ - name: Check if zaqar is deployed
+ command: systemctl is-enabled openstack-zaqar
+ tags: common
+ ignore_errors: True
+ register: zaqar_enabled
+ - name: "PreUpgrade step0,validation: Check if openstack-zaqar is running"
+ shell: >
+ /usr/bin/systemctl show 'openstack-zaqar' --property ActiveState |
+ grep '\bactive\b'
+ when: zaqar_enabled.rc == 0
+ tags: step0,validation
+ - name: Check for zaqar running under apache (post upgrade)
+ tags: step1
+ shell: "httpd -t -D DUMP_VHOSTS | grep -q zaqar_wsgi"
+ register: zaqar_apache
+ ignore_errors: true
+ - name: Stop zaqar service (running under httpd)
+ tags: step1
+ service: name=httpd state=stopped
+ when: zaqar_apache.rc == 0
+ - name: Stop and disable zaqar service (pre-upgrade not under httpd)
+ tags: step1
+ when: zaqar_enabled.rc == 0
+ service: name=openstack-zaqar state=stopped enabled=no
+ - name: Install openstack-zaqar package if it was disabled
+ tags: step3
+ yum: name=openstack-zaqar state=latest
+ when: zaqar_enabled.rc != 0
-heat_template_version: ocata
+heat_template_version: pike
description: 'Upgrade via ansible by applying a step-related tag'
parameters:
--- /dev/null
+---
+features:
+ - Adds the InternalTLSCAFile parameter, which defines which CA file should be
+ used by the internal services to verify that the peer's certificate is
+ trusted. This is applicable if internal TLS is enabled. Currently, it
+ defaults to using the CA file for FreeIPA, which is the default CA.
--- /dev/null
+---
+upgrade:
+  - Mongodb is no longer used by default, so it now has to be enabled
+    explicitly when it is needed.
+other:
+  - Mongodb is not used by any service we enable by default, so it has been
+    removed from the default services. It is now pulled in only by the
+    services that use it (zaqar and ceilometer-collector).
--- /dev/null
+---
+features:
+ - |
+ If TLS in the internal network is enabled, libvirt's transport defaults to
+ using TLS. This can be changed by setting the ``UseTLSTransportForLiveMigration``
+ parameter, which is ``true`` by default.
--- /dev/null
+---
+fixes:
+ - Previously only the VIPs and their associated hostnames were present
+ in the HostsEntry output, due to the hosts_entries output on the
+ hosts-config.yaml nested stack being empty. It was referencing an
+ invalid attribute. See
+ https://bugs.launchpad.net/tripleo/+bug/1683517
--- /dev/null
+---
+features:
+  - Add new cadf.yaml environment that configures
+    Keystone to emit CADF notifications. This standard
+    provides auditing capabilities for security
+    compliance, and is intended to be used for deploying
+    TripleO with hardened security.
--- /dev/null
+---
+features:
+ - Add support for L2 Gateway Neutron agent
--- /dev/null
+---
+features:
+ - Added Pure Storage FlashArray iSCSI and FC backend support for cinder
--- /dev/null
+---
+fixes:
+ - |
+    During a deployment on lower-spec systems, the "db sync" can take longer
+    than five minutes. The value of DatabaseSyncTimeout has been changed from
+    300 to 900 in the environment file "low-memory-usage.yaml".
--- /dev/null
+---
+upgrade:
+ - |
+    We no longer change the rabbitmq ha-mode policy during upgrades.
+ The policy chosen at deploy time will remain the same but can be changed
+ manually.
+fixes:
+ - |
+    Due to https://bugs.launchpad.net/tripleo/+bug/1686337 we switch the
+    rabbitmq default back to ha-mode "all". This is to make the installation
+ more robust in the face of network issues.
--- /dev/null
+---
+features:
+ - |
+    By default, don't log a message in syslog for each incoming SNMP query;
+    the default log option is now '-LS0-5d'. The operator can customize
+    the log level via a parameter.
--- /dev/null
+---
+upgrade:
+ - With expirer deprecated and disabled by default, there is an upgrade
+ impact here. If you had expirer enabled in ocata and you upgrade to
+    pike, the expirer will not be enabled anymore. If you wish to use
+    the expirer, ensure you include the ceilometer-expirer.yaml
+    in your upgrade deploy command. Also note that with the collector
+    disabled, there is no need for the expirer to be running.
+deprecations:
+  - Deprecate and turn off the expirer service, as was done for the
+    collector. Without the collector and standard storage, the expirer
+    has no use.
--- /dev/null
+---
+upgrade:
+ - With collector deprecated and disabled by default, there is an upgrade
+ impact here. If you had collector enabled in ocata and you upgrade to
+    pike, the collector will not be enabled anymore. If you wish to use
+    the collector, ensure you include the ceilometer-collector.yaml
+    in your upgrade deploy command. We recommend switching to using the
+    new pipeline approach with the publisher instead.
+deprecations:
+  - Deprecate and disable the ceilometer collector service by default. Instead,
+    use the publisher directly in the pipeline to push data where appropriate.
+    The collector can be manually re-enabled by passing the
+    ceilometer-collector.yaml environment file (included in the environments
+    directory) to the deploy command.
+ By default, the pipeline publisher pushes data automatically to gnocchi.
--- /dev/null
+---
+deprecations:
+  - The Panko API service is deprecated in the Pike release. Note that it
+    will remain enabled by default as there is no replacement yet. It will
+    be disabled in future releases.
--- /dev/null
+---
+upgrade:
+ - |
+ Disabled cephfs snapshot support (ManilaCephFSNativeCephFSEnableSnapshots
+ parameter) in manila by default.
--- /dev/null
+---
+upgrade:
+ - |
+    Disable the default vhost for apache. This is required for hybrid
+    deployments where WSGI-based services run both on the host and in
+    containers without conflicting on the default ports.
--- /dev/null
+---
+other:
+ - |
+    All nodes now enable the ``arp_accept`` sysctl setting to help with
+    honoring gratuitous ARP packets in their ARP tables. While sources of
+    gratuitous ARP packets are diverse, this is especially useful for Neutron
+    floating IP addresses that roam between devices, and for which the
+    Neutron L3 agent sends gratuitous ARP packets to update all network
+    nodes about the new locations of IP addresses.
--- /dev/null
+---
+fixes:
+ - Expose metric_processing_delay to tweak gnocchi performance.
--- /dev/null
+---
+features:
+ - Deploy Glance with Keystone v3 endpoints and make
+ sure it doesn't rely on Keystone v2 anymore.
--- /dev/null
+---
+other:
+ - Increased the default of NovaReservedHostMemory for
+ Compute nodes to 4096 MB.
--- /dev/null
+---
+features:
+ - |
+    New configuration ``IronicDefaultBootOption`` allows changing the default
+ boot option to use for bare metal instances in the overcloud.
+upgrade:
+ - |
+    The default boot option for bare metal instances in the overcloud was changed
+ to "local". This was already the default for whole-disk images, but for
+ partition images it requires ``grub2`` to be installed on them.
+ Use the new ``IronicDefaultBootOption`` configuration to override, or
+ set ``boot_option`` capability on nodes and flavors.
--- /dev/null
+---
+features:
+ - |
+ Configuring enabled Ironic hardware types is now possible via new
+ ``IronicEnabledHardwareTypes`` parameter. See this spec for details:
+ http://specs.openstack.org/openstack/ironic-specs/specs/approved/driver-composition-reform.html.
+ - |
+ Bare metal serial console support via ``socat`` utility is enabled for
+ Ironic hardware types supporting it (currently only ``ipmi``).
--- /dev/null
+---
+upgrade:
+ - |
+ Neutron API controller no longer advertises ``dvr`` extension if the
+ cloud is not configured for DVR. This is achieved by setting ``enable_dvr``
+    to match the ``NeutronEnableDVR`` setting.
--- /dev/null
+---
+features:
+ - Move Mistral API to use mod_wsgi under Apache.
+upgrade:
+ - Mistral API systemd service will be stopped and
+ disabled.
--- /dev/null
+---
+features:
+ - Add support for NSX Neutron plugin
--- /dev/null
+---
+fixes:
+ - |
+    Octavia API and Neutron Server can now be deployed on separate nodes.
+ See https://bugs.launchpad.net/tripleo/+bug/1687026
--- /dev/null
+---
+features:
+  - The server resource type, OS::TripleO::Server, can now be
+ mapped per role instead of globally. This allows users to
+ mix baremetal (OS::Nova::Server) and
+ deployed-server (OS::Heat::DeployedServer) server resources
+ in the same deployment. See
+ https://blueprints.launchpad.net/tripleo/+spec/pluggable-server-type-per-role
--- /dev/null
+---
+features:
+ - |
+ Support for Redfish hardware is enabled by default for overcloud Ironic
+ via the ``redfish`` hardware type.
+ - |
+ Support changing enabled management and power interfaces for hardware types
+ in overcloud Ironic.
--- /dev/null
+---
+features:
+ - Added the ability to blacklist servers by name from being
+    associated with any Heat-triggered SoftwareDeployment
+ resources. The servers are specified in the new
+ DeploymentServerBlacklist parameter.
--- /dev/null
+---
+features:
+  - Role-specific information is added to the service templates to enable
+    role-specific decisions in the services.
--- /dev/null
+---
+features:
+ - |
+    Added a new parameter to the SNMP profile, SnmpdBindHost,
+    so users can change the bind addresses of the SNMP daemon.
+    The parameter is an array and takes the default values that
+    were previously hardcoded in puppet-tripleo.
--- /dev/null
+---
+features:
+ - |
+  - Added the ability to manage the MOTD banner.
+    Enabled the SSHD composable service by default. Puppet-ssh manages the sshd config.
--- /dev/null
+---
+fixes:
+ - The stack name can now be overridden in the get-occ-config.sh script
+    for deployed servers by setting the $STACK_NAME variable in the
+ environment.
--- /dev/null
+---
+fixes:
+  - This commit merges the [Pre|Post]Puppet and [Pre|Post]Config
+    resources, giving the configuration steps an agnostic name.
+    The [Pre|Post]Puppet resources are removed and should not
+    be used anymore.
--- /dev/null
+---
+fixes:
+ - |
+    Removed the hardcoding of osd_pool_default_min_size. Setting this value
+    to 1 can result in data loss when operating production deployments. Not
+ setting this value (or setting it to 0) will allow ceph to calculate the
+ value based on the current setting of osd_pool_default_size. If the
+ replication count is 3, then the calculated min_size is 2. If the
+    replication count is 1, then the calculated min_size is 1. For POC
+    deployments using a single OSD, set osd_pool_default_size = 1. See
+ description at http://docs.ceph.com/docs/master/rados/configuration/pool-pg-config-ref/
+ Added CephPoolDefaultSize to set default replication size. Default value is 3.
--- /dev/null
+---
+fixes:
+ - Update the default metric processing delay to 30. This will help reduce
+    the metric backlog and won't load up the storage backend.
--- /dev/null
+---
+upgrade:
+ - |
+ The new StackUpdateType parameter is now set to UPGRADE
+ when a major version upgrade is in progress. This enables application
+    configuration via puppet to distinguish a major version upgrade from a
+    normal stack update (e.g. for minor updates or reconfiguration) by
+    inspecting the stack_update_type hiera value. In future, other values may be
+    added to flag e.g. minor updates vs. reconfiguration, but for now only
+    UPGRADE is considered.
--- /dev/null
+---
+issues:
+  - Modify ``NeutronVhostuserSocketDir`` to a separate directory in the DPDK
+    environment file. A different set of permissions is required for creating
+    vhost sockets when the vhost type is dpdkvhostuserclient (which is the
+    default from Ocata).
--- /dev/null
+---
+features:
+ - Adds support for networking-vpp ML2 mechanism driver and agent.
--- /dev/null
+---
+features:
+ - Run the Zaqar WSGI service over httpd in Puppet.
# built documents.
#
# The full version, including alpha/beta/rc tags.
-release = '7.0.0.0b1'
+release = '7.0.0.0b2'
# The short X.Y version.
version = '7.0.0'
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=2.0.0 # Apache-2.0
+pbr!=2.1.0,>=2.0.0 # Apache-2.0
Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
six>=1.9.0 # MIT
- OS::TripleO::Services::NeutronApi
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::NeutronL2gwAgent
+ - OS::TripleO::Services::NeutronLinuxbridgeAgent
- OS::TripleO::Services::RabbitMQ
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- - OS::TripleO::Services::CeilometerCollector
+ # FIXME: This service was disabled in Pike and this entry should be removed
+ # in Queens.
- OS::TripleO::Services::CeilometerExpirer
- OS::TripleO::Services::CeilometerAgentCentral
- OS::TripleO::Services::CeilometerAgentNotification
- OS::TripleO::Services::OctaviaHousekeeping
- OS::TripleO::Services::OctaviaWorker
- OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::Docker
- name: Compute
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::ComputeNeutronCorePlugin
- OS::TripleO::Services::ComputeNeutronOvsAgent
+ - OS::TripleO::Services::NeutronLinuxbridgeAgent
- OS::TripleO::Services::ComputeCeilometerAgent
- OS::TripleO::Services::ComputeNeutronL3Agent
- OS::TripleO::Services::ComputeNeutronMetadataAgent
- OS::TripleO::Services::AuditD
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Vpp
+ - OS::TripleO::Services::NeutronVppAgent
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::Docker
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::NeutronOvsAgent
- OS::TripleO::Services::NeutronDhcpAgent
- - OS::TripleO::Services::AodhApi
- - OS::TripleO::Services::AodhEvaluator
- - OS::TripleO::Services::AodhNotifier
- - OS::TripleO::Services::AodhListener
- - OS::TripleO::Services::GnocchiApi
- - OS::TripleO::Services::GnocchiMetricd
- - OS::TripleO::Services::GnocchiStatsd
- - OS::TripleO::Services::PankoApi
+ - OS::TripleO::Services::UndercloudAodhApi
+ - OS::TripleO::Services::UndercloudAodhEvaluator
+ - OS::TripleO::Services::UndercloudAodhNotifier
+ - OS::TripleO::Services::UndercloudAodhListener
+ - OS::TripleO::Services::UndercloudGnocchiApi
+ - OS::TripleO::Services::UndercloudGnocchiMetricd
+ - OS::TripleO::Services::UndercloudGnocchiStatsd
+ - OS::TripleO::Services::UndercloudPankoApi
+ - OS::TripleO::Services::UndercloudCeilometerAgentCentral
+ - OS::TripleO::Services::UndercloudCeilometerAgentNotification
PyYAML>=3.10.0 # MIT
Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
six>=1.9.0 # MIT
-sphinx>=1.5.1 # BSD
+sphinx!=1.6.1,>=1.5.1 # BSD
oslosphinx>=4.7.0 # Apache-2.0
reno>=1.8.0 # Apache-2.0
parser.add_argument('-r', '--roles-data', metavar='ROLES_DATA',
help="""relative path to the roles_data.yaml file.""",
default='roles_data.yaml')
+ parser.add_argument('-n', '--network-data', metavar='NETWORK_DATA',
+ help="""relative path to the network_data.yaml file.""",
+ default='network_data.yaml')
parser.add_argument('--safe',
action='store_true',
help="""Enable safe mode (do not overwrite files).""",
out_f.write(r_template)
-def process_templates(template_path, role_data_path, output_dir, overwrite):
+def process_templates(template_path, role_data_path, output_dir,
+ network_data_path, overwrite):
with open(role_data_path) as role_data_file:
role_data = yaml.safe_load(role_data_file)
+ with open(network_data_path) as network_data_file:
+ network_data = yaml.safe_load(network_data_file)
+
j2_excludes_path = os.path.join(template_path, 'j2_excludes.yaml')
with open(j2_excludes_path) as role_data_file:
j2_excludes = yaml.safe_load(role_data_file)
print("jinja2 rendering normal template %s" % f)
with open(file_path) as j2_template:
template_data = j2_template.read()
- j2_data = {'roles': role_data}
+ j2_data = {'roles': role_data,
+ 'networks': network_data}
out_f = os.path.basename(f).replace('.j2.yaml', '.yaml')
out_f_path = os.path.join(out_dir, out_f)
_j2_render_to_file(template_data, j2_data, out_f_path,
opts = parse_opts(sys.argv)
role_data_path = os.path.join(opts.base_path, opts.roles_data)
+network_data_path = os.path.join(opts.base_path, opts.network_data)
-process_templates(opts.base_path, role_data_path, opts.output_dir, (not opts.safe))
+process_templates(opts.base_path, role_data_path, opts.output_dir,
+ network_data_path, (not opts.safe))
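
With this change the script reads network_data.yaml alongside roles_data.yaml; assuming the usual tools/process-templates.py entry point, an invocation overriding both paths would be e.g. python tools/process-templates.py -r roles_data.yaml -n network_data.yaml.
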
print("Error couldn't find run-os-net-config.sh relative to filename")
exit_usage()
- for r in six.iteritems(tpl.get('resources', {})):
+ for r in (tpl.get('resources', {})).items():
if (r[1].get('type') == 'OS::Heat::StructuredConfig' and
r[1].get('properties', {}).get('group') == 'os-apply-config' and
r[1].get('properties', {}).get('config', {}).get('os_net_config')):
import yaml
-required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords']
+required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
+ 'RoleName', 'RoleParameters']
envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
'tls-endpoints-public-ip.yaml',
[testenv]
usedevelop = True
+install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
echo "SUCCESS"
}
+# Verify at least one time source is available.
+function ntp_check() {
+ NTP_SERVERS=$(hiera ntp::servers nil |tr -d '[],"')
+ if [[ "$NTP_SERVERS" != "nil" ]];then
+ echo -n "Testing NTP..."
+ NTP_SUCCESS=0
+ for NTP_SERVER in $NTP_SERVERS; do
+ set +e
+ NTPDATE_OUT=$(ntpdate -qud $NTP_SERVER 2>&1)
+ NTPDATE_EXIT=$?
+ set -e
+ if [[ "$NTPDATE_EXIT" == "0" ]];then
+ NTP_SUCCESS=1
+ break
+ else
+ NTPDATE_OUT_FULL="$NTPDATE_OUT_FULL $NTPDATE_OUT"
+ fi
+ done
+ if [[ "$NTP_SUCCESS" == "0" ]];then
+ echo "FAILURE"
+ echo "$NTPDATE_OUT_FULL"
+ exit 1
+ fi
+ echo "SUCCESS"
+ fi
+}
+
ping_controller_ips "$ping_test_ips"
ping_default_gateways
if [[ $validate_fqdn == "True" ]];then
fqdn_check
fi
+if [[ $validate_ntp == "True" ]];then
+ ntp_check
+fi