--- /dev/null
+This directory contains environments that are used in tripleo-ci. They may change from
+release to release or within a release, and should not be relied upon in a production
+environment. The top-level ``environments`` directory in tripleo-heat-templates
+contains the production-ready environment files.
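+
+Production deployments should reference those top-level files instead, for
+example::
+
+  openstack overcloud deploy --templates -e environments/docker.yaml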
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+ # TODO deploy ceph with ceph-ansible: https://review.openstack.org/#/c/465066/
OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
OS::TripleO::Services::CephClient: ../../puppet/services/ceph-client.yaml
- OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
- OS::TripleO::Services::Collectd: ../../puppet/services/metrics/collectd.yaml
- OS::TripleO::Services::Tacker: ../../puppet/services/tacker.yaml
- OS::TripleO::Services::Congress: ../../puppet/services/congress.yaml
+ OS::TripleO::Services::PankoApi: ../../docker/services/panko-api.yaml
+ OS::TripleO::Services::Collectd: ../../docker/services/collectd.yaml
+ OS::TripleO::Services::Tacker: ../../docker/services/tacker.yaml
+ OS::TripleO::Services::Congress: ../../docker/services/congress-api.yaml
+ # TODO fluentd is being containerized: https://review.openstack.org/#/c/467072/
OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
- OS::TripleO::Services::SensuClient: ../../puppet/services/monitoring/sensu-client.yaml
+ OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
# NOTE: This is needed because of upgrades from Ocata to Pike. We
# deploy the initial environment with Ocata templates, and
# overcloud-resource-registry.yaml there doesn't have this Docker
shell: python /var/lib/docker-puppet/docker-puppet.py
environment:
NET_HOST: 'true'
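+        # DEBUG (below) is consumed by docker-puppet.py; any non-empty value enables debug logging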
+ DEBUG: '{{docker_puppet_debug}}'
when: step == "1"
changed_when: false
check_mode: no
import multiprocessing
log = logging.getLogger()
-log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
-ch.setLevel(logging.DEBUG)
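+# NOTE: environment variables are strings, so any non-empty DEBUG value
+# (even 'False') enables debug logging; an unset or empty DEBUG disables it.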
+if os.environ.get('DEBUG', False):
+ log.setLevel(logging.DEBUG)
+ ch.setLevel(logging.DEBUG)
+else:
+ log.setLevel(logging.INFO)
+ ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
if not manifest or not config_image:
continue
- log.debug('config_volume %s' % config_volume)
- log.debug('puppet_tags %s' % puppet_tags)
- log.debug('manifest %s' % manifest)
- log.debug('config_image %s' % config_image)
- log.debug('volumes %s' % volumes)
+ log.info('config_volume %s' % config_volume)
+ log.info('puppet_tags %s' % puppet_tags)
+ log.info('manifest %s' % manifest)
+ log.info('config_image %s' % config_image)
+ log.info('volumes %s' % volumes)
# We key off of config volume for all configs.
if config_volume in configs:
# Append puppet tags and manifest.
# Write a checksum of the config-data dir, this is used as a
# salt to trigger container restart when the config changes
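+    # Pinning --mtime makes the checksum depend only on file contents, not timestamps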
- tar cf - /var/lib/config-data/${NAME} | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
+ tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
fi
""")
subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
cmd_stdout, cmd_stderr = subproc.communicate()
- if cmd_stdout:
- log.debug(cmd_stdout)
- if cmd_stderr:
- log.debug(cmd_stderr)
if subproc.returncode != 0:
log.error('Failed running docker-puppet.py for %s' % config_volume)
+ if cmd_stdout:
+ log.error(cmd_stdout)
+ if cmd_stderr:
+ log.error(cmd_stderr)
else:
+ if cmd_stdout:
+ log.debug(cmd_stdout)
+ if cmd_stderr:
+ log.debug(cmd_stderr)
# only delete successful runs, for debugging
rm_container('docker-puppet-%s' % config_volume)
return subproc.returncode
description: Mapping of service endpoint -> protocol. Typically set
via parameter_defaults in the resource registry.
type: json
+ DockerPuppetDebug:
+ type: string
+ default: ''
+ description: Set to True to enable debug logging with docker-puppet.py
ctlplane_service_ips:
type: json
- name: role_name
- name: update_identifier
- name: bootstrap_server_id
+ - name: docker_puppet_debug
config: {get_file: deploy-steps-playbook.yaml}
{%- for step in range(1, deploy_steps_max) %}
role_name: {{role.name}}
update_identifier: {get_param: DeployIdentifier}
bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
+ docker_puppet_debug: {get_param: DockerPuppetDebug}
{% endfor %}
# END CONFIG STEPS
action='store_true',
help="""Start docker container interactively (-ti).""",
default=False)
+ parser.add_argument('-d', '--detach',
+ action='store_true',
+ help="""Start container detached.""",
+ default=False)
opts = parser.parse_args(argv[1:])
return opts
+
def docker_arg_map(key, value):
value = str(value).encode('ascii', 'ignore')
if len(value) == 0:
'net': "--net=%s" % value,
'pid': "--pid=%s" % value,
'privileged': "--privileged=%s" % value.lower(),
- #'restart': "--restart=%s" % "false",
'user': "--user=%s" % value,
'volumes': "--volume=%s" % value,
'volumes_from': "--volumes-from=%s" % value,
}.get(key, None)
+
def run_docker_container(opts, container_name):
container_found = False
if opts.user:
continue
arg = docker_arg_map(container_data,
- json_data[step][container][container_data])
+ json_data[step][container][container_data])
if arg:
cmd.append(arg)
if opts.user:
cmd.append('--user')
cmd.append(opts.user)
+ if opts.detach:
+ cmd.append('--detach')
if opts.interactive:
cmd.append('-ti')
# May as well remove it when we're done too
if not container_found:
print("Container '%s' not found!" % container_name)
+
def list_docker_containers(opts):
- print opts
with open(opts.config) as f:
json_data = json.load(f)
for step in (json_data or []):
if step is None:
continue
- print step
for container in (json_data[step] or []):
print('\tcontainer: %s' % container)
for container_data in (json_data[step][container] or []):
- #print('\t\tcontainer_data: %s' % container_data)
if container_data == "start_order":
print('\t\tstart_order: %s' % json_data[step][container][container_data])
run_docker_container(opts, opts.container)
else:
list_docker_containers(opts)
-
- /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_5:
- ceilometer_gnocchi_upgrade:
- start_order: 1
- image: *ceilometer_agent_ipmi_image
- net: host
- detach: false
- privileged: false
- volumes:
- list_concat:
- - {get_attr: [ContainersCommon, volumes]}
- -
- - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
- - /var/log/containers/ceilometer:/var/log/ceilometer
- command: "/usr/bin/bootstrap_host_exec ceilometer su ceilometer -s /bin/bash -c '/usr/bin/ceilometer-upgrade --skip-metering-database'"
upgrade_tasks:
- name: Stop and disable ceilometer agent ipmi service
tags: step2
- /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- step_5:
- ceilometer_gnocchi_upgrade:
- start_order: 1
- image: *ceilometer_agent_notification_image
- net: host
- detach: false
- privileged: false
- volumes:
- list_concat:
- - {get_attr: [ContainersCommon, volumes]}
- -
- - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
- - /var/log/containers/ceilometer:/var/log/ceilometer
- command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
upgrade_tasks:
- name: Stop and disable ceilometer agent notification service
tags: step2
default: 'centos-binary-ec2-api:latest'
type: string
DockerEc2ApiConfigImage:
- description: The container image to use for the ec2api config_volume
+ description: The container image to use for the ec2_api config_volume
default: 'centos-binary-ec2-api:latest'
type: string
EndpointMap:
service_config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, service_config_settings]}
# BEGIN DOCKER SETTINGS
puppet_config:
- config_volume: ec2api
+ config_volume: ec2_api
puppet_tags: ec2api_api_paste_ini,ec2api_config
step_config: *step_config
config_image:
keystone_db_sync:
image: *keystone_image
net: host
+ user: root
privileged: false
detach: false
volumes: &keystone_volumes
keystone_bootstrap:
start_order: 3
action: exec
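+        # bootstrap_host_exec must run as root (enforced by the yaml-validate check added below)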
+ user: root
command:
[ 'keystone', '/usr/bin/bootstrap_host_exec', 'keystone' ,'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ]
docker_puppet_tasks:
- [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/neutron_api.json:
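+      # The extra --config-dir options pull in override files from /etc/neutron/conf.d (same pattern for the agents below)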
- command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
+ command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-server
permissions:
- path: /var/log/neutron
owner: neutron:neutron
- [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/neutron_dhcp.json:
- command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
+ command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-dhcp-agent
permissions:
- path: /var/log/neutron
owner: neutron:neutron
- [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/neutron_l3_agent.json:
- command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
+ command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-l3-agent
permissions:
- path: /var/log/neutron
owner: neutron:neutron
- [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
kolla_config:
/var/lib/kolla/config_files/neutron_ovs_agent.json:
- command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+ command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-dir /etc/neutron/conf.d/common
permissions:
- path: /var/log/neutron
owner: neutron:neutron
map_merge:
- get_attr: [NovaApiBase, role_data, config_settings]
- apache::default_vhost: false
+ nova_wsgi_enabled: false
+ nova::api::service_name: '%{::nova::params::api_service_name}'
+ nova::wsgi::apache_api::ssl: false
step_config: &step_config
list_join:
- "\n"
net: host
privileged: false
detach: false
+ user: root
volumes: &sahara_volumes
list_concat:
- {get_attr: [ContainersCommon, volumes]}
# BEGIN DOCKER SETTINGS
puppet_config:
config_volume: swift
- puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
+ puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config,rsync::server
step_config: *step_config
config_image:
list_join:
- path: /var/log/swift
owner: swift:swift
recurse: true
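+      # -dontfork keeps xinetd in the foreground so the container does not exit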
+ /var/lib/kolla/config_files/swift_xinetd_rsync.json:
+ command: /usr/sbin/xinetd -dontfork
docker_config:
step_3:
# The puppet config sets this up but we don't have a way to mount the named
- /dev:/dev
- /var/log/containers/swift:/var/log/swift
environment: *kolla_env
+ swift_xinetd_rsync:
+ image: *swift_object_image
+ net: host
+ user: root
+ restart: always
+ privileged: true
+ volumes:
+ list_concat:
+ - {get_attr: [ContainersCommon, volumes]}
+ -
+ - /var/lib/kolla/config_files/swift_xinetd_rsync.json:/var/lib/kolla/config_files/config.json:ro
+ - /var/lib/config-data/swift/etc:/etc
+ - /run:/run
+ - /srv/node:/srv/node
+ - /dev:/dev
+ - /var/log/containers/swift:/var/log/swift
+ environment: *kolla_env
+
host_prep_tasks:
- name: create persistent directories
file:
--- /dev/null
+# Environment file to deploy the HA services via docker
+# Add it *after* -e docker.yaml:
+# ...deploy..-e docker.yaml -e docker-ha.yaml
+resource_registry:
+ # Pacemaker runs on the host
+ OS::TripleO::Tasks::ControllerPreConfig: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostConfig: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+ OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+ OS::TripleO::Services::Pacemaker: ../puppet/services/pacemaker.yaml
+ OS::TripleO::Services::PacemakerRemote: ../puppet/services/pacemaker_remote.yaml
+
+ # Services that are disabled for HA deployments with pacemaker
+ OS::TripleO::Services::Keepalived: OS::Heat::None
+
+ # HA Containers managed by pacemaker
+ OS::TripleO::Services::CinderVolume: ../docker/services/pacemaker/cinder-volume.yaml
+ OS::TripleO::Services::CinderBackup: ../docker/services/pacemaker/cinder-backup.yaml
+ OS::TripleO::Services::Clustercheck: ../docker/services/pacemaker/clustercheck.yaml
+ OS::TripleO::Services::HAproxy: ../docker/services/pacemaker/haproxy.yaml
+ OS::TripleO::Services::MySQL: ../docker/services/pacemaker/database/mysql.yaml
+ OS::TripleO::Services::RabbitMQ: ../docker/services/pacemaker/rabbitmq.yaml
+ OS::TripleO::Services::Redis: ../docker/services/pacemaker/database/redis.yaml
UpgradeLevelNovaCompute: auto
UpgradeInitCommonCommand: |
#!/bin/bash
+ set -eu
# Ocata to Pike, put any needed host-level workarounds here
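+    # ansible-pacemaker provides the pacemaker Ansible modules used by the upgrade tasks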
+ yum install -y ansible-pacemaker
# a Cisco Neutron plugin.
resource_registry:
OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
- OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
parameter_defaults:
OS::TripleO::Services::Docker: OS::Heat::None
OS::TripleO::Services::CertmongerUser: OS::Heat::None
OS::TripleO::Services::Iscsid: OS::Heat::None
+ OS::TripleO::Services::Clustercheck: OS::Heat::None
parameter_defaults:
EnablePackageInstall: false
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
{% endfor %}
+  # This is a different format from *Servers: it creates a map of lists,
+  # whereas *Servers creates a map of maps keyed by the nested resource names
+ ServerIdMap:
+ type: OS::Heat::Value
+ properties:
+ value:
+ server_ids:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}, nova_server_resource]}
+{% endfor %}
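+        # yaql returns the first server ID of the primary role, or null when the list is empty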
+ bootstrap_server_id:
+ yaql:
+ expression: coalesce($.data, []).first(null)
+ data: {get_attr: [{{primary_role_name}}, nova_server_resource]}
+
# This resource just creates a dict out of the DeploymentServerBlacklist,
# which is a list. The dict is used in the role templates to set a condition
# on whether to create the deployment resources. We can't use the list
map_merge:
- {get_attr: [VipMap, net_ip_map]}
- redis: {get_attr: [RedisVirtualIP, ip_address]}
+ ServerIdData:
+ description: Mapping of each role to a list of nova server IDs and the bootstrap ID
+ value: {get_attr: [ServerIdMap, value]}
RoleParameters:
type: json
description: Role Specific Parameters
+ default: {}
DeploymentSwiftDataMap:
type: json
description: |
RoleParameters:
type: json
description: Role Specific Parameters
+ default: {}
DeploymentSwiftDataMap:
type: json
description: |
RoleParameters:
type: json
description: Role Specific Parameters
+ default: {}
DeploymentSwiftDataMap:
type: json
description: |
RoleParameters:
type: json
description: Role Specific Parameters
+ default: {}
DeploymentSwiftDataMap:
type: json
description: |
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
-
+ ctlplane_service_ips:
+ type: json
UpdateIdentifier:
type: string
description: >
servers: {get_param: servers}
stack_name: {get_param: stack_name}
role_data: {get_param: role_data}
+ ctlplane_service_ips: {get_param: ctlplane_service_ips}
outputs:
# Output the config for each role, just use Step1 as the config should be
servers:
type: json
description: Mapping of Role name e.g Controller to a list of servers
-
+ stack_name:
+ type: string
+ description: Name of the topmost stack
role_data:
type: json
description: Mapping of Role name e.g Controller to the per-role data
-
DeployIdentifier:
default: ''
type: string
description: >
Setting this to a unique value will re-run any deployment tasks which
perform configuration on a Heat stack-update.
+ ctlplane_service_ips:
+ type: json
resources:
# Note the include here is the same as post.j2.yaml but the data used at
RoleParameters:
type: json
description: Role Specific Parameters
+ default: {}
DeploymentSwiftDataMap:
type: json
description: |
GlanceRbdPoolName:
default: images
type: string
- GlanceBackend:
- default: swift
- description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
- type: string
- constraints:
- - allowed_values: ['swift', 'file', 'rbd']
GnocchiRbdPoolName:
default: metrics
type: string
- NovaEnableRbdBackend:
- default: false
- description: Whether to enable or not the Rbd backend for Nova
- type: boolean
NovaRbdPoolName:
default: vms
type: string
parameters:
- ControllerEnableCephStorage
-conditions:
- glance_multiple_locations:
- and:
- - equals:
- - get_param: GlanceBackend
- - rbd
- - equals:
- - get_param: NovaEnableRbdBackend
- - true
-
outputs:
role_data:
description: Role data for the Ceph base service.
- keys:
CEPH_CLIENT_KEY:
list_join: ['.', ['client', {get_param: CephClientUserName}]]
- service_config_settings:
- glance_api:
- glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
GlanceRbdPoolName:
default: images
type: string
- GlanceBackend:
- default: swift
- description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
- type: string
- constraints:
- - allowed_values: ['swift', 'file', 'rbd']
GnocchiRbdPoolName:
default: metrics
type: string
- NovaEnableRbdBackend:
- default: false
- description: Whether to enable or not the Rbd backend for Nova
- type: boolean
NovaRbdPoolName:
default: vms
type: string
clients using older Ceph servers.
type: string
-conditions:
- glance_multiple_locations:
- and:
- - equals:
- - get_param: GlanceBackend
- - rbd
- - equals:
- - get_param: NovaEnableRbdBackend
- - true
-
outputs:
role_data:
description: Role data for the Ceph External service.
- ceph-base
- ceph-mon
- ceph-osd
- service_config_settings:
- glance_api:
- glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
step_config: |
include ::tripleo::profile::base::ceph::client
- name: Remove ceilometer expirer cron tab on upgrade
tags: step1
shell: '/usr/bin/crontab -u ceilometer -r'
+ register: remove_ceilometer_expirer_crontab
+ failed_when: remove_ceilometer_expirer_crontab.rc != 0 and remove_ceilometer_expirer_crontab.stderr != "no crontab for ceilometer"
+ changed_when: remove_ceilometer_expirer_crontab.stderr != "no crontab for ceilometer"
GlanceRbdPoolName:
default: images
type: string
+ NovaEnableRbdBackend:
+ default: false
+ description: Whether to enable or not the Rbd backend for Nova
+ type: boolean
RabbitPassword:
description: The password for RabbitMQ
type: string
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
glance_workers_unset: {equals : [{get_param: GlanceWorkers}, '']}
service_debug_unset: {equals : [{get_param: GlanceDebug}, '']}
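+  # Expose image locations only when both Glance and Nova use the RBD
+  # backend, which lets Nova copy-on-write clone Glance-backed images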
+ glance_multiple_locations:
+ and:
+ - equals:
+ - get_param: GlanceBackend
+ - rbd
+ - equals:
+ - get_param: NovaEnableRbdBackend
+ - true
resources:
glance::keystone::authtoken::project_domain_name: 'Default'
glance::api::pipeline: 'keystone'
glance::api::show_image_direct_url: true
+ glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
# (eg. for internal_api):
type: json
NovaWorkers:
default: 0
- description: Number of workers for Nova API service.
+ description: Number of workers for Nova services.
type: number
NovaPassword:
description: The password for the nova service and db account, used by nova-api.
nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
resources:
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # ApacheServiceBase:
- # type: ./apache.yaml
- # properties:
- # ServiceNetMap: {get_param: ServiceNetMap}
- # DefaultPasswords: {get_param: DefaultPasswords}
- # EndpointMap: {get_param: EndpointMap}
- # RoleName: {get_param: RoleName}
- # RoleParameters: {get_param: RoleParameters}
- # EnableInternalTLS: {get_param: EnableInternalTLS}
+ ApacheServiceBase:
+ type: ./apache.yaml
+ properties:
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ EndpointMap: {get_param: EndpointMap}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+ EnableInternalTLS: {get_param: EnableInternalTLS}
NovaBase:
type: ./nova-base.yaml
config_settings:
map_merge:
- get_attr: [NovaBase, role_data, config_settings]
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # - get_attr: [ApacheServiceBase, role_data, config_settings]
+ - get_attr: [ApacheServiceBase, role_data, config_settings]
- nova::cron::archive_deleted_rows::hour: '*/12'
nova::cron::archive_deleted_rows::destination: '/dev/null'
tripleo.nova_api.firewall_rules:
"%{hiera('fqdn_$NETWORK')}"
params:
$NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- nova_wsgi_enabled: false
- # nova::api::service_name: 'httpd'
- # nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+ nova_wsgi_enabled: true
+ nova::api::service_name: 'httpd'
+ nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
# NOTE: bind IP is found in Heat replacing the network name with the local node IP
# for the given network; replacement examples (eg. for internal_api):
# internal_api -> IP
# internal_api_uri -> [IP]
# internal_api_subnet - > IP/CIDR
- # nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
- # nova::wsgi::apache_api::servername:
- # str_replace:
- # template:
- # "%{hiera('fqdn_$NETWORK')}"
- # params:
- # $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+ nova::wsgi::apache_api::servername:
+ str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
nova::api::instance_name_template: {get_param: InstanceNameTemplate}
nova_enable_db_purge: {get_param: NovaEnableDBPurge}
- nova_workers_zero
- {}
- nova::api::osapi_compute_workers: {get_param: NovaWorkers}
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
+ nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
step_config: |
include tripleo::profile::base::nova::api
service_config_settings:
nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
nova::keystone::auth::password: {get_param: NovaPassword}
nova::keystone::auth::region: {get_param: KeystoneRegion}
- # Temporarily disable Nova API deployed in WSGI
- # https://bugs.launchpad.net/nova/+bug/1661360
- # metadata_settings:
- # get_attr: [ApacheServiceBase, role_data, metadata_settings]
+ metadata_settings:
+ get_attr: [ApacheServiceBase, role_data, metadata_settings]
upgrade_tasks:
- - name: get bootstrap nodeid
- tags: common
- command: hiera bootstrap_nodeid
- register: bootstrap_node
- - name: set is_bootstrap_node fact
- tags: common
- set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
- - name: Extra migration for nova tripleo/+bug/1656791
- tags: step0,pre-upgrade
- when: is_bootstrap_node
- command: nova-manage db online_data_migrations
- - name: Stop and disable nova_api service (pre-upgrade not under httpd)
- tags: step2
- service: name=openstack-nova-api state=stopped enabled=no
- - name: Create puppet manifest to set transport_url in nova.conf
- tags: step5
- when: is_bootstrap_node
- copy:
- dest: /root/nova-api_upgrade_manifest.pp
- mode: 0600
- content: >
- $transport_url = os_transport_url({
- 'transport' => hiera('messaging_service_name', 'rabbit'),
- 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),
- 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
- 'username' => hiera('nova::rabbit_userid', 'guest'),
- 'password' => hiera('nova::rabbit_password'),
- 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
- })
- oslo::messaging::default { 'nova_config':
- transport_url => $transport_url
- }
- - name: Run puppet apply to set tranport_url in nova.conf
- tags: step5
- when: is_bootstrap_node
- command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
- register: puppet_apply_nova_api_upgrade
- failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
- changed_when: puppet_apply_nova_api_upgrade.rc == 2
- - name: Setup cell_v2 (map cell0)
- tags: step5
- when: is_bootstrap_node
- shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
- - name: Setup cell_v2 (create default cell)
- tags: step5
- when: is_bootstrap_node
- # (owalsh) puppet-nova expects the cell name 'default'
- # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
- shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
- register: nova_api_create_cell
- failed_when: nova_api_create_cell.rc not in [0,2]
- changed_when: nova_api_create_cell.rc == 0
- - name: Setup cell_v2 (sync nova/cell DB)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage db sync
- async: {get_param: NovaDbSyncTimeout}
- poll: 10
- - name: Setup cell_v2 (get cell uuid)
- tags: step5
- when: is_bootstrap_node
- shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
- register: nova_api_cell_uuid
- - name: Setup cell_v2 (migrate hosts)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
- - name: Setup cell_v2 (migrate instances)
- tags: step5
- when: is_bootstrap_node
- command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
- - name: Sync nova_api DB
- tags: step5
- command: nova-manage api_db sync
- when: is_bootstrap_node
- - name: Online data migration for nova
- tags: step5
- when: is_bootstrap_node
- command: nova-manage db online_data_migrations
+ yaql:
+ expression: $.data.apache_upgrade + $.data.nova_api_upgrade
+ data:
+ apache_upgrade:
+ get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+ nova_api_upgrade:
+ - name: get bootstrap nodeid
+ tags: common
+ command: hiera bootstrap_nodeid
+ register: bootstrap_node
+ - name: set is_bootstrap_node fact
+ tags: common
+ set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+ - name: Extra migration for nova tripleo/+bug/1656791
+ tags: step0,pre-upgrade
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
+ - name: Stop and disable nova_api service (pre-upgrade not under httpd)
+ tags: step2
+ service: name=openstack-nova-api state=stopped enabled=no
+ - name: Create puppet manifest to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ copy:
+ dest: /root/nova-api_upgrade_manifest.pp
+ mode: 0600
+ content: >
+ $transport_url = os_transport_url({
+ 'transport' => hiera('messaging_service_name', 'rabbit'),
+ 'hosts' => any2array(hiera('rabbitmq_node_names', undef)),
+ 'port' => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
+ 'username' => hiera('nova::rabbit_userid', 'guest'),
+ 'password' => hiera('nova::rabbit_password'),
+ 'ssl' => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
+ })
+ oslo::messaging::default { 'nova_config':
+ transport_url => $transport_url
+ }
+        - name: Run puppet apply to set transport_url in nova.conf
+ tags: step5
+ when: is_bootstrap_node
+ command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+ register: puppet_apply_nova_api_upgrade
+ failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
+ changed_when: puppet_apply_nova_api_upgrade.rc == 2
+ - name: Setup cell_v2 (map cell0)
+ tags: step5
+ when: is_bootstrap_node
+ shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
+ - name: Setup cell_v2 (create default cell)
+ tags: step5
+ when: is_bootstrap_node
+ # (owalsh) puppet-nova expects the cell name 'default'
+ # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
+ shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
+ register: nova_api_create_cell
+ failed_when: nova_api_create_cell.rc not in [0,2]
+ changed_when: nova_api_create_cell.rc == 0
+ - name: Setup cell_v2 (sync nova/cell DB)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db sync
+ async: {get_param: NovaDbSyncTimeout}
+ poll: 10
+ - name: Setup cell_v2 (get cell uuid)
+ tags: step5
+ when: is_bootstrap_node
+ shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
+ register: nova_api_cell_uuid
+ - name: Setup cell_v2 (migrate hosts)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
+ - name: Setup cell_v2 (migrate instances)
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
+ - name: Sync nova_api DB
+ tags: step5
+ command: nova-manage api_db sync
+ when: is_bootstrap_node
+ - name: Online data migration for nova
+ tags: step5
+ when: is_bootstrap_node
+ command: nova-manage db online_data_migrations
type: json
NovaWorkers:
default: 0
- description: Number of workers for Nova Conductor service.
+ description: Number of workers for Nova services.
type: number
MonitoringSubscriptionNovaConductor:
default: 'overcloud-nova-conductor'
type: json
NovaWorkers:
default: 0
- description: Number of workers for Nova API service.
+ description: Number of workers for Nova services.
type: number
conditions:
type: json
NovaWorkers:
default: 0
- description: Number of workers for Nova Placement API service.
+ description: Number of workers for Nova services.
type: number
NovaPassword:
description: The password for the nova service and db account, used by nova-placement.
type: string
SwiftCeilometerPipelineEnabled:
description: Set to False to disable the swift proxy ceilometer pipeline.
- default: True
+ default: false
type: boolean
SwiftCeilometerIgnoreProjects:
- default: ['services']
+ default: ['service']
  description: Comma-separated list of project names to ignore.
type: comma_delimited_list
RabbitClientPort:
conditions:
- ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
+ ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, true]}
use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
resources:
swift::proxy::authtoken::project_name: 'service'
swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
swift::proxy::workers: {get_param: SwiftWorkers}
- swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
- swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
- swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
- swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
- swift::proxy::ceilometer::password: {get_param: SwiftPassword}
- swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
- swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
- swift::proxy::ceilometer::nonblocking_notify: true
+ -
+ if:
+ - ceilometer_pipeline_enabled
+ -
+ swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
+ swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+ swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+ swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+ swift::proxy::ceilometer::password: {get_param: SwiftPassword}
+ swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
+ swift::proxy::ceilometer::nonblocking_notify: true
+ swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+ - {}
+ - swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL}
tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
- ''
- 'proxy-logging'
- 'proxy-server'
- swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
swift::proxy::account_autocreate: true
# NOTE: bind IP is found in Heat replacing the network name with the
# local node IP for the given network; replacement examples
--- /dev/null
+---
+fixes:
+  - |
+    Disable ceilometer in the swift proxy middleware pipeline out of the box.
+    With a gnocchi and swift backend this generates a lot of events and
+    causes heavy load. It is easy to re-enable if needed; see the sketch
+    below.
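+
+    A minimal sketch of an environment file that turns the pipeline back on,
+    using the ``SwiftCeilometerPipelineEnabled`` parameter shown above::
+
+      parameter_defaults:
+        SwiftCeilometerPipelineEnabled: true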
--- /dev/null
+---
+features:
+ - |
+ There is now a tool in tripleo-heat-templates, similar to the
+ oslo-config-generator, that can be used to programmatically generate
+ sample environment files based directly on the contents of the templates
+ themselves. This ensures consistency in the sample environments, as well
+ as making it easier to update environments to reflect changes to the
+ templates.
+upgrade:
+ - |
+ Some sample environment files will be moving as part of the work to
+ generate them programmatically. The old versions will be left in place for
+ one cycle to allow a smooth upgrade process. When upgrading, if any of the
+    environment files in use for the deployment have been deprecated, they
+    should be replaced with the new generated versions.
+deprecations:
+ - |
+ Where a generated sample environment replaces an existing one, the existing
+ environment is deprecated. This will be noted in a comment at the top of
+ the file.
- OS::TripleO::Services::CinderVolume
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Congress
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Etcd
- OS::TripleO::Services::CinderVolume
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Congress
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Etcd
- OS::TripleO::Services::CinderHPELeftHandISCSI
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::Congress
- OS::TripleO::Services::Docker
PyYAML>=3.10.0 # MIT
Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
six>=1.9.0 # MIT
-sphinx!=1.6.1,>=1.5.1 # BSD
+sphinx>=1.6.2 # BSD
oslosphinx>=4.7.0 # Apache-2.0
reno!=2.3.1,>=1.8.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0
% (expected_config_image_parameter, config_volume))
return 1
+ if 'docker_config' in role_data:
+ docker_config = role_data['docker_config']
+ for _, step in docker_config.items():
+ for _, container in step.items():
+ if not isinstance(container, dict):
+                    # NOTE(mandre) this skips everything that is not a dict,
+                    # so we may miss some container definitions if they are
+                    # inside a map_merge, for example
+ continue
+ command = container.get('command', '')
+ if isinstance(command, list):
+ command = ' '.join(map(str, command))
+ if 'bootstrap_host_exec' in command \
+ and container.get('user') != 'root':
+ print('ERROR: bootstrap_host_exec needs to run as the root user.')
+ return 1
+
if 'parameters' in tpl:
for param in required_params:
if param not in tpl['parameters']: