OS::TripleO::Services::BarbicanApi: ../../docker/services/barbican-api.yaml
OS::TripleO::Services::Zaqar: ../../docker/services/zaqar.yaml
OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
- OS::TripleO::Services::MongoDb: ../../docker/services/database/mongodb.yaml
# Some infra instances don't pass the ping test but are otherwise working.
# Since the OVB jobs also test this functionality, we can shut it off here.
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::BarbicanApi
- - OS::TripleO::Services::MongoDb
- OS::TripleO::Services::Zaqar
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::TripleoPackages
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
Debug: true
+ ZaqarMessageStore: 'swift'
+ ZaqarManagementStore: 'sqlalchemy'
SwiftCeilometerPipelineEnabled: false
NotificationDriver: 'noop'
OS::TripleO::Services::HAproxy: ../../puppet/services/pacemaker/haproxy.yaml
OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
OS::TripleO::Services::MySQL: ../../puppet/services/pacemaker/database/mysql.yaml
- OS::TripleO::Services::MongoDb: ../../puppet/services/database/mongodb.yaml
OS::TripleO::Services::CinderBackup: ../../puppet/services/pacemaker/cinder-backup.yaml
OS::TripleO::Services::CinderVolume: ../../puppet/services/pacemaker/cinder-volume.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::BarbicanApi
- - OS::TripleO::Services::MongoDb
- OS::TripleO::Services::Zaqar
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::TripleoPackages
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
Debug: true
+ ZaqarMessageStore: 'swift'
+ ZaqarManagementStore: 'sqlalchemy'
SwiftCeilometerPipelineEnabled: false
NotificationDriver: 'noop'
# Since the OVB jobs also test this functionality, we can shut it off here.
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
OS::TripleO::Services::NovaMigrationTarget: OS::Heat::None
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::Clustercheck
# this is the intended variable for overriding
OVERCLOUD_SSH_KEY=${OVERCLOUD_SSH_KEY:-"$SUBNODES_SSH_KEY"}
+SHORT_TERM_KEY_COMMENT="TripleO split stack short term key"
SLEEP_TIME=5
function overcloud_ssh_hosts_json {
function overcloud_ssh_key_json {
# we pass the key contents to Mistral instead of just the path; otherwise
# the key file would have to be readable by the mistral user
- cat "$OVERCLOUD_SSH_KEY" | python -c 'import json,sys; print(json.dumps(sys.stdin.read()))'
+ cat "$1" | python -c 'import json,sys; print(json.dumps(sys.stdin.read()))'
}
function workflow_finished {
openstack workflow execution show -f shell $execution_id | grep 'state="SUCCESS"' > /dev/null
}
+function generate_short_term_keys {
+ local tmpdir=$(mktemp -d)
+ ssh-keygen -N '' -t rsa -b 4096 -f "$tmpdir/id_rsa" -C "$SHORT_TERM_KEY_COMMENT" > /dev/null
+ echo "$tmpdir"
+}
+
if [ -z "$OVERCLOUD_HOSTS" ]; then
echo 'Please set $OVERCLOUD_HOSTS'
exit 1
echo "Hosts: $OVERCLOUD_HOSTS"
echo
-EXECUTION_PARAMS="{\"ssh_user\": \"$OVERCLOUD_SSH_USER\", \"ssh_servers\": $(overcloud_ssh_hosts_json), \"ssh_private_key\": $(overcloud_ssh_key_json)}"
+SHORT_TERM_KEY_DIR=$(generate_short_term_keys)
+SHORT_TERM_KEY_PRIVATE="$SHORT_TERM_KEY_DIR/id_rsa"
+SHORT_TERM_KEY_PUBLIC="$SHORT_TERM_KEY_DIR/id_rsa.pub"
+SHORT_TERM_KEY_PUBLIC_CONTENT=$(cat $SHORT_TERM_KEY_PUBLIC)
+
+for HOST in $OVERCLOUD_HOSTS; do
+ echo "Inserting TripleO short term key for $HOST"
+ # prepending an extra newline so that if authorized_keys didn't
+ # end with a newline previously, we don't end up garbling it
+ ssh -i "$OVERCLOUD_SSH_KEY" -l "$OVERCLOUD_SSH_USER" "$HOST" "echo -e '\n$SHORT_TERM_KEY_PUBLIC_CONTENT' >> \$HOME/.ssh/authorized_keys"
+done
+
+echo "Starting ssh admin enablement workflow"
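+# The resulting JSON payload looks like this (values are illustrative):
+#   {"ssh_user": "heat-admin", "ssh_servers": ["192.0.2.10"], "ssh_private_key": "-----BEGIN RSA PRIVATE KEY-----..."}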
+EXECUTION_PARAMS="{\"ssh_user\": \"$OVERCLOUD_SSH_USER\", \"ssh_servers\": $(overcloud_ssh_hosts_json), \"ssh_private_key\": $(overcloud_ssh_key_json "$SHORT_TERM_KEY_PRIVATE")}"
EXECUTION_CREATE_OUTPUT=$(openstack workflow execution create -f shell -d 'deployed server ssh admin creation' tripleo.access.v1.enable_ssh_admin "$EXECUTION_PARAMS")
echo "$EXECUTION_CREATE_OUTPUT"
EXECUTION_ID=$(echo "$EXECUTION_CREATE_OUTPUT" | grep '^id=' | awk '-F"' '{ print $2 }')
sleep $SLEEP_TIME
echo -n .
done
+echo # newline after the previous dots
+
+for HOST in $OVERCLOUD_HOSTS; do
+ echo "Removing TripleO short term key from $HOST"
+ ssh -l "$OVERCLOUD_SSH_USER" "$HOST" "sed -i -e '/$SHORT_TERM_KEY_COMMENT/d' \$HOME/.ssh/authorized_keys"
+done
+
+echo "Removing short term keys locally"
+rm -r "$SHORT_TERM_KEY_DIR"
echo "Success."
description: >
It can be used to override settings for one of the predefined pools, or to create
additional ones. Example:
- {
- "volumes": {
- "size": 5,
- "pg_num": 128,
- "pgp_num": 128
- }
- }
- default: {}
- type: json
+ [{"name": "volumes", "pg_num": 64, "rule_name": ""}]
+ default: []
+ type: comma_delimited_list
CinderRbdPoolName:
default: volumes
type: string
- {get_param: NovaRbdPoolName}
- {get_param: GlanceRbdPoolName}
- {get_param: GnocchiRbdPoolName}
- - repeat:
- template:
- name: <%pool%>
- pg_num: {get_param: CephPoolDefaultPgNum}
- rule_name: ""
- for_each:
- <%pool%>: {get_param: CephPools}
+ - {get_param: CephPools}
openstack_keys: &openstack_keys
- name:
list_join:
tags: step2
service: name=httpd state=stopped enabled=no
- name: remove old cinder cron jobs
+ tags: step2
file:
path: /var/spool/cron/cinder
state: absent
EnableInternalTLS:
type: boolean
default: false
+ GlanceBackend:
+ default: swift
+ description: The short name of the Glance backend to use. Should be one
+ of swift, rbd, cinder, or file
+ type: string
+ constraints:
+ - allowed_values: ['swift', 'file', 'rbd', 'cinder']
GlanceNfsEnabled:
default: false
description: >
default: false
description: Remove package if the service is being disabled during upgrade
type: boolean
+ GlanceNfsShare:
+ default: ''
+ description: >
+ NFS share to mount for image storage (when GlanceNfsEnabled is true)
+ type: string
+ GlanceNfsOptions:
+ default: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
+ description: >
+ NFS mount options for image storage (when GlanceNfsEnabled is true)
+ type: string
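
A minimal environment-file sketch wiring the NFS image store together (the share address is illustrative):

parameter_defaults:
  GlanceNfsEnabled: true
  GlanceNfsShare: '192.0.2.1:/export/glance'
  GlanceNfsOptions: 'intr,context=system_u:object_r:glance_var_lib_t:s0'
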
conditions:
internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
nfs_backend_enabled: {equals: [{get_param: GlanceNfsEnabled}, true]}
+ cinder_backend_enabled: {equals: [{get_param: GlanceBackend}, cinder]}
resources:
dest: "/etc/ceph/"
merge: true
preserve_properties: true
+ permissions:
+ - path: /var/lib/glance
+ owner: glance:glance
+ recurse: true
/var/lib/kolla/config_files/glance_api_tls_proxy.json:
command: /usr/sbin/httpd -DFOREGROUND
config_files:
- nfs_backend_enabled
- /var/lib/glance:/var/lib/glance
- ''
+ -
+ if:
+ - cinder_backend_enabled
+ - - /dev:/dev
+ - /etc/iscsi:/etc/iscsi
+ - []
environment:
- KOLLA_BOOTSTRAP=True
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
start_order: 2
image: *glance_api_image
net: host
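+ # the cinder store attaches image volumes over iSCSI, which requires
+ # device access, hence the conditionally privileged container below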
- privileged: false
+ privileged: {if: [cinder_backend_enabled, true, false]}
restart: always
volumes: *glance_volumes
environment:
- KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
- {}
host_prep_tasks:
+ - name: Mount NFS on host
+ vars:
+ nfs_backend_enable: {get_param: GlanceNfsEnabled}
+ mount: name=/var/lib/glance src="{{item.NFS_SHARE}}" fstype=nfs4 opts="{{item.NFS_OPTIONS}}" state=mounted
+ with_items:
+ - NFS_SHARE: {get_param: GlanceNfsShare}
+ NFS_OPTIONS: {get_param: GlanceNfsOptions}
+ when:
+ - nfs_backend_enable
- name: create persistent logs directory
file:
path: "{{ item }}"
ignore_errors: True
register: heat_api_enabled
- name: remove old heat cron jobs
+ tags: step2
file:
path: /var/spool/cron/heat
state: absent
- path: /var/log/horizon/
owner: apache:apache
recurse: true
+ # NOTE The upstream Kolla Dockerfile sets /etc/openstack-dashboard/ ownership to
+ # horizon:horizon, but the policy.json files must be readable by the apache user
+ # FIXME We should consider whether this should be fixed in the Kolla Dockerfile instead
+ - path: /etc/openstack-dashboard/
+ owner: apache:apache
+ recurse: true
# FIXME Apache tries to write a .lock file there
- path: /usr/share/openstack-dashboard/openstack_dashboard/local/
owner: apache:apache
volumes:
- /var/log/containers/horizon:/var/log/horizon
- /var/log/containers/httpd/horizon:/var/log/httpd
- - /var/lib/config-data/horizon/etc/:/etc/
+ - /var/lib/config-data/puppet-generated/horizon/etc/openstack-dashboard:/etc/openstack-dashboard
step_3:
horizon:
image: *horizon_image
tags: step2
service: name=httpd state=stopped enabled=no
- name: remove old keystone cron jobs
+ tags: step2
file:
path: /var/spool/cron/keystone
state: absent
user: root
volumes:
- /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
- - /var/log/memcached.log:/var/log/memcached.log
- command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; chown ${USER} /var/log/memcached.log']
+ - /var/log/containers/memcached:/var/log/
+ command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; touch /var/log/memcached.log && chown ${USER} /var/log/memcached.log']
memcached:
start_order: 1
image: *memcached_image
- {get_attr: [ContainersCommon, volumes]}
-
- /var/lib/config-data/memcached/etc/sysconfig/memcached:/etc/sysconfig/memcached:ro
- # TODO(bogdando) capture memcached syslog logs from a container
- command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS']
+ - /var/log/containers/memcached:/var/log/
+ # NOTE: We're adding the log redirection here, even though it should
+ # already be part of the options. This is because the redirection
+ # via the options is not working and ends up being passed as a
+ # parameter to the memcached command (which silently ignores it).
+ # Thus the explicit redirection here. The redirection will be removed
+ # from $OPTIONS via the puppet module, but only once the following
+ # pull request merges: https://github.com/saz/puppet-memcached/pull/88
+ command: ['/bin/bash', '-c', 'source /etc/sysconfig/memcached; /usr/bin/memcached -p ${PORT} -u ${USER} -m ${CACHESIZE} -c ${MAXCONN} $OPTIONS >> /var/log/memcached.log 2>&1']
upgrade_tasks:
- name: Stop and disable memcached service
tags: step2
default: {}
description: Parameters specific to the role
type: json
+ MistralWorkers:
+ default: 1
+ description: The number of workers for the mistral-api.
+ type: number
+ MistralApiPolicies:
+ description: |
+ A hash of policies to configure for Mistral API.
+ e.g. { mistral-context_is_admin: { key: context_is_admin, value: 'role:admin' } }
+ default: {}
+ type: json
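
A hedged example of overriding these from an environment file (the worker count and policy values are illustrative):

parameter_defaults:
  MistralWorkers: 4
  MistralApiPolicies:
    mistral-context_is_admin:
      key: context_is_admin
      value: 'role:admin'
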
resources:
MySQLClient:
type: ../../puppet/services/database/mysql-client.yaml
+ MistralBase:
+ type: ../../puppet/services/mistral-base.yaml
+ properties:
+ EndpointMap: {get_param: EndpointMap}
+ ServiceData: {get_param: ServiceData}
+ ServiceNetMap: {get_param: ServiceNetMap}
+ DefaultPasswords: {get_param: DefaultPasswords}
+ RoleName: {get_param: RoleName}
+ RoleParameters: {get_param: RoleParameters}
+
MistralApiBase:
type: ../../puppet/services/mistral-api.yaml
properties:
description: Role data for the Mistral API role.
value:
service_name: {get_attr: [MistralApiBase, role_data, service_name]}
+ # FIXME(mandre) restore once mistral-api image has the necessary packages
+ # to run on top of apache
+ # config_settings:
+ # map_merge:
+ # - get_attr: [MistralApiBase, role_data, config_settings]
config_settings:
map_merge:
- - get_attr: [MistralApiBase, role_data, config_settings]
+ - get_attr: [MistralBase, role_data, config_settings]
+ - mistral::api::api_workers: {get_param: MistralWorkers}
+ mistral::api::bind_host: {get_param: [ServiceNetMap, MistralApiNetwork]}
+ mistral::policy::policies: {get_param: MistralApiPolicies}
+ tripleo.mistral_api.firewall_rules:
+ '133 mistral':
+ dport:
+ - 8989
+ - 13989
+ mistral_wsgi_enabled: false
logging_source: {get_attr: [MistralApiBase, role_data, logging_source]}
logging_groups: {get_attr: [MistralApiBase, role_data, logging_groups]}
step_config: &step_config
ignore_errors: True
when: {get_param: UpgradeRemoveUnusedPackages}
- name: remove old nova cron jobs
+ tags: step2
file:
path: /var/spool/cron/nova
state: absent
ContainersCommon:
type: ../containers-common.yaml
+# We import from the corresponding docker service because otherwise we risk
+# overwriting the tripleo.mysql.firewall_rules key with the baremetal firewall
+# rules (see LP#1728918)
MysqlPuppetBase:
- type: ../../../puppet/services/pacemaker/database/mysql.yaml
+ type: ../../../docker/services/pacemaker/database/mysql.yaml
properties:
EndpointMap: {get_param: EndpointMap}
ServiceData: {get_param: ServiceData}
# Type: string
ComputeHostnameFormat: '%stackname%-novacompute-%index%'
- # Number of Controller nodes to deploy
+ # Number of ControllerOpenstack nodes
# Type: number
- ControllerCount: 3
+ ControllerOpenstackCount: 3
- # Format for Controller node hostnames. Note %index% is translated into the index of the node, e.g. 0/1/2 etc., and %stackname% is replaced with the stack name, e.g. overcloud
+ # Format for ControllerOpenstack node hostnames. Note %index% is translated into the index of the node, e.g. 0/1/2 etc., and %stackname% is replaced with the stack name, e.g. overcloud
# Type: string
- ControllerHostnameFormat: '%stackname%-controller-%index%'
+ ControllerOpenstackHostnameFormat: '%stackname%-controller-%index%'
# Number of Database nodes
# Type: number
# Type: string
OvercloudComputeFlavor: compute
- # Name of the flavor for Controller nodes
+ # Name of the flavor for ControllerOpenstack nodes
# Type: string
- OvercloudControllerFlavor: control
+ OvercloudControllerOpenstackFlavor: control
# Name of the flavor for Database nodes
# Type: string
RabbitIPv6: True
# Enable IPv6 environment for Memcached.
MemcachedIPv6: True
+ # Enable IPv6 environment for MySQL.
+ MysqlIPv6: True
# Type: boolean
CinderEnableRbdBackend: True
- # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # The short name of the Glance backend to use. Should be one of swift, rbd, cinder, or file
# Type: string
GlanceBackend: rbd
# Type: string
CinderRbdPoolName: volumes
- # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # The short name of the Glance backend to use. Should be one of swift, rbd, cinder, or file
# Type: string
GlanceBackend: rbd
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
- # The short name of the Glance backend to use. Should be one of swift, rbd, or file
+ # The short name of the Glance backend to use. Should be one of swift, rbd, cinder, or file
# Type: string
GlanceBackend: file
rhel_reg_user: ""
rhel_reg_type: ""
rhel_reg_method: ""
- rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.1-rpms"
+ rhel_reg_sat_repo: "rhel-7-server-satellite-tools-6.2-rpms"
rhel_reg_http_proxy_host: ""
rhel_reg_http_proxy_port: ""
rhel_reg_http_proxy_username: ""
proxy_url=
proxy_username=
proxy_password=
+curl_opts="--retry-delay 10 --max-time 30 --retry ${retry_max_count} --cacert /etc/rhsm/ca/redhat-uep.pem"
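+# crudini --get reads the [server] hostname value from /etc/rhsm/rhsm.conf,
+# so portal_test_url points at the RHSM server the node is configured to use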
+portal_test_url="https://$(crudini --get /etc/rhsm/rhsm.conf server hostname)/subscription/"
# process variables..
if [ -n "${REG_AUTO_ATTACH:-}" ]; then
# Good: both values are non-empty
proxy_url="http://${proxy_host}:${proxy_port}"
config_opts="--server.proxy_hostname=${proxy_host} --server.proxy_port=${proxy_port}"
- sat5_opts="${sat5_opts} --proxy_hostname=${proxy_url}"
+ sat5_opts="${sat5_opts} --proxy=${proxy_url}"
+ curl_opts="${curl_opts} -x http://${proxy_host}:${proxy_port}"
echo "RHSM Proxy set to: ${proxy_url}"
if [ -n "${REG_HTTP_PROXY_USERNAME:-}" ]; then
if [ -n "${REG_HTTP_PROXY_PASSWORD:-}" ]; then
config_opts="${config_opts} --server.proxy_user=${proxy_username} --server.proxy_password=${proxy_password}"
sat5_opts="${sat5_opts} --proxyUser=${proxy_username} --proxyPassword=${proxy_password}"
+ curl_opts="${curl_opts} --proxy-user ${proxy_username}:${proxy_password}"
else
echo "Warning: REG_HTTP_PROXY_PASSWORD cannot be null with non-empty REG_HTTP_PROXY_USERNAME! Skipping..."
proxy_username= ; proxy_password=
}
function detect_satellite_server {
- if curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm | grep "200 OK"; then
+ if curl ${curl_opts} -L -k -s -D - -o /dev/null $REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm | grep "200 OK"; then
echo Satellite 6 or beyond with Katello API detected at $REG_SAT_URL
katello_api_enabled=1
- elif curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
+ elif curl ${curl_opts} -L -k -s -D - -o /dev/null $REG_SAT_URL/rhn/Login.do | grep "200 OK"; then
echo Satellite 5 with RHN detected at $REG_SAT_URL
katello_api_enabled=0
else
fi
}
-if [ "x${proxy_url}" != "x" ];then
+if [ "x${proxy_url}" != "x" ]; then
+ # Before anything else, make sure the proxy can be reached.
+ # Note: no need to manage retries here; the retry() function handles that.
+ echo "Testing proxy connectivity..."
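+ # /dev/tcp/<host>/<port> is bash's built-in TCP pseudo-device; the
+ # redirection only succeeds if a TCP connection can be established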
+ retry bash -c "</dev/tcp/${proxy_host}/${proxy_port}"
+ echo "Proxy ${proxy_url} is reachable!"
+
# Config subscription-manager for proxy
subscription-manager config ${config_opts}
case "${REG_METHOD:-}" in
portal)
+ # First, test that the RHSM portal is reachable (through the
+ # configured proxy, if any)
+
+ if curl ${curl_opts} -L -s -D - -o /dev/null ${portal_test_url} | grep '200 OK'; then
+ if [ "x${proxy_url}" = "x" ]; then
+ echo "Access to RHSM portal OK, continuing..."
+ else
+ echo "Access to RHSM portal through proxy ${proxy_url} OK, continuing..."
+ fi
+ else
+ if [ "x${proxy_url}" = "x" ]; then
+ echo "Unable to access RHSM portal! Please check your parameters."
+ else
+ echo "Unable to access RHSM portal through the configured HTTP proxy (${proxy_url})! Please check your parameters."
+ fi
+ exit 1
+ fi
retry subscription-manager register $opts
if [ -z "${REG_AUTO_ATTACH:-}" -a -z "${REG_ACTIVATION_KEY:-}" ]; then
retry subscription-manager attach $attach_opts
detect_satellite_server
if [ "$katello_api_enabled" = "1" ]; then
repos="$repos --enable ${satellite_repo}"
- curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
+ curl ${curl_opts} -L -k -O "$REG_SAT_URL/pub/katello-ca-consumer-latest.noarch.rpm"
# https://bugs.launchpad.net/tripleo/+bug/1711435
# Delete the /etc/rhsm/facts directory entirely so that the
rpm -Uvh katello-ca-consumer-latest.noarch.rpm || true
retry subscription-manager register $opts
retry subscription-manager $repos
- retry yum install -y katello-agent || true # needed for errata reporting to satellite6
+ yum install -y katello-agent || true # needed for errata reporting to satellite6
katello-package-upload
# https://bugs.launchpad.net/tripleo/+bug/1711435
mkdir -p /etc/rhsm/facts
else
pushd /usr/share/rhn/
- curl --retry ${retry_max_count} --retry-delay 10 --max-time 30 -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
+ curl ${curl_opts} -k -O $REG_SAT_URL/pub/RHN-ORG-TRUSTED-SSL-CERT
popd
retry rhnreg_ks --serverUrl=$REG_SAT_URL/XMLRPC $sat5_opts
fi
config:
datafiles:
neutron_bigswitch_data:
- mapped_data:
- neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
- neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
- # NOTE(aschultz): required for the puppet module but we don't
- # actually want them defined on the compute nodes so we're
- # relying on the puppet module's handling of <SERVICE DEFAULT>
- # to just not set these but still accept that they were defined.
- # This should be fixed in puppet-neutron and removed here,
- # but for backportability, we need to define something.
- neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
- neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ # NOTE(aschultz): required for the puppet module but we don't
+ # actually want them defined on the compute nodes so we're
+ # relying on the puppet module's handling of <SERVICE DEFAULT>
+ # to just not set these but still accept that they were defined.
+ # This should be fixed in puppet-neutron and removed here,
+ # but for backportability, we need to define something.
+ neutron::plugins::ml2::bigswitch::restproxy::servers: '<SERVICE DEFAULT>'
+ neutron::plugins::ml2::bigswitch::restproxy::server_auth: '<SERVICE DEFAULT>'
NeutronBigswitchDeployment:
config:
datafiles:
neutron_bigswitch_data:
- mapped_data:
- neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
- neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
- neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
- neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
- neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
- neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
- neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
- neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
- neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
+ neutron::agents::bigswitch::agent_enabled: {get_input: neutron_enable_bigswitch_agent}
+ neutron::agents::bigswitch::lldp_enabled: {get_input: neutron_enable_bigswitch_lldp}
+ neutron::plugins::ml2::bigswitch::restproxy::servers: {get_input: restproxy_servers}
+ neutron::plugins::ml2::bigswitch::restproxy::server_auth: {get_input: restproxy_server_auth}
+ neutron::plugins::ml2::bigswitch::restproxy::auto_sync_on_failure: {get_input: restproxy_auto_sync_on_failure}
+ neutron::plugins::ml2::bigswitch::restproxy::consistency_interval: {get_input: restproxy_consistency_interval}
+ neutron::plugins::ml2::bigswitch::restproxy::neutron_id: {get_input: restproxy_neutron_id}
+ neutron::plugins::ml2::bigswitch::restproxy::server_ssl: {get_input: restproxy_server_ssl}
+ neutron::plugins::ml2::bigswitch::restproxy::ssl_cert_directory: {get_input: restproxy_ssl_cert_directory}
NeutronBigswitchDeployment:
type: OS::Heat::StructuredDeployment
- bootstrap_node # provided by allNodesConfig
- all_nodes # provided by allNodesConfig
- vip_data # provided by allNodesConfig
+ - net_ip_map
- '"%{::osfamily}"'
# The following are required for compatibility with the Controller role
# where some vendor integrations added hieradata via ExtraConfigPre
service_names:
service_names: {get_param: ServiceNames}
sensu::subscriptions: {get_param: MonitoringSubscriptions}
+ net_ip_map: {get_attr: [NetIpMap, net_ip_map]}
service_configs:
map_replace:
- {get_param: ServiceConfigSettings}
type: string
description: >
Cron to move deleted instances to another table - User
- default: 'keystone'
+ default: 'cinder'
CinderCronDbPurgeAge:
type: string
description: >
EnableInternalTLS:
type: boolean
default: false
+ MysqlIPv6:
+ default: false
+ description: Enable IPv6 in MySQL
+ type: boolean
+
conditions:
# in tripleo-puppet-elements.
mysql::server::package_name: 'mariadb-galera-server'
mysql::server::manage_config_file: true
+ mysql_ipv6: {get_param: MysqlIPv6}
tripleo.mysql.firewall_rules:
'104 mysql galera':
dport:
{get_param: [ServiceNetMap, MysqlNetwork]}
tripleo::profile::base::database::mysql::generate_dropin_file_limit:
{get_param: MysqlIncreaseFileLimit}
- - generate_service_certificates: true
- tripleo::profile::base::database::mysql::certificate_specs:
- service_certificate: '/etc/pki/tls/certs/mysql.crt'
- service_key: '/etc/pki/tls/private/mysql.key'
- hostname:
- str_replace:
- template: "%{hiera('cloud_name_NETWORK')}"
- params:
- NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- dnsnames:
- - str_replace:
+ - if:
+ - internal_tls_enabled
+ -
+ generate_service_certificates: true
+ tripleo::profile::base::database::mysql::certificate_specs:
+ service_certificate: '/etc/pki/tls/certs/mysql.crt'
+ service_key: '/etc/pki/tls/private/mysql.key'
+ hostname:
+ str_replace:
template: "%{hiera('cloud_name_NETWORK')}"
params:
NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- - str_replace:
- template:
- "%{hiera('fqdn_$NETWORK')}"
+ dnsnames:
+ - str_replace:
+ template: "%{hiera('cloud_name_NETWORK')}"
+ params:
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ - str_replace:
+ template:
+ "%{hiera('fqdn_$NETWORK')}"
+ params:
+ $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ principal:
+ str_replace:
+ template: "mysql/%{hiera('cloud_name_NETWORK')}"
params:
- $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
- principal:
- str_replace:
- template: "mysql/%{hiera('cloud_name_NETWORK')}"
- params:
- NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+ - {}
step_config: |
include ::tripleo::profile::base::database::mysql
metadata_settings:
GlanceBackend:
default: swift
description: The short name of the Glance backend to use. Should be one
- of swift, rbd, or file
+ of swift, rbd, cinder, or file
type: string
constraints:
- - allowed_values: ['swift', 'file', 'rbd']
+ - allowed_values: ['swift', 'file', 'rbd', 'cinder']
GlanceNfsEnabled:
default: false
description: >
# internal_api_subnet - > IP/CIDR
memcached::listen_ip: {get_param: [ServiceNetMap, MemcachedNetwork]}
memcached::max_memory: {get_param: MemcachedMaxMemory}
+ memcached::verbosity: 'v'
tripleo.memcached.firewall_rules:
'121 memcached':
dport: 11211
service_config_settings:
neutron_api:
neutron::server::service_providers: {get_param: NeutronServiceProviders}
+ horizon:
+ horizon::neutron_options:
+ enable_lb: True
collectd:
tripleo.collectd.plugins.nova_compute:
- virt
- collectd::plugins::virt::connection: "qemu:///system"
+ collectd::plugin::virt::connection: 'qemu:///system'
upgrade_tasks:
- name: Stop nova-compute service
tags: step1
RabbitFDLimit:
default: 65536
description: Configures RabbitMQ FD limit
- type: string
+ type: number
RabbitIPv6:
default: false
description: Enable IPv6 in RabbitMQ
--- /dev/null
+---
+upgrade:
+ - |
+    The format of the CephPools parameter must be updated to the form
+    expected by ceph-ansible. For example, for a new pool named `mypool`
+    it should change from:
+    { "mypool": { "size": 3, "pg_num": 128, "pgp_num": 128 } }
+    into:
+    [ { "name": "mypool", "pg_num": 128, "rule_name": "" } ]
+    The first is a map where each key is a pool name and its value holds
+    the pool properties; the second is a list where each item describes
+    all properties of a pool, including its name.
+other:
+ - |
+    With the migration from puppet-ceph to ceph-ansible for the deployment
+    of Ceph, the format of the CephPools parameter changes because the two
+    tools use different formats to represent the list of additional pools
+    to create.
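
For illustration, the new list form as it would be set in an environment file (pool name and pg_num are illustrative):

parameter_defaults:
  CephPools:
    - name: mypool
      pg_num: 128
      rule_name: ''
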
--- /dev/null
+---
+features:
+ - |
+    When using an RHSM proxy, TripleO will now verify that the proxy can be
+    reached; otherwise it stops early instead of trying to subscribe the nodes.
--- /dev/null
+---
+upgrade:
+ - |
+ When deploying with RHSM, sat-tools 6.2 will be installed instead of 6.1.
+    The new version is supported on RHEL 7.4 and provides the katello-agent
+    package.
files:
overcloud.yaml:
parameters:
- - ControllerHostnameFormat
- ComputeHostnameFormat
- CephStorageHostnameFormat
- - ControllerCount
- ComputeCount
- CephStorageCount
puppet/services/time/ntp.yaml:
- NtpServer
sample-env-generator/composable-roles.yaml:
parameters:
+ - ControllerOpenstackHostnameFormat
- DnsServers
+ - ControllerOpenstackCount
- DatabaseCount
- MessagingCount
- NetworkerCount
- - OvercloudControllerFlavor
+ - OvercloudControllerOpenstackFlavor
- OvercloudComputeFlavor
- OvercloudCephStorageFlavor
- OvercloudDatabaseFlavor
- OvercloudMessagingFlavor
- OvercloudNetworkerFlavor
sample_values:
- ControllerCount: 3
- OvercloudControllerFlavor: control
+ ControllerOpenstackCount: 3
+ OvercloudControllerOpenstackFlavor: control
ComputeCount: 1
OvercloudComputeFlavor: compute
CephStorageCount: 1
description: DNS servers to use for the Overcloud
type: comma_delimited_list
# Dynamic vars based on roles
+ ControllerOpenstackCount:
+ default: 0
+ description: Number of ControllerOpenstack nodes
+ type: number
DatabaseCount:
default: 0
description: Number of Database nodes
default: 0
description: Number of Networker nodes
type: number
+ ControllerOpenstackHostnameFormat:
+ type: string
+ description: >
+    Format for ControllerOpenstack node hostnames.
+    Note %index% is translated into the index of the node, e.g. 0/1/2 etc.,
+    and %stackname% is replaced with the stack name, e.g. overcloud
+ default: "%stackname%-controller-%index%"
OvercloudControllerFlavor:
default: control
description: Name of the flavor for Controller nodes
type: string
+ OvercloudControllerOpenstackFlavor:
+ default: control
+ description: Name of the flavor for ControllerOpenstack nodes
+ type: string
OvercloudComputeFlavor:
default: compute
description: Name of the flavor for Compute nodes
# consistency across files on. This should only contain parameters whose
# definition we cannot change for backwards compatibility reasons. New
# parameters to the templates should not be added to this list.
-PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
+PARAMETER_DEFINITION_EXCLUSIONS = {'CephPools': ['description',
+ 'type',
+ 'default'],
+ 'ManagementNetCidr': ['default'],
'ManagementAllocationPools': ['default'],
'ExternalNetCidr': ['default'],
'ExternalAllocationPools': ['default'],