- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
--- /dev/null
+# NOTE: This is an environment specific to the containers upgrade
+# CI job. We deploy a non-pacemakerized overcloud because, at the
+# time of writing, containerization of the services managed by
+# pacemaker is not complete, so we deploy and upgrade only the
+# non-HA services for now.
+
+resource_registry:
+ OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+ OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+
+parameter_defaults:
+ ControllerServices:
+ - OS::TripleO::Services::CephMon
+ - OS::TripleO::Services::CephOSD
+ - OS::TripleO::Services::CinderApi
+ - OS::TripleO::Services::CinderScheduler
+ - OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Docker
+ - OS::TripleO::Services::Kernel
+ - OS::TripleO::Services::Keystone
+ - OS::TripleO::Services::GlanceApi
+ - OS::TripleO::Services::HeatApi
+ - OS::TripleO::Services::HeatApiCfn
+ - OS::TripleO::Services::HeatApiCloudwatch
+ - OS::TripleO::Services::HeatEngine
+ - OS::TripleO::Services::MySQL
+ - OS::TripleO::Services::MySQLClient
+ - OS::TripleO::Services::NeutronDhcpAgent
+ - OS::TripleO::Services::NeutronL3Agent
+ - OS::TripleO::Services::NeutronMetadataAgent
+ - OS::TripleO::Services::NeutronServer
+ - OS::TripleO::Services::NeutronCorePlugin
+ - OS::TripleO::Services::NeutronOvsAgent
+ - OS::TripleO::Services::RabbitMQ
+ - OS::TripleO::Services::HAproxy
+ - OS::TripleO::Services::Keepalived
+ - OS::TripleO::Services::Memcached
+ - OS::TripleO::Services::Pacemaker
+ - OS::TripleO::Services::NovaConductor
+ - OS::TripleO::Services::NovaApi
+ - OS::TripleO::Services::NovaPlacement
+ - OS::TripleO::Services::NovaMetadata
+ - OS::TripleO::Services::NovaScheduler
+ - OS::TripleO::Services::Ntp
+ - OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::SwiftStorage
+ - OS::TripleO::Services::SwiftRingBuilder
+ - OS::TripleO::Services::Snmp
+ - OS::TripleO::Services::Timezone
+ - OS::TripleO::Services::NovaCompute
+ - OS::TripleO::Services::NovaLibvirt
+ ControllerExtraConfig:
+ nova::compute::libvirt::services::libvirt_virt_type: qemu
+ nova::compute::libvirt::libvirt_virt_type: qemu
+ # Required for Centos 7.3 and Qemu 2.6.0
+ nova::compute::libvirt::libvirt_cpu_mode: 'none'
+ #NOTE(gfidente): not great but we need this to deploy on ext4
+ #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+ ceph::profile::params::osd_max_object_name_len: 256
+ ceph::profile::params::osd_max_object_namespace_len: 64
+ SwiftCeilometerPipelineEnabled: False
+ Debug: True
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::CACerts
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
parameter_defaults:
ControllerServices:
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CephRgw
+ - OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::GlanceApi
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
print(cmd_stdout)
- print(cmd_stderr)
+ if cmd_stderr and \
+ cmd_stderr != 'Error response from daemon: ' \
+ 'No such container: {}\n'.format(name):
+ print(cmd_stderr)
process_count = int(os.environ.get('PROCESS_COUNT',
multiprocessing.cpu_count()))
# certain initialization steps (run in a container) will occur
-# on the first role listed in the roles file
-{% set primary_role_name = roles[0].name -%}
-
+# on the role marked as primary controller or the first role listed
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+ {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+ {%- set _ = primary_role.pop() -%}
+ {%- set _ = primary_role.append(role) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
heat_template_version: ocata
description: >
upgrade_tasks:
- name: Stop and disable nova-compute service
tags: step2
- service: name=nova-compute state=stopped enabled=no
+ service: name=openstack-nova-compute state=stopped enabled=no
upgrade_tasks:
- name: Stop and disable nova-compute service
tags: step2
- service: name=nova-compute state=stopped enabled=no
+ service: name=openstack-nova-compute state=stopped enabled=no
- [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
kolla_config:
/var/lib/kolla/config_files/zaqar.json:
- command: /usr/sbin/httpd -DFOREGROUND
+ command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf
/var/lib/kolla/config_files/zaqar_websocket.json:
command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf
docker_config:
net: host
privileged: false
restart: always
- # NOTE(mandre) kolla image changes the user to 'zaqar', we need it
- # to be root to run httpd
- user: root
volumes:
- /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
- /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
- - /var/lib/config-data/zaqar/etc/httpd:/etc/httpd/:ro
- /etc/hosts:/etc/hosts:ro
- /etc/localtime:/etc/localtime:ro
environment:
upgrade_tasks:
- name: Stop and disable zaqar service
tags: step2
- service: name=httpd state=stopped enabled=no
+ service: name=openstack-zaqar.service state=stopped enabled=no
+
--- /dev/null
+resource_registry:
+ OS::TripleO::Services::ExternalSwiftProxy: ../puppet/services/external-swift-proxy.yaml
+ OS::TripleO::Services::SwiftProxy: OS::Heat::None
+ OS::TripleO::Services::SwiftStorage: OS::Heat::None
+ OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+
+parameter_defaults:
+ ExternalPublicUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+ ExternalInternalUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+ ExternalAdminUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+ ExternalSwiftUserTenant: 'service'
+
type: string
rhel_reg_http_proxy_password:
type: string
+ UpdateOnRHELRegistration:
+ type: boolean
+ default: false
+ description: |
+ When enabled, the system will perform a yum update after performing the
+ RHEL Registration process.
resources:
input_values:
REG_METHOD: {get_param: rhel_reg_method}
+ YumUpdateConfigurationAfterRHELRegistration:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ config: |
+ #!/bin/bash
+ set -x
+ num_updates=$(yum list -q updates | wc -l)
+ if [ "$num_updates" -eq "0" ]; then
+ echo "No packages require updating"
+ exit 0
+ fi
+ full_command="yum -q -y update"
+ echo "Running: $full_command"
+ result=$($full_command)
+ return_code=$?
+ echo "$result"
+ echo "yum return code: $return_code"
+ exit $return_code
+
+ UpdateDeploymentAfterRHELRegistration:
+ type: OS::Heat::SoftwareDeployment
+ depends_on: RHELRegistrationDeployment
+ conditions:
+ update_requested: {get_param: UpdateOnRHELRegistration}
+ properties:
+ name: UpdateDeploymentAfterRHELRegistration
+ config: {get_resource: YumUpdateConfigurationAfterRHELRegistration}
+ server: {get_param: server}
+ actions: ['CREATE'] # Only do this on CREATE
+
outputs:
deploy_stdout:
description: Deployment reference, used to trigger puppet apply on changes
--- /dev/null
+heat_template_version: ocata
+
+description: >
+  This is a template which will fetch the ssh host public keys (rsa, ecdsa and ed25519).
+
+parameters:
+ server:
+ description: ID of the node to apply this config to
+ type: string
+
+resources:
+ SshHostPubKeyConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ outputs:
+ - name: rsa
+ - name: ecdsa
+ - name: ed25519
+ config: |
+ #!/bin/sh -x
+ test -e '/etc/ssh/ssh_host_rsa_key.pub' && cat /etc/ssh/ssh_host_rsa_key.pub > $heat_outputs_path.rsa
+ test -e '/etc/ssh/ssh_host_ecdsa_key.pub' && cat /etc/ssh/ssh_host_ecdsa_key.pub > $heat_outputs_path.ecdsa
+ test -e '/etc/ssh/ssh_host_ed25519_key.pub' && cat /etc/ssh/ssh_host_ed25519_key.pub > $heat_outputs_path.ed25519
+
+ SshHostPubKeyDeployment:
+ type: OS::Heat::SoftwareDeployment
+ properties:
+ config: {get_resource: SshHostPubKeyConfig}
+ server: {get_param: server}
+
+
+outputs:
+ ecdsa:
+ description: Host ssh public key (ecdsa)
+ value: {get_attr: [SshHostPubKeyDeployment, ecdsa]}
+ rsa:
+ description: Host ssh public key (rsa)
+ value: {get_attr: [SshHostPubKeyDeployment, rsa]}
+ ed25519:
+ description: Host ssh public key (ed25519)
+ value: {get_attr: [SshHostPubKeyDeployment, ed25519]}
--- /dev/null
+heat_template_version: ocata
+description: 'SSH Known Hosts Config'
+
+parameters:
+ known_hosts:
+ type: string
+
+resources:
+
+ SSHKnownHostsConfig:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ group: script
+ inputs:
+ - name: known_hosts
+ default: {get_param: known_hosts}
+ config: |
+ #!/bin/bash
+ set -eux
+ set -o pipefail
+
+ echo "Creating ssh known hosts file"
+
+ if [ ! -z "${known_hosts}" ]; then
+ echo "${known_hosts}"
+ echo -ne "${known_hosts}" > /etc/ssh/ssh_known_hosts
+ chmod 0644 /etc/ssh/ssh_known_hosts
+ else
+ rm -f /etc/ssh/ssh_known_hosts
+ echo "No ssh known hosts"
+ fi
+
+outputs:
+ OS::stack_id:
+ description: The SSHKnownHostsConfig resource.
+ value: {get_resource: SSHKnownHostsConfig}
\ No newline at end of file
OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
+ OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
+ OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
OS::TripleO::DefaultPasswords: default_passwords.yaml
# Tasks (for internal TripleO usage)
OS::TripleO::Services::NovaLibvirt: puppet/services/nova-libvirt.yaml
OS::TripleO::Services::Ntp: puppet/services/time/ntp.yaml
OS::TripleO::Services::SwiftProxy: puppet/services/swift-proxy.yaml
+ OS::TripleO::Services::ExternalSwiftProxy: OS::Heat::None
OS::TripleO::Services::SwiftStorage: puppet/services/swift-storage.yaml
OS::TripleO::Services::SwiftRingBuilder: puppet/services/swift-ringbuilder.yaml
OS::TripleO::Services::Snmp: puppet/services/snmp.yaml
-{% set primary_role_name = roles[0].name -%}
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+ {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+ {%- set _ = primary_role.pop() -%}
+ {%- set _ = primary_role.append(role) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
heat_template_version: ocata
description: >
type: json
value: {get_attr: [EndpointMap, endpoint_map]}
+ SshKnownHostsConfig:
+ type: OS::TripleO::Ssh::KnownHostsConfig
+ properties:
+ known_hosts:
+ list_join:
+ - ''
+ {% for role in roles %}
+ - {get_attr: [{{role.name}}, known_hosts_entry]}
+ {% endfor %}
+
# Jinja loop for Role in roles_data.yaml
{% for role in roles %}
# Resources generated for {{role.name}} Role
config: {get_attr: [hostsConfig, config_id]}
servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+ {{role.name}}SshKnownHostsDeployment:
+ type: OS::Heat::StructuredDeployments
+ properties:
+ name: {{role.name}}SshKnownHostsDeployment
+ config: {get_resource: SshKnownHostsConfig}
+ servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+
{{role.name}}AllNodesDeployment:
type: OS::Heat::StructuredDeployments
depends_on:
value:
{% for role in roles %}
{{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+{% endfor %}
+ RoleNetIpMap:
+ description: Mapping of each network to a list of IPs for each role
+ value:
+{% for role in roles %}
+ {{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]}
{% endfor %}
-version: 1.0\r
-\r
-template: overcloud.yaml\r
-environments:\r
-- path: overcloud-resource-registry-puppet.yaml\r
+version: 1.0
+
+name: overcloud
+description: >
+ Default Deployment plan
+template: overcloud.yaml
+environments:
+ - path: overcloud-resource-registry-puppet.yaml
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: BlockStorageDeployment
+ properties:
+ server: {get_resource: BlockStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [BlockStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the block storage server
value:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: CephStorageDeployment
+ properties:
+ server: {get_resource: CephStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [CephStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the ceph storage server
value:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: NovaComputeDeployment
+ properties:
+ server: {get_resource: NovaCompute}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [NovaCompute, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
- {get_resource: NovaCompute}
+ {get_resource: NovaCompute}
\ No newline at end of file
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: ControllerDeployment
+ properties:
+ server: {get_resource: Controller}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [Controller, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the Nova compute server
value:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: SwiftStorageHieraDeploy
+ properties:
+ server: {get_resource: SwiftStorage}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [SwiftStorage, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for the swift storage server
value:
update_identifier:
get_param: UpdateIdentifier
+ SshHostPubKey:
+ type: OS::TripleO::Ssh::HostPubKey
+ depends_on: {{role}}Deployment
+ properties:
+ server: {get_resource: {{role}}}
+
outputs:
ip_address:
description: IP address of the server in the ctlplane network
MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ known_hosts_entry:
+ description: Entry for ssh known hosts
+ value:
+ str_replace:
+ template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+ params:
+ PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+ DOMAIN: {get_param: CloudDomain}
+ PRIMARYHOST: {get_attr: [{{role}}, name]}
+ EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+ EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+ INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+ INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+ STORAGEIP: {get_attr: [StoragePort, ip_address]}
+ STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+ STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+ STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+ TENANTIP: {get_attr: [TenantPort, ip_address]}
+ TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+ MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+ MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+ CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+ CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+ HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
nova_server_resource:
description: Heat resource handle for {{role}} server
value:
--- /dev/null
+heat_template_version: ocata
+
+description: >
+ External Swift Proxy endpoint configured with Puppet
+
+parameters:
+ ExternalPublicUrl:
+ description: Public endpoint url for the external swift proxy
+ type: string
+ ExternalInternalUrl:
+ description: Internal endpoint url for the external swift proxy
+ type: string
+ ExternalAdminUrl:
+    description: Admin endpoint url for the external swift proxy
+ type: string
+ ExternalSwiftUserTenant:
+ description: Tenant where swift user will be set as admin
+ type: string
+ default: 'service'
+ SwiftPassword:
+ description: The password for the swift service account, used by the swift proxy services.
+ type: string
+ hidden: true
+ KeystoneRegion:
+ type: string
+ default: 'regionOne'
+ description: Keystone region for endpoint
+ ServiceNetMap:
+ default: {}
+ description: Mapping of service_name -> network name. Typically set
+ via parameter_defaults in the resource registry. This
+ mapping overrides those in ServiceNetMapDefaults.
+ type: json
+ DefaultPasswords:
+ default: {}
+ type: json
+ EndpointMap:
+ default: {}
+ description: Mapping of service endpoint -> protocol. Typically set
+ via parameter_defaults in the resource registry.
+ type: json
+
+resources:
+
+outputs:
+ role_data:
+ description: Role data for External Swift proxy.
+ value:
+ service_name: external_swift_proxy
+ config_settings:
+
+ step_config:
+
+ service_config_settings:
+ keystone:
+ swift::keystone::auth::public_url: {get_param: ExternalPublicUrl}
+ swift::keystone::auth::internal_url: {get_param: ExternalInternalUrl}
+ swift::keystone::auth::admin_url: {get_param: ExternalAdminUrl}
+ swift::keystone::auth::public_url_s3: ''
+ swift::keystone::auth::internal_url_s3: ''
+ swift::keystone::auth::admin_url_s3: ''
+ swift::keystone::auth::password: {get_param: SwiftPassword}
+ swift::keystone::auth::region: {get_param: KeystoneRegion}
+ swift::keystone::auth::tenant: {get_param: ExternalSwiftUserTenant}
+ swift::keystone::auth::configure_s3_endpoint: false
+ swift::keystone::auth::operator_roles:
+ - admin
+ - swiftoperator
+ - ResellerAdmin
+
Cron to purge expired tokens - Ensure
default: 'present'
KeystoneCronTokenFlushMinute:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Minute
default: '1'
KeystoneCronTokenFlushHour:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Hour
- default: '0'
+ default: '*'
KeystoneCronTokenFlushMonthday:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Month Day
default: '*'
KeystoneCronTokenFlushMonth:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Month
default: '*'
KeystoneCronTokenFlushWeekday:
- type: string
+ type: comma_delimited_list
description: >
Cron to purge expired tokens - Week Day
default: '*'
value:
service_name: neutron_bigswitch_agent
step_config: |
- if hiera('step') >= 4 {
- include ::neutron::agents::bigswitch
- }
+ include ::tripleo::profile::base::neutron::agents::bigswitch
type: string
description: Nova Compute upgrade level
default: auto
+ MigrationSshKey:
+ type: json
+ description: >
+ SSH key for migration.
+ Expects a dictionary with keys 'public_key' and 'private_key'.
+    The values should match the contents of the corresponding SSH public/private key files.
+ default: {}
resources:
NovaBase:
# we manage migration in nova common puppet profile
nova::compute::libvirt::migration_support: false
tripleo::profile::base::nova::manage_migration: true
+ tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
tripleo::profile::base::nova::nova_compute_enabled: true
nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
tripleo.nova_libvirt.firewall_rules:
'200 nova_libvirt':
dport:
- - 16509
- 16514
- '49152-49215'
- '5900-5999'
--- /dev/null
+---
+features:
+  - Added support for an external swift proxy. Users may need to
+    configure endpoints pointing to an already available swift
+    proxy service.
--- /dev/null
+---
+features:
+ - |
+    Add support for cold migration over ssh, which enables nova cold
+    migration.
+
+ This also switches to SSH as the default transport for live-migration.
+ The tripleo-common mistral action that generates passwords supplies the
+ MigrationSshKey parameter that enables this.
+deprecations:
+ - |
+ The TCP transport is no longer used for live-migration and the firewall
+ port has been closed.
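
As a hedged illustration of the MigrationSshKey format described above (key material below is a placeholder; in a normal deployment the tripleo-common mistral action generates and supplies this value), the parameter is a dictionary with 'public_key' and 'private_key' entries:

    parameter_defaults:
      MigrationSshKey:
        # placeholder values for illustration only
        public_key: 'ssh-rsa AAAAB3NzaC1yc2E... nova-migration'
        private_key: |
          -----BEGIN RSA PRIVATE KEY-----
          ...
          -----END RSA PRIVATE KEY-----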
--- /dev/null
+---
+features:
+  - |
+    Adds a tags field to roles that allows an operator to specify custom
+    tags used to find the role providing a specific type of functionality.
+    Currently a role with both the 'primary' and 'controller' tags is
+    considered to be the primary role. Historically the role named
+    'Controller' was the 'primary' role, and this primary designation is
+    used to determine items like memcache ip addresses. If no roles have
+    both the 'primary' and 'controller' tags, the first role specified in
+    roles_data.yaml is used as the primary role.
+upgrade:
+  - |
+    If using custom roles data, the logic was changed so that the first
+    role listed in the roles_data.yaml file is treated as the primary role.
+    This can be worked around by adding the 'primary' and 'controller' tags
+    to the custom controller role in your roles_data.yaml to ensure that it
+    is still considered the primary role (see the illustrative snippet
+    below).
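
A minimal sketch of that workaround for a custom roles_data.yaml (the role name and service list are hypothetical; the tags mirror the Controller role shown later in this change):

    - name: MyCustomController
      CountDefault: 1
      tags:
        - primary
        - controller
      ServicesDefault:
        - OS::TripleO::Services::Keystone
        # ... the rest of the custom controller services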
--- /dev/null
+---
+features:
+ - SSH host key exchange. The ssh host keys are collected from each host,
+ combined, and written to /etc/ssh/ssh_known_hosts.
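
For illustration (hostnames, addresses and key material are made up), each role emits a known_hosts_entry of roughly this shape per node, listing the node's names and IPs on every network followed by its host public key; the combined entries are then written to /etc/ssh/ssh_known_hosts on all nodes:

    192.0.2.11,overcloud-controller-0.example.com,overcloud-controller-0,172.16.2.11,overcloud-controller-0.internalapi.example.com,overcloud-controller-0.internalapi ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTY...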
--- /dev/null
+---
+fixes:
+ - The token flush cron job has been modified to run hourly instead of once
+    a day. The daily run was causing issues with larger deployments, as the
+    operation would take too long and sometimes even fail because the
+    transaction was so large. Note that this only affects people using the
+    UUID token provider.
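
This corresponds to the new KeystoneCronTokenFlush* defaults introduced in this change (minute '1', hour '*'). Operators who prefer the previous once-a-day behaviour can presumably override the defaults again, for example:

    parameter_defaults:
      KeystoneCronTokenFlushMinute: '1'
      KeystoneCronTokenFlushHour: '0'  # revert to a single daily run at 00:01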
--- /dev/null
+---
+features:
+ - |
+ Adds a new boolean parameter for RHEL Registration called
+    'UpdateOnRHELRegistration' that, when enabled, triggers a yum update
+ on the node after the registration process completes.
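
A minimal sketch of enabling this feature (whether the value is set in the rhel-registration environment file or another parameter_defaults file is an assumption; the parameter itself is shown earlier in this change):

    parameter_defaults:
      UpdateOnRHELRegistration: true  # run yum update once registration completes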
--- /dev/null
+---
+features:
+ - Add name and description fields to plan-environment.yaml
# ServicesDefault: (list) optional default list of services to be deployed
# on the role, defaults to an empty list. Sets the default for the
# {{role.name}}Services parameter in overcloud.yaml
-
-- name: Controller # the 'primary' role goes first
+#
+# tags: (list) list of tags used by other parts of the deployment process to
+# find the role for a specific type of functionality. Currently a role
+# with both 'primary' and 'controller' is used as the primary role for the
+#       deployment process. If no roles have both 'primary' and 'controller', the
+# first role in this file is used as the primary role.
+#
+- name: Controller
CountDefault: 1
+ tags:
+ - primary
+ - controller
ServicesDefault:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::CertmongerUser
- OS::TripleO::Services::Ec2Api
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::SwiftProxy
+ - OS::TripleO::Services::ExternalSwiftProxy
- OS::TripleO::Services::SwiftStorage
- OS::TripleO::Services::SwiftRingBuilder
- OS::TripleO::Services::Snmp
-- name: Undercloud # the 'primary' role goes first
+- name: Undercloud
CountDefault: 1
disable_constraints: True
+ tags:
+ - primary
+ - controller
ServicesDefault:
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::MySQL