Merge "Use underscore for Aodh and Gnocchi's container names"
author     Jenkins <jenkins@review.openstack.org>
           Wed, 19 Apr 2017 05:26:58 +0000 (05:26 +0000)
committer  Gerrit Code Review <review@openstack.org>
           Wed, 19 Apr 2017 05:26:58 +0000 (05:26 +0000)
40 files changed:
ci/environments/multinode-3nodes.yaml
ci/environments/multinode-container-upgrade.yaml [new file with mode: 0644]
ci/environments/multinode.yaml
ci/environments/multinode_major_upgrade.yaml
ci/environments/scenario001-multinode.yaml
ci/environments/scenario002-multinode.yaml
ci/environments/scenario003-multinode.yaml
ci/environments/scenario004-multinode.yaml
docker/docker-puppet.py
docker/docker-steps.j2
docker/services/nova-compute.yaml
docker/services/nova-ironic.yaml
docker/services/zaqar.yaml
environments/swift-external.yaml [new file with mode: 0644]
extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
extraconfig/tasks/ssh/host_public_key.yaml [new file with mode: 0644]
extraconfig/tasks/ssh/known_hosts_config.yaml [new file with mode: 0644]
overcloud-resource-registry-puppet.j2.yaml
overcloud.j2.yaml
plan-environment.yaml
puppet/blockstorage-role.yaml
puppet/cephstorage-role.yaml
puppet/compute-role.yaml
puppet/controller-role.yaml
puppet/objectstorage-role.yaml
puppet/role.role.j2.yaml
puppet/services/external-swift-proxy.yaml [new file with mode: 0644]
puppet/services/keystone.yaml
puppet/services/neutron-bigswitch-agent.yaml
puppet/services/nova-compute.yaml
puppet/services/nova-libvirt.yaml
releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml [new file with mode: 0644]
releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml [new file with mode: 0644]
releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml [new file with mode: 0644]
releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml [new file with mode: 0644]
releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml [new file with mode: 0644]
releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml [new file with mode: 0644]
releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml [new file with mode: 0644]
roles_data.yaml
roles_data_undercloud.yaml

diff --git a/ci/environments/multinode-3nodes.yaml b/ci/environments/multinode-3nodes.yaml
index 8307db9..56013ad 100644 (file)
@@ -24,6 +24,7 @@
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CinderApi
     - OS::TripleO::Services::CinderScheduler
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
@@ -62,6 +63,7 @@
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CinderBackup
     - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::MySQL
     - OS::TripleO::Services::MySQLClient
diff --git a/ci/environments/multinode-container-upgrade.yaml b/ci/environments/multinode-container-upgrade.yaml
new file mode 100644 (file)
index 0000000..44a0ce7
--- /dev/null
@@ -0,0 +1,61 @@
+# NOTE: This is an environment specific to the containers upgrade CI.
+# We mainly deploy a non-pacemakerized overcloud: containerization of
+# the services managed by pacemaker is not yet complete, so we deploy
+# and upgrade only the non-HA services for now.
+
+resource_registry:
+  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
+
+parameter_defaults:
+  ControllerServices:
+    - OS::TripleO::Services::CephMon
+    - OS::TripleO::Services::CephOSD
+    - OS::TripleO::Services::CinderApi
+    - OS::TripleO::Services::CinderScheduler
+    - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::Keystone
+    - OS::TripleO::Services::GlanceApi
+    - OS::TripleO::Services::HeatApi
+    - OS::TripleO::Services::HeatApiCfn
+    - OS::TripleO::Services::HeatApiCloudwatch
+    - OS::TripleO::Services::HeatEngine
+    - OS::TripleO::Services::MySQL
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::NeutronDhcpAgent
+    - OS::TripleO::Services::NeutronL3Agent
+    - OS::TripleO::Services::NeutronMetadataAgent
+    - OS::TripleO::Services::NeutronServer
+    - OS::TripleO::Services::NeutronCorePlugin
+    - OS::TripleO::Services::NeutronOvsAgent
+    - OS::TripleO::Services::RabbitMQ
+    - OS::TripleO::Services::HAproxy
+    - OS::TripleO::Services::Keepalived
+    - OS::TripleO::Services::Memcached
+    - OS::TripleO::Services::Pacemaker
+    - OS::TripleO::Services::NovaConductor
+    - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaPlacement
+    - OS::TripleO::Services::NovaMetadata
+    - OS::TripleO::Services::NovaScheduler
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::SwiftStorage
+    - OS::TripleO::Services::SwiftRingBuilder
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::NovaCompute
+    - OS::TripleO::Services::NovaLibvirt
+  ControllerExtraConfig:
+    nova::compute::libvirt::services::libvirt_virt_type: qemu
+    nova::compute::libvirt::libvirt_virt_type: qemu
+    # Required for Centos 7.3 and Qemu 2.6.0
+    nova::compute::libvirt::libvirt_cpu_mode: 'none'
+    #NOTE(gfidente): not great but we need this to deploy on ext4
+    #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
+    ceph::profile::params::osd_max_object_name_len: 256
+    ceph::profile::params::osd_max_object_namespace_len: 64
+  SwiftCeilometerPipelineEnabled: False
+  Debug: True
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index c946ec8..d0d6ba9 100644 (file)
@@ -18,6 +18,7 @@ parameter_defaults:
     - OS::TripleO::Services::CinderApi
     - OS::TripleO::Services::CinderScheduler
     - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
index 2251cc0..c97080f 100644 (file)
@@ -14,6 +14,7 @@ resource_registry:
 parameter_defaults:
   ControllerServices:
     - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
diff --git a/ci/environments/scenario001-multinode.yaml b/ci/environments/scenario001-multinode.yaml
index 5dd1f0f..0282c38 100644 (file)
@@ -23,6 +23,7 @@ resource_registry:
 
 parameter_defaults:
   ControllerServices:
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
diff --git a/ci/environments/scenario002-multinode.yaml b/ci/environments/scenario002-multinode.yaml
index cbcfa9b..38d24ee 100644 (file)
@@ -17,6 +17,7 @@ resource_registry:
 
 parameter_defaults:
   ControllerServices:
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
diff --git a/ci/environments/scenario003-multinode.yaml b/ci/environments/scenario003-multinode.yaml
index 6e926f7..5472b49 100644 (file)
@@ -17,6 +17,7 @@ resource_registry:
 
 parameter_defaults:
   ControllerServices:
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
diff --git a/ci/environments/scenario004-multinode.yaml b/ci/environments/scenario004-multinode.yaml
index 7428d42..25fad4b 100644 (file)
@@ -31,6 +31,7 @@ parameter_defaults:
     - OS::TripleO::Services::CephMon
     - OS::TripleO::Services::CephOSD
     - OS::TripleO::Services::CephRgw
+    - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::GlanceApi
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index 5c68b08..909a2c8 100755 (executable)
@@ -61,7 +61,10 @@ def rm_container(name):
                                stderr=subprocess.PIPE)
     cmd_stdout, cmd_stderr = subproc.communicate()
     print(cmd_stdout)
-    print(cmd_stderr)
+    if cmd_stderr and \
+            cmd_stderr != 'Error response from daemon: ' \
+            'No such container: {}\n'.format(name):
+        print(cmd_stderr)
 
 process_count = int(os.environ.get('PROCESS_COUNT',
                                    multiprocessing.cpu_count()))
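
Note: the docker-puppet.py hunk above suppresses only the exact "No such
container" message that `docker rm` prints when the container is already
absent; any other stderr is still surfaced. A standalone sketch of that
filter (the helper name is hypothetical), runnable without docker:

    def filter_rm_stderr(name, cmd_stderr):
        # Drop the benign "already removed" message; keep real errors.
        benign = ('Error response from daemon: '
                  'No such container: {}\n'.format(name))
        return '' if cmd_stderr == benign else cmd_stderr

    assert filter_rm_stderr(
        'heat_engine',
        'Error response from daemon: No such container: heat_engine\n') == ''
    assert filter_rm_stderr('heat_engine',
                            'permission denied\n') == 'permission denied\n'
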
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
index 301d838..f0af8e2 100644 (file)
@@ -1,7 +1,14 @@
 # certain initialization steps (run in a container) will occur
-# on the first role listed in the roles file
-{% set primary_role_name = roles[0].name -%}
-
+# on the role marked as primary controller or the first role listed
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+    {%- set _ = primary_role.pop() -%}
+    {%- set _ = primary_role.append(role) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
 heat_template_version: ocata
 
 description: >
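
Note: the single-element `primary_role` list above works around Jinja2
scoping, where a plain `set` inside a `for` loop does not persist outside the
loop body; mutating the list does. A minimal sketch of the same selection
using the jinja2 library and made-up role data:

    from jinja2 import Template

    TMPL = ("{%- set primary_role = [roles[0]] -%}"
            "{%- for role in roles -%}"
            "{%- if 'primary' in role.tags and 'controller' in role.tags -%}"
            "{%- set _ = primary_role.pop() -%}"
            "{%- set _ = primary_role.append(role) -%}"
            "{%- endif -%}"
            "{%- endfor -%}"
            "{{ primary_role[0].name }}")

    class Role(object):
        def __init__(self, name, tags=()):
            self.name, self.tags = name, list(tags)

    # The first role wins by default; a tagged role overrides it.
    print(Template(TMPL).render(
        roles=[Role('Compute'),
               Role('MyController', ['primary', 'controller'])]))
    # -> MyController
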
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
index eefcb36..fb286ca 100644 (file)
@@ -85,4 +85,4 @@ outputs:
       upgrade_tasks:
         - name: Stop and disable nova-compute service
           tags: step2
-          service: name=nova-compute state=stopped enabled=no
+          service: name=openstack-nova-compute state=stopped enabled=no
diff --git a/docker/services/nova-ironic.yaml b/docker/services/nova-ironic.yaml
index 9941abd..d627042 100644 (file)
@@ -82,4 +82,4 @@ outputs:
       upgrade_tasks:
         - name: Stop and disable nova-compute service
           tags: step2
-          service: name=nova-compute state=stopped enabled=no
+          service: name=openstack-nova-compute state=stopped enabled=no
diff --git a/docker/services/zaqar.yaml b/docker/services/zaqar.yaml
index 1160031..21aff31 100644 (file)
@@ -56,7 +56,7 @@ outputs:
             - [ {get_param: DockerNamespace}, {get_param: DockerZaqarImage} ]
       kolla_config:
         /var/lib/kolla/config_files/zaqar.json:
-          command: /usr/sbin/httpd -DFOREGROUND
+          command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf
         /var/lib/kolla/config_files/zaqar_websocket.json:
           command: /usr/bin/zaqar-server --config-file /etc/zaqar/zaqar.conf --config-file /etc/zaqar/1.conf
       docker_config:
@@ -66,13 +66,9 @@ outputs:
             net: host
             privileged: false
             restart: always
-            # NOTE(mandre) kolla image changes the user to 'zaqar', we need it
-            # to be root to run httpd
-            user: root
             volumes:
               - /var/lib/kolla/config_files/zaqar.json:/var/lib/kolla/config_files/config.json:ro
               - /var/lib/config-data/zaqar/etc/zaqar/:/etc/zaqar/:ro
-              - /var/lib/config-data/zaqar/etc/httpd:/etc/httpd/:ro
               - /etc/hosts:/etc/hosts:ro
               - /etc/localtime:/etc/localtime:ro
             environment:
@@ -92,4 +88,5 @@ outputs:
       upgrade_tasks:
         - name: Stop and disable zaqar service
           tags: step2
-          service: name=httpd state=stopped enabled=no
+          service: name=openstack-zaqar.service state=stopped enabled=no
+
diff --git a/environments/swift-external.yaml b/environments/swift-external.yaml
new file mode 100644 (file)
index 0000000..0bf0d39
--- /dev/null
@@ -0,0 +1,12 @@
+resource_registry:
+  OS::TripleO::Services::ExternalSwiftProxy: ../puppet/services/external-swift-proxy.yaml
+  OS::TripleO::Services::SwiftProxy: OS::Heat::None
+  OS::TripleO::Services::SwiftStorage: OS::Heat::None
+  OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+
+parameter_defaults:
+  ExternalPublicUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+  ExternalInternalUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+  ExternalAdminUrl: 'http://swiftproxy:9024/v1/%(tenant_id)s'
+  ExternalSwiftUserTenant: 'service'
+
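
Note: the URLs in this new environment file use Keystone endpoint templating;
`%(tenant_id)s` is expanded per project when the service catalog is rendered.
A quick illustration of the substitution (host, port, and project id are
placeholders):

    url = 'http://swiftproxy:9024/v1/%(tenant_id)s'
    print(url % {'tenant_id': 'f2a68ffb7a1a4e32ba6dedfe01a4f622'})
    # -> http://swiftproxy:9024/v1/f2a68ffb7a1a4e32ba6dedfe01a4f622
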
diff --git a/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml b/extraconfig/pre_deploy/rhel-registration/rhel-registration.yaml
index e8316c5..30a8355 100644 (file)
@@ -53,6 +53,12 @@ parameters:
     type: string
   rhel_reg_http_proxy_password:
     type: string
+  UpdateOnRHELRegistration:
+    type: boolean
+    default: false
+    description: |
+      When enabled, the system will perform a yum update after completing
+      the RHEL registration process.
 
 resources:
 
@@ -134,6 +140,37 @@ resources:
       input_values:
         REG_METHOD: {get_param: rhel_reg_method}
 
+  YumUpdateConfigurationAfterRHELRegistration:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: |
+        #!/bin/bash
+        set -x
+        num_updates=$(yum list -q updates | wc -l)
+        if [ "$num_updates" -eq "0" ]; then
+           echo "No packages require updating"
+           exit 0
+        fi
+        full_command="yum -q -y update"
+        echo "Running: $full_command"
+        result=$($full_command)
+        return_code=$?
+        echo "$result"
+        echo "yum return code: $return_code"
+        exit $return_code
+
+  UpdateDeploymentAfterRHELRegistration:
+    type: OS::Heat::SoftwareDeployment
+    depends_on: RHELRegistrationDeployment
+    conditions:
+      update_requested: {get_param: UpdateOnRHELRegistration}
+    properties:
+      name: UpdateDeploymentAfterRHELRegistration
+      config: {get_resource: YumUpdateConfigurationAfterRHELRegistration}
+      server:  {get_param: server}
+      actions: ['CREATE'] # Only do this on CREATE
+
 outputs:
   deploy_stdout:
     description: Deployment reference, used to trigger puppet apply on changes
diff --git a/extraconfig/tasks/ssh/host_public_key.yaml b/extraconfig/tasks/ssh/host_public_key.yaml
new file mode 100644 (file)
index 0000000..847c877
--- /dev/null
@@ -0,0 +1,42 @@
+heat_template_version: ocata
+
+description: >
+  This is a template which will fetch the ssh host public key.
+
+parameters:
+  server:
+    description: ID of the node to apply this config to
+    type: string
+
+resources:
+  SshHostPubKeyConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      outputs:
+      - name: rsa
+      - name: ecdsa
+      - name: ed25519
+      config: |
+        #!/bin/sh -x
+        test -e '/etc/ssh/ssh_host_rsa_key.pub' && cat /etc/ssh/ssh_host_rsa_key.pub > $heat_outputs_path.rsa
+        test -e '/etc/ssh/ssh_host_ecdsa_key.pub' && cat /etc/ssh/ssh_host_ecdsa_key.pub > $heat_outputs_path.ecdsa
+        test -e '/etc/ssh/ssh_host_ed25519_key.pub' && cat /etc/ssh/ssh_host_ed25519_key.pub > $heat_outputs_path.ed25519
+
+  SshHostPubKeyDeployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      config: {get_resource: SshHostPubKeyConfig}
+      server: {get_param: server}
+
+
+outputs:
+  ecdsa:
+    description: Host ssh public key (ecdsa)
+    value:  {get_attr: [SshHostPubKeyDeployment, ecdsa]}
+  rsa:
+    description: Host ssh public key (rsa)
+    value:  {get_attr: [SshHostPubKeyDeployment, rsa]}
+  ed25519:
+    description: Host ssh public key (ed25519)
+    value:  {get_attr: [SshHostPubKeyDeployment, ed25519]}
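
Note: the script above writes each public key to `$heat_outputs_path.<name>`,
which Heat maps onto the SoftwareConfig outputs declared for the resource. A
rough Python equivalent of the collection step (standard OpenSSH paths
assumed):

    import os

    def host_public_keys(ssh_dir='/etc/ssh'):
        # One entry per key type; a type is absent if its key file is missing.
        keys = {}
        for algo in ('rsa', 'ecdsa', 'ed25519'):
            path = os.path.join(ssh_dir, 'ssh_host_%s_key.pub' % algo)
            if os.path.exists(path):
                with open(path) as f:
                    keys[algo] = f.read().strip()
        return keys
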
diff --git a/extraconfig/tasks/ssh/known_hosts_config.yaml b/extraconfig/tasks/ssh/known_hosts_config.yaml
new file mode 100644 (file)
index 0000000..2ebcb63
--- /dev/null
@@ -0,0 +1,36 @@
+heat_template_version: ocata
+description: 'SSH Known Hosts Config'
+
+parameters:
+  known_hosts:
+    type: string
+
+resources:
+
+  SSHKnownHostsConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      inputs:
+        - name: known_hosts
+          default: {get_param: known_hosts}
+      config: |
+        #!/bin/bash
+        set -eux
+        set -o pipefail
+
+        echo "Creating ssh known hosts file"
+
+        if [ ! -z "${known_hosts}" ]; then
+          echo "${known_hosts}"
+          echo -ne "${known_hosts}" > /etc/ssh/ssh_known_hosts
+          chmod 0644 /etc/ssh/ssh_known_hosts
+        else
+          rm -f /etc/ssh/ssh_known_hosts
+          echo "No ssh known hosts"
+        fi
+
+outputs:
+  OS::stack_id:
+    description: The SSHKnownHostsConfig resource.
+    value: {get_resource: SSHKnownHostsConfig}
\ No newline at end of file
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index b178068..ee75de6 100644 (file)
@@ -5,6 +5,8 @@ resource_registry:
   OS::TripleO::PostUpgradeSteps: puppet/post-upgrade.yaml
   OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
   OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
+  OS::TripleO::Ssh::HostPubKey: extraconfig/tasks/ssh/host_public_key.yaml
+  OS::TripleO::Ssh::KnownHostsConfig: extraconfig/tasks/ssh/known_hosts_config.yaml
   OS::TripleO::DefaultPasswords: default_passwords.yaml
 
   # Tasks (for internal TripleO usage)
@@ -189,6 +191,7 @@ resource_registry:
   OS::TripleO::Services::NovaLibvirt: puppet/services/nova-libvirt.yaml
   OS::TripleO::Services::Ntp: puppet/services/time/ntp.yaml
   OS::TripleO::Services::SwiftProxy: puppet/services/swift-proxy.yaml
+  OS::TripleO::Services::ExternalSwiftProxy: OS::Heat::None
   OS::TripleO::Services::SwiftStorage: puppet/services/swift-storage.yaml
   OS::TripleO::Services::SwiftRingBuilder: puppet/services/swift-ringbuilder.yaml
   OS::TripleO::Services::Snmp: puppet/services/snmp.yaml
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index a322a44..a2d501d 100644 (file)
@@ -1,4 +1,12 @@
-{% set primary_role_name = roles[0].name -%}
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+    {%- set _ = primary_role.pop() -%}
+    {%- set _ = primary_role.append(role) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# primary role is: {{primary_role_name}}
 heat_template_version: ocata
 
 description: >
@@ -254,6 +262,16 @@ resources:
       type: json
       value: {get_attr: [EndpointMap, endpoint_map]}
 
+  SshKnownHostsConfig:
+    type: OS::TripleO::Ssh::KnownHostsConfig
+    properties:
+      known_hosts:
+        list_join:
+          - ''
+          {% for role in roles %}
+          - {get_attr: [{{role.name}}, known_hosts_entry]}
+          {% endfor %}
+
   # Jinja loop for Role in roles_data.yaml
 {% for role in roles %}
   # Resources generated for {{role.name}} Role
@@ -285,6 +303,13 @@ resources:
       config: {get_attr: [hostsConfig, config_id]}
       servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
 
+  {{role.name}}SshKnownHostsDeployment:
+    type: OS::Heat::StructuredDeployments
+    properties:
+      name: {{role.name}}SshKnownHostsDeployment
+      config: {get_resource: SshKnownHostsConfig}
+      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+
   {{role.name}}AllNodesDeployment:
     type: OS::Heat::StructuredDeployments
     depends_on:
@@ -691,4 +716,10 @@ outputs:
     value:
 {% for role in roles %}
       {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+{% endfor %}
+  RoleNetIpMap:
+    description: Mapping of each network to a list of IPs for each role
+    value:
+{% for role in roles %}
+      {{role.name}}: {get_attr: [{{role.name}}IpListMap, net_ip_map]}
 {% endfor %}
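
Note: the Jinja loop inside `SshKnownHostsConfig` emits one `get_attr` per
role, so after rendering, `list_join` concatenates every role's
`known_hosts_entry` values into one string. For a two-role deployment the
rendered property would look roughly like this (role names illustrative,
shown as a Python structure for brevity):

    known_hosts = {'list_join': [
        '',
        {'get_attr': ['Controller', 'known_hosts_entry']},
        {'get_attr': ['Compute', 'known_hosts_entry']},
    ]}
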
diff --git a/plan-environment.yaml b/plan-environment.yaml
index f629eff..1f9c821 100644 (file)
@@ -1,5 +1,8 @@
-version: 1.0\r
-\r
-template: overcloud.yaml\r
-environments:\r
--  path: overcloud-resource-registry-puppet.yaml\r
+version: 1.0
+
+name: overcloud
+description: >
+  Default Deployment plan
+template: overcloud.yaml
+environments:
+  - path: overcloud-resource-registry-puppet.yaml
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index 51f9aba..16fb4b9 100644 (file)
@@ -457,6 +457,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: BlockStorageDeployment
+    properties:
+        server: {get_resource: BlockStorage}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -504,6 +510,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, BlockStorageHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [BlockStorage, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [BlockStorage, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the block storage server
     value:
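
Note: this `known_hosts_entry` output (repeated below for each role template)
flattens every hostname and IP a node answers to into one known_hosts line
ending in the node's ecdsa host key. A rough Python rendering of the
substitution, with two of the eight network groups and made-up values:

    template = ('PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,'
                'CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY')
    params = {
        'PRIMARYIP': '172.16.1.15',
        'PRIMARYHOST': 'overcloud-blockstorage-0',
        'DOMAIN': 'localdomain',
        'CTLPLANEIP': '192.168.24.12',
        'CTLPLANEHOST': 'overcloud-blockstorage-0',
        'HOSTSSHPUBKEY': 'ecdsa-sha2-nistp256 AAAAE2Vj...',
    }
    entry = template
    for key in sorted(params, key=len, reverse=True):  # longest keys first
        entry = entry.replace(key, params[key])
    print(entry)
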
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index d7d7f47..4b02245 100644 (file)
@@ -468,6 +468,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: CephStorageDeployment
+    properties:
+        server: {get_resource: CephStorage}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -515,6 +521,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, CephStorageHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [CephStorage, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [CephStorage, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the ceph storage server
     value:
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index ebdd762..37331f3 100644 (file)
@@ -492,6 +492,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: NovaComputeDeployment
+    properties:
+        server: {get_resource: NovaCompute}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -559,7 +565,38 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ComputeHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [NovaCompute, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [NovaCompute, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the Nova compute server
     value:
-      {get_resource: NovaCompute}
+      {get_resource: NovaCompute}
\ No newline at end of file
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index 3d32add..68623e2 100644 (file)
@@ -531,6 +531,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: ControllerDeployment
+    properties:
+        server: {get_resource: Controller}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -598,6 +604,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ControllerHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [Controller, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [Controller, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the Nova compute server
     value:
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 6ee06d7..a329d13 100644 (file)
@@ -455,6 +455,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: SwiftStorageHieraDeploy
+    properties:
+        server: {get_resource: SwiftStorage}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -502,6 +508,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, ObjectStorageHostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [SwiftStorage, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [SwiftStorage, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for the swift storage server
     value:
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 9227b52..9cfc65a 100644 (file)
@@ -490,6 +490,12 @@ resources:
         update_identifier:
           get_param: UpdateIdentifier
 
+  SshHostPubKey:
+    type: OS::TripleO::Ssh::HostPubKey
+    depends_on: {{role}}Deployment
+    properties:
+        server: {get_resource: {{role}}}
+
 outputs:
   ip_address:
     description: IP address of the server in the ctlplane network
@@ -537,6 +543,37 @@ outputs:
           MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
           CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
           CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+  known_hosts_entry:
+    description: Entry for ssh known hosts
+    value:
+      str_replace:
+        template: "PRIMARYIP,PRIMARYHOST.DOMAIN,PRIMARYHOST,\
+EXTERNALIP,EXTERNALHOST.DOMAIN,EXTERNALHOST,\
+INTERNAL_APIIP,INTERNAL_APIHOST.DOMAIN,INTERNAL_APIHOST,\
+STORAGEIP,STORAGEHOST.DOMAIN,STORAGEHOST,\
+STORAGE_MGMTIP,STORAGE_MGMTHOST.DOMAIN,STORAGE_MGMTHOST,\
+TENANTIP,TENANTHOST.DOMAIN,TENANTHOST,\
+MANAGEMENTIP,MANAGEMENTHOST.DOMAIN,MANAGEMENTHOST,\
+CTLPLANEIP,CTLPLANEHOST.DOMAIN,CTLPLANEHOST HOSTSSHPUBKEY"
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [{{role}}, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST: {get_attr: [NetHostMap, value, external, short]}
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST: {get_attr: [NetHostMap, value, internal_api, short]}
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST: {get_attr: [NetHostMap, value, storage, short]}
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST: {get_attr: [NetHostMap, value, storage_mgmt, short]}
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST: {get_attr: [NetHostMap, value, tenant, short]}
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST: {get_attr: [NetHostMap, value, management, short]}
+          CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+          CTLPLANEHOST: {get_attr: [NetHostMap, value, ctlplane, short]}
+          HOSTSSHPUBKEY: {get_attr: [SshHostPubKey, ecdsa]}
   nova_server_resource:
     description: Heat resource handle for {{role}} server
     value:
diff --git a/puppet/services/external-swift-proxy.yaml b/puppet/services/external-swift-proxy.yaml
new file mode 100644 (file)
index 0000000..75f5b6a
--- /dev/null
@@ -0,0 +1,70 @@
+heat_template_version: ocata
+
+description: >
+  External Swift Proxy endpoint configured with Puppet
+
+parameters:
+  ExternalPublicUrl:
+    description: Public endpoint url for the external swift proxy
+    type: string
+  ExternalInternalUrl:
+    description: Internal endpoint url for the external swift proxy
+    type: string
+  ExternalAdminUrl:
+    description: Admin endpoint url for the external swift proxy
+    type: string
+  ExternalSwiftUserTenant:
+    description: Tenant where swift user will be set as admin
+    type: string
+    default: 'service'
+  SwiftPassword:
+    description: The password for the swift service account, used by the swift proxy services.
+    type: string
+    hidden: true
+  KeystoneRegion:
+    type: string
+    default: 'regionOne'
+    description: Keystone region for endpoint
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+
+resources:
+
+outputs:
+  role_data:
+    description: Role data for External Swift proxy.
+    value:
+      service_name: external_swift_proxy
+      config_settings:
+
+      step_config:
+
+      service_config_settings:
+        keystone:
+          swift::keystone::auth::public_url: {get_param: ExternalPublicUrl}
+          swift::keystone::auth::internal_url: {get_param: ExternalInternalUrl}
+          swift::keystone::auth::admin_url: {get_param: ExternalAdminUrl}
+          swift::keystone::auth::public_url_s3: ''
+          swift::keystone::auth::internal_url_s3: ''
+          swift::keystone::auth::admin_url_s3: ''
+          swift::keystone::auth::password: {get_param: SwiftPassword}
+          swift::keystone::auth::region: {get_param: KeystoneRegion}
+          swift::keystone::auth::tenant: {get_param: ExternalSwiftUserTenant}
+          swift::keystone::auth::configure_s3_endpoint: false
+          swift::keystone::auth::operator_roles:
+            - admin
+            - swiftoperator
+            - ResellerAdmin
+
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index 632d9b0..8a0e750 100644 (file)
@@ -119,27 +119,27 @@ parameters:
         Cron to purge expired tokens - Ensure
     default: 'present'
   KeystoneCronTokenFlushMinute:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Minute
     default: '1'
   KeystoneCronTokenFlushHour:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Hour
-    default: '0'
+    default: '*'
   KeystoneCronTokenFlushMonthday:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Month Day
     default: '*'
   KeystoneCronTokenFlushMonth:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Month
     default: '*'
   KeystoneCronTokenFlushWeekday:
-    type: string
+    type: comma_delimited_list
     description: >
         Cron to purge expired tokens - Week Day
     default: '*'
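
Note: with the switch to `comma_delimited_list`, each cron field accepts a
single value or a list, and a list renders as comma-separated cron syntax;
under the new defaults the flush runs at minute 1 of every hour. A small
sketch of how the fields compose (the keystone-manage command string is
illustrative):

    def cron_field(value):
        # Heat comma_delimited_list values arrive as lists of strings.
        return ','.join(value) if isinstance(value, list) else value

    minute, hour, monthday, month, weekday = '1', '*', '*', '*', '*'
    print(' '.join(cron_field(f)
                   for f in (minute, hour, monthday, month, weekday))
          + ' keystone-manage token_flush')
    # -> 1 * * * * keystone-manage token_flush   (hourly, at minute 1)
    print(cron_field(['0', '12']))  # -> 0,12  (e.g. twice a day)
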
diff --git a/puppet/services/neutron-bigswitch-agent.yaml b/puppet/services/neutron-bigswitch-agent.yaml
index 845f0da..8f56e0a 100644 (file)
@@ -26,6 +26,4 @@ outputs:
     value:
       service_name: neutron_bigswitch_agent
       step_config: |
-        if hiera('step') >= 4 {
-          include ::neutron::agents::bigswitch
-        }
+        include ::tripleo::profile::base::neutron::agents::bigswitch
diff --git a/puppet/services/nova-compute.yaml b/puppet/services/nova-compute.yaml
index a9737eb..b171143 100644 (file)
@@ -79,6 +79,13 @@ parameters:
     type: string
     description: Nova Compute upgrade level
     default: auto
+  MigrationSshKey:
+    type: json
+    description: >
+      SSH key for migration.
+      Expects a dictionary with keys 'public_key' and 'private_key'.
+      Values should be identical to SSH public/private key files.
+    default: {}
 
 resources:
   NovaBase:
@@ -111,6 +118,7 @@ outputs:
             # we manage migration in nova common puppet profile
             nova::compute::libvirt::migration_support: false
             tripleo::profile::base::nova::manage_migration: true
+            tripleo::profile::base::nova::migration_ssh_key: {get_param: MigrationSshKey}
             tripleo::profile::base::nova::nova_compute_enabled: true
             nova::compute::rbd::libvirt_images_rbd_pool: {get_param: NovaRbdPoolName}
             nova::compute::rbd::libvirt_rbd_user: {get_param: CephClientUserName}
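
Note: per its description, `MigrationSshKey` expects a JSON map with
`public_key` and `private_key` entries, normally supplied by the
tripleo-common mistral action that generates passwords. A sketch of the
expected shape (key material is placeholder text):

    migration_ssh_key = {
        'public_key': 'ssh-rsa AAAAB3Nza... nova-migration',
        'private_key': ('-----BEGIN RSA PRIVATE KEY-----\n'
                        '...\n'
                        '-----END RSA PRIVATE KEY-----\n'),
    }
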
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
index faf1ae4..b297424 100644 (file)
@@ -66,7 +66,6 @@ outputs:
             tripleo.nova_libvirt.firewall_rules:
               '200 nova_libvirt':
                 dport:
-                  - 16509
                   - 16514
                   - '49152-49215'
                   - '5900-5999'
diff --git a/releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml b/releasenotes/notes/enable-support-for-external-swift-proxy-941917f8bcc63a5d.yaml
new file mode 100644 (file)
index 0000000..83b05bb
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - Added support for an external swift proxy. Users may need to
+    configure endpoints pointing to a swift proxy service that is
+    already available.
diff --git a/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml b/releasenotes/notes/migration_over_ssh-003e2a92f5f5374d.yaml
new file mode 100644 (file)
index 0000000..45ca9fe
--- /dev/null
@@ -0,0 +1,14 @@
+---
+features:
+  - |
+    Add support for cold migration over ssh.
+
+    This enables nova cold migration.
+
+    This also switches to SSH as the default transport for live-migration.
+    The tripleo-common mistral action that generates passwords supplies the
+    MigrationSshKey parameter that enables this.
+deprecations:
+  - |
+    The TCP transport is no longer used for live-migration and the firewall
+    port has been closed.
diff --git a/releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml b/releasenotes/notes/role-tags-16ac2e9e8fcab218.yaml
new file mode 100644 (file)
index 0000000..dadbfa4
--- /dev/null
@@ -0,0 +1,18 @@
+---
+features:
+  - |
+    Adds tags to roles that allow an operator to specify custom tags to use
+    when trying to find functionality available from a role. Currently a role
+    with both the 'primary' and 'controller' tags is considered to be the
+    primary role. Historically the role named 'Controller' was the 'primary'
+    role, and this primary designation is used to determine items like
+    memcache ip addresses. If no roles have both the 'primary' and
+    'controller' tags, the first role specified in roles_data.yaml is used
+    as the primary role.
+upgrade:
+  - |
+    If using custom roles data, be aware that the logic was changed: the
+    first role listed in the roles_data.yaml file is now the primary role.
+    This can be worked around by adding the 'primary' and 'controller' tags
+    to the custom controller role in your roles_data.yaml to ensure that the
+    defined custom controller role is still considered the primary role.
diff --git a/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml b/releasenotes/notes/ssh_known_hosts-287563590632d1aa.yaml
new file mode 100644 (file)
index 0000000..8b533b1
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - SSH host key exchange. The ssh host keys are collected from each host,
+    combined, and written to /etc/ssh/ssh_known_hosts.
diff --git a/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml b/releasenotes/notes/token-flush-twice-a-day-d4b00a2953a6b383.yaml
new file mode 100644 (file)
index 0000000..70051f6
--- /dev/null
@@ -0,0 +1,7 @@
+---
+fixes:
+  - The token flush cron job has been modified to run hourly instead of once
+    a day, since the daily run was causing issues with larger deployments:
+    the operation would take too long and sometimes even fail because the
+    transaction was so large. Note that this only affects people using the
+    UUID token provider.
diff --git a/releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml b/releasenotes/notes/update-on-rhel-registration-afbef3ead983b08f.yaml
new file mode 100644 (file)
index 0000000..ad1f39c
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Adds a new boolean parameter for RHEL Registration called
+    'UpdateOnRHELRegistration' that, when enabled, will trigger a yum update
+    on the node after the registration process completes.
diff --git a/releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml b/releasenotes/notes/update-plan-environment-4e164b57a801e2cb.yaml
new file mode 100644 (file)
index 0000000..29d32cb
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add name and description fields to plan-environment.yaml
diff --git a/roles_data.yaml b/roles_data.yaml
index eee6bf5..8d3b507 100644 (file)
 # ServicesDefault: (list) optional default list of services to be deployed
 # on the role, defaults to an empty list. Sets the default for the
 # {{role.name}}Services parameter in overcloud.yaml
-
-- name: Controller # the 'primary' role goes first
+#
+# tags: (list) list of tags used by other parts of the deployment process to
+# find the role for a specific type of functionality. Currently a role
+# with both 'primary' and 'controller' is used as the primary role for the
+# deployment process. If no roles have both 'primary' and 'controller', the
+# first role in this file is used as the primary role.
+#
+- name: Controller
   CountDefault: 1
+  tags:
+    - primary
+    - controller
   ServicesDefault:
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CertmongerUser
@@ -82,6 +91,7 @@
     - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::SwiftProxy
+    - OS::TripleO::Services::ExternalSwiftProxy
     - OS::TripleO::Services::SwiftStorage
     - OS::TripleO::Services::SwiftRingBuilder
     - OS::TripleO::Services::Snmp
diff --git a/roles_data_undercloud.yaml b/roles_data_undercloud.yaml
index 8e83071..df2e196 100644 (file)
@@ -1,6 +1,9 @@
-- name: Undercloud # the 'primary' role goes first
+- name: Undercloud
   CountDefault: 1
   disable_constraints: True
+  tags:
+    - primary
+    - controller
   ServicesDefault:
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::MySQL