Merge "Introduce Octavia implementation services"
author Jenkins <jenkins@review.openstack.org>
Tue, 7 Feb 2017 20:57:13 +0000 (20:57 +0000)
committer Gerrit Code Review <review@openstack.org>
Tue, 7 Feb 2017 20:57:13 +0000 (20:57 +0000)
53 files changed:
Gemfile [deleted file]
Rakefile [deleted file]
capabilities-map.yaml
ci/common/net-config-multinode-os-net-config.yaml
ci/common/net-config-multinode.yaml
ci/environments/multinode.yaml
ci/environments/multinode_major_upgrade.yaml
ci/pingtests/tenantvm_floatingip.yaml
deployed-server/deployed-server-bootstrap-rhel.sh [new file with mode: 0644]
deployed-server/deployed-server-bootstrap-rhel.yaml [new file with mode: 0644]
docker/copy-json.py [deleted file]
docker/post.j2.yaml
docker/services/README.rst
docker/services/neutron-ovs-agent.yaml
docker/services/nova-compute.yaml
docker/services/nova-libvirt.yaml
docker/services/services.yaml
environments/cinder-dellps-config.yaml [new file with mode: 0644]
environments/cinder-eqlx-config.yaml [deleted file]
environments/deployed-server-bootstrap-environment-rhel.yaml [new file with mode: 0644]
environments/major-upgrade-all-in-one.yaml
environments/major-upgrade-composable-steps.yaml
environments/neutron-opendaylight-l3.yaml [deleted file]
extraconfig/tasks/yum_update.sh
network/endpoints/build_endpoint_map.py
network/service_net_map.j2.yaml
overcloud-resource-registry-puppet.j2.yaml
overcloud.j2.yaml
puppet/controller-role.yaml
puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml [deleted file]
puppet/major_upgrade_steps.j2.yaml
puppet/post-upgrade.j2.yaml [new file with mode: 0644]
puppet/post.j2.yaml
puppet/puppet-steps.j2 [new file with mode: 0644]
puppet/services/README.rst
puppet/services/aodh-api.yaml
puppet/services/ceilometer-api.yaml
puppet/services/ceph-osd.yaml
puppet/services/ceph-rgw.yaml
puppet/services/cinder-backend-dellps.yaml [new file with mode: 0644]
puppet/services/gnocchi-api.yaml
puppet/services/ironic-api.yaml
puppet/services/ironic-conductor.yaml
puppet/services/keystone.yaml
puppet/services/neutron-ovs-dpdk-agent.yaml
puppet/services/nova-base.yaml
puppet/services/nova-libvirt.yaml
puppet/services/pacemaker.yaml
puppet/services/swift-proxy.yaml
releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml [new file with mode: 0644]
tools/yaml-validate.py
tox.ini

diff --git a/Gemfile b/Gemfile
deleted file mode 100644 (file)
index 302ef41..0000000
--- a/Gemfile
+++ /dev/null
@@ -1,24 +0,0 @@
-source 'https://rubygems.org'
-
-group :development, :test do
-  gem 'puppetlabs_spec_helper', :require => false
-
-  gem 'puppet-lint', '~> 1.1'
-  gem 'puppet-lint-absolute_classname-check'
-  gem 'puppet-lint-absolute_template_path'
-  gem 'puppet-lint-trailing_newline-check'
-
-  # Puppet 4.x related lint checks
-  gem 'puppet-lint-unquoted_string-check'
-  gem 'puppet-lint-leading_zero-check'
-  gem 'puppet-lint-variable_contains_upcase'
-  gem 'puppet-lint-numericvariable'
-end
-
-if puppetversion = ENV['PUPPET_GEM_VERSION']
-  gem 'puppet', puppetversion, :require => false
-else
-  gem 'puppet', :require => false
-end
-
-# vim:ft=ruby
diff --git a/Rakefile b/Rakefile
deleted file mode 100644 (file)
index bca6a6c..0000000
--- a/Rakefile
+++ /dev/null
@@ -1,6 +0,0 @@
-require 'puppetlabs_spec_helper/rake_tasks'
-require 'puppet-lint/tasks/puppet-lint'
-
-PuppetLint.configuration.fail_on_warnings = true
-PuppetLint.configuration.send('disable_80chars')
-PuppetLint.configuration.send('disable_autoloader_layout')
diff --git a/capabilities-map.yaml b/capabilities-map.yaml
index 08c455f..bfdedae 100644 (file)
@@ -426,10 +426,11 @@ topics:
               via puppet
             requires:
               - overcloud-resource-registry-puppet.yaml
-          - file: environments/cinder-eqlx-config.yaml
-            title: Cinder EQLX backend
+          - file: environments/cinder-dellps-config.yaml
+            title: Cinder Dell EMC PS Series backend
             description: >
-              Enables a Cinder EQLX backend, configured via puppet
+              Enables a Cinder Dell EMC PS Series backend,
+              configured via puppet
             requires:
               - overcloud-resource-registry-puppet.yaml
           - file: environments/cinder-iser.yaml
diff --git a/ci/common/net-config-multinode-os-net-config.yaml b/ci/common/net-config-multinode-os-net-config.yaml
index 227c5da..8c50b64 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: 2016-10-14
+heat_template_version: ocata
 
 description: >
   Software Config to drive os-net-config for a simple bridge configured
diff --git a/ci/common/net-config-multinode.yaml b/ci/common/net-config-multinode.yaml
index bf947d3..dc31235 100644 (file)
@@ -47,7 +47,9 @@ resources:
         str_replace:
           template: |
             #!/bin/bash
-            ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name
+            if ! ip addr show dev $bridge_name | grep CONTROLPLANEIP/CONTROLPLANESUBNETCIDR; then
+                ip addr add CONTROLPLANEIP/CONTROLPLANESUBNETCIDR dev $bridge_name
+            fi
           params:
             CONTROLPLANEIP: {get_param: ControlPlaneIp}
             CONTROLPLANESUBNETCIDR: {get_param: ControlPlaneSubnetCidr}
diff --git a/ci/environments/multinode.yaml b/ci/environments/multinode.yaml
index 11243c8..212f6a2 100644 (file)
@@ -45,3 +45,4 @@ parameter_defaults:
     # Required for Centos 7.3 and Qemu 2.6.0
     nova::compute::libvirt::libvirt_cpu_mode: 'none'
   SwiftCeilometerPipelineEnabled: False
+  Debug: True
diff --git a/ci/environments/multinode_major_upgrade.yaml b/ci/environments/multinode_major_upgrade.yaml
index 04a9420..56d04de 100644 (file)
@@ -1,6 +1,6 @@
 resource_registry:
-  OS::TripleO::Controller::Net::SoftwareConfig: ../heat-templates/net-config-multinode.yaml
-  OS::TripleO::Compute::Net::SoftwareConfig: ../heat-templates/net-config-multinode.yaml
+  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
   OS::TripleO::Services::Core: multinode-core.yaml
 
 parameter_defaults:
@@ -45,3 +45,4 @@ parameter_defaults:
     nova::compute::libvirt::libvirt_cpu_mode: 'none'
     heat::rpc_response_timeout: 600
   SwiftCeilometerPipelineEnabled: False
+  Debug: True
diff --git a/ci/pingtests/tenantvm_floatingip.yaml b/ci/pingtests/tenantvm_floatingip.yaml
index 0f31bc1..b910d6c 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: 2013-05-23
+heat_template_version: ocata
 
 description: >
   This template resides in tripleo-ci for Mitaka CI jobs only.
diff --git a/deployed-server/deployed-server-bootstrap-rhel.sh b/deployed-server/deployed-server-bootstrap-rhel.sh
new file mode 100644 (file)
index 0000000..36ff007
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -eux
+
+yum install -y \
+    jq \
+    python-ipaddr \
+    openstack-puppet-modules \
+    os-net-config \
+    openvswitch \
+    python-heat-agent*
+
+ln -s -f /usr/share/openstack-puppet/modules/* /etc/puppet/modules
diff --git a/deployed-server/deployed-server-bootstrap-rhel.yaml b/deployed-server/deployed-server-bootstrap-rhel.yaml
new file mode 100644 (file)
index 0000000..2d2f515
--- /dev/null
@@ -0,0 +1,22 @@
+heat_template_version: ocata
+
+description: 'Deployed Server Bootstrap Config'
+
+parameters:
+
+  server:
+    type: string
+
+resources:
+
+  DeployedServerBootstrapConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: {get_file: deployed-server-bootstrap-rhel.sh}
+
+  DeployedServerBootstrapDeployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      config: {get_resource: DeployedServerBootstrapConfig}
+      server: {get_param: server}
diff --git a/docker/copy-json.py b/docker/copy-json.py
deleted file mode 100644 (file)
index e85ff11..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/python
-import json
-import os
-
-data = {}
-file_perms = '0600'
-libvirt_perms = '0644'
-
-libvirt_config = os.getenv('libvirt_config').split(',')
-nova_config = os.getenv('nova_config').split(',')
-neutron_openvswitch_agent_config = os.getenv('neutron_openvswitch_agent_config').split(',')
-
-# Command, Config_files, Owner, Perms
-services = {
-    'nova-libvirt': [
-        '/usr/sbin/libvirtd',
-        libvirt_config,
-        'root',
-        libvirt_perms],
-    'nova-compute': [
-        '/usr/bin/nova-compute',
-        nova_config,
-        'nova',
-        file_perms],
-    'neutron-openvswitch-agent': [
-        '/usr/bin/neutron-openvswitch-agent',
-        neutron_openvswitch_agent_config,
-        'neutron',
-        file_perms],
-    'ovs-vswitchd': [
-        '/usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock -vconsole:emer -vsyslog:err -vfile:info --mlockall --log-file=/var/log/kolla/openvswitch/ovs-vswitchd.log'],
-    'ovsdb-server': [
-        '/usr/sbin/ovsdb-server /etc/openvswitch/conf.db -vconsole:emer -vsyslog:err -vfile:info --remote=punix:/run/openvswitch/db.sock --remote=ptcp:6640:127.0.0.1 --log-file=/var/log/kolla/openvswitch/ovsdb-server.log']
-}
-
-
-def build_config_files(config, owner, perms):
-    config_source = '/var/lib/kolla/config_files/'
-    config_files_dict = {}
-    source = os.path.basename(config)
-    dest = config
-    config_files_dict.update({'source': config_source + source,
-                              'dest': dest,
-                              'owner': owner,
-                              'perm': perms})
-    return config_files_dict
-
-
-for service in services:
-    if service != 'ovs-vswitchd' and service != 'ovsdb-server':
-        command = services.get(service)[0]
-        config_files = services.get(service)[1]
-        owner = services.get(service)[2]
-        perms = services.get(service)[3]
-        config_files_list = []
-        for config_file in config_files:
-            if service == 'nova-libvirt':
-                command = command + ' --config ' + config_file
-            else:
-                command = command + ' --config-file ' + config_file
-            data['command'] = command
-            config_files_dict = build_config_files(config_file, owner, perms)
-            config_files_list.append(config_files_dict)
-        data['config_files'] = config_files_list
-    else:
-        data['command'] = services.get(service)[0]
-        data['config_files'] = []
-
-    json_config_dir = '/var/lib/etc-data/json-config/'
-    with open(json_config_dir + service + '.json', 'w') as json_file:
-        json.dump(data, json_file, sort_keys=True, indent=4,
-                  separators=(',', ': '))
diff --git a/docker/post.j2.yaml b/docker/post.j2.yaml
index 1ba96e2..865c74e 100644 (file)
@@ -20,23 +20,6 @@ parameters:
       Setting this to a unique value will re-run any deployment tasks which
       perform configuration on a Heat stack-update.
 
-  DockerNamespace:
-    description: namespace
-    default: 'tripleoupstream'
-    type: string
-
-  LibvirtConfig:
-    type: string
-    default: "/etc/libvirt/libvirtd.conf"
-
-  NovaConfig:
-    type: string
-    default: "/etc/nova/nova.conf,/etc/nova/rootwrap.conf"
-
-  NeutronOpenvswitchAgentConfig:
-    type: string
-    default: "/etc/neutron/neutron.conf,/etc/neutron/plugins/ml2/openvswitch_agent.ini"
-
 resources:
 
 {% for role in roles %}
@@ -174,31 +157,24 @@ resources:
       servers: {get_param: [servers, {{role.name}}]}
       config: {get_resource: CopyEtcConfig}
 
-  CopyJsonConfig:
-    type: OS::Heat::SoftwareConfig
+  {{role.name}}KollaJsonConfig:
+    type: OS::Heat::StructuredConfig
+    depends_on: CopyEtcDeployment
     properties:
-      group: script
-      inputs:
-      - name: libvirt_config
-      - name: nova_config
-      - name: neutron_openvswitch_agent_config
-      config: {get_file: ../docker/copy-json.py}
+      group: json-file
+      config:
+        {get_param: [role_data, {{role.name}}, kolla_config]}
 
-  CopyJsonDeployment:
+  {{role.name}}KollaJsonDeployment:
     type: OS::Heat::SoftwareDeploymentGroup
-    depends_on: CopyEtcDeployment
     properties:
-      name: CopyJsonDeployment
-      config: {get_resource: CopyJsonConfig}
+      name: {{role.name}}KollaJsonDeployment
+      config: {get_resource: {{role.name}}KollaJsonConfig}
       servers: {get_param: [servers, {{role.name}}]}
-      input_values:
-        libvirt_config: {get_param: LibvirtConfig}
-        nova_config: {get_param: NovaConfig}
-        neutron_openvswitch_agent_config: {get_param: NeutronOpenvswitchAgentConfig}
 
   {{role.name}}ContainersConfig_Step1:
     type: OS::Heat::StructuredConfig
-    depends_on: CopyJsonDeployment
+    depends_on: {{role.name}}KollaJsonDeployment
     properties:
       group: docker-cmd
       config:
@@ -206,7 +182,7 @@ resources:
 
   {{role.name}}ContainersConfig_Step2:
     type: OS::Heat::StructuredConfig
-    depends_on: CopyJsonDeployment
+    depends_on: {{role.name}}KollaJsonDeployment
     properties:
       group: docker-cmd
       config:
diff --git a/docker/services/README.rst b/docker/services/README.rst
index 8d1f9e8..60719bf 100644 (file)
@@ -32,6 +32,11 @@ are re-asserted when applying latter ones.
  * config_settings: Custom hiera settings for this service. These are
    used to generate configs.
 
+ * kolla_config: Contains YAML that represents how to map config files
+   into the kolla container. This config file is typically mapped into
+   the container itself at the /var/lib/kolla/config_files/config.json
+   location and drives how kolla's external config mechanisms work.
+
  * step_config: A puppet manifest that is used to step through the deployment
    sequence. Each sequence is given a "step" (via hiera('step') that provides
    information for when puppet classes should activate themselves.
diff --git a/docker/services/neutron-ovs-agent.yaml b/docker/services/neutron-ovs-agent.yaml
index 1c9e60d..0a061f6 100644 (file)
@@ -43,6 +43,22 @@ outputs:
       config_settings: {get_attr: [NeutronOvsAgentBase, role_data, config_settings]}
       step_config: {get_attr: [NeutronOvsAgentBase, role_data, step_config]}
       puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
+      kolla_config:
+        /var/lib/etc-data/json-config/neutron-openvswitch-agent.json:
+           command: /usr/bin/neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+           config_files:
+           - dest: /etc/neutron/neutron.conf
+             owner: neutron
+             perm: '0600'
+             source: /var/lib/kolla/config_files/neutron.conf
+           - dest: /etc/neutron/plugins/ml2/openvswitch_agent.ini
+             owner: neutron
+             perm: '0600'
+             source: /var/lib/kolla/config_files/openvswitch_agent.ini
+           - dest: /etc/neutron/plugins/ml2/ml2_conf.ini
+             owner: neutron
+             perm: '0600'
+             source: /var/lib/kolla/config_files/ml2_conf.ini
       docker_config:
         step_1:
           neutronovsagent:
diff --git a/docker/services/nova-compute.yaml b/docker/services/nova-compute.yaml
index c695c94..e765609 100644 (file)
@@ -41,6 +41,18 @@ outputs:
       config_settings: {get_attr: [NovaComputeBase, role_data, config_settings]}
       step_config: {get_attr: [NovaComputeBase, role_data, step_config]}
       puppet_tags: nova_config,nova_paste_api_ini
+      kolla_config:
+        /var/lib/etc-data/json-config/nova-compute.json:
+           command: /usr/bin/nova-compute --config-file /etc/nova/nova.conf --config-file /etc/nova/rootwrap.conf
+           config_files:
+           - dest: /etc/nova/nova.conf
+             owner: nova
+             perm: '0600'
+             source: /var/lib/kolla/config_files/nova.conf
+           - dest: /etc/nova/rootwrap.conf
+             owner: nova
+             perm: '0600'
+             source: /var/lib/kolla/config_files/rootwrap.conf
       docker_config:
         step_1:
           novacompute:
diff --git a/docker/services/nova-libvirt.yaml b/docker/services/nova-libvirt.yaml
index a40a21f..004d624 100644 (file)
@@ -41,6 +41,14 @@ outputs:
       config_settings: {get_attr: [NovaLibvirtBase, role_data, config_settings]}
       step_config: {get_attr: [NovaLibvirtBase, role_data, step_config]}
       puppet_tags: nova_config
+      kolla_config:
+        /var/lib/etc-data/json-config/nova-libvirt.json:
+           command: /usr/sbin/libvirtd --config /etc/libvirt/libvirtd.conf
+           config_files:
+           - dest: /etc/libvirt/libvirtd.conf
+             owner: root
+             perm: '0644'
+             source: /var/lib/kolla/config_files/libvirtd.conf
       docker_config:
         step_1:
           nova_libvirt:
diff --git a/docker/services/services.yaml b/docker/services/services.yaml
index 3d51eb1..8c31107 100644 (file)
@@ -68,6 +68,8 @@ outputs:
       step_config:
         {get_attr: [PuppetServices, role_data, step_config]}
       puppet_tags: {list_join: [",", {get_attr: [ServiceChain, role_data, puppet_tags]}]}
+      kolla_config:
+        map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
       docker_config:
         step_1: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_1]}}
         step_2: {map_merge: {get_attr: [ServiceChain, role_data, docker_config, step_2]}}
diff --git a/environments/cinder-dellps-config.yaml b/environments/cinder-dellps-config.yaml
new file mode 100644 (file)
index 0000000..eefd0fd
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# A Heat environment file which can be used to enable a
+# Cinder Dell EMC PS Series backend, configured via puppet
+resource_registry:
+  OS::TripleO::Services::CinderBackendDellPs: ../puppet/services/cinder-backend-dellps.yaml
+
+parameter_defaults:
+  CinderEnableDellPsBackend: true
+  CinderDellPsBackendName: 'tripleo_dellps'
+  CinderDellPsSanIp: ''
+  CinderDellPsSanLogin: ''
+  CinderDellPsSanPassword: ''
+  CinderDellPsSanThinProvision: true
+  CinderDellPsGroupname: 'group-0'
+  CinderDellPsPool: 'default'
+  CinderDellPsChapLogin: ''
+  CinderDellPsChapPassword: ''
+  CinderDellPsUseChap: false
diff --git a/environments/cinder-eqlx-config.yaml b/environments/cinder-eqlx-config.yaml
deleted file mode 100644 (file)
index ca2c5e5..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-# A Heat environment file which can be used to enable a
-# a Cinder  eqlx backen, configured via puppet
-resource_registry:
-  OS::TripleO::ControllerExtraConfigPre: ../puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
-
-parameter_defaults:
-  CinderEnableEqlxBackend: true
-  CinderEqlxBackendName: 'tripleo_eqlx'
-  CinderEqlxSanIp: ''
-  CinderEqlxSanLogin: ''
-  CinderEqlxSanPassword: ''
-  CinderEqlxSanThinProvision: true
-  CinderEqlxGroupname: 'group-0'
-  CinderEqlxPool: 'default'
-  CinderEqlxChapLogin: ''
-  CinderEqlxChapPassword: ''
-  CinderEqlxUseChap: false
diff --git a/environments/deployed-server-bootstrap-environment-rhel.yaml b/environments/deployed-server-bootstrap-environment-rhel.yaml
new file mode 100644 (file)
index 0000000..f614a91
--- /dev/null
@@ -0,0 +1,7 @@
+# An environment that can be used with the deployed-server.yaml template to do
+# initial bootstrapping of the deployed servers.
+resource_registry:
+  OS::TripleO::DeployedServer::Bootstrap: ../deployed-server/deployed-server-bootstrap-rhel.yaml
+
+parameter_defaults:
+  EnablePackageInstall: True
diff --git a/environments/major-upgrade-all-in-one.yaml b/environments/major-upgrade-all-in-one.yaml
index 69d72ed..4283b21 100644 (file)
@@ -1,8 +1,2 @@
-# We run the upgrade steps without disabling the OS::TripleO::PostDeploySteps
-# this means you can do a major upgrade in one pass, which may be useful
-# e.g for all-in-one deployments where we can upgrade the compute services
-# at the same time as the controlplane
-# Note that it will be necessary to pass a mapping of OS::Heat::None again for
-# any subsequent updates, or the upgrade steps will run again.
 resource_registry:
-  OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml
+  OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
diff --git a/environments/major-upgrade-composable-steps.yaml b/environments/major-upgrade-composable-steps.yaml
index 7e10014..4283b21 100644 (file)
@@ -1,3 +1,2 @@
 resource_registry:
-  OS::TripleO::UpgradeSteps: ../puppet/major_upgrade_steps.yaml
-  OS::TripleO::PostDeploySteps: OS::Heat::None
+  OS::TripleO::PostDeploySteps: ../puppet/major_upgrade_steps.yaml
diff --git a/environments/neutron-opendaylight-l3.yaml b/environments/neutron-opendaylight-l3.yaml
deleted file mode 100644 (file)
index 6d5c740..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-# A Heat environment that can be used to deploy OpenDaylight with L3 DVR
-resource_registry:
-  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
-  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
-  OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
-  OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
-  OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
-  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
-
-parameter_defaults:
-  NeutronEnableForceMetadata: true
-  NeutronMechanismDrivers: 'opendaylight_v2'
-  NeutronServicePlugins: 'odl-router_v2'
-  OpenDaylightEnableL3: "'yes'"
diff --git a/extraconfig/tasks/yum_update.sh b/extraconfig/tasks/yum_update.sh
index edcc9e8..c66dd01 100755 (executable)
 echo "Started yum_update.sh on server $deploy_server_id at `date`"
 echo -n "false" > $heat_outputs_path.update_managed_packages
 
+if [ -f /.dockerenv ]; then
+    echo "Not running due to running inside a container"
+    exit 0
+fi
+
 if [[ -z "$update_identifier" ]]; then
     echo "Not running due to unset update_identifier"
     exit 0
diff --git a/network/endpoints/build_endpoint_map.py b/network/endpoints/build_endpoint_map.py
index 7e8088b..990cbab 100755 (executable)
@@ -280,8 +280,9 @@ def main():
     try:
         if options.check:
             if not check_up_to_date(options.output_file, options.input_file):
-                print('EndpointMap template does not match input data',
-                      file=sys.stderr)
+                print('EndpointMap template does not match input data. Please '
+                      'run the build_endpoint_map.py tool to update the '
+                      'template.', file=sys.stderr)
                 sys.exit(2)
         else:
             build_endpoint_map(options.output_file, options.input_file)
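Together with the tox.ini change at the end of this diff, the pep8 job now runs build_endpoint_map.py --check, so a stale endpoint_map.yaml fails early with the clearer message above. The snippet below is only a generic sketch of that kind of check (regenerate, compare with the committed file, exit non-zero on mismatch); the real logic is check_up_to_date() in build_endpoint_map.py, and the function names here are illustrative.

    # Generic sketch of a "generated file is up to date" check; illustrative
    # names only, not the build_endpoint_map.py implementation.
    import sys

    def is_up_to_date(generated_path, regenerate):
        # regenerate() returns the freshly generated content as a string
        with open(generated_path) as f:
            committed = f.read()
        return committed == regenerate()

    def check_or_exit(generated_path, regenerate, tool_name):
        if not is_up_to_date(generated_path, regenerate):
            print('%s does not match input data. Please run %s to update it.'
                  % (generated_path, tool_name), file=sys.stderr)
            sys.exit(2)
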
diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml
index 390b18b..b2562c7 100644 (file)
@@ -49,6 +49,7 @@ parameters:
       NovaPlacementNetwork: internal_api
       NovaMetadataNetwork: internal_api
       NovaVncProxyNetwork: internal_api
+      NovaLibvirtNetwork: internal_api
       Ec2ApiNetwork: internal_api
       Ec2ApiMetadataNetwork: internal_api
       SwiftStorageNetwork: storage_mgmt
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 1b9646f..471a0ff 100644 (file)
@@ -2,6 +2,7 @@ resource_registry:
 
   OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
   OS::TripleO::PostDeploySteps: puppet/post.yaml
+  OS::TripleO::PostUpgradeSteps: puppet/post.yaml
   OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
   OS::TripleO::Hosts::SoftwareConfig: hosts-config.yaml
   OS::TripleO::DefaultPasswords: default_passwords.yaml
@@ -110,7 +111,6 @@ resource_registry:
 
   # Upgrade resources
   OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml
-  OS::TripleO::UpgradeSteps: OS::Heat::None
 
   # services
   OS::TripleO::Services: puppet/services/services.yaml
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index f93c19a..dea748e 100644 (file)
@@ -598,9 +598,9 @@ resources:
         {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
 {% endfor %}
 
-  # Upgrade steps for all roles
-  AllNodesUpgradeSteps:
-    type: OS::TripleO::UpgradeSteps
+  # Post deployment steps for all roles
+  AllNodesDeploySteps:
+    type: OS::TripleO::PostDeploySteps
     depends_on:
 {% for role in roles %}
       - {{role.name}}AllNodesDeployment
@@ -615,20 +615,6 @@ resources:
         {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
 {% endfor %}
 
-  # Post deployment steps for all roles
-  AllNodesDeploySteps:
-    type: OS::TripleO::PostDeploySteps
-    depends_on: AllNodesUpgradeSteps
-    properties:
-      servers:
-{% for role in roles %}
-        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
-{% endfor %}
-      role_data:
-{% for role in roles %}
-        {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
-{% endfor %}
-
 outputs:
   ManagedEndpoints:
     description: Asserts that the keystone endpoints have been provisioned.
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index 9e35af5..854e51e 100644 (file)
@@ -460,7 +460,6 @@ resources:
           - '"%{::osfamily}"'
           - cinder_dellsc_data # Optionally provided by ControllerExtraConfigPre
           - cinder_netapp_data # Optionally provided by ControllerExtraConfigPre
-          - cinder_eqlx_data # Optionally provided by ControllerExtraConfigPre
           - neutron_bigswitch_data # Optionally provided by ControllerExtraConfigPre
           - neutron_cisco_data # Optionally provided by ControllerExtraConfigPre
           - cisco_n1kv_data # Optionally provided by ControllerExtraConfigPre
diff --git a/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml b/puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
index cb8d498..9b900bc 100644 (file)
@@ -245,7 +245,9 @@ resources:
         for map_name in mappings:
           f_name = '/root/' + map_name
           map_data = os.getenv(map_name, "Nada")
-          with open(f_name, 'a') as f:
+          with os.fdopen(os.open(f_name,
+                                 os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o644),
+                         'w') as f:
             f.write(map_data)
           if map_data is not "Nada":
             if map_name is not 'nexus_config':
@@ -260,7 +262,9 @@ resources:
             for mac in vals[1:]:
               mac2host[mac.lower()] = vals[0]
 
-        with open('/root/mac2host', 'a') as f:
+        with os.fdopen(os.open('/root/mac2host',
+                               os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o644),
+                       'w') as f:
           f.write(str(mac2host))
 
         # now we have mac to host, map host to switchport in hieradata
diff --git a/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml b/puppet/extraconfig/pre_deploy/controller/cinder-eqlx.yaml
deleted file mode 100644 (file)
index 3050904..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-heat_template_version: ocata
-
-description: Configure hieradata for Cinder Eqlx configuration
-
-parameters:
-  server:
-    description: ID of the controller node to apply this config to
-    type: string
-
-  # Config specific parameters, to be provided via parameter_defaults
-  CinderEnableEqlxBackend:
-    type: boolean
-    default: true
-  CinderEqlxBackendName:
-    type: string
-    default: 'tripleo_eqlx'
-  CinderEqlxSanIp:
-    type: string
-  CinderEqlxSanLogin:
-    type: string
-  CinderEqlxSanPassword:
-    type: string
-    hidden: true
-  CinderEqlxSanThinProvision:
-    type: boolean
-    default: true
-  CinderEqlxGroupname:
-    type: string
-    default: 'group-0'
-  CinderEqlxPool:
-    type: string
-    default: 'default'
-  CinderEqlxChapLogin:
-    type: string
-    default: ''
-  CinderEqlxChapPassword:
-    type: string
-    default: ''
-  CinderEqlxUseChap:
-    type: boolean
-    default: false
-
-resources:
-  CinderEqlxConfig:
-    type: OS::Heat::StructuredConfig
-    properties:
-      group: os-apply-config
-      config:
-        hiera:
-          datafiles:
-            cinder_eqlx_data:
-              mapped_data:
-                tripleo::profile::base::cinder::volume::cinder_enable_eqlx_backend: {get_input: EnableEqlxBackend}
-                cinder::backend::eqlx::volume_backend_name: {get_input: EqlxBackendName}
-                cinder::backend::eqlx::san_ip: {get_input: EqlxSanIp}
-                cinder::backend::eqlx::san_login: {get_input: EqlxSanLogin}
-                cinder::backend::eqlx::san_password: {get_input: EqlxSanPassword}
-                cinder::backend::eqlx::san_thin_provision: {get_input: EqlxSanThinProvision}
-                cinder::backend::eqlx::eqlx_group_name: {get_input: EqlxGroupname}
-                cinder::backend::eqlx::eqlx_pool: {get_input: EqlxPool}
-                cinder::backend::eqlx::eqlx_use_chap: {get_input: EqlxUseChap}
-                cinder::backend::eqlx::eqlx_chap_login: {get_input: EqlxChapLogin}
-                cinder::backend::eqlx::eqlx_chap_password: {get_input: EqlxChapPassword}
-
-  CinderEqlxDeployment:
-    type: OS::Heat::StructuredDeployment
-    properties:
-      config: {get_resource: CinderEqlxConfig}
-      server: {get_param: server}
-      input_values:
-        EnableEqlxBackend: {get_param: CinderEnableEqlxBackend}
-        EqlxBackendName: {get_param: CinderEqlxBackendName}
-        EqlxSanIp: {get_param: CinderEqlxSanIp}
-        EqlxSanLogin: {get_param: CinderEqlxSanLogin}
-        EqlxSanPassword: {get_param: CinderEqlxSanPassword}
-        EqlxSanThinProvision: {get_param: CinderEqlxSanThinProvision}
-        EqlxGroupname: {get_param: CinderEqlxGroupname}
-        EqlxPool: {get_param: CinderEqlxPool}
-        EqlxUseChap: {get_param: CinderEqlxUseChap}
-        EqlxChapLogin: {get_param: CinderEqlxChapLogin}
-        EqlxChapPassword: {get_param: CinderEqlxChapPassword}
-
-outputs:
-  deploy_stdout:
-    description: Deployment reference, used to trigger puppet apply on changes
-    value: {get_attr: [CinderEqlxDeployment, deploy_stdout]}
diff --git a/puppet/major_upgrade_steps.j2.yaml b/puppet/major_upgrade_steps.j2.yaml
index eae8599..b879faf 100644 (file)
@@ -1,4 +1,6 @@
-{% set upgrade_steps_max = 8 -%}
+{% set enabled_roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% set batch_upgrade_steps_max = 3 -%}
+{% set upgrade_steps_max = 6 -%}
 heat_template_version: ocata
 description: 'Upgrade steps for all roles'
 
@@ -18,54 +20,53 @@ parameters:
 
 conditions:
   # Conditions to disable any steps where the task list is empty
-{% for step in range(0, upgrade_steps_max) %}
-  {% for role in roles %}
-  UpgradeBatchConfig_Step{{step}}Enabled:
+{%- for role in roles %}
+  {{role.name}}UpgradeBatchConfigEnabled:
     not:
       equals:
         - {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
         - []
-  UpgradeConfig_Step{{step}}Enabled:
+  {{role.name}}UpgradeConfigEnabled:
     not:
       equals:
         - {get_param: [role_data, {{role.name}}, upgrade_tasks]}
         - []
-  {% endfor %}
-{% endfor %}
+{%- endfor %}
 
 resources:
 
 # Upgrade Steps for all roles, batched updates
-# FIXME(shardy): would be nice to make the number of steps configurable
-{% for step in range(0, upgrade_steps_max) %}
-  {% for role in roles %}
-  # Step {{step}} resources
+# The UpgradeConfig resources could actually be created without
+# serialization, but the event output is easier to follow if we
+# do, and there should be minimal performance hit (creating the
+# config is cheap compared to the time to apply the deployment).
+{% for step in range(0, batch_upgrade_steps_max) %}
+  # Batch config resources step {{step}}
+  {%- for role in roles %}
   {{role.name}}UpgradeBatchConfig_Step{{step}}:
     type: OS::TripleO::UpgradeConfig
-    condition: UpgradeBatchConfig_Step{{step}}Enabled
-  # The UpgradeConfig resources could actually be created without
-  # serialization, but the event output is easier to follow if we
-  # do, and there should be minimal performance hit (creating the
-  # config is cheap compared to the time to apply the deployment).
-  {% if step > 0 %}
+  {%- if step > 0 %}
     depends_on:
-      {% for dep in roles %}
+      {%- for dep in enabled_roles %}
       - {{dep.name}}UpgradeBatch_Step{{step -1}}
-      {% endfor %}
-  {% endif %}
+      {%- endfor %}
+  {%- endif %}
     properties:
       UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_batch_tasks]}
       step: {{step}}
+  {%- endfor %}
 
+  # Batch deployment resources for step {{step}} (only for enabled roles)
+  {%- for role in enabled_roles %}
   {{role.name}}UpgradeBatch_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-    condition: UpgradeBatchConfig_Step{{step}}Enabled
-  {% if step > 0 %}
+    type: OS::Heat::SoftwareDeploymentGroup
+    condition: {{role.name}}UpgradeBatchConfigEnabled
+  {%- if step > 0 %}
     depends_on:
-      {% for dep in roles %}
+      {%- for dep in enabled_roles %}
       - {{dep.name}}UpgradeBatch_Step{{step -1}}
-      {% endfor %}
-  {% endif %}
+      {%- endfor %}
+  {%- endif %}
     update_policy:
       batch_create:
         max_batch_size: {{role.upgrade_batch_size|default(1)}}
@@ -78,52 +79,49 @@ resources:
       input_values:
         role: {{role.name}}
         update_identifier: {get_param: UpdateIdentifier}
-  {% endfor %}
-{% endfor %}
+  {%- endfor %}
+{%- endfor %}
 
 # Upgrade Steps for all roles
-# FIXME(shardy): would be nice to make the number of steps configurable
-{% for step in range(0, upgrade_steps_max) %}
-  {% for role in roles %}
-  # Step {{step}} resources
+{%- for step in range(0, upgrade_steps_max) %}
+  # Config resources for step {{step}}
+  {%- for role in roles %}
   {{role.name}}UpgradeConfig_Step{{step}}:
     type: OS::TripleO::UpgradeConfig
-    condition: UpgradeConfig_Step{{step}}Enabled
   # The UpgradeConfig resources could actually be created without
   # serialization, but the event output is easier to follow if we
   # do, and there should be minimal performance hit (creating the
   # config is cheap compared to the time to apply the deployment).
     depends_on:
-  {% if step > 0 %}
-      {% for dep in roles %}
-        {% if not dep.disable_upgrade_deployment|default(false) %}
+  {%- if step > 0 %}
+      {%- for dep in enabled_roles %}
       - {{dep.name}}Upgrade_Step{{step -1}}
-        {% endif %}
-      {% endfor %}
-  {% else %}
-      {% for dep in roles %}
-      - {{dep.name}}UpgradeBatch_Step{{upgrade_steps_max -1}}
-      {% endfor %}
-  {% endif %}
+      {%- endfor %}
+  {%- else %}
+      {%- for dep in enabled_roles %}
+      - {{dep.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+      {%- endfor %}
+  {%- endif %}
     properties:
       UpgradeStepConfig: {get_param: [role_data, {{role.name}}, upgrade_tasks]}
       step: {{step}}
-  {% if not role.disable_upgrade_deployment|default(false) %}
+  {%- endfor %}
+
+  # Deployment resources for step {{step}} (only for enabled roles)
+  {%- for role in enabled_roles %}
   {{role.name}}Upgrade_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-    condition: UpgradeConfig_Step{{step}}Enabled
+    type: OS::Heat::SoftwareDeploymentGroup
+    condition: {{role.name}}UpgradeConfigEnabled
     depends_on:
-  {% if step > 0 %}
-      {% for dep in roles %}
-        {% if not dep.disable_upgrade_deployment|default(false) %}
+  {%- if step > 0 %}
+      {%- for dep in enabled_roles %}
       - {{dep.name}}Upgrade_Step{{step -1}}
-        {% endif %}
-      {% endfor %}
-  {% else %}
-      {% for dep in roles %}
-      - {{dep.name}}UpgradeBatch_Step{{upgrade_steps_max -1}}
-      {% endfor %}
-  {% endif %}
+      {%- endfor %}
+  {%- else %}
+      {%- for dep in enabled_roles %}
+      - {{dep.name}}UpgradeBatch_Step{{batch_upgrade_steps_max -1}}
+      {%- endfor %}
+  {%- endif %}
     properties:
       name: {{role.name}}Upgrade_Step{{step}}
       servers: {get_param: [servers, {{role.name}}]}
@@ -131,9 +129,21 @@ resources:
       input_values:
         role: {{role.name}}
         update_identifier: {get_param: UpdateIdentifier}
-  {% endif %}
-  {% endfor %}
-{% endfor %}
+  {%- endfor %}
+{%- endfor %}
+
+  # Post upgrade deployment steps for all roles
+  # This runs the normal configuration (e.g puppet) steps unless upgrade
+  # is disabled for the role
+  AllNodesPostUpgradeSteps:
+    type: OS::TripleO::PostUpgradeSteps
+    depends_on:
+{%- for dep in enabled_roles %}
+      - {{dep.name}}Upgrade_Step{{upgrade_steps_max - 1}}
+{%- endfor %}
+    properties:
+      servers: {get_param: servers}
+      role_data: {get_param: role_data}
 
 outputs:
   # Output the config for each role, just use Step1 as the config should be
diff --git a/puppet/post-upgrade.j2.yaml b/puppet/post-upgrade.j2.yaml
new file mode 100644 (file)
index 0000000..b84039d
--- /dev/null
@@ -0,0 +1,27 @@
+heat_template_version: ocata
+
+description: >
+  Post-upgrade configuration steps via puppet for all roles
+  where upgrade is not disabled as defined in ../roles_data.yaml
+
+parameters:
+  servers:
+    type: json
+    description: Mapping of Role name e.g Controller to a list of servers
+
+  role_data:
+    type: json
+    description: Mapping of Role name e.g Controller to the per-role data
+
+  DeployIdentifier:
+    default: ''
+    type: string
+    description: >
+      Setting this to a unique value will re-run any deployment tasks which
+      perform configuration on a Heat stack-update.
+
+resources:
+# Note the include here is the same as post.j2.yaml but the data used at
+# the time of rendering is different if any roles disable upgrades
+{% set roles = roles|rejectattr('disable_upgrade_deployment')|list -%}
+{% include 'puppet-steps.j2' %}
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
index 83c3286..39155c3 100644 (file)
@@ -21,92 +21,4 @@ parameters:
       perform configuration on a Heat stack-update.
 
 resources:
-  # Post deployment steps for all roles
-  # A single config is re-applied with an incrementing step number
-{% for role in roles %}
-  # {{role.name}} Role post deploy steps
-  {{role.name}}ArtifactsConfig:
-    type: deploy-artifacts.yaml
-
-  {{role.name}}ArtifactsDeploy:
-    type: OS::Heat::StructuredDeployments
-    properties:
-      servers:  {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}ArtifactsConfig}
-
-  {{role.name}}PreConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PreConfig
-    properties:
-      servers: {get_param: [servers, {{role.name}}]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  {{role.name}}Config:
-    type: OS::TripleO::{{role.name}}Config
-    properties:
-      StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
-
-  {% if role.name == 'Controller' %}
-  ControllerPrePuppet:
-    type: OS::TripleO::Tasks::ControllerPrePuppet
-    properties:
-      servers: {get_param: [servers, Controller]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-  {% endif %}
-
-  # Step through a series of configuration steps
-{% for step in range(1, 6) %}
-  {{role.name}}Deployment_Step{{step}}:
-    type: OS::Heat::StructuredDeploymentGroup
-  {% if step == 1 %}
-    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
-  {% else %}
-    depends_on:
-    {% for dep in roles %}
-      - {{dep.name}}Deployment_Step{{step -1}}
-    {% endfor %}
-  {% endif %}
-    properties:
-      name: {{role.name}}Deployment_Step{{step}}
-      servers: {get_param: [servers, {{role.name}}]}
-      config: {get_resource: {{role.name}}Config}
-      input_values:
-        step: {{step}}
-        update_identifier: {get_param: DeployIdentifier}
-{% endfor %}
-
-  {{role.name}}PostConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PostConfig
-    depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}Deployment_Step5
-  {% endfor %}
-    properties:
-      servers:  {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  {{role.name}}ExtraConfigPost:
-    depends_on:
-  {% for dep in roles %}
-      - {{dep.name}}PostConfig
-  {% endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-        servers: {get_param: [servers, {{role.name}}]}
-
-  {% if role.name == 'Controller' %}
-  ControllerPostPuppet:
-    depends_on:
-      - ControllerExtraConfigPost
-    type: OS::TripleO::Tasks::ControllerPostPuppet
-    properties:
-      servers: {get_param: [servers, Controller]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-  {% endif %}
-
-{% endfor %}
+{% include 'puppet-steps.j2' %}
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
new file mode 100644 (file)
index 0000000..c3b54cc
--- /dev/null
@@ -0,0 +1,88 @@
+  # Post deployment steps for all roles
+  # A single config is re-applied with an incrementing step number
+{% for role in roles %}
+  # {{role.name}} Role post-deploy steps
+  {{role.name}}ArtifactsConfig:
+    type: deploy-artifacts.yaml
+
+  {{role.name}}ArtifactsDeploy:
+    type: OS::Heat::StructuredDeployments
+    properties:
+      servers:  {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}ArtifactsConfig}
+
+  {{role.name}}PreConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PreConfig
+    properties:
+      servers: {get_param: [servers, {{role.name}}]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}Config:
+    type: OS::TripleO::{{role.name}}Config
+    properties:
+      StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
+
+  {% if role.name == 'Controller' %}
+  ControllerPrePuppet:
+    type: OS::TripleO::Tasks::ControllerPrePuppet
+    properties:
+      servers: {get_param: [servers, Controller]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+  {% endif %}
+
+  # Step through a series of configuration steps
+{% for step in range(1, 6) %}
+  {{role.name}}Deployment_Step{{step}}:
+    type: OS::Heat::StructuredDeploymentGroup
+  {% if step == 1 %}
+    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+  {% else %}
+    depends_on:
+    {% for dep in roles %}
+      - {{dep.name}}Deployment_Step{{step -1}}
+    {% endfor %}
+  {% endif %}
+    properties:
+      name: {{role.name}}Deployment_Step{{step}}
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}Config}
+      input_values:
+        step: {{step}}
+        update_identifier: {get_param: DeployIdentifier}
+{% endfor %}
+
+  {{role.name}}PostConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PostConfig
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step5
+  {% endfor %}
+    properties:
+      servers:  {get_param: servers}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
+  # Note, this should come last, so use depends_on to ensure
+  # this is created after any other resources.
+  {{role.name}}ExtraConfigPost:
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}PostConfig
+  {% endfor %}
+    type: OS::TripleO::NodeExtraConfigPost
+    properties:
+        servers: {get_param: [servers, {{role.name}}]}
+
+  {% if role.name == 'Controller' %}
+  ControllerPostPuppet:
+    depends_on:
+      - ControllerExtraConfigPost
+    type: OS::TripleO::Tasks::ControllerPostPuppet
+    properties:
+      servers: {get_param: [servers, Controller]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+  {% endif %}
+{% endfor %}
diff --git a/puppet/services/README.rst b/puppet/services/README.rst
index 34cb350..9c2d8c5 100644 (file)
@@ -57,10 +57,14 @@ is a list of ansible tasks to be performed during the upgrade process.
 
 Similar to the step_config, we allow a series of steps for the per-service
 upgrade sequence, defined as ansible tasks with a tag e.g "step1" for the first
-step, "step2" for the second, etc. Note that each step is performed in batches,
-then we move on to the next step which is also performed in batches (we don't
-perform all steps on one node, then move on to the next one which means you
-can sequence rolling upgrades of dependent services via the step value).
+step, "step2" for the second, etc (currently only two steps are supported, but
+more may be added when required as additional services get converted to batched
+upgrades).
+
+Note that each step is performed in batches, then we move on to the next step
+which is also performed in batches (we don't perform all steps on one node,
+then move on to the next one which means you can sequence rolling upgrades of
+dependent services via the step value).
 
 The tasks performed at each step is service specific, but note that all batch
 upgrade steps are performed before the `upgrade_tasks` described below.  This
@@ -93,9 +97,9 @@ step, "step2" for the second, etc.
 
    5) Perform any migration tasks, e.g DB sync commands
 
-   6) Start control-plane services
-
-   7) Any additional online migration tasks (e.g data migrations)
+Note that the services are not started in the upgrade tasks - we instead re-run
+puppet which does any reconfiguration required for the new version, then starts
+the services.
 
 Nova Server Metadata Settings
 -----------------------------
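The batching behaviour described above (every upgrade_batch_tasks step finishes across all roles, applied in batches of upgrade_batch_size nodes, before the next step starts) is what the reworked major_upgrade_steps.j2.yaml encodes with its depends_on chains and max_batch_size. A minimal Python sketch of that ordering only, with role names, node lists and batch size invented for the illustration:

    # Minimal sketch of the rolling-upgrade ordering: each batch step is
    # completed across all roles, in node batches, before the next step runs.
    def chunks(seq, size):
        for i in range(0, len(seq), size):
            yield seq[i:i + size]

    roles = {
        'Controller': ['ctrl-0', 'ctrl-1', 'ctrl-2'],
        'Compute': ['cmp-0', 'cmp-1'],
    }
    batch_upgrade_steps_max = 3
    upgrade_batch_size = 1  # mirrors role.upgrade_batch_size|default(1)

    for step in range(batch_upgrade_steps_max):
        for role, nodes in roles.items():
            for batch in chunks(nodes, upgrade_batch_size):
                # here the ansible tasks tagged "step<N>" would run on the batch
                print('step%d: %s -> %s' % (step, role, ','.join(batch)))
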
diff --git a/puppet/services/aodh-api.yaml b/puppet/services/aodh-api.yaml
index 2401d76..62c4b09 100644 (file)
@@ -86,9 +86,6 @@ outputs:
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: "PreUpgrade step0,validation: Check service openstack-aodh-api is running"
-          shell: /usr/bin/systemctl show 'openstack-aodh-api' --property ActiveState | grep '\bactive\b'
-          tags: step0,validation
         - name: Stop aodh_api service (running under httpd)
           tags: step2
           service: name=httpd state=stopped
diff --git a/puppet/services/ceilometer-api.yaml b/puppet/services/ceilometer-api.yaml
index cc0d158..741f8da 100644 (file)
@@ -93,12 +93,6 @@ outputs:
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: "PreUpgrade step0,validation: Check service openstack-ceilometer-api is running"
-          shell: /usr/bin/systemctl show 'openstack-ceilometer-api' --property ActiveState | grep '\bactive\b'
-          tags: step0,validation
         - name: Stop ceilometer_api service (running under httpd)
           tags: step2
           service: name=httpd state=stopped
-        - name: Run ceilometer upgrade
-          tags: step5
-          command: ceilometer-upgrade --config-file=/etc/ceilometer/ceilometer.conf --skip-gnocchi-resource-types
diff --git a/puppet/services/ceph-osd.yaml b/puppet/services/ceph-osd.yaml
index 98f83d0..9bd83aa 100644 (file)
@@ -68,14 +68,14 @@ outputs:
           command: ceph osd set noscrub
         - name: Stop Ceph OSD
           tags: step1
-          service: name=ceph-osd@$item state=stopped
+          service: name=ceph-osd@{{ item }} state=stopped
           with_items: "{{osd_ids.stdout.strip().split()}}"
         - name: Update ceph OSD packages
           tags: step1
           yum: name=ceph-osd state=latest
         - name: Start ceph-osd service
           tags: step1
-          service: name=ceph-osd@$item state=started
+          service: name=ceph-osd@{{ item }} state=started
           with_items: "{{osd_ids.stdout.strip().split()}}"
         - name: ceph osd unset noout
           tags: step1
diff --git a/puppet/services/ceph-rgw.yaml b/puppet/services/ceph-rgw.yaml
index 9253699..83339f2 100644 (file)
@@ -77,3 +77,14 @@ outputs:
           ceph::rgw::keystone::auth::tenant: service
           ceph::rgw::keystone::auth::user: swift
           ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
+      upgrade_tasks:
+        - name: Gather RGW instance ID
+          tags: step0
+          shell: hiera -c /etc/puppet/hiera.yaml ceph::profile::params::rgw_name radosgw.gateway
+          register: rgw_id
+        - name: Check status
+          shell: /usr/bin/systemctl show ceph-radosgw@{{rgw_id.stdout}} --property ActiveState | grep '\bactive\b'
+          tags: step0,validation
+        - name: Stop RGW instance
+          tags: step1
+          service: name=ceph-radosgw@{{rgw_id.stdout}} state=stopped
diff --git a/puppet/services/cinder-backend-dellps.yaml b/puppet/services/cinder-backend-dellps.yaml
new file mode 100644 (file)
index 0000000..1f15c53
--- /dev/null
@@ -0,0 +1,85 @@
+# Copyright (c) 2017 Dell Inc. or its subsidiaries.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+heat_template_version: ocata
+
+description: >
+  OpenStack Cinder Dell EMC PS Series backend
+
+parameters:
+  CinderEnableDellPsBackend:
+    type: boolean
+    default: true
+  CinderDellPsBackendName:
+    type: string
+    default: 'tripleo_dellps'
+  CinderDellPsSanIp:
+    type: string
+  CinderDellPsSanLogin:
+    type: string
+  CinderDellPsSanPassword:
+    type: string
+    hidden: true
+  CinderDellPsSanThinProvision:
+    type: boolean
+    default: true
+  CinderDellPsGroupname:
+    type: string
+    default: 'group-0'
+  CinderDellPsPool:
+    type: string
+    default: 'default'
+  CinderDellPsChapLogin:
+    type: string
+    default: ''
+  CinderDellPsChapPassword:
+    type: string
+    default: ''
+  CinderDellPsUseChap:
+    type: boolean
+    default: false
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    type: json
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Dell EMC PS Series backend.
+    value:
+      service_name: cinder_backend_dellps
+      config_settings:
+        tripleo::profile::base::cinder::volume::cinder_enable_dellps_backend: {get_param: CinderEnableDellPsBackend}
+        cinder::backend::eqlx::volume_backend_name: {get_param: CinderDellPsBackendName}
+        cinder::backend::eqlx::san_ip: {get_param: CinderDellPsSanIp}
+        cinder::backend::eqlx::san_login: {get_param: CinderDellPsSanLogin}
+        cinder::backend::eqlx::san_password: {get_param: CinderDellPsSanPassword}
+        cinder::backend::eqlx::san_thin_provision: {get_param: CinderDellPsSanThinProvision}
+        cinder::backend::eqlx::eqlx_group_name: {get_param: CinderDellPsGroupname}
+        cinder::backend::eqlx::eqlx_pool: {get_param: CinderDellPsPool}
+        cinder::backend::eqlx::eqlx_use_chap: {get_param: CinderDellPsUseChap}
+        cinder::backend::eqlx::eqlx_chap_login: {get_param: CinderDellPsChapLogin}
+        cinder::backend::eqlx::eqlx_chap_password: {get_param: CinderDellPsChapPassword}
+      step_config: |
+        include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/gnocchi-api.yaml b/puppet/services/gnocchi-api.yaml
index 2aea29f..2a1ed2a 100644 (file)
@@ -103,12 +103,6 @@ outputs:
             # internal_api_subnet - > IP/CIDR
             gnocchi::wsgi::apache::bind_host: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
             gnocchi::wsgi::apache::wsgi_process_display_name: 'gnocchi_wsgi'
-            gnocchi::api::host:
-              str_replace:
-                template:
-                  "%{hiera('fqdn_$NETWORK')}"
-                params:
-                  $NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
 
             gnocchi::api::keystone_auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
             gnocchi::api::keystone_identity_uri: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
@@ -134,9 +128,6 @@ outputs:
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: "PreUpgrade step0,validation: Check service openstack-gnocchi-api is running"
-          shell: /usr/bin/systemctl show 'openstack-gnocchi-api' --property ActiveState | grep '\bactive\b'
-          tags: step0,validation
         - name: Stop gnocchi_api service (running under httpd)
           tags: step2
           service: name=httpd state=stopped
diff --git a/puppet/services/ironic-api.yaml b/puppet/services/ironic-api.yaml
index ff91eb6..bc34b73 100644 (file)
@@ -25,6 +25,10 @@ parameters:
   MonitoringSubscriptionIronicApi:
     default: 'overcloud-ironic-api'
     type: string
+  KeystoneRegion:
+    type: string
+    default: 'regionOne'
+    description: Keystone region for endpoint
 
 resources:
   IronicBase:
@@ -73,6 +77,7 @@ outputs:
           ironic::keystone::auth::auth_name: 'ironic'
           ironic::keystone::auth::password: {get_param: IronicPassword }
           ironic::keystone::auth::tenant: 'service'
+          ironic::keystone::auth::region: {get_param: KeystoneRegion}
         mysql:
           ironic::db::mysql::password: {get_param: IronicPassword}
           ironic::db::mysql::user: ironic
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index a10c03a..48d8720 100644 (file)
@@ -24,6 +24,14 @@ parameters:
                  "full" for full cleaning, "metadata" to clean only disk
                  metadata (partition table).
     type: string
+  IronicCleaningNetwork:
+    default: 'provisioning'
+    description: Name or UUID of the *overcloud* network used for cleaning
+                 bare metal nodes. The default value of "provisioning" can be
+                 left during the initial deployment (when no networks are
+                 created yet) and should be changed to an actual UUID in
+                 a post-deployment stack update.
+    type: string
   IronicEnabledDrivers:
     default: ['pxe_ipmitool', 'pxe_drac', 'pxe_ilo']
     description: Enabled Ironic drivers
@@ -61,6 +69,7 @@ outputs:
           - ironic::api::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
             ironic::conductor::api_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
             ironic::conductor::cleaning_disk_erase: {get_param: IronicCleaningDiskErase}
+            ironic::conductor::cleaning_network: {get_param: IronicCleaningNetwork}
             ironic::conductor::enabled_drivers: {get_param: IronicEnabledDrivers}
             # We need an endpoint containing a real IP, not a VIP here
             ironic_conductor_http_host: {get_param: [ServiceNetMap, IronicNetwork]}
diff --git a/puppet/services/keystone.yaml b/puppet/services/keystone.yaml
index b989d50..7da4a9c 100644 (file)
@@ -313,8 +313,5 @@ outputs:
         - name: Sync keystone DB
           tags: step5
           command: keystone-manage db_sync
-        - name: Start keystone service (running under httpd)
-          tags: step6
-          service: name=httpd state=started
       metadata_settings:
         get_attr: [ApacheServiceBase, role_data, metadata_settings]
diff --git a/puppet/services/neutron-ovs-dpdk-agent.yaml b/puppet/services/neutron-ovs-dpdk-agent.yaml
index 5c77e35..e25bc49 100644 (file)
@@ -18,6 +18,11 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  HostCpusList:
+    description: List of cores to be used for host process
+    type: string
+    constraints:
+      - allowed_pattern: "'[0-9,-]+'"
   NeutronDpdkCoreList:
     description: List of cores to be used for DPDK Poll Mode Driver
     type: string
@@ -68,7 +73,8 @@ outputs:
           - neutron::agents::ml2::ovs::enable_dpdk: true
             neutron::agents::ml2::ovs::datapath_type: {get_param: NeutronDatapathType}
             neutron::agents::ml2::ovs::vhostuser_socket_dir: {get_param: NeutronVhostuserSocketDir}
-            vswitch::dpdk::core_list: {get_param: NeutronDpdkCoreList}
+            vswitch::dpdk::host_core_list: {get_param: HostCpusList}
+            vswitch::dpdk::pmd_core_list: {get_param: NeutronDpdkCoreList}
             vswitch::dpdk::memory_channels: {get_param: NeutronDpdkMemoryChannels}
             vswitch::dpdk::socket_mem: {get_param: NeutronDpdkSocketMemory}
             vswitch::dpdk::driver_type: {get_param: NeutronDpdkDriverType}
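The new HostCpusList parameter is constrained by the quoted allowed_pattern "'[0-9,-]+'", i.e. a single-quoted string of CPU ids and ranges such as '0,2-3', matching the existing DPDK core list parameters. The small parser below is not part of the templates; it is only an illustrative sketch that makes the expected syntax concrete.

    # Illustrative parser for the quoted core-list format enforced by the
    # allowed_pattern above (e.g. "'0,2-3'"); not part of the templates.
    def parse_core_list(value):
        cores = set()
        for part in value.strip("'").split(','):
            if '-' in part:
                lo, hi = part.split('-', 1)
                cores.update(range(int(lo), int(hi) + 1))
            elif part:
                cores.add(int(part))
        return sorted(cores)

    assert parse_core_list("'0,2-3'") == [0, 2, 3]
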
diff --git a/puppet/services/nova-base.yaml b/puppet/services/nova-base.yaml
index c448bf4..49cba79 100644 (file)
@@ -145,8 +145,6 @@ outputs:
                 - '@'
                 - {get_param: [EndpointMap, MysqlInternal, host]}
                 - '/nova'
-                - '?bind_address='
-                - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
           nova::api_database_connection:
             list_join:
               - ''
@@ -156,8 +154,6 @@ outputs:
                 - '@'
                 - {get_param: [EndpointMap, MysqlInternal, host]}
                 - '/nova_api'
-                - '?bind_address='
-                - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
           nova::placement_database_connection:
             list_join:
               - ''
@@ -167,8 +163,6 @@ outputs:
                 - '@'
                 - {get_param: [EndpointMap, MysqlInternal, host]}
                 - '/nova_placement'
-                - '?bind_address='
-                - "%{hiera('tripleo::profile::base::database::mysql::client_bind_address')}"
           nova::debug: {get_param: Debug}
           nova::purge_config: {get_param: EnableConfigPurge}
           nova::network::neutron::neutron_project_name: 'service'
diff --git a/puppet/services/nova-libvirt.yaml b/puppet/services/nova-libvirt.yaml
index a9b2b3f..faf1ae4 100644 (file)
@@ -62,6 +62,7 @@ outputs:
             nova::compute::libvirt::qemu::configure_qemu: true
             nova::compute::libvirt::qemu::max_files: 32768
             nova::compute::libvirt::qemu::max_processes: 131072
+            nova::compute::libvirt::vncserver_listen: {get_param: [ServiceNetMap, NovaLibvirtNetwork]}
             tripleo.nova_libvirt.firewall_rules:
               '200 nova_libvirt':
                 dport:
diff --git a/puppet/services/pacemaker.yaml b/puppet/services/pacemaker.yaml
index a8a9fb9..c47229f 100644 (file)
@@ -39,6 +39,12 @@ parameters:
     description: The password for the 'pcsd' user for pacemaker.
     hidden: true
     default: ''
+  CorosyncSettleTries:
+    type: number
+    description: Number of tries for cluster settling. This has the
+                 same default as the pacemaker puppet module. Override
+                 to a smaller value when you need to replace a controller node.
+    default: 360
   FencingConfig:
     default: {}
     description: |
@@ -97,6 +103,7 @@ outputs:
         pacemaker::resource_defaults::defaults:
           resource-stickiness: { value: INFINITY }
         corosync_token_timeout: 10000
+        pacemaker::corosync::settle_tries: {get_param: CorosyncSettleTries}
         tripleo.pacemaker.firewall_rules:
           '130 pacemaker tcp':
             proto: 'tcp'
diff --git a/puppet/services/swift-proxy.yaml b/puppet/services/swift-proxy.yaml
index 62d227a..31a4c17 100644 (file)
@@ -57,6 +57,12 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
+  RabbitClientUseSSL:
+    default: false
+    description: >
+        Rabbit client subscriber parameter to specify
+        an SSL connection to the RabbitMQ host.
+    type: string
 
 conditions:
 
@@ -91,6 +97,7 @@ outputs:
             swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
             swift::proxy::ceilometer::nonblocking_notify: true
             tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
+            tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL}
             tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
             tripleo.swift_proxy.firewall_rules:
               '122 swift proxy':
diff --git a/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml b/releasenotes/notes/ironic-cleaning-network-1e06881df0402221.yaml
new file mode 100644 (file)
index 0000000..72601f9
--- /dev/null
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    New parameter "IronicCleaningNetwork" can be used to override the name
+    or UUID of the **overcloud** network Ironic uses for cleaning.
+fixes:
+  - |
+    A default value is now provided for Ironic ``cleaning_network``
+    configuration option. Not providing it on start up was deprecated since
+    Newton, and will result in a failure in the near future.
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 19e40d1..0eacbc6 100755 (executable)
@@ -66,7 +66,10 @@ def validate_mysql_connection(settings):
 
     def validate_mysql_uri(key, items):
         # Only consider a connection if it targets mysql
-        if key.endswith('connection') and \
+        # TODO(owalsh): skip nova mysql uris, temporary workaround for
+        # tripleo/+bug/1662344
+        if not key.startswith('nova') and \
+           key.endswith('connection') and \
            search(items, mysql_protocol, no_op):
             # Assume the "bind_address" option is one of
             # the token that made up the uri
@@ -94,10 +97,6 @@ def validate_mysql_connection(settings):
 
 
 def validate_service(filename, tpl):
-    if 'heat_template_version' in tpl and not str(tpl['heat_template_version']).isalpha():
-        print('ERROR: heat_template_version needs to be the release alias not a date: %s'
-              % filename)
-        return 1
     if 'outputs' in tpl and 'role_data' in tpl['outputs']:
         if 'value' not in tpl['outputs']['role_data']:
             print('ERROR: invalid role_data for filename: %s'
@@ -135,6 +134,13 @@ def validate(filename):
     try:
         tpl = yaml.load(open(filename).read())
 
+        # The release alias should be used instead of a date; this validation
+        # applies to all templates, not just those in the services folder.
+        if 'heat_template_version' in tpl and not str(tpl['heat_template_version']).isalpha():
+            print('ERROR: heat_template_version needs to be the release alias not a date: %s'
+                  % filename)
+            return 1
+
         if (filename.startswith('./puppet/services/') and
                 filename != './puppet/services/services.yaml'):
             retval = validate_service(filename, tpl)
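The heat_template_version check moved out of validate_service(), so it now applies to every template in the tree, which is why the CI templates earlier in this diff switch from date versions to the 'ocata' alias. A quick illustration of what the isalpha() test accepts and rejects:

    # Quick illustration of the check above: release aliases pass, dates fail.
    for version in ('ocata', '2016-10-14', '2013-05-23'):
        if str(version).isalpha():
            print('%s: OK' % version)
        else:
            print('%s: ERROR, use the release alias' % version)
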
diff --git a/tox.ini b/tox.ini
index 969f21d..3796a54 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -13,6 +13,7 @@ commands = {posargs}
 [testenv:pep8]
 commands =
     python ./tools/process-templates.py
+    python ./network/endpoints/build_endpoint_map.py --check
     python ./tools/yaml-validate.py .
 
 [testenv:templates]