Merge "New environment file to configure containers."
author Jenkins <jenkins@review.openstack.org>
Wed, 5 Jul 2017 00:35:31 +0000 (00:35 +0000)
committer Gerrit Code Review <review@openstack.org>
Wed, 5 Jul 2017 00:35:31 +0000 (00:35 +0000)
45 files changed:
ci/environments/README.rst [new file with mode: 0644]
ci/environments/scenario001-multinode-containers.yaml
docker/deploy-steps-playbook.yaml
docker/docker-puppet.py
docker/docker-steps.j2
docker/docker-toool
docker/services/ceilometer-agent-ipmi.yaml
docker/services/ceilometer-agent-notification.yaml
docker/services/ec2-api.yaml
docker/services/keystone.yaml
docker/services/neutron-api.yaml
docker/services/neutron-dhcp.yaml
docker/services/neutron-l3.yaml
docker/services/neutron-ovs-agent.yaml
docker/services/nova-api.yaml
docker/services/sahara-api.yaml
docker/services/swift-storage.yaml
environments/docker-ha.yaml [new file with mode: 0644]
environments/major-upgrade-composable-steps-docker.yaml
environments/neutron-ml2-cisco-nexus-ucsm.yaml
overcloud-resource-registry-puppet.j2.yaml
overcloud.j2.yaml
puppet/blockstorage-role.yaml
puppet/cephstorage-role.yaml
puppet/compute-role.yaml
puppet/controller-role.yaml
puppet/major_upgrade_steps.j2.yaml
puppet/post-upgrade.j2.yaml
puppet/role.role.j2.yaml
puppet/services/ceph-base.yaml
puppet/services/ceph-external.yaml
puppet/services/disabled/ceilometer-expirer-disabled.yaml
puppet/services/glance-api.yaml
puppet/services/nova-api.yaml
puppet/services/nova-conductor.yaml
puppet/services/nova-metadata.yaml
puppet/services/nova-placement.yaml
puppet/services/swift-proxy.yaml
releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml [new file with mode: 0644]
releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml [new file with mode: 0644]
roles/Controller.yaml
roles/ControllerOpenstack.yaml
roles_data.yaml
test-requirements.txt
tools/yaml-validate.py

diff --git a/ci/environments/README.rst b/ci/environments/README.rst
new file mode 100644 (file)
index 0000000..4a3cb9d
--- /dev/null
@@ -0,0 +1,4 @@
+This directory contains environments that are used in tripleo-ci.  They may change from
+release to release or within a release, and should not be relied upon in a production
+environment.  The top-level ``environments`` directory in tripleo-heat-templates
+contains the production-ready environment files.
index c142922..7c32381 100644 (file)
@@ -6,15 +6,17 @@
 resource_registry:
   OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
   OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
+  # TODO deploy ceph with ceph-ansible: https://review.openstack.org/#/c/465066/
   OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
   OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
   OS::TripleO::Services::CephClient: ../../puppet/services/ceph-client.yaml
-  OS::TripleO::Services::PankoApi: ../../puppet/services/panko-api.yaml
-  OS::TripleO::Services::Collectd: ../../puppet/services/metrics/collectd.yaml
-  OS::TripleO::Services::Tacker: ../../puppet/services/tacker.yaml
-  OS::TripleO::Services::Congress: ../../puppet/services/congress.yaml
+  OS::TripleO::Services::PankoApi: ../../docker/services/panko-api.yaml
+  OS::TripleO::Services::Collectd: ../../docker/services/collectd.yaml
+  OS::TripleO::Services::Tacker: ../../docker/services/tacker.yaml
+  OS::TripleO::Services::Congress: ../../docker/services/congress-api.yaml
+  # TODO fluentd is being containerized: https://review.openstack.org/#/c/467072/
   OS::TripleO::Services::FluentdClient: ../../puppet/services/logging/fluentd-client.yaml
-  OS::TripleO::Services::SensuClient: ../../puppet/services/monitoring/sensu-client.yaml
+  OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
   # NOTE: This is needed because of upgrades from Ocata to Pike. We
   # deploy the initial environment with Ocata templates, and
   # overcloud-resource-registry.yaml there doesn't have this Docker
index b3cb500..87587a4 100644 (file)
@@ -17,6 +17,7 @@
       shell: python /var/lib/docker-puppet/docker-puppet.py
       environment:
         NET_HOST: 'true'
+        DEBUG: '{{docker_puppet_debug}}'
       when: step == "1"
       changed_when: false
       check_mode: no
index 1321167..4c193e4 100755 (executable)
@@ -29,9 +29,13 @@ import tempfile
 import multiprocessing
 
 log = logging.getLogger()
-log.setLevel(logging.DEBUG)
 ch = logging.StreamHandler(sys.stdout)
-ch.setLevel(logging.DEBUG)
+if os.environ.get('DEBUG', False):
+    log.setLevel(logging.DEBUG)
+    ch.setLevel(logging.DEBUG)
+else:
+    log.setLevel(logging.INFO)
+    ch.setLevel(logging.INFO)
 formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
 ch.setFormatter(formatter)
 log.addHandler(ch)
@@ -145,11 +149,11 @@ for service in (json_data or []):
     if not manifest or not config_image:
         continue
 
-    log.debug('config_volume %s' % config_volume)
-    log.debug('puppet_tags %s' % puppet_tags)
-    log.debug('manifest %s' % manifest)
-    log.debug('config_image %s' % config_image)
-    log.debug('volumes %s' % volumes)
+    log.info('config_volume %s' % config_volume)
+    log.info('puppet_tags %s' % puppet_tags)
+    log.info('manifest %s' % manifest)
+    log.info('config_image %s' % config_image)
+    log.info('volumes %s' % volumes)
     # We key off of config volume for all configs.
     if config_volume in configs:
         # Append puppet tags and manifest.
@@ -217,7 +221,7 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
 
             # Write a checksum of the config-data dir, this is used as a
             # salt to trigger container restart when the config changes
-            tar cf - /var/lib/config-data/${NAME} | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
+            tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
         fi
         """)
 
@@ -272,13 +276,17 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, env=env)
         cmd_stdout, cmd_stderr = subproc.communicate()
-        if cmd_stdout:
-            log.debug(cmd_stdout)
-        if cmd_stderr:
-            log.debug(cmd_stderr)
         if subproc.returncode != 0:
             log.error('Failed running docker-puppet.py for %s' % config_volume)
+            if cmd_stdout:
+                log.error(cmd_stdout)
+            if cmd_stderr:
+                log.error(cmd_stderr)
         else:
+            if cmd_stdout:
+                log.debug(cmd_stdout)
+            if cmd_stderr:
+                log.debug(cmd_stderr)
             # only delete successful runs, for debugging
             rm_container('docker-puppet-%s' % config_volume)
         return subproc.returncode
index 8377202..73a3cb7 100644 (file)
@@ -38,6 +38,10 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  DockerPuppetDebug:
+    type: string
+    default: ''
+    description: Set to True to enable debug logging with docker-puppet.py
   ctlplane_service_ips:
     type: json
 
@@ -84,6 +88,7 @@ resources:
         - name: role_name
         - name: update_identifier
         - name: bootstrap_server_id
+        - name: docker_puppet_debug
       config: {get_file: deploy-steps-playbook.yaml}
 
 {%- for step in range(1, deploy_steps_max) %}
@@ -286,6 +291,7 @@ resources:
         role_name: {{role.name}}
         update_identifier: {get_param: DeployIdentifier}
         bootstrap_server_id: {get_param: [servers, {{primary_role_name}}, '0']}
+        docker_puppet_debug: {get_param: DockerPuppetDebug}
 
   {% endfor %}
   # END CONFIG STEPS
index 0b87ea9..a1ffe34 100755 (executable)
@@ -69,10 +69,15 @@ def parse_opts(argv):
                         action='store_true',
                         help="""Start docker container interactively (-ti).""",
                         default=False)
+    parser.add_argument('-d', '--detach',
+                        action='store_true',
+                        help="""Start container detached.""",
+                        default=False)
     opts = parser.parse_args(argv[1:])
 
     return opts
 
+
 def docker_arg_map(key, value):
     value = str(value).encode('ascii', 'ignore')
     if len(value) == 0:
@@ -84,12 +89,12 @@ def docker_arg_map(key, value):
         'net': "--net=%s" % value,
         'pid': "--pid=%s" % value,
         'privileged': "--privileged=%s" % value.lower(),
-        #'restart': "--restart=%s" % "false",
         'user': "--user=%s" % value,
         'volumes': "--volume=%s" % value,
         'volumes_from': "--volumes-from=%s" % value,
     }.get(key, None)
 
+
 def run_docker_container(opts, container_name):
     container_found = False
 
@@ -142,13 +147,15 @@ def run_docker_container(opts, container_name):
                             if opts.user:
                                 continue
                         arg = docker_arg_map(container_data,
-                                json_data[step][container][container_data])
+                                             json_data[step][container][container_data])
                         if arg:
                             cmd.append(arg)
 
                 if opts.user:
                     cmd.append('--user')
                     cmd.append(opts.user)
+                if opts.detach:
+                    cmd.append('--detach')
                 if opts.interactive:
                     cmd.append('-ti')
                     # May as well remove it when we're done too
@@ -167,19 +174,17 @@ def run_docker_container(opts, container_name):
     if not container_found:
         print("Container '%s' not found!" % container_name)
 
+
 def list_docker_containers(opts):
-    print opts
     with open(opts.config) as f:
         json_data = json.load(f)
 
     for step in (json_data or []):
         if step is None:
             continue
-        print step
         for container in (json_data[step] or []):
             print('\tcontainer: %s' % container)
             for container_data in (json_data[step][container] or []):
-                #print('\t\tcontainer_data: %s' % container_data)
                 if container_data == "start_order":
                     print('\t\tstart_order: %s' % json_data[step][container][container_data])
 
@@ -189,4 +194,3 @@ if opts.container:
     run_docker_container(opts, opts.container)
 else:
     list_docker_containers(opts)
-
index 977d37a..7d02939 100644 (file)
@@ -100,20 +100,6 @@ outputs:
                   - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-        step_5:
-          ceilometer_gnocchi_upgrade:
-            start_order: 1
-            image: *ceilometer_agent_ipmi_image
-            net: host
-            detach: false
-            privileged: false
-            volumes:
-              list_concat:
-                - {get_attr: [ContainersCommon, volumes]}
-                -
-                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
-                  - /var/log/containers/ceilometer:/var/log/ceilometer
-            command: "/usr/bin/bootstrap_host_exec ceilometer su ceilometer -s /bin/bash -c '/usr/bin/ceilometer-upgrade --skip-metering-database'"
       upgrade_tasks:
         - name: Stop and disable ceilometer agent ipmi service
           tags: step2
index 27ab3a9..b2e85bb 100644 (file)
@@ -100,20 +100,6 @@ outputs:
                   - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
             environment:
               - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
-        step_5:
-          ceilometer_gnocchi_upgrade:
-            start_order: 1
-            image: *ceilometer_agent_notification_image
-            net: host
-            detach: false
-            privileged: false
-            volumes:
-              list_concat:
-                - {get_attr: [ContainersCommon, volumes]}
-                -
-                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
-                  - /var/log/containers/ceilometer:/var/log/ceilometer
-            command: ["/usr/bin/ceilometer-upgrade", "--skip-metering-database"]
       upgrade_tasks:
         - name: Stop and disable ceilometer agent notification service
           tags: step2
index 0c65a90..d4cfe49 100644 (file)
@@ -13,7 +13,7 @@ parameters:
     default: 'centos-binary-ec2-api:latest'
     type: string
   DockerEc2ApiConfigImage:
-    description: The container image to use for the ec2api config_volume
+    description: The container image to use for the ec2_api config_volume
     default: 'centos-binary-ec2-api:latest'
     type: string
   EndpointMap:
@@ -64,7 +64,7 @@ outputs:
       service_config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
       puppet_config:
-        config_volume: ec2api
+        config_volume: ec2_api
         puppet_tags: ec2api_api_paste_ini,ec2api_config
         step_config: *step_config
         config_image:
index 011ffaa..b6cfa21 100644 (file)
@@ -113,6 +113,7 @@ outputs:
           keystone_db_sync:
             image: *keystone_image
             net: host
+            user: root
             privileged: false
             detach: false
             volumes: &keystone_volumes
@@ -152,6 +153,7 @@ outputs:
           keystone_bootstrap:
             start_order: 3
             action: exec
+            user: root
             command:
               [ 'keystone', '/usr/bin/bootstrap_host_exec', 'keystone' ,'keystone-manage', 'bootstrap', '--bootstrap-password', {get_param: AdminPassword} ]
       docker_puppet_tasks:
index ad8e87f..2890dec 100644 (file)
@@ -82,7 +82,7 @@ outputs:
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron_api.json:
-          command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini
+          command: /usr/bin/neutron-server --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-server
           permissions:
             - path: /var/log/neutron
               owner: neutron:neutron
index a2bd07f..460b2ee 100644 (file)
@@ -75,7 +75,7 @@ outputs:
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron_dhcp.json:
-          command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log
+          command: /usr/bin/neutron-dhcp-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp_agent.ini --log-file /var/log/neutron/dhcp-agent.log --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-dhcp-agent
           permissions:
             - path: /var/log/neutron
               owner: neutron:neutron
index f7d0d03..b692f73 100644 (file)
@@ -71,7 +71,7 @@ outputs:
             - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron_l3_agent.json:
-          command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini
+          command: /usr/bin/neutron-l3-agent --config-file /usr/share/neutron/neutron-dist.conf --config-dir /usr/share/neutron/l3_agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3_agent.ini --config-dir /etc/neutron/conf.d/common --config-dir /etc/neutron/conf.d/neutron-l3-agent
           permissions:
             - path: /var/log/neutron
               owner: neutron:neutron
index 80de2cc..27919a3 100644 (file)
@@ -71,7 +71,7 @@ outputs:
           - [ {get_param: DockerNamespace}, {get_param: DockerNeutronConfigImage} ]
       kolla_config:
         /var/lib/kolla/config_files/neutron_ovs_agent.json:
-          command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini
+          command: /usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/openvswitch_agent.ini --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-dir /etc/neutron/conf.d/common
           permissions:
             - path: /var/log/neutron
               owner: neutron:neutron
index 27be3e5..5d410fb 100644 (file)
@@ -62,6 +62,9 @@ outputs:
         map_merge:
           - get_attr: [NovaApiBase, role_data, config_settings]
           - apache::default_vhost: false
+            nova_wsgi_enabled: false
+            nova::api::service_name: '%{::nova::params::api_service_name}'
+            nova::wsgi::apache_api::ssl: false
       step_config: &step_config
         list_join:
           - "\n"
index 55c42ab..32d6458 100644 (file)
@@ -92,6 +92,7 @@ outputs:
             net: host
             privileged: false
             detach: false
+            user: root
             volumes: &sahara_volumes
               list_concat:
                 - {get_attr: [ContainersCommon, volumes]}
index 011e6d4..f2b8c47 100644 (file)
@@ -86,7 +86,7 @@ outputs:
       # BEGIN DOCKER SETTINGS
       puppet_config:
         config_volume: swift
-        puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config
+        puppet_tags: swift_config,swift_container_config,swift_container_sync_realms_config,swift_account_config,swift_object_config,swift_object_expirer_config,rsync::server
         step_config: *step_config
         config_image:
           list_join:
@@ -123,6 +123,8 @@ outputs:
             - path: /var/log/swift
               owner: swift:swift
               recurse: true
+        /var/lib/kolla/config_files/swift_xinetd_rsync.json:
+          command: /usr/sbin/xinetd -dontfork
       docker_config:
         step_3:
           # The puppet config sets this up but we don't have a way to mount the named
@@ -356,6 +358,24 @@ outputs:
                   - /dev:/dev
                   - /var/log/containers/swift:/var/log/swift
             environment: *kolla_env
+          swift_xinetd_rsync:
+            image: *swift_object_image
+            net: host
+            user: root
+            restart: always
+            privileged: true
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/swift_xinetd_rsync.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/swift/etc:/etc
+                  - /run:/run
+                  - /srv/node:/srv/node
+                  - /dev:/dev
+                  - /var/log/containers/swift:/var/log/swift
+            environment: *kolla_env
+
       host_prep_tasks:
         - name: create persistent directories
           file:
diff --git a/environments/docker-ha.yaml b/environments/docker-ha.yaml
new file mode 100644 (file)
index 0000000..442262b
--- /dev/null
@@ -0,0 +1,22 @@
+# Environment file to deploy the HA services via docker
+# Add it *after* -e docker.yaml:
+# ...deploy..-e docker.yaml -e docker-ha.yaml
+resource_registry:
+  # Pacemaker runs on the host
+  OS::TripleO::Tasks::ControllerPreConfig: ../extraconfig/tasks/pre_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostConfig: ../extraconfig/tasks/post_puppet_pacemaker.yaml
+  OS::TripleO::Tasks::ControllerPostPuppetRestart: ../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
+  OS::TripleO::Services::Pacemaker: ../puppet/services/pacemaker.yaml
+  OS::TripleO::Services::PacemakerRemote: ../puppet/services/pacemaker_remote.yaml
+
+  # Services that are disabled for HA deployments with pacemaker
+  OS::TripleO::Services::Keepalived: OS::Heat::None
+
+  # HA Containers managed by pacemaker
+  OS::TripleO::Services::CinderVolume: ../docker/services/pacemaker/cinder-volume.yaml
+  OS::TripleO::Services::CinderBackup: ../docker/services/pacemaker/cinder-backup.yaml
+  OS::TripleO::Services::Clustercheck: ../docker/services/pacemaker/clustercheck.yaml
+  OS::TripleO::Services::HAproxy: ../docker/services/pacemaker/haproxy.yaml
+  OS::TripleO::Services::MySQL: ../docker/services/pacemaker/database/mysql.yaml
+  OS::TripleO::Services::RabbitMQ: ../docker/services/pacemaker/rabbitmq.yaml
+  OS::TripleO::Services::Redis: ../docker/services/pacemaker/database/redis.yaml
index 24eedf8..20340c7 100644 (file)
@@ -9,4 +9,6 @@ parameter_defaults:
   UpgradeLevelNovaCompute: auto
   UpgradeInitCommonCommand: |
     #!/bin/bash
+    set -eu
     # Ocata to Pike, put any needed host-level workarounds here
+    yum install -y ansible-pacemaker
index f5a0a39..2c87470 100644 (file)
@@ -2,7 +2,6 @@
 # a Cisco Neutron plugin.
 resource_registry:
   OS::TripleO::AllNodesExtraConfig: ../puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
-  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
 
 parameter_defaults:
index b1a3529..51cc85d 100644 (file)
@@ -268,6 +268,7 @@ resource_registry:
   OS::TripleO::Services::Docker: OS::Heat::None
   OS::TripleO::Services::CertmongerUser: OS::Heat::None
   OS::TripleO::Services::Iscsid: OS::Heat::None
+  OS::TripleO::Services::Clustercheck: OS::Heat::None
 
 parameter_defaults:
   EnablePackageInstall: false
index 1848e09..56a10a5 100644 (file)
@@ -462,6 +462,21 @@ resources:
             servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
 {% endfor %}
 
+  # This is a different format to *Servers, as it creates a map of lists
+  # whereas *Servers creates a map of maps with keys of the nested resource names
+  ServerIdMap:
+    type: OS::Heat::Value
+    properties:
+      value:
+        server_ids:
+{% for role in roles %}
+          {{role.name}}: {get_attr: [{{role.name}}, nova_server_resource]}
+{% endfor %}
+        bootstrap_server_id:
+          yaql:
+            expression: coalesce($.data, []).first(null)
+            data: {get_attr: [{{primary_role_name}}, nova_server_resource]}
+
   # This resource just creates a dict out of the DeploymentServerBlacklist,
   # which is a list. The dict is used in the role templates to set a condition
   # on whether to create the deployment resources. We can't use the list
@@ -825,3 +840,6 @@ outputs:
       map_merge:
         - {get_attr: [VipMap, net_ip_map]}
         - redis: {get_attr: [RedisVirtualIP, ip_address]}
+  ServerIdData:
+    description: Mapping of each role to a list of nova server IDs and the bootstrap ID
+    value: {get_attr: [ServerIdMap, value]}
index 612a4a0..f28f606 100644 (file)
@@ -142,6 +142,7 @@ parameters:
   RoleParameters:
     type: json
     description: Role Specific Parameters
+    default: {}
   DeploymentSwiftDataMap:
     type: json
     description: |
index e7afcb4..85b276d 100644 (file)
@@ -148,6 +148,7 @@ parameters:
   RoleParameters:
     type: json
     description: Role Specific Parameters
+    default: {}
   DeploymentSwiftDataMap:
     type: json
     description: |
index 5a662e8..10d082c 100644 (file)
@@ -160,6 +160,7 @@ parameters:
   RoleParameters:
     type: json
     description: Role Specific Parameters
+    default: {}
   DeploymentSwiftDataMap:
     type: json
     description: |
index 09e5b2b..ca08c65 100644 (file)
@@ -178,6 +178,7 @@ parameters:
   RoleParameters:
     type: json
     description: Role Specific Parameters
+    default: {}
   DeploymentSwiftDataMap:
     type: json
     description: |
index b9e0860..574c41b 100644 (file)
@@ -14,7 +14,8 @@ parameters:
   role_data:
     type: json
     description: Mapping of Role name e.g Controller to the per-role data
-
+  ctlplane_service_ips:
+    type: json
   UpdateIdentifier:
     type: string
     description: >
@@ -210,6 +211,7 @@ resources:
       servers: {get_param: servers}
       stack_name: {get_param: stack_name}
       role_data: {get_param: role_data}
+      ctlplane_service_ips: {get_param: ctlplane_service_ips}
 
 outputs:
   # Output the config for each role, just use Step1 as the config should be
index c51b6e1..bdd1e61 100644 (file)
@@ -8,17 +8,20 @@ parameters:
   servers:
     type: json
     description: Mapping of Role name e.g Controller to a list of servers
-
+  stack_name:
+    type: string
+    description: Name of the topmost stack
   role_data:
     type: json
     description: Mapping of Role name e.g Controller to the per-role data
-
   DeployIdentifier:
     default: ''
     type: string
     description: >
       Setting this to a unique value will re-run any deployment tasks which
       perform configuration on a Heat stack-update.
+  ctlplane_service_ips:
+    type: json
 
 resources:
 # Note the include here is the same as post.j2.yaml but the data used at
index b45736c..5aac089 100644 (file)
@@ -168,6 +168,7 @@ parameters:
   RoleParameters:
     type: json
     description: Role Specific Parameters
+    default: {}
   DeploymentSwiftDataMap:
     type: json
     description: |
index e12c55e..48d9599 100644 (file)
@@ -29,20 +29,9 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
-  GlanceBackend:
-    default: swift
-    description: The short name of the Glance backend to use. Should be one
-      of swift, rbd, or file
-    type: string
-    constraints:
-    - allowed_values: ['swift', 'file', 'rbd']
   GnocchiRbdPoolName:
     default: metrics
     type: string
-  NovaEnableRbdBackend:
-    default: false
-    description: Whether to enable or not the Rbd backend for Nova
-    type: boolean
   NovaRbdPoolName:
     default: vms
     type: string
@@ -82,16 +71,6 @@ parameter_groups:
   parameters:
   - ControllerEnableCephStorage
 
-conditions:
-  glance_multiple_locations:
-    and:
-    - equals:
-      - get_param: GlanceBackend
-      - rbd
-    - equals:
-      - get_param: NovaEnableRbdBackend
-      - true
-
 outputs:
   role_data:
     description: Role data for the Ceph base service.
@@ -153,6 +132,3 @@ outputs:
             - keys:
                 CEPH_CLIENT_KEY:
                   list_join: ['.', ['client', {get_param: CephClientUserName}]]
-      service_config_settings:
-        glance_api:
-          glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
index 599532c..65e6ea8 100644 (file)
@@ -27,20 +27,9 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
-  GlanceBackend:
-    default: swift
-    description: The short name of the Glance backend to use. Should be one
-      of swift, rbd, or file
-    type: string
-    constraints:
-    - allowed_values: ['swift', 'file', 'rbd']
   GnocchiRbdPoolName:
     default: metrics
     type: string
-  NovaEnableRbdBackend:
-    default: false
-    description: Whether to enable or not the Rbd backend for Nova
-    type: boolean
   NovaRbdPoolName:
     default: vms
     type: string
@@ -76,16 +65,6 @@ parameters:
                  clients using older Ceph servers.
     type: string
 
-conditions:
-  glance_multiple_locations:
-    and:
-    - equals:
-      - get_param: GlanceBackend
-      - rbd
-    - equals:
-      - get_param: NovaEnableRbdBackend
-      - true
-
 outputs:
   role_data:
     description: Role data for the Ceph External service.
@@ -122,8 +101,5 @@ outputs:
           - ceph-base
           - ceph-mon
           - ceph-osd
-      service_config_settings:
-        glance_api:
-          glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
       step_config: |
         include ::tripleo::profile::base::ceph::client
index 7be394b..7110afa 100644 (file)
@@ -36,3 +36,6 @@ outputs:
         - name: Remove ceilometer expirer cron tab on upgrade
           tags: step1
           shell: '/usr/bin/crontab -u ceilometer -r'
+          register: remove_ceilometer_expirer_crontab
+          failed_when: remove_ceilometer_expirer_crontab.rc != 0 and remove_ceilometer_expirer_crontab.stderr != "no crontab for ceilometer"
+          changed_when: remove_ceilometer_expirer_crontab.stderr != "no crontab for ceilometer"
index 7812c8e..a3d5a79 100644 (file)
@@ -96,6 +96,10 @@ parameters:
   GlanceRbdPoolName:
     default: images
     type: string
+  NovaEnableRbdBackend:
+    default: false
+    description: Whether to enable or not the Rbd backend for Nova
+    type: boolean
   RabbitPassword:
     description: The password for RabbitMQ
     type: string
@@ -129,6 +133,14 @@ conditions:
   use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
   glance_workers_unset: {equals : [{get_param: GlanceWorkers}, '']}
   service_debug_unset: {equals : [{get_param: GlanceDebug}, '']}
+  glance_multiple_locations:
+    and:
+    - equals:
+      - get_param: GlanceBackend
+      - rbd
+    - equals:
+      - get_param: NovaEnableRbdBackend
+      - true
 
 resources:
 
@@ -187,6 +199,7 @@ outputs:
             glance::keystone::authtoken::project_domain_name: 'Default'
             glance::api::pipeline: 'keystone'
             glance::api::show_image_direct_url: true
+            glance::api::show_multiple_locations: {if: [glance_multiple_locations, true, false]}
             # NOTE: bind IP is found in Heat replacing the network name with the
             # local node IP for the given network; replacement examples
             # (eg. for internal_api):
index fe2f294..4ce5316 100644 (file)
@@ -28,7 +28,7 @@ parameters:
     type: json
   NovaWorkers:
     default: 0
-    description: Number of workers for Nova API service.
+    description: Number of workers for Nova services.
     type: number
   NovaPassword:
     description: The password for the nova service and db account, used by nova-api.
@@ -81,17 +81,15 @@ conditions:
   nova_workers_zero: {equals : [{get_param: NovaWorkers}, 0]}
 
 resources:
-  # Temporarily disable Nova API deployed in WSGI
-  # https://bugs.launchpad.net/nova/+bug/1661360
-  # ApacheServiceBase:
-  #   type: ./apache.yaml
-  #   properties:
-  #     ServiceNetMap: {get_param: ServiceNetMap}
-  #     DefaultPasswords: {get_param: DefaultPasswords}
-  #     EndpointMap: {get_param: EndpointMap}
-  #     RoleName: {get_param: RoleName}
-  #     RoleParameters: {get_param: RoleParameters}
-  #     EnableInternalTLS: {get_param: EnableInternalTLS}
+  ApacheServiceBase:
+    type: ./apache.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+      EnableInternalTLS: {get_param: EnableInternalTLS}
 
   NovaBase:
     type: ./nova-base.yaml
@@ -114,9 +112,7 @@ outputs:
       config_settings:
         map_merge:
         - get_attr: [NovaBase, role_data, config_settings]
-        # Temporarily disable Nova API deployed in WSGI
-        # https://bugs.launchpad.net/nova/+bug/1661360
-        # - get_attr: [ApacheServiceBase, role_data, config_settings]
+        - get_attr: [ApacheServiceBase, role_data, config_settings]
         - nova::cron::archive_deleted_rows::hour: '*/12'
           nova::cron::archive_deleted_rows::destination: '/dev/null'
           tripleo.nova_api.firewall_rules:
@@ -143,23 +139,21 @@ outputs:
                 "%{hiera('fqdn_$NETWORK')}"
               params:
                 $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
-          # Temporarily disable Nova API deployed in WSGI
-          # https://bugs.launchpad.net/nova/+bug/1661360
-          nova_wsgi_enabled: false
-          # nova::api::service_name: 'httpd'
-          # nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
+          nova_wsgi_enabled: true
+          nova::api::service_name: 'httpd'
+          nova::wsgi::apache_api::ssl: {get_param: EnableInternalTLS}
           # NOTE: bind IP is found in Heat replacing the network name with the local node IP
           # for the given network; replacement examples (eg. for internal_api):
           # internal_api -> IP
           # internal_api_uri -> [IP]
           # internal_api_subnet - > IP/CIDR
-          nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
-          nova::wsgi::apache_api::servername:
-            str_replace:
-              template:
-                "%{hiera('fqdn_$NETWORK')}"
-              params:
-                $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
+          nova::wsgi::apache_api::bind_host: {get_param: [ServiceNetMap, NovaApiNetwork]}
+          nova::wsgi::apache_api::servername:
+            str_replace:
+              template:
+                "%{hiera('fqdn_$NETWORK')}"
+              params:
+                $NETWORK: {get_param: [ServiceNetMap, NovaApiNetwork]}
           nova::api::neutron_metadata_proxy_shared_secret: {get_param: NeutronMetadataProxySharedSecret}
           nova::api::instance_name_template: {get_param: InstanceNameTemplate}
           nova_enable_db_purge: {get_param: NovaEnableDBPurge}
@@ -169,9 +163,7 @@ outputs:
           - nova_workers_zero
           - {}
           - nova::api::osapi_compute_workers: {get_param: NovaWorkers}
-          # Temporarily disable Nova API deployed in WSGI
-          # https://bugs.launchpad.net/nova/+bug/1661360
-          # nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
+            nova::wsgi::apache_api::workers: {get_param: NovaWorkers}
       step_config: |
         include tripleo::profile::base::nova::api
       service_config_settings:
@@ -199,87 +191,91 @@ outputs:
           nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
           nova::keystone::auth::password: {get_param: NovaPassword}
           nova::keystone::auth::region: {get_param: KeystoneRegion}
-      # Temporarily disable Nova API deployed in WSGI
-      # https://bugs.launchpad.net/nova/+bug/1661360
-      # metadata_settings:
-      #   get_attr: [ApacheServiceBase, role_data, metadata_settings]
+      metadata_settings:
+        get_attr: [ApacheServiceBase, role_data, metadata_settings]
       upgrade_tasks:
-        - name: get bootstrap nodeid
-          tags: common
-          command: hiera bootstrap_nodeid
-          register: bootstrap_node
-        - name: set is_bootstrap_node fact
-          tags: common
-          set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
-        - name: Extra migration for nova tripleo/+bug/1656791
-          tags: step0,pre-upgrade
-          when: is_bootstrap_node
-          command: nova-manage db online_data_migrations
-        - name: Stop and disable nova_api service (pre-upgrade not under httpd)
-          tags: step2
-          service: name=openstack-nova-api state=stopped enabled=no
-        - name: Create puppet manifest to set transport_url in nova.conf
-          tags: step5
-          when: is_bootstrap_node
-          copy:
-            dest: /root/nova-api_upgrade_manifest.pp
-            mode: 0600
-            content: >
-              $transport_url = os_transport_url({
-                'transport' => hiera('messaging_service_name', 'rabbit'),
-                'hosts'     => any2array(hiera('rabbitmq_node_names', undef)),
-                'port'      => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
-                'username'  => hiera('nova::rabbit_userid', 'guest'),
-                'password'  => hiera('nova::rabbit_password'),
-                'ssl'       => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
-              })
-              oslo::messaging::default { 'nova_config':
-                transport_url => $transport_url
-              }
-        - name: Run puppet apply to set tranport_url in nova.conf
-          tags: step5
-          when: is_bootstrap_node
-          command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
-          register: puppet_apply_nova_api_upgrade
-          failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
-          changed_when: puppet_apply_nova_api_upgrade.rc == 2
-        - name: Setup cell_v2 (map cell0)
-          tags: step5
-          when: is_bootstrap_node
-          shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
-        - name: Setup cell_v2 (create default cell)
-          tags: step5
-          when: is_bootstrap_node
-          # (owalsh) puppet-nova expects the cell name 'default'
-          # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
-          shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
-          register: nova_api_create_cell
-          failed_when: nova_api_create_cell.rc not in [0,2]
-          changed_when: nova_api_create_cell.rc == 0
-        - name: Setup cell_v2 (sync nova/cell DB)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage db sync
-          async: {get_param: NovaDbSyncTimeout}
-          poll: 10
-        - name: Setup cell_v2 (get cell uuid)
-          tags: step5
-          when: is_bootstrap_node
-          shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
-          register: nova_api_cell_uuid
-        - name: Setup cell_v2 (migrate hosts)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
-        - name: Setup cell_v2 (migrate instances)
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
-        - name: Sync nova_api DB
-          tags: step5
-          command: nova-manage api_db sync
-          when: is_bootstrap_node
-        - name: Online data migration for nova
-          tags: step5
-          when: is_bootstrap_node
-          command: nova-manage db online_data_migrations
+        yaql:
+          expression: $.data.apache_upgrade + $.data.nova_api_upgrade
+          data:
+            apache_upgrade:
+              get_attr: [ApacheServiceBase, role_data, upgrade_tasks]
+            nova_api_upgrade:
+              - name: get bootstrap nodeid
+                tags: common
+                command: hiera bootstrap_nodeid
+                register: bootstrap_node
+              - name: set is_bootstrap_node fact
+                tags: common
+                set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
+              - name: Extra migration for nova tripleo/+bug/1656791
+                tags: step0,pre-upgrade
+                when: is_bootstrap_node
+                command: nova-manage db online_data_migrations
+              - name: Stop and disable nova_api service (pre-upgrade not under httpd)
+                tags: step2
+                service: name=openstack-nova-api state=stopped enabled=no
+              - name: Create puppet manifest to set transport_url in nova.conf
+                tags: step5
+                when: is_bootstrap_node
+                copy:
+                  dest: /root/nova-api_upgrade_manifest.pp
+                  mode: 0600
+                  content: >
+                    $transport_url = os_transport_url({
+                      'transport' => hiera('messaging_service_name', 'rabbit'),
+                      'hosts'     => any2array(hiera('rabbitmq_node_names', undef)),
+                      'port'      => sprintf('%s',hiera('nova::rabbit_port', '5672') ),
+                      'username'  => hiera('nova::rabbit_userid', 'guest'),
+                      'password'  => hiera('nova::rabbit_password'),
+                      'ssl'       => sprintf('%s', bool2num(str2bool(hiera('nova::rabbit_use_ssl', '0'))))
+                    })
+                    oslo::messaging::default { 'nova_config':
+                      transport_url => $transport_url
+                    }
+              - name: Run puppet apply to set transport_url in nova.conf
+                tags: step5
+                when: is_bootstrap_node
+                command: puppet apply --modulepath /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules --detailed-exitcodes /root/nova-api_upgrade_manifest.pp
+                register: puppet_apply_nova_api_upgrade
+                failed_when: puppet_apply_nova_api_upgrade.rc not in [0,2]
+                changed_when: puppet_apply_nova_api_upgrade.rc == 2
+              - name: Setup cell_v2 (map cell0)
+                tags: step5
+                when: is_bootstrap_node
+                shell: nova-manage cell_v2 map_cell0 --database_connection=$(hiera nova::cell0_database_connection)
+              - name: Setup cell_v2 (create default cell)
+                tags: step5
+                when: is_bootstrap_node
+                # (owalsh) puppet-nova expects the cell name 'default'
+                # (owalsh) pass the db uri explicitly to avoid https://bugs.launchpad.net/tripleo/+bug/1662344
+                shell: nova-manage cell_v2 create_cell --name='default' --database_connection=$(hiera nova::database_connection)
+                register: nova_api_create_cell
+                failed_when: nova_api_create_cell.rc not in [0,2]
+                changed_when: nova_api_create_cell.rc == 0
+              - name: Setup cell_v2 (sync nova/cell DB)
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage db sync
+                async: {get_param: NovaDbSyncTimeout}
+                poll: 10
+              - name: Setup cell_v2 (get cell uuid)
+                tags: step5
+                when: is_bootstrap_node
+                shell: nova-manage cell_v2 list_cells | sed -e '1,3d' -e '$d' | awk -F ' *| *' '$2 == "default" {print $4}'
+                register: nova_api_cell_uuid
+              - name: Setup cell_v2 (migrate hosts)
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage cell_v2 discover_hosts --cell_uuid {{nova_api_cell_uuid.stdout}} --verbose
+              - name: Setup cell_v2 (migrate instances)
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage cell_v2 map_instances --cell_uuid {{nova_api_cell_uuid.stdout}}
+              - name: Sync nova_api DB
+                tags: step5
+                command: nova-manage api_db sync
+                when: is_bootstrap_node
+              - name: Online data migration for nova
+                tags: step5
+                when: is_bootstrap_node
+                command: nova-manage db online_data_migrations
index 30eb127..b83b985 100644 (file)
@@ -28,7 +28,7 @@ parameters:
     type: json
   NovaWorkers:
     default: 0
-    description: Number of workers for Nova Conductor service.
+    description: Number of workers for Nova services.
     type: number
   MonitoringSubscriptionNovaConductor:
     default: 'overcloud-nova-conductor'
index 335b2c2..bc7dc1b 100644 (file)
@@ -28,7 +28,7 @@ parameters:
     type: json
   NovaWorkers:
     default: 0
-    description: Number of workers for Nova API service.
+    description: Number of workers for Nova services.
     type: number
 
 conditions:
index 86aa079..aaa7ef5 100644 (file)
@@ -28,7 +28,7 @@ parameters:
     type: json
   NovaWorkers:
     default: 0
-    description: Number of workers for Nova Placement API service.
+    description: Number of workers for Nova services.
     type: number
   NovaPassword:
     description: The password for the nova service and db account, used by nova-placement.
index 9a304ed..c707efb 100644 (file)
@@ -59,10 +59,10 @@ parameters:
     type: string
   SwiftCeilometerPipelineEnabled:
     description: Set to False to disable the swift proxy ceilometer pipeline.
-    default: True
+    default: false
     type: boolean
   SwiftCeilometerIgnoreProjects:
-    default: ['services']
+    default: ['service']
     description: Comma-seperated list of project names to ignore.
     type: comma_delimited_list
   RabbitClientPort:
@@ -81,7 +81,7 @@ parameters:
 
 conditions:
 
-  ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, True]}
+  ceilometer_pipeline_enabled: {equals : [{get_param: SwiftCeilometerPipelineEnabled}, true]}
   use_tls_proxy: {equals : [{get_param: EnableInternalTLS}, true]}
 
 resources:
@@ -118,14 +118,20 @@ outputs:
             swift::proxy::authtoken::project_name: 'service'
             swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
             swift::proxy::workers: {get_param: SwiftWorkers}
-            swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
-            swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
-            swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
-            swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
-            swift::proxy::ceilometer::password: {get_param: SwiftPassword}
-            swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
-            swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
-            swift::proxy::ceilometer::nonblocking_notify: true
+          -
+            if:
+            - ceilometer_pipeline_enabled
+            -
+              swift::proxy::ceilometer::rabbit_user: {get_param: RabbitUserName}
+              swift::proxy::ceilometer::rabbit_password: {get_param: RabbitPassword}
+              swift::proxy::ceilometer::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
+              swift::proxy::ceilometer::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+              swift::proxy::ceilometer::password: {get_param: SwiftPassword}
+              swift::proxy::ceilometer::ignore_projects: {get_param: SwiftCeilometerIgnoreProjects}
+              swift::proxy::ceilometer::nonblocking_notify: true
+              swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
+            - {}
+          - swift::proxy::staticweb::url_base: {get_param: [EndpointMap, SwiftPublic, uri_no_suffix]}
             tripleo::profile::base::swift::proxy::rabbit_port: {get_param: RabbitClientPort}
             tripleo::profile::base::swift::proxy::ceilometer_messaging_use_ssl: {get_param: RabbitClientUseSSL}
             tripleo::profile::base::swift::proxy::ceilometer_enabled: {get_param: SwiftCeilometerPipelineEnabled}
@@ -168,7 +174,6 @@ outputs:
                     - ''
                   - 'proxy-logging'
                   - 'proxy-server'
-            swift::proxy::ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
             swift::proxy::account_autocreate: true
             # NOTE: bind IP is found in Heat replacing the network name with the
             # local node IP for the given network; replacement examples
diff --git a/releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml b/releasenotes/notes/disable-ceilo-middleware-6853cb92e3e08161.yaml
new file mode 100644 (file)
index 0000000..28dac8b
--- /dev/null
@@ -0,0 +1,5 @@
+---
+fixes:
+  - Disable ceilometer in the swift proxy middleware pipeline out of the box.
+    This generates a lot of events with gnocchi and swift backend and causes
+    heavy load. It should be easy to enable if needed.
diff --git a/releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml b/releasenotes/notes/generated-sample-environments-8b523f55f36e940c.yaml
new file mode 100644 (file)
index 0000000..0721334
--- /dev/null
@@ -0,0 +1,21 @@
+---
+features:
+  - |
+    There is now a tool in tripleo-heat-templates, similar to the
+    oslo-config-generator, that can be used to programmatically generate
+    sample environment files based directly on the contents of the templates
+    themselves.  This ensures consistency in the sample environments, as well
+    as making it easier to update environments to reflect changes to the
+    templates.
+upgrade:
+  - |
+    Some sample environment files will be moving as part of the work to
+    generate them programmatically.  The old versions will be left in place for
+    one cycle to allow a smooth upgrade process.  When upgrading, if any of the
+    environment files in use for the deployment have been deprecated they
+    should be replaced with the new generated versions.
+deprecations:
+  - |
+    Where a generated sample environment replaces an existing one, the existing
+    environment is deprecated.  This will be noted in a comment at the top of
+    the file.
index b0a1313..e156396 100644 (file)
@@ -46,6 +46,7 @@
     - OS::TripleO::Services::CinderVolume
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Congress
+    - OS::TripleO::Services::Clustercheck
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::Etcd
index 6cf2120..4ad405a 100644 (file)
@@ -40,6 +40,7 @@
     - OS::TripleO::Services::CinderVolume
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Congress
+    - OS::TripleO::Services::Clustercheck
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::Ec2Api
     - OS::TripleO::Services::Etcd
index f96e562..d84b637 100644 (file)
@@ -47,6 +47,7 @@
     - OS::TripleO::Services::CinderHPELeftHandISCSI
     - OS::TripleO::Services::CinderScheduler
     - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::Clustercheck
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Congress
     - OS::TripleO::Services::Docker
index 8113635..6e0eea3 100644 (file)
@@ -4,7 +4,7 @@
 PyYAML>=3.10.0 # MIT
 Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
 six>=1.9.0 # MIT
-sphinx!=1.6.1,>=1.5.1 # BSD
+sphinx>=1.6.2 # BSD
 oslosphinx>=4.7.0 # Apache-2.0
 reno!=2.3.1,>=1.8.0 # Apache-2.0
 coverage!=4.4,>=4.0 # Apache-2.0
index 233ec18..674449f 100755 (executable)
@@ -200,6 +200,23 @@ def validate_docker_service(filename, tpl):
                       % (expected_config_image_parameter, config_volume))
                 return 1
 
+        if 'docker_config' in role_data:
+            docker_config = role_data['docker_config']
+            for _, step in docker_config.items():
+                for _, container in step.items():
+                    if not isinstance(container, dict):
+                        # NOTE(mandre) this skips everything that is not a dict
+                        # so we may ignore some containers definitions if they
+                        # are in a map_merge for example
+                        continue
+                    command = container.get('command', '')
+                    if isinstance(command, list):
+                        command = ' '.join(map(str, command))
+                    if 'bootstrap_host_exec' in command \
+                            and container.get('user') != 'root':
+                      print('ERROR: bootstrap_host_exec needs to run as the root user.')
+                      return 1
+
     if 'parameters' in tpl:
         for param in required_params:
             if param not in tpl['parameters']: