Merge "Support config dir for env generator input files"
authorJenkins <jenkins@review.openstack.org>
Mon, 19 Jun 2017 15:26:33 +0000 (15:26 +0000)
committerGerrit Code Review <review@openstack.org>
Mon, 19 Jun 2017 15:26:33 +0000 (15:26 +0000)
120 files changed:
ci/environments/multinode-container-upgrade.yaml [deleted file]
common/README [new file with mode: 0644]
docker/docker-puppet.py
docker/docker-steps.j2
docker/docker-toool
docker/services/aodh-api.yaml
docker/services/ceilometer-agent-ipmi.yaml [new file with mode: 0644]
docker/services/cinder-api.yaml [new file with mode: 0644]
docker/services/cinder-backup.yaml [new file with mode: 0644]
docker/services/cinder-scheduler.yaml [new file with mode: 0644]
docker/services/cinder-volume.yaml [new file with mode: 0644]
docker/services/congress-api.yaml
docker/services/containers-common.yaml
docker/services/database/mysql.yaml
docker/services/database/redis.yaml
docker/services/ec2-api.yaml [new file with mode: 0644]
docker/services/glance-api.yaml
docker/services/gnocchi-api.yaml
docker/services/heat-engine.yaml
docker/services/horizon.yaml
docker/services/ironic-api.yaml
docker/services/iscsid.yaml [new file with mode: 0644]
docker/services/keystone.yaml
docker/services/manila-scheduler.yaml [new file with mode: 0644]
docker/services/memcached.yaml
docker/services/mistral-api.yaml
docker/services/multipathd.yaml [new file with mode: 0644]
docker/services/neutron-api.yaml
docker/services/nova-api.yaml
docker/services/nova-consoleauth.yaml [new file with mode: 0644]
docker/services/nova-vnc-proxy.yaml [new file with mode: 0644]
docker/services/pacemaker/cinder-volume.yaml [new file with mode: 0644]
docker/services/pacemaker/haproxy.yaml
docker/services/panko-api.yaml
docker/services/rabbitmq.yaml
docker/services/sahara-api.yaml [new file with mode: 0644]
docker/services/sahara-engine.yaml [new file with mode: 0644]
docker/services/sensu-client.yaml [new file with mode: 0644]
docker/services/services.yaml [deleted file]
docker/services/swift-ringbuilder.yaml
docker/services/tacker.yaml
environments/cinder-dellsc-config.yaml
environments/docker-services-tls-everywhere.yaml
environments/docker.yaml
environments/enable-tls.yaml
environments/host-config-and-reboot.j2.yaml [new file with mode: 0644]
environments/host-config-pre-network.j2.yaml [deleted file]
environments/hyperconverged-ceph.yaml
environments/network-isolation.j2.yaml [new file with mode: 0644]
environments/network-isolation.yaml [deleted file]
environments/neutron-ml2-ovn-ha.yaml [new file with mode: 0644]
environments/services-docker/ec2-api.yaml [new file with mode: 0644]
environments/services-docker/manila.yaml
environments/services-docker/sahara.yaml [new file with mode: 0644]
environments/services-docker/sensu-client.yaml [new file with mode: 0644]
environments/services-docker/undercloud-ceilometer.yaml
environments/services/ironic.yaml
environments/tls-endpoints-public-dns.yaml
environments/tls-endpoints-public-ip.yaml
environments/tls-everywhere-endpoints-dns.yaml
environments/undercloud.yaml
extraconfig/pre_network/ansible_host_config.yaml [moved from extraconfig/pre_network/ansible_host_config.ansible with 90% similarity]
extraconfig/pre_network/host_config_and_reboot.role.j2.yaml
extraconfig/pre_network/host_config_and_reboot.yaml [new file with mode: 0644]
extraconfig/tasks/pacemaker_common_functions.sh
extraconfig/tasks/post_puppet_pacemaker.j2.yaml
extraconfig/tasks/yum_update.sh
network/endpoints/endpoint_data.yaml
network/endpoints/endpoint_map.yaml
overcloud-resource-registry-puppet.j2.yaml
overcloud.j2.yaml
puppet/all-nodes-config.yaml
puppet/blockstorage-role.yaml
puppet/cephstorage-role.yaml
puppet/compute-role.yaml
puppet/controller-role.yaml
puppet/objectstorage-role.yaml
puppet/puppet-steps.j2
puppet/role.role.j2.yaml
puppet/services/cinder-backend-dellsc.yaml
puppet/services/cinder-backend-netapp.yaml
puppet/services/cinder-volume.yaml
puppet/services/gnocchi-base.yaml
puppet/services/horizon.yaml
puppet/services/ironic-conductor.yaml
puppet/services/ironic-inspector.yaml [new file with mode: 0644]
puppet/services/keystone.yaml
puppet/services/neutron-sriov-agent.yaml
puppet/services/nova-api.yaml
puppet/services/ovn-dbs.yaml
puppet/services/pacemaker/ovn-dbs.yaml [new file with mode: 0644]
puppet/services/panko-api.yaml
releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml [new file with mode: 0644]
releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml [new file with mode: 0644]
releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml [new file with mode: 0644]
releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml [new file with mode: 0644]
releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml [new file with mode: 0644]
releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml [new file with mode: 0644]
releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml [new file with mode: 0644]
releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml [new file with mode: 0644]
releasenotes/notes/ovn-ha-c0139ac519680872.yaml [new file with mode: 0644]
releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml [new file with mode: 0644]
roles/BlockStorage.yaml
roles/CephStorage.yaml
roles/Compute.yaml
roles/Controller.yaml
roles/ControllerOpenstack.yaml
roles/Database.yaml
roles/IronicConductor.yaml [new file with mode: 0644]
roles/Messaging.yaml
roles/Networker.yaml
roles/ObjectStorage.yaml
roles/README.rst
roles/Telemetry.yaml
roles/Undercloud.yaml
roles_data.yaml
roles_data_undercloud.yaml
services.yaml [moved from puppet/services/services.yaml with 88% similarity]
test-requirements.txt
tools/yaml-validate.py

diff --git a/ci/environments/multinode-container-upgrade.yaml b/ci/environments/multinode-container-upgrade.yaml
deleted file mode 100644 (file)
index 24bb1f4..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-# NOTE: This is an environment specific for containers CI. Mainly we
-# deploy non-pacemakerized overcloud. Once we are able to deploy and
-# upgrade pacemakerized and containerized overcloud, we should remove
-# this file and use normal CI multinode environments/scenarios.
-
-resource_registry:
-  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
-  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
-
-  # NOTE: This is needed because of upgrades from Ocata to Pike. We
-  # deploy the initial environment with Ocata templates, and
-  # overcloud-resource-registry.yaml there doesn't have this Docker
-  # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
-  # remove this.
-  OS::TripleO::Services::Docker: OS::Heat::None
-
-parameter_defaults:
-  ControllerServices:
-    - OS::TripleO::Services::CephMon
-    - OS::TripleO::Services::CephOSD
-    - OS::TripleO::Services::CinderApi
-    - OS::TripleO::Services::CinderScheduler
-    - OS::TripleO::Services::CinderVolume
-    - OS::TripleO::Services::Docker
-    - OS::TripleO::Services::Kernel
-    - OS::TripleO::Services::Keystone
-    - OS::TripleO::Services::GlanceApi
-    - OS::TripleO::Services::HeatApi
-    - OS::TripleO::Services::HeatApiCfn
-    - OS::TripleO::Services::HeatApiCloudwatch
-    - OS::TripleO::Services::HeatEngine
-    - OS::TripleO::Services::MySQL
-    - OS::TripleO::Services::MySQLClient
-    - OS::TripleO::Services::NeutronDhcpAgent
-    - OS::TripleO::Services::NeutronL3Agent
-    - OS::TripleO::Services::NeutronMetadataAgent
-    - OS::TripleO::Services::NeutronServer
-    - OS::TripleO::Services::NeutronCorePlugin
-    - OS::TripleO::Services::NeutronOvsAgent
-    - OS::TripleO::Services::RabbitMQ
-    - OS::TripleO::Services::HAproxy
-    - OS::TripleO::Services::Keepalived
-    - OS::TripleO::Services::Memcached
-    - OS::TripleO::Services::Pacemaker
-    - OS::TripleO::Services::NovaConductor
-    - OS::TripleO::Services::NovaApi
-    - OS::TripleO::Services::NovaPlacement
-    - OS::TripleO::Services::NovaMetadata
-    - OS::TripleO::Services::NovaScheduler
-    - OS::TripleO::Services::Ntp
-    - OS::TripleO::Services::SwiftProxy
-    - OS::TripleO::Services::SwiftStorage
-    - OS::TripleO::Services::SwiftRingBuilder
-    - OS::TripleO::Services::Snmp
-    - OS::TripleO::Services::Timezone
-    - OS::TripleO::Services::TripleoPackages
-    - OS::TripleO::Services::NovaCompute
-    - OS::TripleO::Services::NovaLibvirt
-    - OS::TripleO::Services::Sshd
-  ControllerExtraConfig:
-    nova::compute::libvirt::services::libvirt_virt_type: qemu
-    nova::compute::libvirt::libvirt_virt_type: qemu
-    # Required for Centos 7.3 and Qemu 2.6.0
-    nova::compute::libvirt::libvirt_cpu_mode: 'none'
-    #NOTE(gfidente): not great but we need this to deploy on ext4
-    #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
-    ceph::profile::params::osd_max_object_name_len: 256
-    ceph::profile::params::osd_max_object_namespace_len: 64
-  SwiftCeilometerPipelineEnabled: False
-  Debug: True
diff --git a/common/README b/common/README
new file mode 100644 (file)
index 0000000..6a52311
--- /dev/null
@@ -0,0 +1 @@
+This will contain some common templates but it needs to be added to the RPM spec first
index 340a9e9..1321167 100755 (executable)
@@ -190,37 +190,30 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         if [ -n "$PUPPET_TAGS" ]; then
             TAGS="--tags \"$PUPPET_TAGS\""
         fi
+
+        # workaround LP1696283
+        mkdir -p /etc/ssh
+        touch /etc/ssh/ssh_known_hosts
+
         FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
 
         # Disables archiving
         if [ -z "$NO_ARCHIVE" ]; then
-            rm -Rf /var/lib/config-data/${NAME}
-
-            # copying etc should be enough for most services
-            mkdir -p /var/lib/config-data/${NAME}/etc
-            cp -a /etc/* /var/lib/config-data/${NAME}/etc/
-
-            # workaround LP1696283
-            mkdir -p /var/lib/config-data/${NAME}/etc/ssh
-            touch /var/lib/config-data/${NAME}/etc/ssh/ssh_known_hosts
-
-            if [ -d /root/ ]; then
-              cp -a /root/ /var/lib/config-data/${NAME}/root/
-            fi
-            if [ -d /var/lib/ironic/tftpboot/ ]; then
-              mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
-              cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/tftpboot/
-            fi
-            if [ -d /var/lib/ironic/httpboot/ ]; then
-              mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
-              cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/httpboot/
-            fi
-
-            # apache services may files placed in /var/www/
-            if [ -d /var/www/ ]; then
-             mkdir -p /var/lib/config-data/${NAME}/var/www
-             cp -a /var/www/* /var/lib/config-data/${NAME}/var/www/
-            fi
+            archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
+            rsync_srcs=""
+            for d in "${archivedirs[@]}"; do
+                if [ -d "$d" ]; then
+                    rsync_srcs+=" $d"
+                fi
+            done
+            rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}
+
+            # Also make a copy of files modified during puppet run
+            # This is useful for debugging
+            mkdir -p /var/lib/config-data/puppet-generated/${NAME}
+            rsync -a -R -0 --delay-updates --delete-after \
+                          --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \
+                          / /var/lib/config-data/puppet-generated/${NAME}
 
             # Write a checksum of the config-data dir, this is used as a
             # salt to trigger container restart when the config changes
index cf798e8..3dd963b 100644 (file)
@@ -216,26 +216,31 @@ resources:
   {% endfor %}
   # END CONFIG STEPS
 
-  {{role.name}}PostConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PostConfig
+  # Note, this should be the last step to execute configuration changes.
+  # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+  # after all the previous deployment steps.
+  {{role.name}}ExtraConfigPost:
     depends_on:
   {% for dep in roles %}
       - {{dep.name}}Deployment_Step5
   {% endfor %}
+    type: OS::TripleO::NodeExtraConfigPost
     properties:
-      servers:  {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
+        servers: {get_param: [servers, {{role.name}}]}
 
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  {{role.name}}ExtraConfigPost:
+  # The {{role.name}}PostConfig steps are in charge of
+  # quiescing all services, i.e. in the Controller case,
+  # we should run a full service reload.
+  {{role.name}}PostConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PostConfig
     depends_on:
   {% for dep in roles %}
-      - {{dep.name}}PostConfig
+      - {{dep.name}}ExtraConfigPost
   {% endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
     properties:
-        servers: {get_param: [servers, {{role.name}}]}
+      servers:  {get_param: servers}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
 
 {% endfor %}
index 36aba4a..0b87ea9 100755 (executable)
@@ -75,6 +75,9 @@ def parse_opts(argv):
 
 def docker_arg_map(key, value):
     value = str(value).encode('ascii', 'ignore')
+    if len(value) == 0:
+        return ''
+
     return {
         'environment': "--env=%s" % value,
         # 'image': value,
index 4b93ddd..bda5469 100644 (file)
@@ -86,16 +86,15 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           aodh_init_log:
-            start_order: 0
             image: *aodh_image
             user: root
             volumes:
               - /var/log/containers/aodh:/var/log/aodh
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R aodh:aodh /var/log/aodh']
+        step_3:
           aodh_db_sync:
-            start_order: 1
             image: *aodh_image
             net: host
             privileged: false
diff --git a/docker/services/ceilometer-agent-ipmi.yaml b/docker/services/ceilometer-agent-ipmi.yaml
new file mode 100644 (file)
index 0000000..02793e4
--- /dev/null
@@ -0,0 +1,113 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Ceilometer Agent Ipmi service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCeilometerIpmiImage:
+    description: image
+    default: 'centos-binary-ceilometer-ipmi:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+   type: ./containers-common.yaml
+
+  CeilometerAgentIpmiBase:
+    type: ../../puppet/services/ceilometer-agent-ipmi.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Ceilometer Agent Ipmi role.
+    value:
+      service_name: {get_attr: [CeilometerAgentIpmiBase, role_data, service_name]}
+      config_settings: {get_attr: [CeilometerAgentIpmiBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CeilometerAgentIpmiBase, role_data, step_config]
+      service_config_settings: {get_attr: [CeilometerAgentIpmiBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: ceilometer
+        puppet_tags: ceilometer_config
+        step_config: *step_config
+        config_image: &ceilometer_agent_ipmi_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCeilometerIpmiImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/ceilometer-agent-ipmi.json:
+          command: /usr/bin/ceilometer-polling --polling-namespaces ipmi
+      docker_config:
+        step_3:
+          ceilometer_init_log:
+            start_order: 0
+            image: *ceilometer_agent_ipmi_image
+            user: root
+            command: ['/bin/bash', '-c', 'chown -R ceilometer:ceilometer /var/log/ceilometer']
+            volumes:
+              - /var/log/containers/ceilometer:/var/log/ceilometer
+        step_4:
+          ceilometer_agent_ipmi:
+            image: *ceilometer_agent_ipmi_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ceilometer-agent-ipmi.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+        step_5:
+          ceilometer_gnocchi_upgrade:
+            start_order: 1
+            image: *ceilometer_agent_ipmi_image
+            net: host
+            detach: false
+            privileged: false
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/ceilometer/etc/ceilometer/:/etc/ceilometer/:ro
+                  - /var/log/containers/ceilometer:/var/log/ceilometer
+            command: "/usr/bin/bootstrap_host_exec ceilometer su ceilometer -s /bin/bash -c '/usr/bin/ceilometer-upgrade --skip-metering-database'"
+      upgrade_tasks:
+        - name: Stop and disable ceilometer agent ipmi service
+          tags: step2
+          service: name=openstack-ceilometer-agent-ipmi state=stopped enabled=no
diff --git a/docker/services/cinder-api.yaml b/docker/services/cinder-api.yaml
new file mode 100644 (file)
index 0000000..94bd66d
--- /dev/null
@@ -0,0 +1,156 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder API service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderApiImage:
+    description: image
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  # we configure all cinder services in the same cinder base container
+  DockerCinderConfigImage:
+    description: image
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EnableInternalTLS:
+    type: boolean
+    default: false
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CinderBase:
+    type: ../../puppet/services/cinder-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder API role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CinderBase, role_data, step_config]
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_api.json:
+          command: /usr/sbin/httpd -DFOREGROUND
+          permissions:
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_2:
+          cinder_api_init_logs:
+            image: &cinder_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderApiImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_3:
+          cinder_api_db_sync:
+            image: *cinder_api_image
+            net: host
+            privileged: false
+            detach: false
+            user: root
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/log/containers/cinder:/var/log/cinder
+            command:
+              - '/usr/bin/bootstrap_host_exec'
+              - 'cinder_api'
+              - "su cinder -s /bin/bash -c 'cinder-manage db sync'"
+        step_4:
+          cinder_api:
+            image: *cinder_api_image
+            net: host
+            privileged: false
+            restart: always
+            # NOTE(mandre) kolla image changes the user to 'cinder', we need it
+            # to be root to run httpd
+            user: root
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/cinder_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/lib/config-data/cinder/etc/httpd/:/etc/httpd/:ro
+                  - /var/lib/config-data/cinder/var/www/:/var/www/:ro
+                  - /var/log/containers/cinder:/var/log/cinder
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/certs/httpd:/etc/pki/tls/certs/httpd:ro
+                      - ''
+                  -
+                    if:
+                      - internal_tls_enabled
+                      - /etc/pki/tls/private/httpd:/etc/pki/tls/private/httpd:ro
+                      - ''
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/cinder
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable cinder_api service
+          tags: step2
+          service: name=httpd state=stopped enabled=no
diff --git a/docker/services/cinder-backup.yaml b/docker/services/cinder-backup.yaml
new file mode 100644 (file)
index 0000000..0958a7e
--- /dev/null
@@ -0,0 +1,132 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Backup service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderBackupImage:
+    description: image
+    default: 'centos-binary-cinder-backup:latest'
+    type: string
+  # we configure all cinder services in the same cinder base container
+  DockerCinderConfigImage:
+    description: image
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CinderBase:
+    type: ../../puppet/services/cinder-backup.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Backup role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CinderBase, role_data, step_config]
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_backup.json:
+          command: /usr/bin/cinder-backup --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/lib/cinder
+              owner: cinder:cinder
+              recurse: true
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_3:
+          cinder_backup_init_logs:
+            start_order: 0
+            image: &cinder_backup_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderBackupImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_4:
+          cinder_backup:
+            image: *cinder_backup_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/cinder_backup.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/lib/config-data/ceph/etc/ceph/:/etc/ceph/:ro #FIXME: we need to generate a ceph.conf with puppet for this
+                  - /dev/:/dev/
+                  - /run/:/run/
+                  - /sys:/sys
+                  - /lib/modules:/lib/modules:ro
+                  - /etc/iscsi:/etc/iscsi
+                  - /var/lib/cinder:/var/lib/cinder
+                  - /var/log/containers/cinder:/var/log/cinder
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/lib/cinder
+            - /var/log/containers/cinder
+      upgrade_tasks:
+        - name: Stop and disable cinder_backup service
+          tags: step2
+          service: name=openstack-cinder-backup state=stopped enabled=no
diff --git a/docker/services/cinder-scheduler.yaml b/docker/services/cinder-scheduler.yaml
new file mode 100644 (file)
index 0000000..8199c34
--- /dev/null
@@ -0,0 +1,120 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Scheduler service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderSchedulerImage:
+    description: image
+    default: 'centos-binary-cinder-scheduler:latest'
+    type: string
+  # we configure all cinder services in the same cinder base container
+  DockerCinderConfigImage:
+    description: image
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CinderBase:
+    type: ../../puppet/services/cinder-scheduler.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Scheduler role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CinderBase, role_data, step_config]
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_scheduler.json:
+          command: /usr/bin/cinder-scheduler --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_2:
+          cinder_scheduler_init_logs:
+            image: &cinder_scheduler_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderSchedulerImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_4:
+          cinder_scheduler:
+            image: *cinder_scheduler_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/cinder_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/log/containers/cinder:/var/log/cinder
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/cinder
+      upgrade_tasks:
+        - name: Stop and disable cinder_scheduler service
+          tags: step2
+          service: name=openstack-cinder-scheduler state=stopped enabled=no
diff --git a/docker/services/cinder-volume.yaml b/docker/services/cinder-volume.yaml
new file mode 100644 (file)
index 0000000..26eb10e
--- /dev/null
@@ -0,0 +1,167 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Volume service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderVolumeImage:
+    description: image
+    default: 'centos-binary-cinder-volume:latest'
+    type: string
+  # we configure all cinder services in the same cinder base container
+  DockerCinderConfigImage:
+    description: image
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  # custom parameters for the Cinder volume role
+  CinderEnableIscsiBackend:
+    default: true
+    description: Whether to enable or not the Iscsi backend for Cinder
+    type: boolean
+  CinderLVMLoopDeviceSize:
+    default: 10280
+    description: The size of the loopback file used by the cinder LVM driver.
+    type: number
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  CinderBase:
+    type: ../../puppet/services/cinder-volume.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Volume role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings: {get_attr: [CinderBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [CinderBase, role_data, step_config]
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_volume.json:
+          command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_3:
+          cinder_volume_init_logs:
+            start_order: 0
+            image: &cinder_volume_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderVolumeImage} ]
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_4:
+          cinder_volume:
+            image: *cinder_volume_image
+            net: host
+            privileged: true
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/cinder_volume.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/cinder/etc/cinder/:/etc/cinder/:ro
+                  - /var/lib/config-data/ceph/etc/ceph/:/etc/ceph/:ro #FIXME: we need to generate a ceph.conf with puppet for this
+                  - /dev/:/dev/
+                  - /run/:/run/
+                  - /sys:/sys
+                  - /etc/iscsi:/etc/iscsi
+                  - /var/lib/cinder:/var/lib/cinder
+                  - /var/log/containers/cinder:/var/log/cinder
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/cinder
+            - /var/lib/cinder
+        - name: cinder_enable_iscsi_backend fact
+          set_fact:
+            cinder_enable_iscsi_backend: {get_param: CinderEnableIscsiBackend}
+        - name: cinder create LVM volume group dd
+          command:
+            list_join:
+            - ''
+            - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
+              - str_replace:
+                  template: VALUE
+                  params:
+                    VALUE: {get_param: CinderLVMLoopDeviceSize}
+              - 'M'
+          args:
+            creates: /var/lib/cinder/cinder-volumes
+          when: cinder_enable_iscsi_backend
+        - name: cinder create LVM volume group
+          shell: |
+            if ! losetup /dev/loop2; then
+              losetup /dev/loop2 /var/lib/cinder/cinder-volumes
+            fi
+            if ! pvdisplay | grep cinder-volumes; then
+              pvcreate /dev/loop2
+            fi
+            if ! vgdisplay | grep cinder-volumes; then
+              vgcreate cinder-volumes /dev/loop2
+            fi
+          args:
+            executable: /bin/bash
+          when: cinder_enable_iscsi_backend
+      upgrade_tasks:
+        - name: Stop and disable cinder_volume service
+          tags: step2
+          service: name=openstack-cinder-volume state=stopped enabled=no
index 3ee1d91..92b0eeb 100644 (file)
@@ -82,9 +82,8 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           congress_init_logs:
-            start_order: 0
             image: &congress_image
               list_join:
                 - '/'
@@ -94,8 +93,8 @@ outputs:
             volumes:
               - /var/log/containers/congress:/var/log/congress
             command: ['/bin/bash', '-c', 'chown -R congress:congress /var/log/congress']
+        step_3:
           congress_db_sync:
-            start_order: 1
             image: *congress_image
             net: host
             privileged: false
index 973d999..d104853 100644 (file)
@@ -3,19 +3,64 @@ heat_template_version: pike
 description: >
   Contains a static list of common things necessary for containers
 
+parameters:
+
+  # Required parameters
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+
+  EnableInternalTLS:
+    type: boolean
+    default: false
+  InternalTLSCAFile:
+    default: '/etc/ipa/ca.crt'
+    type: string
+    description: Specifies the default CA cert to use if TLS is used for
+                 services in the internal network.
+
+conditions:
+
+  internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+
 outputs:
   volumes:
     description: Common volumes for the containers.
     value:
-      - /etc/hosts:/etc/hosts:ro
-      - /etc/localtime:/etc/localtime:ro
-      # required for bootstrap_host_exec
-      - /etc/puppet:/etc/puppet:ro
-      # OpenSSL trusted CAs
-      - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
-      - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
-      - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
-      - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
-      # Syslog socket
-      - /dev/log:/dev/log
-      - /etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro
+      list_concat:
+        - - /etc/hosts:/etc/hosts:ro
+          - /etc/localtime:/etc/localtime:ro
+          # required for bootstrap_host_exec
+          - /etc/puppet:/etc/puppet:ro
+          # OpenSSL trusted CAs
+          - /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro
+          - /etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro
+          - /etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro
+          - /etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro
+          # Syslog socket
+          - /dev/log:/dev/log
+          - /etc/ssh/ssh_known_hosts:/etc/ssh/ssh_known_hosts:ro
+        - if:
+          - internal_tls_enabled
+          # mount the CA file read-only at the same path inside the container;
+          # a bare path here would create an anonymous volume instead of a bind-mount
+          - - list_join:
+                - ':'
+                - - {get_param: InternalTLSCAFile}
+                  - {get_param: InternalTLSCAFile}
+                  - 'ro'
+          - null
index c73db85..9eabb71 100644 (file)
@@ -87,17 +87,16 @@ outputs:
               recurse: true
       docker_config:
         # Kolla_bootstrap runs before permissions set by kolla_config
-        step_2:
+        step_1:
           mysql_init_logs:
-            start_order: 0
             image: *mysql_image
             privileged: false
             user: root
             volumes:
               - /var/log/containers/mysql:/var/log/mariadb
             command: ['/bin/bash', '-c', 'chown -R mysql:mysql /var/log/mariadb']
+        step_2:
           mysql_bootstrap:
-            start_order: 1
             detach: false
             image: *mysql_image
             net: host
index 9e84dd5..9d0d30c 100644 (file)
@@ -79,6 +79,7 @@ outputs:
         step_1:
           redis_init_logs:
             start_order: 0
+            detach: false
             image: *redis_image
             privileged: false
             user: root
@@ -86,6 +87,7 @@ outputs:
               - /var/log/containers/redis:/var/log/redis
             command: ['/bin/bash', '-c', 'chown -R redis:redis /var/log/redis']
           redis:
+            start_order: 1
             image: *redis_image
             net: host
             privileged: false
diff --git a/docker/services/ec2-api.yaml b/docker/services/ec2-api.yaml
new file mode 100644 (file)
index 0000000..bc3654b
--- /dev/null
@@ -0,0 +1,153 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized EC2 API service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerEc2ApiImage:
+    description: image
+    default: 'centos-binary-ec2-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  Ec2ApiPuppetBase:
+      type: ../../puppet/services/ec2-api.yaml
+      properties:
+        EndpointMap: {get_param: EndpointMap}
+        ServiceNetMap: {get_param: ServiceNetMap}
+        DefaultPasswords: {get_param: DefaultPasswords}
+        RoleName: {get_param: RoleName}
+        RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the EC2 API role.
+    value:
+      service_name: {get_attr: [Ec2ApiPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [Ec2ApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [Ec2ApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: ec2api
+        puppet_tags: ec2api_api_paste_ini,ec2api_config
+        step_config: *step_config
+        config_image: &ec2_api_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerEc2ApiImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/ec2_api.json:
+          command: /usr/bin/ec2-api
+          permissions:
+            - path: /var/log/ec2api
+              owner: ec2api:ec2api
+              recurse: true
+        /var/lib/kolla/config_files/ec2_api_metadata.json:
+          command: /usr/bin/ec2-api-metadata
+          permissions:
+            - path: /var/log/ec2api # default log dir for metadata service as well
+              owner: ec2api:ec2api
+              recurse: true
+      docker_config:
+        # db sync runs before permissions set by kolla_config
+        step_2:
+          ec2_api_init_logs:
+            image: *ec2_api_image
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/ec2_api:/var/log/ec2api
+              # mount ec2_api_metadata to "ec2api-metadata" only here to fix
+              # permissions of both directories in one go
+              - /var/log/containers/ec2_api_metadata:/var/log/ec2api-metadata
+            command: ['/bin/bash', '-c', 'chown -R ec2api:ec2api /var/log/ec2api /var/log/ec2api-metadata']
+        step_3:
+          ec2_api_db_sync:
+            image: *ec2_api_image
+            net: host
+            detach: false
+            privileged: false
+            user: root
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+                  - /var/log/containers/ec2_api:/var/log/ec2api
+            command: "/usr/bin/bootstrap_host_exec ec2_api su ec2api -s /bin/bash -c '/usr/bin/ec2-api-manage db_sync'"
+        step_4:
+          ec2_api:
+            image: *ec2_api_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ec2_api.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+                  - /var/log/containers/ec2_api:/var/log/ec2api
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+          ec2_api_metadata:
+            image: *ec2_api_image
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/ec2_api_metadata.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/ec2_api/etc/ec2api/:/etc/ec2api/:ro
+                  - /var/log/containers/ec2_api_metadata:/var/log/ec2api
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent log directories
+          file:
+            path: /var/log/containers/{{ item }}
+            state: directory
+          with_items:
+            - ec2_api
+            - ec2_api_metadata
+      upgrade_tasks:
+        - name: Stop and disable EC2-API services
+          tags: step2
+          service: name={{ item }} state=stopped enabled=no
+          with_items:
+            - openstack-ec2-api
+            - openstack-ec2-api-metadata
index c3af523..5c24401 100644 (file)
@@ -85,17 +85,16 @@ outputs:
           command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
         # Kolla_bootstrap/db_sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           glance_init_logs:
-            start_order: 0
             image: *glance_image
             privileged: false
             user: root
             volumes:
               - /var/log/containers/glance:/var/log/glance
             command: ['/bin/bash', '-c', 'chown -R glance:glance /var/log/glance']
+        step_3:
           glance_api_db_sync:
-            start_order: 1
             image: *glance_image
             net: host
             privileged: false
index e3b72bc..bd1c316 100644 (file)
@@ -86,16 +86,15 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           gnocchi_init_log:
-            start_order: 0
             image: *gnocchi_image
             user: root
             volumes:
               - /var/log/containers/gnocchi:/var/log/gnocchi
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R gnocchi:gnocchi /var/log/gnocchi']
+        step_3:
           gnocchi_db_sync:
-            start_order: 1
             image: *gnocchi_image
             net: host
             detach: false
index 0adad53..7a3312d 100644 (file)
@@ -80,16 +80,15 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           heat_init_log:
-            start_order: 0
             image: *heat_engine_image
             user: root
             volumes:
               - /var/log/containers/heat:/var/log/heat
             command: ['/bin/bash', '-c', 'chown -R heat:heat /var/log/heat']
+        step_3:
           heat_engine_db_sync:
-            start_order: 1
             image: *heat_engine_image
             net: host
             privileged: false
index 022eb5d..13bd091 100644 (file)
@@ -86,7 +86,7 @@ outputs:
               owner: apache:apache
               recurse: false
       docker_config:
-        step_3:
+        step_2:
           horizon_fix_perms:
             image: *horizon_image
             user: root
@@ -99,8 +99,8 @@ outputs:
             volumes:
               - /var/log/containers/horizon:/var/log/horizon
               - /var/lib/config-data/horizon/etc/:/etc/
+        step_3:
           horizon:
-            start_order: 1
             image: *horizon_image
             net: host
             privileged: false
index 650ce4c..a32176a 100644 (file)
@@ -82,9 +82,8 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           ironic_init_logs:
-            start_order: 0
             image: &ironic_image
               list_join:
                 - '/'
@@ -94,6 +93,7 @@ outputs:
             volumes:
               - /var/log/containers/ironic:/var/log/ironic
             command: ['/bin/bash', '-c', 'chown -R ironic:ironic /var/log/ironic']
+        step_3:
           ironic_db_sync:
             start_order: 1
             image: *ironic_image
diff --git a/docker/services/iscsid.yaml b/docker/services/iscsid.yaml
new file mode 100644 (file)
index 0000000..53f5aff
--- /dev/null
@@ -0,0 +1,109 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Iscsid service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerIscsidImage:
+    description: image
+    default: 'centos-binary-iscsid:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Iscsid API role.
+    value:
+      service_name: iscsid
+      config_settings: {}
+      step_config: ''
+      service_config_settings: {}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: iscsid
+        #puppet_tags: file
+        step_config: ''
+        config_image: &iscsid_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerIscsidImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/iscsid.json:
+          command: /usr/sbin/iscsid -f
+      docker_config:
+        step_3:
+          iscsid:
+            start_order: 2
+            image: *iscsid_image
+            net: host
+            privileged: true
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/iscsid.json:/var/lib/kolla/config_files/config.json:ro
+                  - /dev/:/dev/
+                  - /run/:/run/
+                  - /sys:/sys
+                  - /lib/modules:/lib/modules:ro
+                  - /etc/iscsi:/etc/iscsi
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create /etc/iscsi
+          file:
+            path: /etc/iscsi
+            state: directory
+        - name: stat /lib/systemd/system/iscsid.socket
+          stat: path=/lib/systemd/system/iscsid.socket
+          register: stat_iscsid_socket
+        - name: Stop and disable iscsid.socket service
+          service: name=iscsid.socket state=stopped enabled=no
+          when: stat_iscsid_socket.stat.exists
+      upgrade_tasks:
+        - name: stat /lib/systemd/system/iscsid.service
+          stat: path=/lib/systemd/system/iscsid.service
+          register: stat_iscsid_service
+        - name: Stop and disable iscsid service
+          tags: step2
+          service: name=iscsid state=stopped enabled=no
+          when: stat_iscsid_service.stat.exists
+        - name: stat /lib/systemd/system/iscsid.socket
+          stat: path=/lib/systemd/system/iscsid.socket
+          register: stat_iscsid_socket
+        - name: Stop and disable iscsid.socket service
+          tags: step2
+          service: name=iscsid.socket state=stopped enabled=no
+          when: stat_iscsid_socket.stat.exists
+      metadata_settings: {}
index 5b253b4..4cd44f2 100644 (file)
@@ -95,16 +95,15 @@ outputs:
           command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
         # Kolla_bootstrap/db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           keystone_init_log:
-            start_order: 0
             image: *keystone_image
             user: root
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R keystone:keystone /var/log/keystone']
             volumes:
               - /var/log/containers/keystone:/var/log/keystone
+        step_3:
           keystone_db_sync:
-            start_order: 1
             image: *keystone_image
             net: host
             privileged: false
diff --git a/docker/services/manila-scheduler.yaml b/docker/services/manila-scheduler.yaml
new file mode 100644 (file)
index 0000000..fbc80fc
--- /dev/null
@@ -0,0 +1,105 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Manila Scheduler service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerManilaSchedulerImage:
+    description: image
+    default: 'centos-binary-manila-scheduler:latest'
+    type: string
+  DockerManilaConfigImage:
+    description: image
+    default: 'centos-binary-manila-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  ManilaSchedulerPuppetBase:
+    type: ../../puppet/services/manila-scheduler.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Manila Scheduler role.
+    value:
+      service_name: {get_attr: [ManilaSchedulerPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [ManilaSchedulerPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        {get_attr: [ManilaSchedulerPuppetBase, role_data, step_config]}
+      service_config_settings: {get_attr: [ManilaSchedulerPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: manila
+        puppet_tags: manila_config,manila_scheduler_paste_ini
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerManilaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/manila_scheduler.json:
+          command: /usr/bin/manila-scheduler --config-file /usr/share/manila/manila-dist.conf --config-file /etc/manila/manila.conf
+          permissions:
+            - path: /var/log/manila
+              owner: manila:manila
+              recurse: true
+      docker_config:
+        step_4:
+          manila_scheduler:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerManilaSchedulerImage} ]
+            net: host
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/manila_scheduler.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+                  - /var/log/containers/manila:/var/log/manila
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: Create persistent manila logs directory
+          file:
+            path: /var/log/containers/manila
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable manila_scheduler service
+          tags: step2
+          service: name=openstack-manila-scheduler state=stopped enabled=no
index d85a087..d453964 100644 (file)
@@ -72,6 +72,7 @@ outputs:
         step_1:
           memcached_init_logs:
             start_order: 0
+            detach: false
             image: *memcached_image
             privileged: false
             user: root
index cc7e613..30c3cde 100644 (file)
@@ -82,9 +82,8 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           mistral_init_logs:
-            start_order: 0
             image: &mistral_image
               list_join:
                 - '/'
@@ -94,8 +93,9 @@ outputs:
             volumes:
               - /var/log/containers/mistral:/var/log/mistral
             command: ['/bin/bash', '-c', 'chown -R mistral:mistral /var/log/mistral']
+        step_3:
           mistral_db_sync:
-            start_order: 1
+            start_order: 0
             image: *mistral_image
             net: host
             privileged: false
@@ -109,7 +109,7 @@ outputs:
                   - /var/log/containers/mistral:/var/log/mistral
             command: "/usr/bin/bootstrap_host_exec mistral_api su mistral -s /bin/bash -c 'mistral-db-manage --config-file /etc/mistral/mistral.conf upgrade head'"
           mistral_db_populate:
-            start_order: 2
+            start_order: 1
             image: *mistral_image
             net: host
             privileged: false
diff --git a/docker/services/multipathd.yaml b/docker/services/multipathd.yaml
new file mode 100644 (file)
index 0000000..d8927d4
--- /dev/null
@@ -0,0 +1,89 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Multipathd service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerMultipathdImage:
+    description: image
+    default: 'centos-binary-multipathd:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+outputs:
+  role_data:
+    description: Role data for the Multipathd API role.
+    value:
+      service_name: multipathd
+      config_settings: {}
+      step_config: ''
+      service_config_settings: {}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: multipathd
+        #puppet_tags: file
+        step_config: ''
+        config_image: &multipathd_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerMultipathdImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/multipathd.json:
+          command: /usr/sbin/multipathd -d
+      docker_config:
+        step_3:
+          multipathd:
+            start_order: 1
+            image: *multipathd_image
+            net: host
+            privileged: true
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/multipathd.json:/var/lib/kolla/config_files/config.json:ro
+                  - /dev/:/dev/
+                  - /run/:/run/
+                  - /sys:/sys
+                  - /lib/modules:/lib/modules:ro
+                  - /etc/iscsi:/etc/iscsi
+                  - /var/lib/cinder:/var/lib/cinder
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+      upgrade_tasks:
+        - name: Stop and disable multipathd service
+          tags: step2
+          service: name=multipathd state=stopped enabled=no
+      metadata_settings: {}
index fbdf75a..6c2d4ca 100644 (file)
@@ -92,9 +92,8 @@ outputs:
           command: /usr/sbin/httpd -DFOREGROUND
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           neutron_init_logs:
-            start_order: 0
             image: &neutron_api_image
               list_join:
                 - '/'
@@ -104,8 +103,8 @@ outputs:
             volumes:
               - /var/log/containers/neutron:/var/log/neutron
             command: ['/bin/bash', '-c', 'chown -R neutron:neutron /var/log/neutron']
+        step_3:
           neutron_db_sync:
-            start_order: 1
             image: *neutron_api_image
             net: host
             privileged: false
index 2375dad..c97f45d 100644 (file)
@@ -86,9 +86,8 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           nova_init_logs:
-            start_order: 0
             image: &nova_api_image
               list_join:
                 - '/'
@@ -98,8 +97,9 @@ outputs:
             volumes:
               - /var/log/containers/nova:/var/log/nova
             command: ['/bin/bash', '-c', 'chown -R nova:nova /var/log/nova']
+        step_3:
           nova_api_db_sync:
-            start_order: 1
+            start_order: 0
             image: *nova_api_image
             net: host
             detach: false
@@ -116,7 +116,7 @@ outputs:
           # to be capable of upgrading a baremetal setup. This is to ensure the name
           # of the cell is 'default'
           nova_api_map_cell0:
-            start_order: 2
+            start_order: 1
             image: *nova_api_image
             net: host
             detach: false
@@ -124,7 +124,7 @@ outputs:
             volumes: *nova_api_volumes
             command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 map_cell0'"
           nova_api_create_default_cell:
-            start_order: 3
+            start_order: 2
             image: *nova_api_image
             net: host
             detach: false
@@ -136,7 +136,7 @@ outputs:
             user: root
             command: "/usr/bin/bootstrap_host_exec nova_api su nova -s /bin/bash -c '/usr/bin/nova-manage cell_v2 create_cell --name=default'"
           nova_db_sync:
-            start_order: 4
+            start_order: 3
             image: *nova_api_image
             net: host
             detach: false
diff --git a/docker/services/nova-consoleauth.yaml b/docker/services/nova-consoleauth.yaml
new file mode 100644 (file)
index 0000000..19f25d8
--- /dev/null
@@ -0,0 +1,108 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Nova Consoleauth service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerNovaConsoleauthImage:
+    description: image
+    default: 'centos-binary-nova-consoleauth:latest'
+    type: string
+  DockerNovaConfigImage:
+    description: image
+    default: 'centos-binary-nova-base:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  NovaConsoleauthPuppetBase:
+    type: ../../puppet/services/nova-consoleauth.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Nova Consoleauth service.
+    value:
+      service_name: {get_attr: [NovaConsoleauthPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [NovaConsoleauthPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [NovaConsoleauthPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [NovaConsoleauthPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: nova
+        puppet_tags: nova_config
+        step_config: *step_config
+        config_image:
+          list_join:
+          - '/'
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/nova_consoleauth.json:
+          command: /usr/bin/nova-consoleauth
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
+      docker_config:
+        step_4:
+          nova_consoleauth:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNovaConsoleauthImage} ]
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova_consoleauth.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+                  - /var/log/containers/nova:/var/log/nova
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/nova
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable nova_consoleauth service
+          tags: step2
+          service: name=openstack-nova-consoleauth state=stopped enabled=no
diff --git a/docker/services/nova-vnc-proxy.yaml b/docker/services/nova-vnc-proxy.yaml
new file mode 100644 (file)
index 0000000..97d2d15
--- /dev/null
@@ -0,0 +1,108 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Nova Vncproxy service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerNovaVncProxyImage:
+    description: image
+    default: 'centos-binary-nova-novncproxy:latest'
+    type: string
+  DockerNovaConfigImage:
+    description: image
+    default: 'centos-binary-nova-base:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  NovaVncProxyPuppetBase:
+    type: ../../puppet/services/nova-vnc-proxy.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Nova Vncproxy service.
+    value:
+      service_name: {get_attr: [NovaVncProxyPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [NovaVncProxyPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        get_attr: [NovaVncProxyPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [NovaVncProxyPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: nova
+        puppet_tags: nova_config
+        step_config: *step_config
+        config_image:
+          list_join:
+          - '/'
+          - [ {get_param: DockerNamespace}, {get_param: DockerNovaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/nova_vnc_proxy.json:
+          command: /usr/bin/nova-novncproxy --web /usr/share/novnc/
+          permissions:
+            - path: /var/log/nova
+              owner: nova:nova
+              recurse: true
+      docker_config:
+        step_4:
+          nova_vnc_proxy:
+            image:
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerNovaVncProxyImage} ]
+            net: host
+            privileged: false
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/nova_vnc_proxy.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/nova/etc/nova/:/etc/nova/:ro
+                  - /var/log/containers/nova:/var/log/nova
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create persistent logs directory
+          file:
+            path: /var/log/containers/nova
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable nova_vnc_proxy service
+          tags: step2
+          service: name=openstack-nova-novncproxy state=stopped enabled=no
diff --git a/docker/services/pacemaker/cinder-volume.yaml b/docker/services/pacemaker/cinder-volume.yaml
new file mode 100644 (file)
index 0000000..987ebaf
--- /dev/null
@@ -0,0 +1,170 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Cinder Volume service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerCinderVolumeImage:
+    description: image
+    default: 'centos-binary-cinder-volume:latest'
+    type: string
+  # we configure all cinder services in the same cinder base container
+  DockerCinderConfigImage:
+    description: image
+    default: 'centos-binary-cinder-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  # custom parameters for the Cinder volume role
+  CinderEnableIscsiBackend:
+    default: true
+    description: Whether or not to enable the iSCSI backend for Cinder
+    type: boolean
+  CinderLVMLoopDeviceSize:
+    default: 10280
+    description: The size of the loopback file used by the cinder LVM driver.
+    type: number
+
+resources:
+
+  CinderBase:
+    type: ../../../puppet/services/cinder-volume.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the Cinder Volume role.
+    value:
+      service_name: {get_attr: [CinderBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [CinderBase, role_data, config_settings]
+          - tripleo::profile::pacemaker::cinder::volume_bundle::cinder_volume_docker_image: &cinder_volume_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerCinderVolumeImage} ]
+            cinder::volume::manage_service: false
+            cinder::volume::enabled: false
+            cinder::host: hostgroup
+      step_config: ""
+      service_config_settings: {get_attr: [CinderBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: cinder
+        puppet_tags: cinder_config,file,concat,file_line
+        step_config: {get_attr: [CinderBase, role_data, step_config]}
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerCinderConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/cinder_volume.json:
+          command: /usr/bin/cinder-volume --config-file /usr/share/cinder/cinder-dist.conf --config-file /etc/cinder/cinder.conf
+          permissions:
+            - path: /var/log/cinder
+              owner: cinder:cinder
+              recurse: true
+      docker_config:
+        step_3:
+          cinder_volume_init_logs:
+            start_order: 0
+            image: *cinder_volume_image
+            privileged: false
+            user: root
+            volumes:
+              - /var/log/containers/cinder:/var/log/cinder
+            command: ['/bin/bash', '-c', 'chown -R cinder:cinder /var/log/cinder']
+        step_5:
+          cinder_volume_init_bundle:
+            start_order: 0
+            detach: false
+            net: host
+            user: root
+            command:
+              - '/bin/bash'
+              - '-c'
+              - str_replace:
+                  template:
+                    list_join:
+                      - '; '
+                      - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 5}' > /etc/puppet/hieradata/docker.json"
+                        - "FACTER_uuid=docker puppet apply --tags file_line,concat,augeas,TAGS --debug -v -e 'CONFIG'"
+                  params:
+                    TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::constraint::location'
+                    CONFIG: 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::cinder::volume_bundle'
+            image: *cinder_volume_image
+            volumes:
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /etc/puppet:/tmp/puppet-etc:ro
+              - /usr/share/openstack-puppet/modules:/usr/share/openstack-puppet/modules:ro
+              - /etc/corosync/corosync.conf:/etc/corosync/corosync.conf:ro
+              - /dev/shm:/dev/shm:rw
+      host_prep_tasks:
+        - name: create persistent directories
+          file:
+            path: "{{ item }}"
+            state: directory
+          with_items:
+            - /var/log/containers/cinder
+            - /var/lib/cinder
+        #FIXME: all of this should be conditional on the CinderEnableIscsiBackend value being set to true
+        - name: cinder create LVM volume group dd
+          command:
+            list_join:
+            - ''
+            - - 'dd if=/dev/zero of=/var/lib/cinder/cinder-volumes bs=1 count=0 seek='
+              - str_replace:
+                  template: VALUE
+                  params:
+                    VALUE: {get_param: CinderLVMLoopDeviceSize}
+              - 'M'
+          args:
+            creates: /var/lib/cinder/cinder-volumes
+        - name: cinder create LVM volume group
+          shell: |
+            if ! losetup /dev/loop2; then
+              losetup /dev/loop2 /var/lib/cinder/cinder-volumes
+            fi
+            if ! pvdisplay | grep cinder-volumes; then
+              pvcreate /dev/loop2
+            fi
+            if ! vgdisplay | grep cinder-volumes; then
+              vgcreate cinder-volumes /dev/loop2
+            fi
+          args:
+            executable: /bin/bash
+            creates: /dev/loop2
+      upgrade_tasks:
+        - name: Stop and disable cinder_volume service
+          tags: step2
+          service: name=openstack-cinder-volume state=stopped enabled=no
index ae19652..7557afd 100644 (file)
@@ -60,11 +60,7 @@ outputs:
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
-      step_config:
-          list_join:
-            - "\n"
-            - - &noop_pcmk "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
-              - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
+      step_config: ""
       service_config_settings: {get_attr: [HAProxyBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
       puppet_config:
@@ -74,8 +70,8 @@ outputs:
           list_join:
             - "\n"
             - - "exec {'wait-for-settle': command => '/bin/true' }"
-              - &noop_firewall "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
-              - *noop_pcmk
+              - "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
+              - "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
               - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
         config_image: *haproxy_image
       kolla_config:
@@ -88,6 +84,7 @@ outputs:
             detach: false
             net: host
             user: root
+            privileged: true
             command:
               - '/bin/bash'
               - '-c'
@@ -98,14 +95,20 @@ outputs:
                       - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
                         - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
                   params:
-                    TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+                    TAGS: 'tripleo::firewall::rule,pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
                     CONFIG:
                       list_join:
                         - ';'
-                        - - *noop_firewall
-                          - 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::haproxy_bundle'
+                        - - 'include ::tripleo::profile::base::pacemaker'
+                          - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
             image: *haproxy_image
             volumes:
+              # puppet saves iptables rules in /etc/sysconfig
+              - /etc/sysconfig:/etc/sysconfig:rw
+              # saving rules requires access to /usr/libexec/iptables/iptables.init; just bind-mount
+              # the necessary bit and prevent systemd from trying to reload the service in the container
+              - /usr/libexec/iptables:/usr/libexec/iptables:ro
+              - /usr/libexec/initscripts/legacy-actions:/usr/libexec/initscripts/legacy-actions:ro
               - /etc/hosts:/etc/hosts:ro
               - /etc/localtime:/etc/localtime:ro
               - /etc/puppet:/tmp/puppet-etc:ro
index c381c0d..585148e 100644 (file)
@@ -87,16 +87,15 @@ outputs:
               owner: panko:panko
               recurse: true
       docker_config:
-        step_3:
+        step_2:
           panko_init_log:
-            start_order: 0
             image: *panko_image
             user: root
             volumes:
               - /var/log/containers/panko:/var/log/panko
             command: ['/bin/bash', '-c', 'mkdir -p /var/log/httpd; chown -R panko:panko /var/log/panko']
+        step_3:
           panko_db_sync:
-            start_order: 1
             image: *panko_image
             net: host
             detach: false
index 609aec0..06d663c 100644 (file)
@@ -89,6 +89,7 @@ outputs:
         step_1:
           rabbitmq_init_logs:
             start_order: 0
+            detach: false
             image: *rabbitmq_image
             privileged: false
             user: root
@@ -97,6 +98,7 @@ outputs:
             command: ['/bin/bash', '-c', 'chown -R rabbitmq:rabbitmq /var/log/rabbitmq']
           rabbitmq_bootstrap:
             start_order: 1
+            detach: false
             image: *rabbitmq_image
             net: host
             privileged: false
diff --git a/docker/services/sahara-api.yaml b/docker/services/sahara-api.yaml
new file mode 100644 (file)
index 0000000..1067079
--- /dev/null
@@ -0,0 +1,119 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Sahara service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerSaharaApiImage:
+    description: image
+    default: 'centos-binary-sahara-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  SaharaApiPuppetBase:
+    type: ../../puppet/services/sahara-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Sahara API role.
+    value:
+      service_name: {get_attr: [SaharaApiPuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [SaharaApiPuppetBase, role_data, config_settings]
+          - sahara::sync_db: false
+      step_config: &step_config
+        get_attr: [SaharaApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [SaharaApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: sahara
+        puppet_tags: sahara_api_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+        step_config: *step_config
+        config_image: &sahara_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaApiImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/sahara-api.json:
+          command: /usr/bin/sahara-api --config-file /etc/sahara/sahara.conf
+          permissions:
+            - path: /var/lib/sahara
+              owner: sahara:sahara
+              recurse: true
+            - path: /var/log/sahara
+              owner: sahara:sahara
+              recurse: true
+      docker_config:
+        step_3:
+          sahara_db_sync:
+            image: *sahara_image
+            net: host
+            privileged: false
+            detach: false
+            volumes: &sahara_volumes
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/sahara-api.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+                  - /lib/modules:/lib/modules:ro
+                  - /var/lib/sahara:/var/lib/sahara
+                  - /var/log/containers/sahara:/var/log/sahara
+            command: "/usr/bin/bootstrap_host_exec sahara_api su sahara -s /bin/bash -c 'sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head'"
+        step_4:
+          sahara_api:
+            image: *sahara_image
+            net: host
+            privileged: false
+            restart: always
+            volumes: *sahara_volumes
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create /var/lib/sahara
+          file:
+            path: /var/lib/sahara
+            state: directory
+        - name: create persistent sahara logs directory
+          file:
+            path: /var/log/containers/sahara
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable sahara_api service
+          tags: step2
+          service: name=openstack-sahara-api state=stopped enabled=no
diff --git a/docker/services/sahara-engine.yaml b/docker/services/sahara-engine.yaml
new file mode 100644 (file)
index 0000000..41b5790
--- /dev/null
@@ -0,0 +1,110 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Sahara service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerSaharaEngineImage:
+    description: image
+    default: 'centos-binary-sahara-engine:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  SaharaEnginePuppetBase:
+    type: ../../puppet/services/sahara-engine.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Sahara Engine role.
+    value:
+      service_name: {get_attr: [SaharaEnginePuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [SaharaEnginePuppetBase, role_data, config_settings]
+          - sahara::sync_db: false
+      step_config: &step_config
+        get_attr: [SaharaEnginePuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [SaharaEnginePuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: sahara
+        puppet_tags: sahara_engine_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+        step_config: *step_config
+        config_image: &sahara_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaEngineImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/sahara-engine.json:
+          command: /usr/bin/sahara-engine --config-file /etc/sahara/sahara.conf
+          permissions:
+            - path: /var/lib/sahara
+              owner: sahara:sahara
+              recurse: true
+            - path: /var/log/sahara
+              owner: sahara:sahara
+              recurse: true
+      docker_config:
+        step_4:
+          sahara_engine:
+            image: *sahara_image
+            net: host
+            privileged: false
+            restart: always
+            volumes: &sahara_volumes
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/sahara-engine.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+                  - /var/lib/sahara:/var/lib/sahara
+                  - /var/log/containers/sahara:/var/log/sahara
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create /var/lib/sahara
+          file:
+            path: /var/lib/sahara
+            state: directory
+        - name: create persistent sahara logs directory
+          file:
+            path: /var/log/containers/sahara
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable sahara_engine service
+          tags: step2
+          service: name=openstack-sahara-engine state=stopped enabled=no
diff --git a/docker/services/sensu-client.yaml b/docker/services/sensu-client.yaml
new file mode 100644 (file)
index 0000000..e6bdf15
--- /dev/null
@@ -0,0 +1,131 @@
+heat_template_version: pike
+
+description: >
+  Containerized Sensu client service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerSensuClientImage:
+    description: image
+    default: 'centos-binary-sensu-client:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  SensuDockerCheckCommand:
+    type: string
+    default: |
+      for i in $(docker ps --format '{{.ID}}'); do
+        if result=$(docker inspect --format='{{.State.Health.Status}}' $i 2>/dev/null); then
+          if [ "$result" != 'healthy' ]; then
+            echo "$(docker inspect --format='{{.Name}}' $i) ($i): $(docker inspect --format='{{json .State}}' $i)" && exit 2;
+          fi
+        fi
+      done
+  SensuDockerCheckInterval:
+    type: number
+    description: The frequency in seconds the docker health check is executed.
+    default: 10
+  SensuDockerCheckHandlers:
+    default: []
+    description: The Sensu event handler to use for events
+                 created by the docker health check.
+    type: comma_delimited_list
+  SensuDockerCheckOccurrences:
+    type: number
+    description: The number of event occurrences before sensu-plugin-aware handler should take action.
+    default: 3
+  SensuDockerCheckRefresh:
+    type: number
+    description: The number of seconds sensu-plugin-aware handlers should wait before taking second action.
+    default: 90
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  SensuClientBase:
+    type: ../../puppet/services/monitoring/sensu-client.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Sensu client role.
+    value:
+      service_name: {get_attr: [SensuClientBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [SensuClientBase, role_data, config_settings]
+          - sensu::checks:
+              check-docker-health:
+                standalone: true
+                command: {get_param: SensuDockerCheckCommand}
+                interval: {get_param: SensuDockerCheckInterval}
+                handlers: {get_param: SensuDockerCheckHandlers}
+                occurrences: {get_param: SensuDockerCheckOccurrences}
+                refresh: {get_param: SensuDockerCheckRefresh}
+      step_config: &step_config
+        get_attr: [SensuClientBase, role_data, step_config]
+      service_config_settings: {get_attr: [SensuClientBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: sensu
+        puppet_tags: sensu_rabbitmq_config,sensu_client_config,sensu_check_config,sensu_check
+        step_config: *step_config
+        config_image: &sensu_client_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSensuClientImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/sensu-client.json:
+          command: /usr/bin/sensu-client -d /etc/sensu/conf.d/
+      docker_config:
+        step_3:
+          sensu_client:
+            image: *sensu_client_image
+            net: host
+            privileged: true
+            # NOTE(mmagr) kolla image changes the user to 'sensu', we need it
+            # to be root to have rw permission to docker.sock to run successfully
+            # "docker inspect" command
+            user: root
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/run/docker.sock:/var/run/docker.sock:rw
+                  - /var/lib/kolla/config_files/sensu-client.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/sensu/etc/sensu/:/etc/sensu/:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      upgrade_tasks:
+        - name: Stop and disable sensu-client service
+          tags: step2
+          service: name=sensu-client.service state=stopped enabled=no
diff --git a/docker/services/services.yaml b/docker/services/services.yaml
deleted file mode 100644 (file)
index 2ad3b63..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-heat_template_version: pike
-
-description: >
-  Utility stack to convert an array of services into a set of combined
-  role configs.
-
-parameters:
-  Services:
-    default: []
-    description: |
-        List nested stack service templates.
-    type: comma_delimited_list
-  ServiceNetMap:
-    default: {}
-    description: Mapping of service_name -> network name. Typically set
-                 via parameter_defaults in the resource registry.  This
-                 mapping overrides those in ServiceNetMapDefaults.
-    type: json
-  EndpointMap:
-    default: {}
-    description: Mapping of service endpoint -> protocol. Typically set
-                 via parameter_defaults in the resource registry.
-    type: json
-  DefaultPasswords:
-    default: {}
-    description: Mapping of service -> default password. Used to help
-                 pass top level passwords managed by Heat into services.
-    type: json
-  RoleName:
-    default: ''
-    description: Role name on which the service is applied
-    type: string
-  RoleParameters:
-    default: {}
-    description: Parameters specific to the role
-    type: json
-
-resources:
-
-  PuppetServices:
-    type: ../../puppet/services/services.yaml
-    properties:
-      Services: {get_param: Services}
-      ServiceNetMap: {get_param: ServiceNetMap}
-      EndpointMap: {get_param: EndpointMap}
-      DefaultPasswords: {get_param: DefaultPasswords}
-      RoleName: {get_param: RoleName}
-      RoleParameters: {get_param: RoleParameters}
-
-  ServiceChain:
-    type: OS::Heat::ResourceChain
-    properties:
-      resources: {get_param: Services}
-      concurrent: true
-      resource_properties:
-        ServiceNetMap: {get_param: ServiceNetMap}
-        EndpointMap: {get_param: EndpointMap}
-        DefaultPasswords: {get_param: DefaultPasswords}
-        RoleName: {get_param: RoleName}
-        RoleParameters: {get_param: RoleParameters}
-
-outputs:
-  role_data:
-    description: Combined Role data for this set of services.
-    value:
-      service_names:
-        {get_attr: [PuppetServices, role_data, service_names]}
-      monitoring_subscriptions:
-        {get_attr: [PuppetServices, role_data, monitoring_subscriptions]}
-      logging_sources:
-        {get_attr: [PuppetServices, role_data, logging_sources]}
-      logging_groups:
-        {get_attr: [PuppetServices, role_data, logging_groups]}
-      service_config_settings:
-        {get_attr: [PuppetServices, role_data, service_config_settings]}
-      config_settings:
-        {get_attr: [PuppetServices, role_data, config_settings]}
-      global_config_settings:
-        {get_attr: [PuppetServices, role_data, global_config_settings]}
-      step_config:
-        {get_attr: [ServiceChain, role_data, step_config]}
-      puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
-      kolla_config:
-        map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
-      docker_config:
-        {get_attr: [ServiceChain, role_data, docker_config]}
-      docker_puppet_tasks:
-        {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
-      host_prep_tasks:
-        yaql:
-          # Note we use distinct() here to filter any identical tasks
-          expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
-          data: {get_attr: [ServiceChain, role_data]}
-      upgrade_tasks:
-        yaql:
-          # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
-          expression: $.data.where($ != null).select($.get('upgrade_tasks')).where($ != null).flatten().distinct()
-          data: {get_attr: [ServiceChain, role_data]}
-      upgrade_batch_tasks:
-        yaql:
-          # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
-          expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
-          data: {get_attr: [ServiceChain, role_data]}
-      service_metadata_settings:
-        get_attr: [PuppetServices, role_data, service_metadata_settings]
index bfd445d..075d8d7 100644 (file)
@@ -58,6 +58,14 @@ parameters:
     default: true
     description: 'Use a local directory for Swift storage services when building rings'
     type: boolean
+  SwiftRingGetTempurl:
+    default: ''
+    description: A temporary Swift URL to download rings from.
+    type: string
+  SwiftRingPutTempurl:
+    default: ''
+    description: A temporary Swift URL to upload rings to.
+    type: string
 
 resources:
 
@@ -75,14 +83,17 @@ outputs:
     description: Role data for Swift Ringbuilder configuration in containers.
     value:
       service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]}
-      config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+      config_settings:
+        map_merge:
+          - {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+          - tripleo::profile::base::swift::ringbuilder:skip_consistency_check: true
       step_config: &step_config
         get_attr: [SwiftRingbuilderBase, role_data, step_config]
       service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
       puppet_config:
         config_volume: 'swift'
-        puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
+        puppet_tags: exec,fetch_swift_ring_tarball,extract_swift_ring_tarball,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance,create_swift_ring_tarball,upload_swift_ring_tarball
         step_config: *step_config
         config_image:
           list_join:
index 2fc99d6..df9750c 100644 (file)
@@ -82,9 +82,8 @@ outputs:
               recurse: true
       docker_config:
         # db sync runs before permissions set by kolla_config
-        step_3:
+        step_2:
           tacker_init_logs:
-            start_order: 0
             image: &tacker_image
               list_join:
                 - '/'
@@ -94,8 +93,8 @@ outputs:
             volumes:
               - /var/log/containers/tacker:/var/log/tacker
             command: ['/bin/bash', '-c', 'chown -R tacker:tacker /var/log/tacker']
+        step_3:
           tacker_db_sync:
-            start_order: 1
             image: *tacker_image
             net: host
             privileged: false
index 99e517b..681a2fe 100644 (file)
@@ -19,3 +19,4 @@ parameter_defaults:
   CinderDellScSecondarySanLogin: 'Admin'
   CinderDellScSecondarySanPassword: ''
   CinderDellScSecondaryScApiPort: 3033
+  CinderDellScExcludedDomainIp: ''
index 2740664..f1e3d96 100644 (file)
@@ -35,8 +35,6 @@ resource_registry:
   OS::TripleO::PostDeploySteps: ../docker/post.yaml
   OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
 
-  OS::TripleO::Services: ../docker/services/services.yaml
-
 parameter_defaults:
   # Defaults to 'tripleoupstream'.  Specify a local docker registry
   # Example: 192.168.24.1:8787/tripleoupstream
index 2852794..6a5ec87 100644 (file)
@@ -2,10 +2,11 @@ resource_registry:
   # This can be used when you don't want to run puppet on the host,
   # e.g atomic, but it has been replaced with OS::TripleO::Services::Docker
   # OS::TripleO::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
-  OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
   # The compute node still needs extra initialization steps
   OS::TripleO::Compute::NodeUserData: ../docker/firstboot/setup_docker_host.yaml
 
+  OS::TripleO::Services::Docker: ../puppet/services/docker.yaml
+
   #NOTE (dprince) add roles to be docker enabled as we support them
   OS::TripleO::Services::NovaLibvirt: ../docker/services/nova-libvirt.yaml
   OS::TripleO::Services::ComputeNeutronOvsAgent: ../docker/services/neutron-ovs-agent.yaml
@@ -18,7 +19,9 @@ resource_registry:
   OS::TripleO::Services::NovaApi: ../docker/services/nova-api.yaml
   OS::TripleO::Services::NovaPlacement: ../docker/services/nova-placement.yaml
   OS::TripleO::Services::NovaConductor: ../docker/services/nova-conductor.yaml
+  OS::TripleO::Services::NovaConsoleauth: ../docker/services/nova-consoleauth.yaml
   OS::TripleO::Services::NovaScheduler: ../docker/services/nova-scheduler.yaml
+  OS::TripleO::Services::NovaVncProxy: ../docker/services/nova-vnc-proxy.yaml
   OS::TripleO::Services::NeutronServer: ../docker/services/neutron-api.yaml
   OS::TripleO::Services::NeutronApi: ../docker/services/neutron-api.yaml
   OS::TripleO::Services::NeutronCorePlugin: ../docker/services/neutron-plugin-ml2.yaml
@@ -27,7 +30,6 @@ resource_registry:
   OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
   OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
   OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
-  OS::TripleO::Services::HAProxy: ../docker/services/haproxy.yaml
   OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
   OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
   OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
@@ -44,15 +46,21 @@ resource_registry:
   OS::TripleO::Services::AodhListener: ../docker/services/aodh-listener.yaml
   OS::TripleO::Services::PankoApi: ../docker/services/panko-api.yaml
   OS::TripleO::Services::CeilometerAgentCentral: ../docker/services/ceilometer-agent-central.yaml
+  OS::TripleO::Services::CeilometerAgentIpmi: ../docker/services/ceilometer-agent-ipmi.yaml
   OS::TripleO::Services::CeilometerAgentCompute: ../docker/services/ceilometer-agent-compute.yaml
   OS::TripleO::Services::CeilometerAgentNotification: ../docker/services/ceilometer-agent-notification.yaml
   OS::TripleO::Services::Horizon: ../docker/services/horizon.yaml
+  OS::TripleO::Services::Iscsid: ../docker/services/iscsid.yaml
+  OS::TripleO::Services::Multipathd: ../docker/services/multipathd.yaml
+  OS::TripleO::Services::CinderApi: ../docker/services/cinder-api.yaml
+  OS::TripleO::Services::CinderScheduler: ../docker/services/cinder-scheduler.yaml
+  # FIXME: Had to remove these to unblock containers CI. They should be put back when fixed.
+  # OS::TripleO::Services::CinderBackup: ../docker/services/cinder-backup.yaml
+  # OS::TripleO::Services::CinderVolume: ../docker/services/cinder-volume.yaml
 
   OS::TripleO::PostDeploySteps: ../docker/post.yaml
   OS::TripleO::PostUpgradeSteps: ../docker/post-upgrade.yaml
 
-  OS::TripleO::Services: ../docker/services/services.yaml
-
 parameter_defaults:
   # To specify a local docker registry, enable these
   # where 192.168.24.1 is the host running docker-distribution
index f703801..175e1fd 100644 (file)
@@ -5,6 +5,7 @@
 # For these values to take effect, one of the tls-endpoints-*.yaml environments
 # must also be used.
 parameter_defaults:
+  HorizonSecureCookies: True
   SSLCertificate: |
     The contents of your certificate go here
   SSLIntermediateCertificate: ''
diff --git a/environments/host-config-and-reboot.j2.yaml b/environments/host-config-and-reboot.j2.yaml
new file mode 100644 (file)
index 0000000..d5f69ec
--- /dev/null
@@ -0,0 +1,18 @@
+resource_registry:
+{% for role in roles %}
+  OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/host_config_and_reboot.yaml
+{% endfor %}
+
+#parameter_defaults:
+  # Note: There are no global parameters which can be applied to all roles as
+  # these configurations have to be specific to each role.
+
+  # Sample parameters for Compute and ComputeOvsDpdk roles
+  #ComputeParameters:
+    #KernelArgs: ""
+    #TunedProfileName: ""
+    #HostIsolatedCoreList: ""
+  #ComputeOvsDpdkParameters:
+    #KernelArgs: ""
+    #TunedProfileName: ""
+    #HostIsolatedCoreList: ""
diff --git a/environments/host-config-pre-network.j2.yaml b/environments/host-config-pre-network.j2.yaml
deleted file mode 100644 (file)
index c79e28b..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-resource_registry:
-# Create the registry only for roles with the word "Compute" in it. Like ComputeOvsDpdk, ComputeSriov, etc.,
-{%- for role in roles -%}
-{% if "Compute" in role.name %}
-  OS::TripleO::{{role.name}}::PreNetworkConfig: ../extraconfig/pre_network/{{role.name.lower()}}-host_config_and_reboot.yaml
-{%- endif -%}
-{% endfor %}
-
-#parameter_defaults:
-  # Sample parameters for Compute and ComputeOvsDpdk roles
-  #ComputeKernelArgs: ""
-  #ComputeTunedProfileName: ""
-  #ComputeHostCpuList: ""
-  #ComputeOvsDpdkKernelArgs: ""
-  #ComputeOvsDpdkTunedProfileName: ""
-  #ComputeOvsDpdkHostCpuList: ""
index 0f7e114..05a3a39 100644 (file)
@@ -36,3 +36,4 @@ parameter_defaults:
     - OS::TripleO::Services::NeutronVppAgent
     - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::Docker
+    - OS::TripleO::Services::Iscsid
diff --git a/environments/network-isolation.j2.yaml b/environments/network-isolation.j2.yaml
new file mode 100644 (file)
index 0000000..6a7318f
--- /dev/null
@@ -0,0 +1,37 @@
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+    {%- set _ = primary_role.pop() -%}
+    {%- set _ = primary_role.append(role) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# Enable the creation of Neutron networks for isolated Overcloud
+# traffic and configure each role to assign ports (related
+# to that role) on these networks.
+# primary role is: {{primary_role_name}}
+resource_registry:
+  # networks as defined in network_data.yaml
+  {%- for network in networks if network.enabled|default(true) %}
+  OS::TripleO::Network::{{network.name}}: ../network/{{network.name_lower|default(network.name.lower())}}.yaml
+  {%- endfor %}
+
+  # Port assignments for the VIPs
+  {%- for network in networks if network.vip %}
+  OS::TripleO::Network::Ports::{{network.name}}VipPort: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+  {%- endfor %}
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+
+  OS::TripleO::{{primary_role_name}}::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+{%- for role in roles %}
+  # Port assignments for the {{role.name}}
+  {%- for network in networks %}
+    {%- if network.name in role.networks|default([]) and network.enabled|default(true) %}
+  OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+    {%- else %}
+  OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/noop.yaml
+    {%- endif %}
+  {%- endfor %}
+{%- endfor %}
diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml
deleted file mode 100644 (file)
index a6b4b8a..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# Enable the creation of Neutron networks for isolated Overcloud
-# traffic and configure each role to assign ports (related
-# to that role) on these networks.
-resource_registry:
-  OS::TripleO::Network::External: ../network/external.yaml
-  OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
-  OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
-  OS::TripleO::Network::Storage: ../network/storage.yaml
-  OS::TripleO::Network::Tenant: ../network/tenant.yaml
-  # Management network is optional and disabled by default.
-  # To enable it, include environments/network-management.yaml
-  #OS::TripleO::Network::Management: ../network/management.yaml
-
-  # Port assignments for the VIPs
-  OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
-  OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
-  OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
-  OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
-
-  # Port assignments for the controller role
-  OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
-  OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
-  #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the compute role
-  OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml
-  #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the ceph storage role
-  OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
-  #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the swift storage role
-  OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
-  #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the block storage role
-  OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
-  #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
diff --git a/environments/neutron-ml2-ovn-ha.yaml b/environments/neutron-ml2-ovn-ha.yaml
new file mode 100644 (file)
index 0000000..c592d57
--- /dev/null
@@ -0,0 +1,24 @@
+# A Heat environment file which can be used to enable OVN
+# extensions, configured via puppet
+resource_registry:
+  OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginML2OVN
+  OS::TripleO::Services::ComputeNeutronCorePlugin: ../puppet/services/neutron-compute-plugin-ovn.yaml
+  OS::TripleO::Services::OVNDBs: ../puppet/services/pacemaker/ovn-dbs.yaml
+# Disabling Neutron services that overlap with OVN
+  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
+  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronMetadataAgent: OS::Heat::None
+  OS::TripleO::Services::NeutronDhcpAgent: OS::Heat::None
+  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
+
+parameter_defaults:
+  NeutronMechanismDrivers: ovn
+  OVNVifType: ovs
+  OVNNeutronSyncMode: log
+  OVNQosDriver: ovn-qos
+  OVNTunnelEncapType: geneve
+  NeutronEnableDHCPAgent: false
+  NeutronTypeDrivers: 'geneve,vxlan,vlan,flat'
+  NeutronNetworkType: 'geneve'
+  NeutronServicePlugins: 'qos,ovn-router'
+  NeutronVniRanges: ['1:65536', ]
diff --git a/environments/services-docker/ec2-api.yaml b/environments/services-docker/ec2-api.yaml
new file mode 100644 (file)
index 0000000..24cbb03
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::Ec2Api: ../../docker/services/ec2-api.yaml
index 6f7608c..795309f 100644 (file)
@@ -1,2 +1,3 @@
 resource_registry:
   OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
+  OS::TripleO::Services::ManilaScheduler: ../../docker/services/manila-scheduler.yaml
diff --git a/environments/services-docker/sahara.yaml b/environments/services-docker/sahara.yaml
new file mode 100644 (file)
index 0000000..d0bf9fe
--- /dev/null
@@ -0,0 +1,3 @@
+resource_registry:
+  OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+  OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
diff --git a/environments/services-docker/sensu-client.yaml b/environments/services-docker/sensu-client.yaml
new file mode 100644 (file)
index 0000000..c03104d
--- /dev/null
@@ -0,0 +1,3 @@
+
+resource_registry:
+  OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
index 07a61c2..ca55b4d 100644 (file)
@@ -1,3 +1,4 @@
 resource_registry:
   OS::TripleO::Services::UndercloudCeilometerAgentCentral: ../../docker/services/ceilometer-agent-central.yaml
   OS::TripleO::Services::UndercloudCeilometerAgentNotification: ../../docker/services/ceilometer-agent-notification.yaml
+  OS::TripleO::Services::UndercloudCeilometerAgentIpmi: ../../docker/services/ceilometer-agent-ipmi.yaml
index b131738..8359f4a 100644 (file)
@@ -1,5 +1,4 @@
 resource_registry:
   OS::TripleO::Services::IronicApi: ../../puppet/services/ironic-api.yaml
   OS::TripleO::Services::IronicConductor: ../../puppet/services/ironic-conductor.yaml
-  OS::TripleO::Services::IronicPxe: ../../puppet/services/ironic-pxe.yaml
   OS::TripleO::Services::NovaIronic: ../../puppet/services/nova-ironic.yaml
index 856bd3c..83b3249 100644 (file)
@@ -104,9 +104,9 @@ parameter_defaults:
     OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
     OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
     OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
-    PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
-    PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
-    PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+    PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+    PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+    PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'}
     SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
     SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
     SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
index a67e208..8e50297 100644 (file)
@@ -104,9 +104,9 @@ parameter_defaults:
     OctaviaAdmin: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
     OctaviaInternal: {protocol: 'http', port: '9876', host: 'IP_ADDRESS'}
     OctaviaPublic: {protocol: 'https', port: '13876', host: 'IP_ADDRESS'}
-    PankoAdmin: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
-    PankoInternal: {protocol: 'http', port: '8779', host: 'IP_ADDRESS'}
-    PankoPublic: {protocol: 'https', port: '13779', host: 'IP_ADDRESS'}
+    PankoAdmin: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+    PankoInternal: {protocol: 'http', port: '8977', host: 'IP_ADDRESS'}
+    PankoPublic: {protocol: 'https', port: '13977', host: 'IP_ADDRESS'}
     SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
     SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
     SaharaPublic: {protocol: 'https', port: '13386', host: 'IP_ADDRESS'}
index 865ed4c..84cabf1 100644 (file)
@@ -72,8 +72,8 @@ parameter_defaults:
     IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
     IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
     IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
-    IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
-    IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+    IronicInspectorAdmin: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
+    IronicInspectorInternal: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
     IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
     KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
     KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
@@ -100,9 +100,9 @@ parameter_defaults:
     OctaviaAdmin: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
     OctaviaInternal: {protocol: 'https', port: '9876', host: 'IP_ADDRESS'}
     OctaviaPublic: {protocol: 'https', port: '13876', host: 'CLOUDNAME'}
-    PankoAdmin: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
-    PankoInternal: {protocol: 'https', port: '8779', host: 'CLOUDNAME'}
-    PankoPublic: {protocol: 'https', port: '13779', host: 'CLOUDNAME'}
+    PankoAdmin: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
+    PankoInternal: {protocol: 'https', port: '8977', host: 'CLOUDNAME'}
+    PankoPublic: {protocol: 'https', port: '13977', host: 'CLOUDNAME'}
     SaharaAdmin: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
     SaharaInternal: {protocol: 'https', port: '8386', host: 'CLOUDNAME'}
     SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
index 7a2716d..559d81d 100644 (file)
@@ -18,3 +18,5 @@ parameter_defaults:
   HeatConvergenceEngine: false
   HeatMaxResourcesPerStack: -1
   HeatMaxJsonBodySize: 2097152
+  IronicInspectorInterface: br-ctlplane
+  IronicInspectorIpRange: '192.168.24.100,192.168.24.200'
@@ -28,8 +28,8 @@
           lineinfile:
             dest: /etc/tuned/cpu-partitioning-variables.conf
             regexp: '^isolated_cores=.*'
-            line: 'isolated_cores={{ _HOST_CPUS_LIST_ }}'
-          when: _HOST_CPUS_LIST_|default("") != ""
+            line: 'isolated_cores={{ _TUNED_CORES_ }}'
+          when: _TUNED_CORES_|default("") != ""
 
         - name: Tune-d provile activation
           shell: tuned-adm profile {{ _TUNED_PROFILE_NAME_ }}
@@ -52,7 +52,7 @@
         when:
           - item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') != "lo"
           # This condition will list all the interfaces except the one with valid IP (which is Provisioning network at this stage)
-          # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4']['address'] is undefined
-          - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4']['address'] is undefined
+          # Simpler Version - hostvars[inventory_hostname]['ansible_' + iface_name ]['ipv4'] is undefined
+          - hostvars[inventory_hostname]['ansible_' + item.path | regex_replace('(^.*ifcfg-)(.*)', '\\2') ]['ipv4'] is undefined
         with_items:
           - "{{ ifcfg_files.files }}"
index 658fea7..41d8f4f 100644 (file)
@@ -7,6 +7,9 @@ description: >
 parameters:
   server:
     type: string
+  # Deprecated Parameters: these configurations are deprecated in favor of role-specific parameters.
+  # Use: extraconfig/pre_network/host_config_and_reboot.yaml.
+  # Deprecated in Pike and will be removed in Queens.
   {{role}}KernelArgs:
     type: string
     default: ""
@@ -17,6 +20,13 @@ parameters:
     type: string
     default: ""
 
+parameter_groups:
+  - label: deprecated
+    parameters:
+      - {{role}}KernelArgs
+      - {{role}}TunedProfileName
+      - {{role}}HostCpusList
+
 conditions:
   param_exists:
     or:
diff --git a/extraconfig/pre_network/host_config_and_reboot.yaml b/extraconfig/pre_network/host_config_and_reboot.yaml
new file mode 100644 (file)
index 0000000..74e716a
--- /dev/null
@@ -0,0 +1,85 @@
+heat_template_version: pike
+
+description: >
+  All configurations which require reboot should be initiated via PreNetworkConfig. After
+  this configuration is completed, the corresponding node will be rebooted.
+
+parameters:
+  server:
+    type: string
+  RoleParameters:
+    type: json
+    description: Role Specific parameters
+    default: {}
+
+conditions:
+  is_host_config_required: {not: {equals: [{get_param: [RoleParameters, KernelArgs]}, ""]}}
+
+resources:
+  HostParametersConfig:
+    type: OS::Heat::SoftwareConfig
+    condition: is_host_config_required
+    properties:
+      group: ansible
+      inputs:
+        - name: _KERNEL_ARGS_
+        - name: _TUNED_PROFILE_NAME_
+        - name: _TUNED_CORES_
+      outputs:
+        - name: result
+      config:
+        get_file: ansible_host_config.yaml
+
+  HostParametersDeployment:
+    type: OS::Heat::SoftwareDeployment
+    condition: is_host_config_required
+    properties:
+      name: HostParametersDeployment
+      server:  {get_param: server}
+      config: {get_resource: HostParametersConfig}
+      actions: ['CREATE'] # Only do this on CREATE
+      input_values:
+        _KERNEL_ARGS_: {get_param: [RoleParameters, KernelArgs]}
+        _TUNED_PROFILE_NAME_: {get_param: [RoleParameters, TunedProfileName]}
+        _TUNED_CORES_: {get_param: [RoleParameters, HostIsolatedCoreList]}
+
+  RebootConfig:
+    type: OS::Heat::SoftwareConfig
+    condition: is_host_config_required
+    properties:
+      group: script
+      config: |
+        #!/bin/bash
+        # Stop os-collect-config to avoid any race collecting another
+        # deployment before reboot happens
+        systemctl stop os-collect-config.service
+        /sbin/reboot
+
+  RebootDeployment:
+    type: OS::Heat::SoftwareDeployment
+    depends_on: HostParametersDeployment
+    condition: is_host_config_required
+    properties:
+      name: RebootDeployment
+      server:  {get_param: server}
+      config: {get_resource: RebootConfig}
+      actions: ['CREATE'] # Only do this on CREATE
+      signal_transport: NO_SIGNAL
+
+outputs:
+  result:
+    condition: is_host_config_required
+    value:
+      get_attr: [HostParametersDeployment, result]
+  stdout:
+    condition: is_host_config_required
+    value:
+      get_attr: [HostParametersDeployment, deploy_stdout]
+  stderr:
+    condition: is_host_config_required
+    value:
+      get_attr: [HostParametersDeployment, deploy_stderr]
+  status_code:
+    condition: is_host_config_required
+    value:
+      get_attr: [HostParametersDeployment, deploy_status_code]
index f17a073..d1dd5d1 100755 (executable)
@@ -11,7 +11,7 @@ function log_debug {
 }
 
 function is_bootstrap_node {
-  if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+  if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid | tr '[:upper:]' '[:lower:]')" = "$(facter hostname | tr '[:upper:]' '[:lower:]')" ]; then
     log_debug "Node is bootstrap"
     echo "true"
   fi
index 7fc258d..6bf5afb 100644 (file)
@@ -10,8 +10,8 @@ parameters:
 
 resources:
 
-{%- for role in roles -%}
-{% if "controller" in role.tags %}
+{%- for role in roles %}
+  {%- if 'controller' in role.tags %}
   {{role.name}}PostPuppetMaintenanceModeConfig:
     type: OS::Heat::SoftwareConfig
     properties:
@@ -37,6 +37,6 @@ resources:
     properties:
       servers: {get_param: [servers, {{role.name}}]}
       input_values: {get_param: input_values}
-{%- endif -%}
-{% endfor %}
+  {%- endif %}
+{%- endfor %}
 
index cb9cc5b..0c4a792 100755 (executable)
@@ -49,7 +49,7 @@ fi
 # of packages to update (the check for -z "$update_identifier" guarantees that this
 # is run only on overcloud stack update -i)
 if [[ "$pacemaker_status" == "active" && \
-        "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name)" == "$(facter hostname)" ]] ; then \
+        "$(hiera -c /etc/puppet/hiera.yaml pacemaker_short_bootstrap_node_name | tr '[:upper:]' '[:lower:]')" == "$(facter hostname | tr '[:upper:]' '[:lower:]')" ]] ; then \
     # OCF scripts don't cope with -eu
     echo "Verifying if we need to fix up any IPv6 VIPs"
     set +eu
index f5f2b97..ece4008 100644 (file)
@@ -134,7 +134,7 @@ Panko:
         net_param: Public
     Admin:
         net_param: PankoApi
-    port: 8779
+    port: 8977
 
 Cinder:
     Internal:
index 4509bca..42d1fbd 100644 (file)
@@ -117,9 +117,9 @@ parameters:
       OctaviaAdmin: {protocol: http, port: '9876', host: IP_ADDRESS}
       OctaviaInternal: {protocol: http, port: '9876', host: IP_ADDRESS}
       OctaviaPublic: {protocol: http, port: '9876', host: IP_ADDRESS}
-      PankoAdmin: {protocol: http, port: '8779', host: IP_ADDRESS}
-      PankoInternal: {protocol: http, port: '8779', host: IP_ADDRESS}
-      PankoPublic: {protocol: http, port: '8779', host: IP_ADDRESS}
+      PankoAdmin: {protocol: http, port: '8977', host: IP_ADDRESS}
+      PankoInternal: {protocol: http, port: '8977', host: IP_ADDRESS}
+      PankoPublic: {protocol: http, port: '8977', host: IP_ADDRESS}
       SaharaAdmin: {protocol: http, port: '8386', host: IP_ADDRESS}
       SaharaInternal: {protocol: http, port: '8386', host: IP_ADDRESS}
       SaharaPublic: {protocol: http, port: '8386', host: IP_ADDRESS}
index d4c301b..4aee571 100644 (file)
@@ -106,7 +106,7 @@ resource_registry:
   OS::TripleO::UpgradeConfig: puppet/upgrade_config.yaml
 
   # services
-  OS::TripleO::Services: puppet/services/services.yaml
+  OS::TripleO::Services: services.yaml
   OS::TripleO::Services::Apache: puppet/services/apache.yaml
   OS::TripleO::Services::CACerts: puppet/services/ca-certs.yaml
   OS::TripleO::Services::CephMds: OS::Heat::None
@@ -200,6 +200,7 @@ resource_registry:
   # Undercloud Telemetry services
   OS::TripleO::Services::UndercloudCeilometerAgentCentral: OS::Heat::None
   OS::TripleO::Services::UndercloudCeilometerAgentNotification: OS::Heat::None
+  OS::TripleO::Services::UndercloudCeilometerAgentIpmi: OS::Heat::None
 
   #Gnocchi services
   OS::TripleO::Services::GnocchiApi: puppet/services/gnocchi-api.yaml
@@ -236,6 +237,7 @@ resource_registry:
   OS::TripleO::Services::MistralExecutor: OS::Heat::None
   OS::TripleO::Services::IronicApi: OS::Heat::None
   OS::TripleO::Services::IronicConductor: OS::Heat::None
+  OS::TripleO::Services::IronicInspector: OS::Heat::None
   OS::TripleO::Services::NovaIronic: OS::Heat::None
   OS::TripleO::Services::TripleoPackages: puppet/services/tripleo-packages.yaml
   OS::TripleO::Services::TripleoFirewall: puppet/services/tripleo-firewall.yaml
@@ -263,6 +265,7 @@ resource_registry:
   OS::TripleO::Services::NeutronVppAgent: OS::Heat::None
   OS::TripleO::Services::Docker: OS::Heat::None
   OS::TripleO::Services::CertmongerUser: OS::Heat::None
+  OS::TripleO::Services::Iscsid: OS::Heat::None
 
 parameter_defaults:
   EnablePackageInstall: false
index f8655b1..e4c04b4 100644 (file)
@@ -101,8 +101,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   ServerMetadata:
     default: {}
     description: >
@@ -446,6 +446,7 @@ resources:
           MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChainRoleData, value, monitoring_subscriptions]}
           ServiceMetadataSettings: {get_attr: [{{role.name}}ServiceChainRoleData, value, service_metadata_settings]}
           DeploymentServerBlacklistDict: {get_attr: [DeploymentServerBlacklistDict, value]}
+          RoleParameters: {get_param: {{role.name}}Parameters}
 {% endfor %}
 
 {% for role in roles %}
index baafe03..b128445 100644 (file)
@@ -12,10 +12,8 @@ parameters:
     type: string
   cloud_name_ctlplane:
     type: string
-  # FIXME(shardy) this can be comma_delimited_list when
-  # https://bugs.launchpad.net/heat/+bug/1617019 is fixed
   enabled_services:
-    type: string
+    type: comma_delimited_list
   controller_ips:
     type: comma_delimited_list
   logging_groups:
@@ -118,7 +116,10 @@ resources:
            map_merge:
               - tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
               - tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
-              - enabled_services: {get_param: enabled_services}
+              - enabled_services:
+                  yaql:
+                    expression: $.data.distinct()
+                    data: {get_param: enabled_services}
               # This writes out a mapping of service_name_enabled: 'true'
               # For any services not enabled, hiera foo_enabled will
               # return nil, as it's undefined
@@ -129,8 +130,7 @@ resources:
                       # https://bugs.launchpad.net/heat/+bug/1617203
                       SERVICE_enabled: 'true'
                     for_each:
-                      SERVICE:
-                        str_split: [',', {get_param: enabled_services}]
+                      SERVICE: {get_param: enabled_services}
               # Dynamically generate per-service network data
               # This works as follows (outer->inner functions)
               # yaql - filters services where no mapping exists in ServiceNetMap
@@ -150,8 +150,7 @@ resources:
                               template:
                                 SERVICE_network: SERVICE_network
                               for_each:
-                                SERVICE:
-                                  str_split: [',', {get_param: enabled_services}]
+                                SERVICE: {get_param: enabled_services}
                         - values: {get_param: ServiceNetMap}
               # Keystone doesn't provide separate entries for the public
               # and admin endpoints, so we need to add them here manually
@@ -203,8 +202,7 @@ resources:
                                   template:
                                     SERVICE_vip: SERVICE_network
                                   for_each:
-                                    SERVICE:
-                                      str_split: [',', {get_param: enabled_services}]
+                                    SERVICE: {get_param: enabled_services}
                             - values: {get_param: ServiceNetMap}
                         - values: {get_param: NetVipMap}
               - keystone_admin_api_vip:
index 60ddeb8..551a88c 100644 (file)
@@ -69,8 +69,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   BlockStorageServerMetadata:
     default: {}
     description: >
@@ -139,6 +139,10 @@ parameters:
       Map of server hostnames to blacklist from any triggered
       deployments. If the value is 1, the server will be blacklisted. This
       parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+    default: {}
 
 conditions:
   server_not_blacklisted:
@@ -372,6 +376,7 @@ resources:
     type: OS::TripleO::BlockStorage::PreNetworkConfig
     properties:
       server: {get_resource: BlockStorage}
+      RoleParameters: {get_param: RoleParameters}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -466,6 +471,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
index 9d30ab2..4336f3e 100644 (file)
@@ -75,8 +75,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   CephStorageServerMetadata:
     default: {}
     description: >
@@ -145,6 +145,10 @@ parameters:
       Map of server hostnames to blacklist from any triggered
       deployments. If the value is 1, the server will be blacklisted. This
       parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+    default: {}
 
 conditions:
   server_not_blacklisted:
@@ -378,6 +382,7 @@ resources:
     type: OS::TripleO::CephStorage::PreNetworkConfig
     properties:
       server: {get_resource: CephStorage}
+      RoleParameters: {get_param: RoleParameters}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -471,6 +476,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
index 06a31ec..7bcee47 100644 (file)
@@ -90,8 +90,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   NovaComputeServerMetadata:
     default: {}
     description: >
@@ -157,6 +157,10 @@ parameters:
       Map of server hostnames to blacklist from any triggered
       deployments. If the value is 1, the server will be blacklisted. This
       parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+    default: {}
 
 conditions:
   server_not_blacklisted:
@@ -381,6 +385,7 @@ resources:
     type: OS::TripleO::Compute::PreNetworkConfig
     properties:
       server: {get_resource: NovaCompute}
+      RoleParameters: {get_param: RoleParameters}
 
   NetworkConfig:
     type: OS::TripleO::Compute::Net::SoftwareConfig
@@ -479,6 +484,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   NovaComputeDeployment:
     type: OS::TripleO::SoftwareDeployment
index cccfdef..6a03265 100644 (file)
@@ -104,8 +104,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   ControllerServerMetadata:
     default: {}
     description: >
@@ -171,6 +171,10 @@ parameters:
       Map of server hostnames to blacklist from any triggered
       deployments. If the value is 1, the server will be blacklisted. This
       parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+    default: {}
 
 parameter_groups:
 - label: deprecated
@@ -401,6 +405,7 @@ resources:
     type: OS::TripleO::Controller::PreNetworkConfig
     properties:
       server: {get_resource: Controller}
+      RoleParameters: {get_param: RoleParameters}
 
   NetworkConfig:
     type: OS::TripleO::Controller::Net::SoftwareConfig
@@ -541,6 +546,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Hook for site-specific additional pre-deployment config, e.g extra hieradata
   ControllerExtraConfigPre:
index 19ea1b6..40a5d44 100644 (file)
@@ -69,8 +69,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   SwiftStorageServerMetadata:
     default: {}
     description: >
@@ -139,6 +139,10 @@ parameters:
       Map of server hostnames to blacklist from any triggered
       deployments. If the value is 1, the server will be blacklisted. This
       parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+    default: {}
 
 conditions:
   server_not_blacklisted:
@@ -372,6 +376,7 @@ resources:
     type: OS::TripleO::ObjectStorage::PreNetworkConfig
     properties:
       server: {get_resource: SwiftStorage}
+      RoleParameters: {get_param: RoleParameters}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -451,6 +456,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   SwiftStorageHieraDeploy:
     type: OS::Heat::StructuredDeployment
index 360c633..5567d65 100644 (file)
         update_identifier: {get_param: DeployIdentifier}
 {% endfor %}
 
-  {{role.name}}PostConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PostConfig
+  # Note, this should be the last step to execute configuration changes.
+  # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+  # after all the previous deployment steps.
+  {{role.name}}ExtraConfigPost:
     depends_on:
   {% for dep in roles %}
       - {{dep.name}}Deployment_Step5
   {% endfor %}
+    type: OS::TripleO::NodeExtraConfigPost
     properties:
-      servers: {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
+        servers: {get_param: [servers, {{role.name}}]}
 
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  {{role.name}}ExtraConfigPost:
+  # The {{role.name}}PostConfig steps are in charge of
+  # quiescing all services, i.e. in the Controller case,
+  # we should run a full service reload.
+  {{role.name}}PostConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PostConfig
     depends_on:
   {% for dep in roles %}
-      - {{dep.name}}PostConfig
+      - {{dep.name}}ExtraConfigPost
   {% endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
     properties:
-        servers: {get_param: [servers, {{role.name}}]}
+      servers: {get_param: servers}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
 
 {% endfor %}
index 7af90e2..f3b4b6b 100644 (file)
@@ -85,8 +85,8 @@ parameters:
     default: 'localdomain'
     type: string
     description: >
-      The DNS domain used for the hosts. This should match the dhcp_domain
-      configured in the Undercloud neutron. Defaults to localdomain.
+      The DNS domain used for the hosts. This must match the
+      overcloud_domain_name configured on the undercloud.
   {{role}}ServerMetadata:
     default: {}
     description: >
@@ -161,6 +161,10 @@ parameters:
       Map of server hostnames to blacklist from any triggered
       deployments. If the value is 1, the server will be blacklisted. This
       parameter is generated from the parent template.
+  RoleParameters:
+    type: json
+    description: Role Specific Parameters
+    default: {}
 
 conditions:
   server_not_blacklisted:
@@ -394,6 +398,7 @@ resources:
     type: OS::TripleO::{{role}}::PreNetworkConfig
     properties:
       server: {get_resource: {{role}}}
+      RoleParameters: {get_param: RoleParameters}
 
   NetworkDeployment:
     type: OS::TripleO::SoftwareDeployment
@@ -493,6 +498,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
index a201134..c0bffb1 100644 (file)
@@ -61,6 +61,9 @@ parameters:
   CinderDellScSecondaryScApiPort:
     type: number
     default: 3033
+  CinderDellScExcludedDomainIp:
+    type: string
+    default: ''
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -105,5 +108,6 @@ outputs:
                 cinder::backend::dellsc_iscsi::secondary_san_login: {get_param: CinderDellScSecondarySanLogin}
                 cinder::backend::dellsc_iscsi::secondary_san_password: {get_param: CinderDellScSecondarySanPassword}
                 cinder::backend::dellsc_iscsi::secondary_sc_api_port: {get_param: CinderDellScSecondaryScApiPort}
+                cinder::backend::dellsc_iscsi::excluded_domain_ip: {get_param: CinderDellScExcludedDomainIp}
       step_config: |
         include ::tripleo::profile::base::cinder::volume
index bddc8e1..fbde4c0 100644 (file)
@@ -93,6 +93,12 @@ parameters:
   CinderNetappWebservicePath:
     type: string
     default: '/devmgr/v2'
+  CinderNetappNasSecureFileOperations:
+    type: string
+    default: 'false'
+  CinderNetappNasSecureFilePermissions:
+    type: string
+    default: 'false'
   # DEPRECATED options for compatibility with older versions
   CinderNetappEseriesHostType:
     type: string
@@ -133,5 +139,7 @@ outputs:
         cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
         cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
         cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+        cinder::backend::netapp::nas_secure_file_operations: {get_param: CinderNetappNasSecureFileOperations}
+        cinder::backend::netapp::nas_secure_file_permissions: {get_param: CinderNetappNasSecureFilePermissions}
       step_config: |
         include ::tripleo::profile::base::cinder::volume
index fe95222..1f8c345 100644 (file)
@@ -40,6 +40,20 @@ parameters:
       NFS servers used by Cinder NFS backend. Effective when
       CinderEnableNfsBackend is true.
     type: comma_delimited_list
+  CinderNasSecureFileOperations:
+    default: false
+    description: >
+      Controls whether security enhanced NFS file operations are enabled.
+      Valid values are 'auto', 'true' or 'false'. Effective when
+      CinderEnableNfsBackend is true.
+    type: string
+  CinderNasSecureFilePermissions:
+    default: false
+    description: >
+      Controls whether security enhanced NFS file permissions are enabled.
+      Valid values are 'auto', 'true' or 'false'. Effective when
+      CinderEnableNfsBackend is true.
+    type: string
   CinderRbdPoolName:
     default: volumes
     type: string
@@ -105,6 +119,8 @@ outputs:
             tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
             tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
             tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
+            tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_operations: {get_param: CinderNasSecureFileOperations}
+            tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_permissions: {get_param: CinderNasSecureFilePermissions}
             tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
             tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
             tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
index f4067ef..b4af7e8 100644 (file)
@@ -34,6 +34,10 @@ parameters:
     default: 30
     description: Delay between processing metrics.
     type: number
+  NumberOfStorageSacks:
+    default: 128
+    description: Number of storage sacks to create.
+    type: number
   GnocchiPassword:
     description: The password for the gnocchi service and db account.
     type: string
@@ -87,7 +91,11 @@ outputs:
             query:
               read_default_file: /etc/my.cnf.d/tripleo.cnf
               read_default_group: tripleo
-        gnocchi::db::sync::extra_opts: ''
+        gnocchi::db::sync::extra_opts:
+          str_replace:
+            template: " --sacks-number NUM_SACKS"
+            params:
+              NUM_SACKS: {get_param: NumberOfStorageSacks}
         gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
         gnocchi::storage::swift::swift_user: 'service:gnocchi'
         gnocchi::storage::swift::swift_auth_version: 3
index 93bced8..092d072 100644 (file)
@@ -55,7 +55,7 @@ parameters:
   HorizonSecureCookies:
     description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon
     type: boolean
-    default: true
+    default: false
   MemcachedIPv6:
     default: false
     description: Enable IPv6 features in Memcached.
index b167671..0e8c8e1 100644 (file)
@@ -164,6 +164,12 @@ outputs:
             ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
             ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
             # Credentials to access other services
+            ironic::cinder::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::cinder::username: 'ironic'
+            ironic::cinder::password: {get_param: IronicPassword}
+            ironic::cinder::project_name: 'service'
+            ironic::cinder::user_domain_name: 'Default'
+            ironic::cinder::project_domain_name: 'Default'
             ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             ironic::glance::username: 'ironic'
             ironic::glance::password: {get_param: IronicPassword}
diff --git a/puppet/services/ironic-inspector.yaml b/puppet/services/ironic-inspector.yaml
new file mode 100644 (file)
index 0000000..e8537a2
--- /dev/null
@@ -0,0 +1,151 @@
+heat_template_version: ocata
+
+description: >
+  OpenStack Ironic Inspector configured with Puppet (EXPERIMENTAL)
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  MonitoringSubscriptionIronicInspector:
+    default: 'overcloud-ironic-inspector'
+    type: string
+  KeystoneRegion:
+    type: string
+    default: 'regionOne'
+    description: Keystone region for endpoint
+  Debug:
+    default: ''
+    description: Set to True to enable debugging on all services.
+    type: string
+  IronicInspectorInterface:
+    default: br-ex
+    description: |
+      Network interface on which inspection dnsmasq will listen. Should allow
+      access to untagged traffic from nodes booted for inspection. The default
+      value only makes sense if you don't modify any networking configuration.
+    type: string
+  IronicInspectorIPXEEnabled:
+    default: true
+    description: Whether to use iPXE for inspection.
+    type: boolean
+  IronicInspectorIpRange:
+    description: |
+        Temporary IP range that will be given to nodes during the inspection
+        process. This should not overlap with any range that Neutron's DHCP
+        gives away, but it has to be routable back to ironic-inspector API.
+        This option has no meaningful defaults, and thus is required.
+    type: string
+  IronicInspectorUseSwift:
+    default: true
+    description: Whether to use Swift for storing introspection data.
+    type: boolean
+  IronicIPXEPort:
+    default: 8088
+    description: Port to use for serving images when iPXE is used.
+    type: string
+  IronicPassword:
+    description: The password for the Ironic service and db account, used by the Ironic services
+    type: string
+    hidden: true
+
+conditions:
+  enable_ipxe: {equals : [{get_param: IronicInspectorIPXEEnabled}, true]}
+  use_swift: {equals : [{get_param: IronicInspectorUseSwift}, true]}
+
+outputs:
+  role_data:
+    description: Role data for the Ironic Inspector role.
+    value:
+      service_name: ironic_inspector
+      monitoring_subscription: {get_param: MonitoringSubscriptionIronicInspector}
+      config_settings:
+        map_merge:
+          - ironic::inspector::listen_address: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+            ironic::inspector::dnsmasq_local_ip: {get_param: [ServiceNetMap, IronicInspectorNetwork]}
+            ironic::inspector::dnsmasq_ip_range: {get_param: IronicInspectorIpRange}
+            ironic::inspector::dnsmasq_interface: {get_param: IronicInspectorInterface}
+            ironic::inspector::debug: {get_param: Debug}
+            ironic::inspector::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
+            ironic::inspector::authtoken::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::inspector::authtoken::username: 'ironic'
+            ironic::inspector::authtoken::password: {get_param: IronicPassword}
+            ironic::inspector::authtoken::project_name: 'service'
+            ironic::inspector::authtoken::user_domain_name: 'Default'
+            ironic::inspector::authtoken::project_domain_name: 'Default'
+            tripleo.ironic_inspector.firewall_rules:
+              '137 ironic-inspector':
+                dport:
+                  - 5050
+            ironic::inspector::ironic_username: 'ironic'
+            ironic::inspector::ironic_password: {get_param: IronicPassword}
+            ironic::inspector::ironic_tenant_name: 'service'
+            ironic::inspector::ironic_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::inspector::ironic_max_retries: 6
+            ironic::inspector::ironic_retry_interval: 10
+            ironic::inspector::ironic_user_domain_name: 'Default'
+            ironic::inspector::ironic_project_domain_name: 'Default'
+            ironic::inspector::http_port: {get_param: IronicIPXEPort}
+            ironic::inspector::db::database_connection:
+              list_join:
+                - ''
+                - - {get_param: [EndpointMap, MysqlInternal, protocol]}
+                  - '://ironic-inspector:'
+                  - {get_param: IronicPassword}
+                  - '@'
+                  - {get_param: [EndpointMap, MysqlInternal, host]}
+                  - '/ironic-inspector'
+                  - '?read_default_file=/etc/my.cnf.d/tripleo.cnf&read_default_group=tripleo'
+          -
+            if:
+            - enable_ipxe
+            - ironic::inspector::pxe_transfer_protocol: 'http'
+            - {}
+          -
+            if:
+            - use_swift
+            - ironic::inspector::store_data: 'swift'
+              ironic::inspector::swift_username: 'ironic'
+              ironic::inspector::swift_password: {get_param: IronicPassword}
+              ironic::inspector::swift_tenant_name: 'service'
+              ironic::inspector::swift_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+              ironic::inspector::swift_user_domain_name: 'Default'
+              ironic::inspector::swift_project_domain_name: 'Default'
+            - {}
+      step_config: |
+        include ::tripleo::profile::base::ironic_inspector
+      service_config_settings:
+        keystone:
+          ironic::keystone::auth_inspector::tenant: 'service'
+          ironic::keystone::auth_inspector::public_url: {get_param: [EndpointMap, IronicInspectorPublic, uri]}
+          ironic::keystone::auth_inspector::internal_url: {get_param: [EndpointMap, IronicInspectorInternal, uri]}
+          ironic::keystone::auth_inspector::admin_url: {get_param: [EndpointMap, IronicInspectorAdmin, uri]}
+          ironic::keystone::auth_inspector::password: {get_param: IronicPassword}
+          ironic::keystone::auth_inspector::region: {get_param: KeystoneRegion}
+        mysql:
+          ironic::inspector::db::mysql::password: {get_param: IronicPassword}
+          ironic::inspector::db::mysql::user: ironic-inspector
+          ironic::inspector::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          ironic::inspector::db::mysql::dbname: ironic-inspector
+          ironic::inspector::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index f3a9cbc..60d194b 100644 (file)
@@ -113,10 +113,27 @@ parameters:
     description: The second Keystone credential key. Must be a valid key.
   KeystoneFernetKey0:
     type: string
-    description: The first Keystone fernet key. Must be a valid key.
+    default: ''
+    description: (DEPRECATED) The first Keystone fernet key. Must be a valid key.
   KeystoneFernetKey1:
     type: string
-    description: The second Keystone fernet key. Must be a valid key.
+    default: ''
+    description: (DEPRECATED) The second Keystone fernet key. Must be a valid key.
+  KeystoneFernetKeys:
+    type: json
+    description: Mapping containing keystone's fernet keys and their paths.
+  KeystoneFernetMaxActiveKeys:
+    type: number
+    description: The maximum active keys in the keystone fernet key repository.
+    default: 5
+  ManageKeystoneFernetKeys:
+    type: boolean
+    default: true
+    description: Whether TripleO should manage the keystone fernet keys or not.
+                 If set to true, the fernet keys will get the values from the
+                 saved keys repository in mistral (the KeystoneFernetKeys
+                 variable). If set to false, only the stack creation
+                 initializes the keys, but subsequent updates won't touch them.
   KeystoneLoggingSource:
     type: json
     default:
@@ -187,6 +204,17 @@ parameters:
     default: {}
     hidden: true
 
+parameter_groups:
+- label: deprecated
+  description: |
+   The following parameters are deprecated and will be removed. They should not
+   be relied on for new deployments. If you have concerns regarding deprecated
+   parameters, please contact the TripleO development team on IRC or the
+   OpenStack mailing list.
+  parameters:
+  - KeystoneFernetKey0
+  - KeystoneFernetKey1
+
 resources:
 
   ApacheServiceBase:
@@ -234,6 +262,7 @@ outputs:
             keystone_ssl_certificate_key: {get_param: KeystoneSSLCertificateKey}
             keystone::token_provider: {get_param: KeystoneTokenProvider}
             keystone::enable_fernet_setup: {if: [keystone_fernet_tokens, true, false]}
+            keystone::fernet_max_active_keys: {get_param: KeystoneFernetMaxActiveKeys}
             keystone::enable_proxy_headers_parsing: true
             keystone::enable_credential_setup: true
             keystone::credential_keys:
@@ -241,12 +270,8 @@ outputs:
                 content: {get_param: KeystoneCredential0}
               '/etc/keystone/credential-keys/1':
                 content: {get_param: KeystoneCredential1}
-            keystone::fernet_keys:
-              '/etc/keystone/fernet-keys/0':
-                content: {get_param: KeystoneFernetKey0}
-              '/etc/keystone/fernet-keys/1':
-                content: {get_param: KeystoneFernetKey1}
-            keystone::fernet_replace_keys: false
+            keystone::fernet_keys: {get_param: KeystoneFernetKeys}
+            keystone::fernet_replace_keys: {get_param: ManageKeystoneFernetKeys}
             keystone::debug:
               if:
               - service_debug_unset
index c124d1e..090640e 100644 (file)
@@ -65,6 +65,24 @@ resources:
       RoleName: {get_param: RoleName}
       RoleParameters: {get_param: RoleParameters}
 
+  # Merging role-specific parameters (RoleParameters) with the default parameters.
+  # RoleParameters will have the precedence over the default parameters.
+  RoleParametersValue:
+    type: OS::Heat::Value
+    properties:
+      type: json
+      value:
+        map_replace:
+          - map_replace:
+            - neutron::agents::ml2::sriov::physical_device_mappings: NeutronPhysicalDevMappings
+              neutron::agents::ml2::sriov::exclude_devices: NeutronExcludeDevices
+              tripleo::host::sriov::number_of_vfs: NeutronSriovNumVFs
+            - values: {get_param: [RoleParameters]}
+          - values:
+              NeutronPhysicalDevMappings: {get_param: NeutronPhysicalDevMappings}
+              NeutronExcludeDevices: {get_param: NeutronExcludeDevices}
+              NeutronSriovNumVFs: {get_param: NeutronSriovNumVFs}
+
 outputs:
   role_data:
     description: Role data for the Neutron SR-IOV nic agent service.
@@ -73,8 +91,6 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
-          - neutron::agents::ml2::sriov::physical_device_mappings: {get_param: NeutronPhysicalDevMappings}
-            neutron::agents::ml2::sriov::exclude_devices: {get_param: NeutronExcludeDevices}
-            tripleo::host::sriov::number_of_vfs: {get_param: NeutronSriovNumVFs}
+          - get_attr: [RoleParametersValue, value]
       step_config: |
         include ::tripleo::profile::base::neutron::sriov
index 835edf0..fe2f294 100644 (file)
@@ -210,7 +210,7 @@ outputs:
           register: bootstrap_node
         - name: set is_bootstrap_node fact
           tags: common
-          set_fact: is_bootstrap_node={{bootstrap_node.stdout == ansible_hostname}}
+          set_fact: is_bootstrap_node={{bootstrap_node.stdout|lower == ansible_hostname|lower}}
         - name: Extra migration for nova tripleo/+bug/1656791
           tags: step0,pre-upgrade
           when: is_bootstrap_node
index 20c38d8..df234c7 100644 (file)
@@ -44,6 +44,7 @@ outputs:
           ovn::northbound::port: {get_param: OVNNorthboundServerPort}
           ovn::southbound::port: {get_param: OVNSouthboundServerPort}
           ovn::northd::dbs_listen_ip: {get_param: [ServiceNetMap, OvnDbsNetwork]}
+          tripleo::haproxy::ovn_dbs_manage_lb: true
           tripleo.ovn_dbs.firewall_rules:
             '121 OVN DB server ports':
               proto: 'tcp'
diff --git a/puppet/services/pacemaker/ovn-dbs.yaml b/puppet/services/pacemaker/ovn-dbs.yaml
new file mode 100644 (file)
index 0000000..1cbb476
--- /dev/null
@@ -0,0 +1,61 @@
+heat_template_version: ocata
+
+description: >
+  OVN databases configured with puppet in HA mode
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  OVNNorthboundServerPort:
+    description: Port of the OVN Northbound DB server
+    type: number
+    default: 6641
+  OVNSouthboundServerPort:
+    description: Port of the OVN Southbound DB server
+    type: number
+    default: 6642
+
+resources:
+
+  OVNDBsBase:
+    type: ../ovn-dbs.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+      RoleName: {get_param: RoleName}
+      RoleParameters: {get_param: RoleParameters}
+
+outputs:
+  role_data:
+    description: Role data for the OVN northd service
+    value:
+      service_name: ovn_dbs
+      config_settings:
+        map_merge:
+          - get_attr: [OVNDBsBase, role_data, config_settings]
+          - tripleo::haproxy::ovn_dbs_manage_lb: false
+            tripleo::profile::pacemaker::ovn_northd::nb_db_port: {get_param: OVNNorthboundServerPort}
+            tripleo::profile::pacemaker::ovn_northd::sb_db_port: {get_param: OVNSouthboundServerPort}
+      step_config: |
+        include ::tripleo::profile::pacemaker::ovn_northd
index a41e34f..0289b7a 100644 (file)
@@ -84,8 +84,8 @@ outputs:
             tripleo.panko_api.firewall_rules:
               '140 panko-api':
                 dport:
-                  - 8779
-                  - 13779
+                  - 8977
+                  - 13977
             panko::api::host:
               str_replace:
                 template:
diff --git a/releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml b/releasenotes/notes/Introduce-ManageKeystoneFernetKeys-parameter-2478cf5fc5e64256.yaml
new file mode 100644 (file)
index 0000000..64a4d7e
--- /dev/null
@@ -0,0 +1,6 @@
+---
+features:
+  - This introduces the ManageKeystoneFernetKeys parameter, which tells
+    heat/puppet if it should replace the existing fernet keys on a stack
+    deployment or not. This is useful if the deployer wants to do key rotations
+    out of band.
diff --git a/releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml b/releasenotes/notes/Use-KeystoneFernetKeys-parameter-bd635a106bb8e00f.yaml
new file mode 100644 (file)
index 0000000..1e2673f
--- /dev/null
@@ -0,0 +1,10 @@
+---
+features:
+  - The KeystoneFernetKeys parameter was introduced, which is able to take any
+    amount of keys as long as it's in the right format. It's generated by the
+    same mechanism as the rest of the passwords, so its value is also
+    available via mistral's "password" environment variable. This will also
+    allow for rotations to be made via mistral and via stack updates.
+deprecations:
+  - The individual keystone fernet key parameters (KeystoneFernetKey0 and
+    KeystoneFernetKey1) were deprecated in favor of KeystoneFernetKeys.
diff --git a/releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml b/releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml
new file mode 100644 (file)
index 0000000..73b9f9c
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - Add parameters to control the Cinder NAS security settings associated
+    with the NFS and NetApp Cinder back ends. The settings are disabled
+    by default.
diff --git a/releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml b/releasenotes/notes/baremetal-role-34cb48cc30d7bdb4.yaml
new file mode 100644 (file)
index 0000000..5117642
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Add an example role ``roles/IronicConductor.yaml`` for a node with only
+    ironic-conductor and its (i)PXE service.
diff --git a/releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml b/releasenotes/notes/change-panko-api-port-fb8967420cd036b1.yaml
new file mode 100644 (file)
index 0000000..353d16d
--- /dev/null
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Changed the panko API port to run on 8977 instead of 8779, since port
+    8779 is reserved for trove; the new port avoids that conflict.
diff --git a/releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml b/releasenotes/notes/ironic-inspector-43441782bdf0f84e.yaml
new file mode 100644 (file)
index 0000000..1fbdd1f
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Add basic support for **ironic-inspector** in the overcloud. It is highly
+    experimental and is not yet recommended for production use.
diff --git a/releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml b/releasenotes/notes/max-active-fernet-keys-f960f08838a75eee.yaml
new file mode 100644 (file)
index 0000000..4c10753
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - KeystoneFernetMaxActiveKeys was introduced as a parameter to the keystone
+    profile. It sets the max_active_keys value of the keystone.conf file and
+    will subsequently be used by mistral to purge the keys in a mistral task.
diff --git a/releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml b/releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml
new file mode 100644 (file)
index 0000000..e5adb6a
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add support to configure number of sacks in gnocchi.
diff --git a/releasenotes/notes/ovn-ha-c0139ac519680872.yaml b/releasenotes/notes/ovn-ha-c0139ac519680872.yaml
new file mode 100644 (file)
index 0000000..d36f836
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Support HA for OVN db servers and ovn-northd using Pacemaker.
diff --git a/releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml b/releasenotes/notes/pre-network-config-role-specific-b36cc4bd6383e493.yaml
new file mode 100644 (file)
index 0000000..95e9260
--- /dev/null
@@ -0,0 +1,11 @@
+---
+features:
+  - PreNetworkConfig is modified to support role-specific parameters.
+upgrade:
+  - PreNetworkConfig takes a new parameter, RoleParameters. All the templates
+    associated with PreNetworkConfig should add this new parameter during
+    upgrade.
+deprecations:
+  - Parameters {{role}}KernelArgs, {{role}}TunedProfileName and
+    {{role}}HostCpusList are deprecated. Alternatively, role-specific
+    parameter support has been added with the same names.
index d242a5b..b011740 100644 (file)
@@ -4,6 +4,10 @@
 - name: BlockStorage
   description: |
     Cinder Block Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   ServicesDefault:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::BlockStorageCinderVolume
@@ -12,6 +16,7 @@
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Iscsid
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::Ntp
index d3de6ba..647c4d5 100644 (file)
@@ -4,6 +4,9 @@
 - name: CephStorage
   description: |
     Ceph OSD Storage node role
+  networks:
+    - Storage
+    - StorageMgmt
   ServicesDefault:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::CACerts
index 73ec659..75a6f60 100644 (file)
@@ -5,6 +5,10 @@
   description: |
     Basic Compute Node role
   CountDefault: 1
+  networks:
+    - InternalApi
+    - Tenant
+    - Storage
   HostnameFormatDefault: '%stackname%-novacompute-%index%'
   disable_upgrade_deployment: True
   ServicesDefault:
@@ -21,6 +25,7 @@
     - OS::TripleO::Services::ComputeNeutronOvsAgent
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Iscsid
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::NeutronLinuxbridgeAgent
index 7511d4c..b0a1313 100644 (file)
@@ -9,6 +9,12 @@
   tags:
     - primary
     - controller
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
   HostnameFormatDefault: '%stackname%-controller-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AodhApi
@@ -57,6 +63,7 @@
     - OS::TripleO::Services::Horizon
     - OS::TripleO::Services::IronicApi
     - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::Iscsid
     - OS::TripleO::Services::Keepalived
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
index 2d1702e..6cf2120 100644 (file)
@@ -9,6 +9,12 @@
   tags:
     - primary
     - controller
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
   HostnameFormatDefault: '%stackname%-controller-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AodhApi
index 3ef751a..75b26a8 100644 (file)
@@ -4,6 +4,8 @@
 - name: Database
   description: |
     Standalone database role with the database being managed via Pacemaker
+  networks:
+    - InternalApi
   HostnameFormatDefault: '%stackname%-database-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AuditD
diff --git a/roles/IronicConductor.yaml b/roles/IronicConductor.yaml
new file mode 100644 (file)
index 0000000..8a29b33
--- /dev/null
@@ -0,0 +1,21 @@
+###############################################################################
+# Role: IronicConductor                                                       #
+###############################################################################
+- name: IronicConductor
+  description: |
+    Ironic Conductor node role
+  HostnameFormatDefault: '%stackname%-ironic-%index%'
+  ServicesDefault:
+    - OS::TripleO::Services::AuditD
+    - OS::TripleO::Services::CACerts
+    - OS::TripleO::Services::Collectd
+    - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::Kernel
+    - OS::TripleO::Services::MySQLClient
+    - OS::TripleO::Services::Ntp
+    - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::Snmp
+    - OS::TripleO::Services::Timezone
+    - OS::TripleO::Services::TripleoFirewall
+    - OS::TripleO::Services::TripleoPackages
index cbef61a..5b06063 100644 (file)
@@ -4,6 +4,8 @@
 - name: Messaging
   description: |
     Standalone messaging role with RabbitMQ being managed via Pacemaker
+  networks:
+    - InternalApi
   HostnameFormatDefault: '%stackname%-messaging-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AuditD
index b393fa7..a28eaa6 100644 (file)
@@ -5,6 +5,8 @@
   description: |
     Standalone networking role to run Neutron services their own. Includes
     Pacemaker integration via PacemakerRemote
+  networks:
+    - InternalApi
   HostnameFormatDefault: '%stackname%-networker-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AuditD
index 3741ca6..27dc123 100644 (file)
@@ -4,6 +4,10 @@
 - name: ObjectStorage
   description: |
     Swift Object Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::AuditD
index 6c74233..cd1fcb4 100644 (file)
@@ -58,6 +58,10 @@ Role Options
 * description: (string) as few sentences describing the role and information
   pertaining to the usage of the role.
 
+ * networks: (list), optional list of networks which the role will have
+   access to when network isolation is enabled. The names should match
+   those defined in network_data.yaml.
+
 Working with Roles
 ==================
 The tripleoclient provides a series of commands that can be used to view
index 0f60364..d23ab6e 100644 (file)
@@ -4,6 +4,8 @@
 - name: Telemetry
   description: |
     Telemetry role that has all the telemetry services.
+  networks:
+    - InternalApi
   HostnameFormatDefault: '%stackname%-telemetry-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AodhApi
index 0a9bcad..bcdedc7 100644 (file)
@@ -19,6 +19,7 @@
     - OS::TripleO::Services::IronicApi
     - OS::TripleO::Services::IronicConductor
     - OS::TripleO::Services::IronicPxe
+    - OS::TripleO::Services::Iscsid
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::Memcached
     - OS::TripleO::Services::MistralApi
index c536e83..f96e562 100644 (file)
   tags:
     - primary
     - controller
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
   HostnameFormatDefault: '%stackname%-controller-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AodhApi
@@ -60,6 +66,7 @@
     - OS::TripleO::Services::Horizon
     - OS::TripleO::Services::IronicApi
     - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::Iscsid
     - OS::TripleO::Services::Keepalived
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Keystone
   description: |
     Basic Compute Node role
   CountDefault: 1
+  networks:
+    - InternalApi
+    - Tenant
+    - Storage
   HostnameFormatDefault: '%stackname%-novacompute-%index%'
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::ComputeNeutronOvsAgent
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Iscsid
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::NeutronLinuxbridgeAgent
 - name: BlockStorage
   description: |
     Cinder Block Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   ServicesDefault:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::BlockStorageCinderVolume
     - OS::TripleO::Services::Collectd
     - OS::TripleO::Services::Docker
     - OS::TripleO::Services::FluentdClient
+    - OS::TripleO::Services::Iscsid
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::MySQLClient
     - OS::TripleO::Services::Ntp
 - name: ObjectStorage
   description: |
     Swift Object Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::AuditD
 - name: CephStorage
   description: |
     Ceph OSD Storage node role
+  networks:
+    - Storage
+    - StorageMgmt
   ServicesDefault:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::CACerts
index ad760fd..783df91 100644 (file)
@@ -21,7 +21,9 @@
     - OS::TripleO::Services::HeatEngine
     - OS::TripleO::Services::IronicApi
     - OS::TripleO::Services::IronicConductor
+    - OS::TripleO::Services::IronicInspector
     - OS::TripleO::Services::IronicPxe
+    - OS::TripleO::Services::Iscsid
     - OS::TripleO::Services::Keystone
     - OS::TripleO::Services::Memcached
     - OS::TripleO::Services::MistralApi
@@ -50,6 +52,7 @@
     - OS::TripleO::Services::UndercloudAodhListener
     - OS::TripleO::Services::UndercloudAodhNotifier
     - OS::TripleO::Services::UndercloudCeilometerAgentCentral
+    - OS::TripleO::Services::UndercloudCeilometerAgentIpmi
     - OS::TripleO::Services::UndercloudCeilometerAgentNotification
     - OS::TripleO::Services::UndercloudGnocchiApi
     - OS::TripleO::Services::UndercloudGnocchiMetricd
similarity index 88%
rename from puppet/services/services.yaml
rename to services.yaml
index 0e7b6d2..724727b 100644 (file)
@@ -1,3 +1,4 @@
+#FIXME move into common when specfile adds it
 heat_template_version: pike
 
 description: >
@@ -127,3 +128,17 @@ outputs:
           expression: $.data.where($ != null).select($.get('upgrade_batch_tasks')).where($ != null).flatten().distinct()
           data: {get_attr: [ServiceChain, role_data]}
       service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
+
+      # Keys to support docker/services
+      puppet_config: {get_attr: [ServiceChain, role_data, puppet_config]}
+      kolla_config:
+        map_merge: {get_attr: [ServiceChain, role_data, kolla_config]}
+      docker_config:
+        {get_attr: [ServiceChain, role_data, docker_config]}
+      docker_puppet_tasks:
+        {get_attr: [ServiceChain, role_data, docker_puppet_tasks]}
+      host_prep_tasks:
+        yaql:
+          # Note we use distinct() here to filter any identical tasks
+          expression: $.data.where($ != null).select($.get('host_prep_tasks')).where($ != null).flatten().distinct()
+          data: {get_attr: [ServiceChain, role_data]}
index df5af85..8113635 100644 (file)
@@ -7,11 +7,11 @@ six>=1.9.0 # MIT
 sphinx!=1.6.1,>=1.5.1 # BSD
 oslosphinx>=4.7.0 # Apache-2.0
 reno!=2.3.1,>=1.8.0 # Apache-2.0
-coverage>=4.0,!=4.4  # Apache-2.0
-fixtures>=3.0.0  # Apache-2.0/BSD
-python-subunit>=0.0.18  # Apache-2.0/BSD
-testrepository>=0.0.18  # Apache-2.0/BSD
-testscenarios>=0.4  # Apache-2.0/BSD
-testtools>=1.4.0  # MIT
-mock>=2.0  # BSD
-oslotest>=1.10.0  # Apache-2.0
+coverage!=4.4,>=4.0 # Apache-2.0
+fixtures>=3.0.0 # Apache-2.0/BSD
+python-subunit>=0.0.18 # Apache-2.0/BSD
+testrepository>=0.0.18 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+testtools>=1.4.0 # MIT
+mock>=2.0 # BSD
+oslotest>=1.10.0 # Apache-2.0
index d7c0bd9..ff215fb 100755 (executable)
@@ -226,12 +226,10 @@ def validate(filename):
 
         # qdr aliases rabbitmq service to provide alternative messaging backend
         if (filename.startswith('./puppet/services/') and
-                filename not in ['./puppet/services/services.yaml',
-                                 './puppet/services/qdr.yaml']):
+                filename not in ['./puppet/services/qdr.yaml']):
             retval = validate_service(filename, tpl)
 
-        if (filename.startswith('./docker/services/') and
-                filename != './docker/services/services.yaml'):
+        if filename.startswith('./docker/services/'):
             retval = validate_docker_service(filename, tpl)
 
         if filename.endswith('hyperconverged-ceph.yaml'):