Merge "Docker services for Cinder Backup"
authorJenkins <jenkins@review.openstack.org>
Wed, 14 Jun 2017 13:49:46 +0000 (13:49 +0000)
committerGerrit Code Review <review@openstack.org>
Wed, 14 Jun 2017 13:49:46 +0000 (13:49 +0000)
50 files changed:
ci/environments/multinode-container-upgrade.yaml [deleted file]
docker/docker-puppet.py
docker/docker-steps.j2
docker/docker-toool
docker/services/manila-api.yaml [new file with mode: 0644]
docker/services/pacemaker/haproxy.yaml
docker/services/sahara-api.yaml [new file with mode: 0644]
docker/services/sahara-engine.yaml [new file with mode: 0644]
docker/services/sensu-client.yaml [new file with mode: 0644]
docker/services/swift-ringbuilder.yaml
docker/services/swift-storage.yaml
environments/docker.yaml
environments/enable-tls.yaml
environments/network-isolation.j2.yaml [new file with mode: 0644]
environments/network-isolation.yaml [deleted file]
environments/services-docker/manila.yaml [new file with mode: 0644]
environments/services-docker/sahara.yaml [new file with mode: 0644]
environments/services-docker/sensu-client.yaml [new file with mode: 0644]
environments/tls-everywhere-endpoints-dns.yaml
extraconfig/tasks/post_puppet_pacemaker.j2.yaml
plan-samples/README.rst [new file with mode: 0644]
plan-samples/plan-environment-derived-params.yaml [new file with mode: 0644]
puppet/blockstorage-role.yaml
puppet/cephstorage-role.yaml
puppet/compute-role.yaml
puppet/controller-role.yaml
puppet/objectstorage-role.yaml
puppet/puppet-steps.j2
puppet/role.role.j2.yaml
puppet/services/cinder-backend-netapp.yaml
puppet/services/cinder-volume.yaml
puppet/services/gnocchi-base.yaml
puppet/services/horizon.yaml
puppet/services/ironic-conductor.yaml
puppet/services/pacemaker_remote.yaml
releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml [new file with mode: 0644]
releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml [new file with mode: 0644]
releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml [new file with mode: 0644]
roles/BlockStorage.yaml
roles/CephStorage.yaml
roles/Compute.yaml
roles/Controller.yaml
roles/ControllerOpenstack.yaml
roles/Database.yaml
roles/Messaging.yaml
roles/Networker.yaml
roles/ObjectStorage.yaml
roles/README.rst
roles/Telemetry.yaml
roles_data.yaml

diff --git a/ci/environments/multinode-container-upgrade.yaml b/ci/environments/multinode-container-upgrade.yaml
deleted file mode 100644 (file)
index 24bb1f4..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-# NOTE: This is an environment specific for containers CI. Mainly we
-# deploy non-pacemakerized overcloud. Once we are able to deploy and
-# upgrade pacemakerized and containerized overcloud, we should remove
-# this file and use normal CI multinode environments/scenarios.
-
-resource_registry:
-  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
-  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode-os-net-config.yaml
-
-  # NOTE: This is needed because of upgrades from Ocata to Pike. We
-  # deploy the initial environment with Ocata templates, and
-  # overcloud-resource-registry.yaml there doesn't have this Docker
-  # mapping at all. After we stop CI'ing Ocata->Pike upgrade, we can
-  # remove this.
-  OS::TripleO::Services::Docker: OS::Heat::None
-
-parameter_defaults:
-  ControllerServices:
-    - OS::TripleO::Services::CephMon
-    - OS::TripleO::Services::CephOSD
-    - OS::TripleO::Services::CinderApi
-    - OS::TripleO::Services::CinderScheduler
-    - OS::TripleO::Services::CinderVolume
-    - OS::TripleO::Services::Docker
-    - OS::TripleO::Services::Kernel
-    - OS::TripleO::Services::Keystone
-    - OS::TripleO::Services::GlanceApi
-    - OS::TripleO::Services::HeatApi
-    - OS::TripleO::Services::HeatApiCfn
-    - OS::TripleO::Services::HeatApiCloudwatch
-    - OS::TripleO::Services::HeatEngine
-    - OS::TripleO::Services::MySQL
-    - OS::TripleO::Services::MySQLClient
-    - OS::TripleO::Services::NeutronDhcpAgent
-    - OS::TripleO::Services::NeutronL3Agent
-    - OS::TripleO::Services::NeutronMetadataAgent
-    - OS::TripleO::Services::NeutronServer
-    - OS::TripleO::Services::NeutronCorePlugin
-    - OS::TripleO::Services::NeutronOvsAgent
-    - OS::TripleO::Services::RabbitMQ
-    - OS::TripleO::Services::HAproxy
-    - OS::TripleO::Services::Keepalived
-    - OS::TripleO::Services::Memcached
-    - OS::TripleO::Services::Pacemaker
-    - OS::TripleO::Services::NovaConductor
-    - OS::TripleO::Services::NovaApi
-    - OS::TripleO::Services::NovaPlacement
-    - OS::TripleO::Services::NovaMetadata
-    - OS::TripleO::Services::NovaScheduler
-    - OS::TripleO::Services::Ntp
-    - OS::TripleO::Services::SwiftProxy
-    - OS::TripleO::Services::SwiftStorage
-    - OS::TripleO::Services::SwiftRingBuilder
-    - OS::TripleO::Services::Snmp
-    - OS::TripleO::Services::Timezone
-    - OS::TripleO::Services::TripleoPackages
-    - OS::TripleO::Services::NovaCompute
-    - OS::TripleO::Services::NovaLibvirt
-    - OS::TripleO::Services::Sshd
-  ControllerExtraConfig:
-    nova::compute::libvirt::services::libvirt_virt_type: qemu
-    nova::compute::libvirt::libvirt_virt_type: qemu
-    # Required for Centos 7.3 and Qemu 2.6.0
-    nova::compute::libvirt::libvirt_cpu_mode: 'none'
-    #NOTE(gfidente): not great but we need this to deploy on ext4
-    #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
-    ceph::profile::params::osd_max_object_name_len: 256
-    ceph::profile::params::osd_max_object_namespace_len: 64
-  SwiftCeilometerPipelineEnabled: False
-  Debug: True
diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index 340a9e9..4d9d40d 100755 (executable)
@@ -190,37 +190,23 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         if [ -n "$PUPPET_TAGS" ]; then
             TAGS="--tags \"$PUPPET_TAGS\""
         fi
+
+        # workaround LP1696283
+        mkdir -p /etc/ssh
+        touch /etc/ssh/ssh_known_hosts
+
         FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
 
         # Disables archiving
         if [ -z "$NO_ARCHIVE" ]; then
-            rm -Rf /var/lib/config-data/${NAME}
-
-            # copying etc should be enough for most services
-            mkdir -p /var/lib/config-data/${NAME}/etc
-            cp -a /etc/* /var/lib/config-data/${NAME}/etc/
-
-            # workaround LP1696283
-            mkdir -p /var/lib/config-data/${NAME}/etc/ssh
-            touch /var/lib/config-data/${NAME}/etc/ssh/ssh_known_hosts
-
-            if [ -d /root/ ]; then
-              cp -a /root/ /var/lib/config-data/${NAME}/root/
-            fi
-            if [ -d /var/lib/ironic/tftpboot/ ]; then
-              mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
-              cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/tftpboot/
-            fi
-            if [ -d /var/lib/ironic/httpboot/ ]; then
-              mkdir -p /var/lib/config-data/${NAME}/var/lib/ironic/
-              cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/${NAME}/var/lib/ironic/httpboot/
-            fi
-
-            # apache services may files placed in /var/www/
-            if [ -d /var/www/ ]; then
-             mkdir -p /var/lib/config-data/${NAME}/var/www
-             cp -a /var/www/* /var/lib/config-data/${NAME}/var/www/
-            fi
+            archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
+            rsync_srcs=""
+            for d in "${archivedirs[@]}"; do
+                if [ -d "$d" ]; then
+                    rsync_srcs+=" $d"
+                fi
+            done
+            rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}
 
             # Write a checksum of the config-data dir, this is used as a
             # salt to trigger container restart when the config changes
diff --git a/docker/docker-steps.j2 b/docker/docker-steps.j2
index a56ca02..3dd963b 100644 (file)
@@ -139,10 +139,6 @@ resources:
                         - name: Write kolla config json files
                           copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
                           with_dict: "{{kolla_config}}"
-                        - name: Install paunch FIXME remove when packaged
-                          shell: |
-                            yum -y install python-pip
-                            pip install paunch
                         ########################################################
                         # Bootstrap tasks, only performed on bootstrap_server_id
                         ########################################################
@@ -220,26 +216,31 @@ resources:
   {% endfor %}
   # END CONFIG STEPS
 
-  {{role.name}}PostConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PostConfig
+  # Note, this should be the last step to execute configuration changes.
+  # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+  # after all the previous deployment steps.
+  {{role.name}}ExtraConfigPost:
     depends_on:
   {% for dep in roles %}
       - {{dep.name}}Deployment_Step5
   {% endfor %}
+    type: OS::TripleO::NodeExtraConfigPost
     properties:
-      servers:  {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
+        servers: {get_param: [servers, {{role.name}}]}
 
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  {{role.name}}ExtraConfigPost:
+  # The {{role.name}}PostConfig steps are in charge of
+  # quiescing all services, i.e. in the Controller case,
+  # we should run a full service reload.
+  {{role.name}}PostConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PostConfig
     depends_on:
   {% for dep in roles %}
-      - {{dep.name}}PostConfig
+      - {{dep.name}}ExtraConfigPost
   {% endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
     properties:
-        servers: {get_param: [servers, {{role.name}}]}
+      servers:  {get_param: servers}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
 
 {% endfor %}
diff --git a/docker/docker-toool b/docker/docker-toool
index 36aba4a..0b87ea9 100755 (executable)
@@ -75,6 +75,9 @@ def parse_opts(argv):
 
 def docker_arg_map(key, value):
     value = str(value).encode('ascii', 'ignore')
+    if len(value) == 0:
+        return ''
+
     return {
         'environment': "--env=%s" % value,
         # 'image': value,
diff --git a/docker/services/manila-api.yaml b/docker/services/manila-api.yaml
new file mode 100644 (file)
index 0000000..47d0f57
--- /dev/null
@@ -0,0 +1,112 @@
+heat_template_version: pike
+
+description: >
+  OpenStack containerized Manila API service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerManilaApiImage:
+    description: image
+    default: 'centos-binary-manila-api:latest'
+    type: string
+  DockerManilaConfigImage:
+    description: image
+    default: 'centos-binary-manila-base:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ManilaApiPuppetBase:
+    type: ../../puppet/services/manila-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Manila API role.
+    value:
+      service_name: {get_attr: [ManilaApiPuppetBase, role_data, service_name]}
+      config_settings: {get_attr: [ManilaApiPuppetBase, role_data, config_settings]}
+      step_config: &step_config
+        {get_attr: [ManilaApiPuppetBase, role_data, step_config]}
+      service_config_settings: {get_attr: [ManilaApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: manila
+        puppet_tags: manila_config,manila_api_paste_ini
+        step_config: *step_config
+        config_image:
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerManilaConfigImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/manila_api.json:
+          command: /usr/bin/manila-api --config-file /usr/share/manila/manila-dist.conf --config-file /etc/manila/manila.conf
+          permissions:
+            - path: /var/log/manila
+              owner: manila:manila
+              recurse: true
+      docker_config:
+        step_3:
+          manila_api_db_sync:
+            user: root
+            image: &manila_api_image
+              list_join:
+                - '/'
+                - [ {get_param: DockerNamespace}, {get_param: DockerManilaApiImage} ]
+            net: host
+            detach: false
+            volumes:
+              - /var/lib/config-data/manila/etc/manila/:/etc/manila:ro
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - logs:/var/log
+            command: "/usr/bin/bootstrap_host_exec manila_api su manila -s /bin/bash -c '/usr/bin/manila-manage db sync'"
+        step_4:
+          manila_api:
+            image: *manila_api_image
+            net: host
+            restart: always
+            volumes:
+              - /var/lib/kolla/config_files/manila_api.json:/var/lib/kolla/config_files/config.json:ro
+              - /var/lib/config-data/manila/etc/manila/:/etc/manila/:ro
+              - /etc/hosts:/etc/hosts:ro
+              - /etc/localtime:/etc/localtime:ro
+              - /var/log/containers/manila:/var/log/manila
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: Create persistent manila logs directory
+          file:
+            path: /var/log/containers/manila
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable manila_api service
+          tags: step2
+          service: name=openstack-manila-api state=stopped enabled=no
diff --git a/docker/services/pacemaker/haproxy.yaml b/docker/services/pacemaker/haproxy.yaml
index ae19652..7557afd 100644 (file)
@@ -60,11 +60,7 @@ outputs:
               list_join:
                 - '/'
                 - [ {get_param: DockerNamespace}, {get_param: DockerHAProxyImage} ]
-      step_config:
-          list_join:
-            - "\n"
-            - - &noop_pcmk "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
-              - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
+      step_config: ""
       service_config_settings: {get_attr: [HAProxyBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
       puppet_config:
@@ -74,8 +70,8 @@ outputs:
           list_join:
             - "\n"
             - - "exec {'wait-for-settle': command => '/bin/true' }"
-              - &noop_firewall "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
-              - *noop_pcmk
+              - "class tripleo::firewall(){}; define tripleo::firewall::rule( $port = undef, $dport = undef, $sport = undef, $proto = undef, $action = undef, $state = undef, $source = undef, $iniface = undef, $chain = undef, $destination = undef, $extras = undef){}"
+              - "['pcmk_bundle', 'pcmk_resource', 'pcmk_property', 'pcmk_constraint', 'pcmk_resource_default'].each |String $val| { noop_resource($val) }"
               - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
         config_image: *haproxy_image
       kolla_config:
@@ -88,6 +84,7 @@ outputs:
             detach: false
             net: host
             user: root
+            privileged: true
             command:
               - '/bin/bash'
               - '-c'
@@ -98,14 +95,20 @@ outputs:
                       - - "cp -a /tmp/puppet-etc/* /etc/puppet; echo '{\"step\": 2}' > /etc/puppet/hieradata/docker.json"
                         - "FACTER_uuid=docker puppet apply --tags file,file_line,concat,augeas,TAGS -v -e 'CONFIG'"
                   params:
-                    TAGS: 'pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
+                    TAGS: 'tripleo::firewall::rule,pacemaker::resource::bundle,pacemaker::property,pacemaker::resource::ip,pacemaker::resource::ocf,pacemaker::constraint::order,pacemaker::constraint::colocation'
                     CONFIG:
                       list_join:
                         - ';'
-                        - - *noop_firewall
-                          - 'include ::tripleo::profile::base::pacemaker;include ::tripleo::profile::pacemaker::haproxy_bundle'
+                        - - 'include ::tripleo::profile::base::pacemaker'
+                          - 'include ::tripleo::profile::pacemaker::haproxy_bundle'
             image: *haproxy_image
             volumes:
+              # puppet saves iptables rules in /etc/sysconfig
+              - /etc/sysconfig:/etc/sysconfig:rw
+              # saving rules requires access to /usr/libexec/iptables/iptables.init, so bind-mount
+              # the necessary bits and prevent systemd from trying to reload the service in the container
+              - /usr/libexec/iptables:/usr/libexec/iptables:ro
+              - /usr/libexec/initscripts/legacy-actions:/usr/libexec/initscripts/legacy-actions:ro
               - /etc/hosts:/etc/hosts:ro
               - /etc/localtime:/etc/localtime:ro
               - /etc/puppet:/tmp/puppet-etc:ro
diff --git a/docker/services/sahara-api.yaml b/docker/services/sahara-api.yaml
new file mode 100644 (file)
index 0000000..1067079
--- /dev/null
@@ -0,0 +1,119 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Sahara service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerSaharaApiImage:
+    description: image
+    default: 'centos-binary-sahara-api:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  SaharaApiPuppetBase:
+    type: ../../puppet/services/sahara-api.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Sahara API role.
+    value:
+      service_name: {get_attr: [SaharaApiPuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [SaharaApiPuppetBase, role_data, config_settings]
+          - sahara::sync_db: false
+      step_config: &step_config
+        get_attr: [SaharaApiPuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [SaharaApiPuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: sahara
+        puppet_tags: sahara_api_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+        step_config: *step_config
+        config_image: &sahara_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaApiImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/sahara-api.json:
+          command: /usr/bin/sahara-api --config-file /etc/sahara/sahara.conf
+          permissions:
+            - path: /var/lib/sahara
+              owner: sahara:sahara
+              recurse: true
+            - path: /var/log/sahara
+              owner: sahara:sahara
+              recurse: true
+      docker_config:
+        step_3:
+          sahara_db_sync:
+            image: *sahara_image
+            net: host
+            privileged: false
+            detach: false
+            volumes: &sahara_volumes
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/sahara-api.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+                  - /lib/modules:/lib/modules:ro
+                  - /var/lib/sahara:/var/lib/sahara
+                  - /var/log/containers/sahara:/var/log/sahara
+            command: "/usr/bin/bootstrap_host_exec sahara_api su sahara -s /bin/bash -c 'sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head'"
+        step_4:
+          sahara_api:
+            image: *sahara_image
+            net: host
+            privileged: false
+            restart: always
+            volumes: *sahara_volumes
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create /var/lib/sahara
+          file:
+            path: /var/lib/sahara
+            state: directory
+        - name: create persistent sahara logs directory
+          file:
+            path: /var/log/containers/sahara
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable sahara_api service
+          tags: step2
+          service: name=openstack-sahara-api state=stopped enabled=no
diff --git a/docker/services/sahara-engine.yaml b/docker/services/sahara-engine.yaml
new file mode 100644 (file)
index 0000000..41b5790
--- /dev/null
@@ -0,0 +1,110 @@
+heat_template_version: pike
+
+description: >
+  OpenStack Sahara service configured with Puppet
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerSaharaEngineImage:
+    description: image
+    default: 'centos-binary-sahara-engine:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  SaharaEnginePuppetBase:
+    type: ../../puppet/services/sahara-engine.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Sahara Engine role.
+    value:
+      service_name: {get_attr: [SaharaEnginePuppetBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [SaharaEnginePuppetBase, role_data, config_settings]
+          - sahara::sync_db: false
+      step_config: &step_config
+        get_attr: [SaharaEnginePuppetBase, role_data, step_config]
+      service_config_settings: {get_attr: [SaharaEnginePuppetBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS #
+      puppet_config:
+        config_volume: sahara
+        puppet_tags: sahara_engine_paste_ini,sahara_cluster_template,sahara_config,sahara_node_group_template
+        step_config: *step_config
+        config_image: &sahara_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSaharaEngineImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/sahara-engine.json:
+          command: /usr/bin/sahara-engine --config-file /etc/sahara/sahara.conf
+          permissions:
+            - path: /var/lib/sahara
+              owner: sahara:sahara
+              recurse: true
+            - path: /var/log/sahara
+              owner: sahara:sahara
+              recurse: true
+      docker_config:
+        step_4:
+          sahara_engine:
+            image: *sahara_image
+            net: host
+            privileged: false
+            restart: always
+            volumes: &sahara_volumes
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/lib/kolla/config_files/sahara-engine.json:/var/lib/kolla/config_files/config.json
+                  - /var/lib/config-data/sahara/etc/sahara/:/etc/sahara/:ro
+                  - /var/lib/sahara:/var/lib/sahara
+                  - /var/log/containers/sahara:/var/log/sahara
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      host_prep_tasks:
+        - name: create /var/lib/sahara
+          file:
+            path: /var/lib/sahara
+            state: directory
+        - name: create persistent sahara logs directory
+          file:
+            path: /var/log/containers/sahara
+            state: directory
+      upgrade_tasks:
+        - name: Stop and disable sahara_engine service
+          tags: step2
+          service: name=openstack-sahara-engine state=stopped enabled=no
diff --git a/docker/services/sensu-client.yaml b/docker/services/sensu-client.yaml
new file mode 100644 (file)
index 0000000..e6bdf15
--- /dev/null
@@ -0,0 +1,131 @@
+heat_template_version: pike
+
+description: >
+  Containerized Sensu client service
+
+parameters:
+  DockerNamespace:
+    description: namespace
+    default: 'tripleoupstream'
+    type: string
+  DockerSensuClientImage:
+    description: image
+    default: 'centos-binary-sensu-client:latest'
+    type: string
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  RoleName:
+    default: ''
+    description: Role name on which the service is applied
+    type: string
+  RoleParameters:
+    default: {}
+    description: Parameters specific to the role
+    type: json
+  SensuDockerCheckCommand:
+    type: string
+    default: |
+      for i in $(docker ps --format '{{.ID}}'); do
+        if result=$(docker inspect --format='{{.State.Health.Status}}' $i 2>/dev/null); then
+          if [ "$result" != 'healthy' ]; then
+            echo "$(docker inspect --format='{{.Name}}' $i) ($i): $(docker inspect --format='{{json .State}}' $i)" && exit 2;
+          fi
+        fi
+      done
+  SensuDockerCheckInterval:
+    type: number
+    description: The frequency in seconds the docker health check is executed.
+    default: 10
+  SensuDockerCheckHandlers:
+    default: []
+    description: The Sensu event handler to use for events
+                 created by the docker health check.
+    type: comma_delimited_list
+  SensuDockerCheckOccurrences:
+    type: number
+    description: The number of event occurrences before the sensu-plugin-aware handler takes action.
+    default: 3
+  SensuDockerCheckRefresh:
+    type: number
+    description: The number of seconds sensu-plugin-aware handlers should wait before taking a second action.
+    default: 90
+
+resources:
+
+  ContainersCommon:
+    type: ./containers-common.yaml
+
+  SensuClientBase:
+    type: ../../puppet/services/monitoring/sensu-client.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+outputs:
+  role_data:
+    description: Role data for the Sensu client role.
+    value:
+      service_name: {get_attr: [SensuClientBase, role_data, service_name]}
+      config_settings:
+        map_merge:
+          - get_attr: [SensuClientBase, role_data, config_settings]
+          - sensu::checks:
+              check-docker-health:
+                standalone: true
+                command: {get_param: SensuDockerCheckCommand}
+                interval: {get_param: SensuDockerCheckInterval}
+                handlers: {get_param: SensuDockerCheckHandlers}
+                occurrences: {get_param: SensuDockerCheckOccurrences}
+                refresh: {get_param: SensuDockerCheckRefresh}
+      step_config: &step_config
+        get_attr: [SensuClientBase, role_data, step_config]
+      service_config_settings: {get_attr: [SensuClientBase, role_data, service_config_settings]}
+      # BEGIN DOCKER SETTINGS
+      puppet_config:
+        config_volume: sensu
+        puppet_tags:  sensu_rabbitmq_config,sensu_client_config,sensu_check_config,sensu_check
+        step_config: *step_config
+        config_image: &sensu_client_image
+          list_join:
+            - '/'
+            - [ {get_param: DockerNamespace}, {get_param: DockerSensuClientImage} ]
+      kolla_config:
+        /var/lib/kolla/config_files/sensu-client.json:
+          command: /usr/bin/sensu-client -d /etc/sensu/conf.d/
+      docker_config:
+        step_3:
+          sensu_client:
+            image: *sensu_client_image
+            net: host
+            privileged: true
+            # NOTE(mmagr) kolla image changes the user to 'sensu'; we need it
+            # to run as root so it has rw permission to docker.sock and can
+            # successfully run the "docker inspect" command
+            user: root
+            restart: always
+            volumes:
+              list_concat:
+                - {get_attr: [ContainersCommon, volumes]}
+                -
+                  - /var/run/docker.sock:/var/run/docker.sock:rw
+                  - /var/lib/kolla/config_files/sensu-client.json:/var/lib/kolla/config_files/config.json:ro
+                  - /var/lib/config-data/sensu/etc/sensu/:/etc/sensu/:ro
+            environment:
+              - KOLLA_CONFIG_STRATEGY=COPY_ALWAYS
+      upgrade_tasks:
+        - name: Stop and disable sensu-client service
+          tags: step2
+          service: name=sensu-client.service state=stopped enabled=no
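
Note: the docker health-check behaviour above is tunable per deployment. A
minimal environment sketch (the values and the "default" handler name are
only illustrations, not shipped defaults):

    parameter_defaults:
      SensuDockerCheckInterval: 30
      SensuDockerCheckOccurrences: 5
      SensuDockerCheckHandlers:
        - default
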
diff --git a/docker/services/swift-ringbuilder.yaml b/docker/services/swift-ringbuilder.yaml
index bfd445d..075d8d7 100644 (file)
@@ -58,6 +58,14 @@ parameters:
     default: true
     description: 'Use a local directory for Swift storage services when building rings'
     type: boolean
+  SwiftRingGetTempurl:
+    default: ''
+    description: A temporary Swift URL to download rings from.
+    type: string
+  SwiftRingPutTempurl:
+    default: ''
+    description: A temporary Swift URL to upload rings to.
+    type: string
 
 resources:
 
@@ -75,14 +83,17 @@ outputs:
     description: Role data for Swift Ringbuilder configuration in containers.
     value:
       service_name: {get_attr: [SwiftRingbuilderBase, role_data, service_name]}
-      config_settings: {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+      config_settings:
+        map_merge:
+          - {get_attr: [SwiftRingbuilderBase, role_data, config_settings]}
+          - tripleo::profile::base::swift::ringbuilder:skip_consistency_check: true
       step_config: &step_config
         get_attr: [SwiftRingbuilderBase, role_data, step_config]
       service_config_settings: {get_attr: [SwiftRingbuilderBase, role_data, service_config_settings]}
       # BEGIN DOCKER SETTINGS
       puppet_config:
         config_volume: 'swift'
-        puppet_tags: exec,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance
+        puppet_tags: exec,fetch_swift_ring_tarball,extract_swift_ring_tarball,ring_object_device,swift::ringbuilder::create,tripleo::profile::base::swift::add_devices,swift::ringbuilder::rebalance,create_swift_ring_tarball,upload_swift_ring_tarball
         step_config: *step_config
         config_image:
           list_join:
diff --git a/docker/services/swift-storage.yaml b/docker/services/swift-storage.yaml
index 017fb12..55aea20 100644 (file)
@@ -46,6 +46,11 @@ parameters:
                  via parameter_defaults in the resource registry.  This
                  mapping overrides those in ServiceNetMapDefaults.
     type: json
+  SwiftRawDisks:
+    default: {}
+    description: 'A hash of additional raw devices to use as Swift backend (e.g. {sdb: {}})'
+    type: json
+
 
 resources:
 
@@ -66,7 +71,11 @@ outputs:
     description: Role data for the swift storage services.
     value:
       service_name: {get_attr: [SwiftStorageBase, role_data, service_name]}
-      config_settings: {get_attr: [SwiftStorageBase, role_data, config_settings]}
+      config_settings:
+        map_merge:
+          - {get_attr: [SwiftStorageBase, role_data, config_settings]}
+          # FIXME (cschwede): re-enable this once checks work inside containers
+          - swift::storage::all::mount_check: false
       step_config: &step_config
         get_attr: [SwiftStorageBase, role_data, step_config]
       service_config_settings: {get_attr: [SwiftStorageBase, role_data, service_config_settings]}
@@ -348,6 +357,18 @@ outputs:
           with_items:
             - /var/log/containers/swift
             - /srv/node
+        - name: Format and mount devices defined in SwiftRawDisks
+          mount:
+            name: /srv/node/{{ item }}
+            src: /dev/{{ item }}
+            fstype: xfs
+            opts: noatime
+            state: mounted
+          with_items:
+            - repeat:
+                template: 'DEVICE'
+                for_each:
+                  DEVICE: {get_param: SwiftRawDisks}
       upgrade_tasks:
         - name: Stop and disable swift storage services
           tags: step2
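
Note: SwiftRawDisks is a hash keyed by device name; each listed device is
formatted as XFS and mounted under /srv/node/ by the host_prep_tasks above. A
minimal environment sketch (sdb/sdc are placeholder device names):

    parameter_defaults:
      SwiftRawDisks:
        sdb: {}
        sdc: {}
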
diff --git a/environments/docker.yaml b/environments/docker.yaml
index 02cc6a0..4151c0d 100644 (file)
@@ -27,7 +27,6 @@ resource_registry:
   OS::TripleO::Services::NeutronDhcpAgent: ../docker/services/neutron-dhcp.yaml
   OS::TripleO::Services::NeutronL3Agent: ../docker/services/neutron-l3.yaml
   OS::TripleO::Services::MySQL: ../docker/services/database/mysql.yaml
-  OS::TripleO::Services::HAProxy: ../docker/services/haproxy.yaml
   OS::TripleO::Services::RabbitMQ: ../docker/services/rabbitmq.yaml
   OS::TripleO::Services::MongoDb: ../docker/services/database/mongodb.yaml
   OS::TripleO::Services::Redis: ../docker/services/database/redis.yaml
diff --git a/environments/enable-tls.yaml b/environments/enable-tls.yaml
index 39ded65..481459c 100644 (file)
@@ -2,6 +2,7 @@
 # For these values to take effect, one of the tls-endpoints-*.yaml environments
 # must also be used.
 parameter_defaults:
+  HorizonSecureCookies: True
   SSLCertificate: |
     The contents of your certificate go here
   SSLIntermediateCertificate: ''
diff --git a/environments/network-isolation.j2.yaml b/environments/network-isolation.j2.yaml
new file mode 100644 (file)
index 0000000..3ef9b27
--- /dev/null
@@ -0,0 +1,37 @@
+{%- set primary_role = [roles[0]] -%}
+{%- for role in roles -%}
+  {%- if 'primary' in role.tags and 'controller' in role.tags -%}
+    {%- set _ = primary_role.pop() -%}
+    {%- set _ = primary_role.append(role) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- set primary_role_name = primary_role[0].name -%}
+# Enable the creation of Neutron networks for isolated Overcloud
+# traffic and configure each role to assign ports (related
+# to that role) on these networks.
+# primary role is: {{primary_role_name}}
+resource_registry:
+  # networks as defined in network_data.yaml
+  {%- for network in networks if network.enabled|default(true) %}
+  OS::TripleO::Network::{{network.name}}: ../network/{{network.name.lower()}}.yaml
+  {%- endfor %}
+
+  # Port assignments for the VIPs
+  {%- for network in networks if network.vip %}
+  OS::TripleO::Network::Ports::{{network.name}}VipPort: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+  {%- endfor %}
+  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+
+  OS::TripleO::{{primary_role_name}}::Ports::RedisVipPort: ../network/ports/vip.yaml
+
+{%- for role in roles %}
+  # Port assignments for the {{role.name}}
+  {%- for network in networks %}
+    {%- if network.name in role.networks|default([]) and network.enabled|default(true) %}
+  OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/{{network.name_lower|default(network.name.lower())}}.yaml
+    {%- else %}
+  OS::TripleO::{{role.name}}::Ports::{{network.name}}Port: ../network/ports/noop.yaml
+    {%- endif %}
+  {%- endfor %}
+{%- endfor %}
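
Note: when rendered against the default network_data.yaml and roles_data.yaml,
the template above yields the same kind of per-role port mappings as the static
environment it replaces (deleted below), but driven by each role's networks
list. An illustrative excerpt of the rendered output, for a Compute role that
is not on the External network:

    resource_registry:
      OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
      OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
      OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
      OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
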
diff --git a/environments/network-isolation.yaml b/environments/network-isolation.yaml
deleted file mode 100644 (file)
index a6b4b8a..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# Enable the creation of Neutron networks for isolated Overcloud
-# traffic and configure each role to assign ports (related
-# to that role) on these networks.
-resource_registry:
-  OS::TripleO::Network::External: ../network/external.yaml
-  OS::TripleO::Network::InternalApi: ../network/internal_api.yaml
-  OS::TripleO::Network::StorageMgmt: ../network/storage_mgmt.yaml
-  OS::TripleO::Network::Storage: ../network/storage.yaml
-  OS::TripleO::Network::Tenant: ../network/tenant.yaml
-  # Management network is optional and disabled by default.
-  # To enable it, include environments/network-management.yaml
-  #OS::TripleO::Network::Management: ../network/management.yaml
-
-  # Port assignments for the VIPs
-  OS::TripleO::Network::Ports::ExternalVipPort: ../network/ports/external.yaml
-  OS::TripleO::Network::Ports::InternalApiVipPort: ../network/ports/internal_api.yaml
-  OS::TripleO::Network::Ports::StorageVipPort: ../network/ports/storage.yaml
-  OS::TripleO::Network::Ports::StorageMgmtVipPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::Network::Ports::RedisVipPort: ../network/ports/vip.yaml
-
-  # Port assignments for the controller role
-  OS::TripleO::Controller::Ports::ExternalPort: ../network/ports/external.yaml
-  OS::TripleO::Controller::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::Controller::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::Controller::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::Controller::Ports::TenantPort: ../network/ports/tenant.yaml
-  #OS::TripleO::Controller::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the compute role
-  OS::TripleO::Compute::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::Compute::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::TenantPort: ../network/ports/tenant.yaml
-  #OS::TripleO::Compute::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the ceph storage role
-  OS::TripleO::CephStorage::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::InternalApiPort: ../network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::CephStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::CephStorage::Ports::TenantPort: ../network/ports/noop.yaml
-  #OS::TripleO::CephStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the swift storage role
-  OS::TripleO::SwiftStorage::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::SwiftStorage::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::SwiftStorage::Ports::TenantPort: ../network/ports/noop.yaml
-  #OS::TripleO::SwiftStorage::Ports::ManagementPort: ../network/ports/management.yaml
-
-  # Port assignments for the block storage role
-  OS::TripleO::BlockStorage::Ports::ExternalPort: ../network/ports/noop.yaml
-  OS::TripleO::BlockStorage::Ports::InternalApiPort: ../network/ports/internal_api.yaml
-  OS::TripleO::BlockStorage::Ports::StoragePort: ../network/ports/storage.yaml
-  OS::TripleO::BlockStorage::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
-  OS::TripleO::BlockStorage::Ports::TenantPort: ../network/ports/noop.yaml
-  #OS::TripleO::BlockStorage::Ports::ManagementPort: ../network/ports/management.yaml
diff --git a/environments/services-docker/manila.yaml b/environments/services-docker/manila.yaml
new file mode 100644 (file)
index 0000000..6f7608c
--- /dev/null
@@ -0,0 +1,2 @@
+resource_registry:
+  OS::TripleO::Services::ManilaApi: ../../docker/services/manila-api.yaml
diff --git a/environments/services-docker/sahara.yaml b/environments/services-docker/sahara.yaml
new file mode 100644 (file)
index 0000000..d0bf9fe
--- /dev/null
@@ -0,0 +1,3 @@
+resource_registry:
+  OS::TripleO::Services::SaharaApi: ../../docker/services/sahara-api.yaml
+  OS::TripleO::Services::SaharaEngine: ../../docker/services/sahara-engine.yaml
diff --git a/environments/services-docker/sensu-client.yaml b/environments/services-docker/sensu-client.yaml
new file mode 100644 (file)
index 0000000..c03104d
--- /dev/null
@@ -0,0 +1,3 @@
+
+resource_registry:
+  OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
diff --git a/environments/tls-everywhere-endpoints-dns.yaml b/environments/tls-everywhere-endpoints-dns.yaml
index 865ed4c..3f1bd04 100644 (file)
@@ -72,8 +72,8 @@ parameter_defaults:
     IronicAdmin: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
     IronicInternal: {protocol: 'https', port: '6385', host: 'CLOUDNAME'}
     IronicPublic: {protocol: 'https', port: '13385', host: 'CLOUDNAME'}
-    IronicInspectorAdmin: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
-    IronicInspectorInternal: {protocol: 'http', port: '5050', host: 'CLOUDNAME'}
+    IronicInspectorAdmin: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
+    IronicInspectorInternal: {protocol: 'https', port: '5050', host: 'CLOUDNAME'}
     IronicInspectorPublic: {protocol: 'https', port: '13050', host: 'CLOUDNAME'}
     KeystoneAdmin: {protocol: 'https', port: '35357', host: 'CLOUDNAME'}
     KeystoneInternal: {protocol: 'https', port: '5000', host: 'CLOUDNAME'}
diff --git a/extraconfig/tasks/post_puppet_pacemaker.j2.yaml b/extraconfig/tasks/post_puppet_pacemaker.j2.yaml
index 7fc258d..6bf5afb 100644 (file)
@@ -10,8 +10,8 @@ parameters:
 
 resources:
 
-{%- for role in roles -%}
-{% if "controller" in role.tags %}
+{%- for role in roles %}
+  {%- if 'controller' in role.tags %}
   {{role.name}}PostPuppetMaintenanceModeConfig:
     type: OS::Heat::SoftwareConfig
     properties:
@@ -37,6 +37,6 @@ resources:
     properties:
       servers: {get_param: [servers, {{role.name}}]}
       input_values: {get_param: input_values}
-{%- endif -%}
-{% endfor %}
+  {%- endif %}
+{%- endfor %}
 
diff --git a/plan-samples/README.rst b/plan-samples/README.rst
new file mode 100644 (file)
index 0000000..44b9d0c
--- /dev/null
@@ -0,0 +1,22 @@
+=================================
+Samples for plan-environment.yaml
+=================================
+
+The ``plan-environment.yaml`` file provides the details of the plan to be
+deployed by TripleO. Along with the details of the heat environments and
+parameters, it is also possible to provide workflow-specific parameters to
+the TripleO mistral workflows. A new section, ``workflow_parameters``, has
+been added for this purpose, giving a clear separation between heat
+environment parameters and workflow-only parameters. These customized plan
+environment files can be provided with the ``-p`` option to the
+``openstack overcloud deploy`` and ``openstack overcloud plan create``
+commands. The sample format for providing the workflow-specific parameters is::
+
+  workflow_parameters:
+    tripleo.derive_params.v1.derive_parameters:
+      # DPDK Parameters
+      number_of_pmd_cpu_threads_per_numa_node: 2
+
+
+All the parameters specified under the workflow name will be passed as
+``user_input`` to the workflow when it is invoked from the tripleoclient.
\ No newline at end of file
diff --git a/plan-samples/plan-environment-derived-params.yaml b/plan-samples/plan-environment-derived-params.yaml
new file mode 100644 (file)
index 0000000..964e57d
--- /dev/null
@@ -0,0 +1,35 @@
+version: 1.0
+
+name: overcloud
+description: >
+  Default Deployment plan
+template: overcloud.yaml
+environments:
+  - path: overcloud-resource-registry-puppet.yaml
+workflow_parameters:
+  tripleo.derive_params.v1.derive_parameters:
+    ######### DPDK Parameters #########
+    # Specifies the minimum number of CPU threads to be allocated for DPDK
+    # PMD threads. The actual allocation is based on the network config: if
+    # a DPDK port is associated with a NUMA node, then this configuration
+    # will be used, else 0.
+    number_of_pmd_cpu_threads_per_numa_node: 4
+    # Percentage of memory to be configured as huge pages. Out of the
+    # total available memory (excluding the NovaReservedHostMemory), the
+    # specified percentage of the remainder is configured as huge pages.
+    huge_page_allocation_percentage: 90
+    ######### HCI Parameters #########
+    hci_profile: default
+    hci_profile_config:
+      default:
+        average_guest_memory_size_in_mb: 2048
+        average_guest_cpu_utilization_percentage: 50
+      many_small_vms:
+        average_guest_memory_size_in_mb: 1024
+        average_guest_cpu_utilization_percentage: 20
+      few_large_vms:
+        average_guest_memory_size_in_mb: 4096
+        average_guest_cpu_utilization_percentage: 80
+      nfv_default:
+        average_guest_memory_size_in_mb: 8192
+        average_guest_cpu_utilization_percentage: 90
diff --git a/puppet/blockstorage-role.yaml b/puppet/blockstorage-role.yaml
index 60ddeb8..25e509b 100644 (file)
@@ -466,6 +466,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
diff --git a/puppet/cephstorage-role.yaml b/puppet/cephstorage-role.yaml
index 9d30ab2..ca89be8 100644 (file)
@@ -471,6 +471,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
diff --git a/puppet/compute-role.yaml b/puppet/compute-role.yaml
index 06a31ec..abfacd3 100644 (file)
@@ -479,6 +479,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   NovaComputeDeployment:
     type: OS::TripleO::SoftwareDeployment
diff --git a/puppet/controller-role.yaml b/puppet/controller-role.yaml
index cccfdef..396eda8 100644 (file)
@@ -541,6 +541,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Hook for site-specific additional pre-deployment config, e.g extra hieradata
   ControllerExtraConfigPre:
diff --git a/puppet/objectstorage-role.yaml b/puppet/objectstorage-role.yaml
index 19ea1b6..42b1a78 100644 (file)
@@ -451,6 +451,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   SwiftStorageHieraDeploy:
     type: OS::Heat::StructuredDeployment
diff --git a/puppet/puppet-steps.j2 b/puppet/puppet-steps.j2
index 360c633..5567d65 100644 (file)
         update_identifier: {get_param: DeployIdentifier}
 {% endfor %}
 
-  {{role.name}}PostConfig:
-    type: OS::TripleO::Tasks::{{role.name}}PostConfig
+  # Note, this should be the last step to execute configuration changes.
+  # Ensure that all {{role.name}}ExtraConfigPost steps are executed
+  # after all the previous deployment steps.
+  {{role.name}}ExtraConfigPost:
     depends_on:
   {% for dep in roles %}
       - {{dep.name}}Deployment_Step5
   {% endfor %}
+    type: OS::TripleO::NodeExtraConfigPost
     properties:
-      servers: {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
+        servers: {get_param: [servers, {{role.name}}]}
 
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  {{role.name}}ExtraConfigPost:
+  # The {{role.name}}PostConfig steps are in charge of
+  # quiescing all services, i.e. in the Controller case,
+  # we should run a full service reload.
+  {{role.name}}PostConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PostConfig
     depends_on:
   {% for dep in roles %}
-      - {{dep.name}}PostConfig
+      - {{dep.name}}ExtraConfigPost
   {% endfor %}
-    type: OS::TripleO::NodeExtraConfigPost
     properties:
-        servers: {get_param: [servers, {{role.name}}]}
+      servers: {get_param: servers}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
 
 {% endfor %}
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
index 7af90e2..409b0a3 100644 (file)
@@ -493,6 +493,7 @@ resources:
             fqdn_tenant: {get_attr: [NetHostMap, value, tenant, fqdn]}
             fqdn_management: {get_attr: [NetHostMap, value, management, fqdn]}
             fqdn_ctlplane: {get_attr: [NetHostMap, value, ctlplane, fqdn]}
+            fqdn_external: {get_attr: [NetHostMap, value, external, fqdn]}
 
   # Resource for site-specific injection of root certificate
   NodeTLSCAData:
diff --git a/puppet/services/cinder-backend-netapp.yaml b/puppet/services/cinder-backend-netapp.yaml
index bddc8e1..fbde4c0 100644 (file)
@@ -93,6 +93,12 @@ parameters:
   CinderNetappWebservicePath:
     type: string
     default: '/devmgr/v2'
+  CinderNetappNasSecureFileOperations:
+    type: string
+    default: 'false'
+  CinderNetappNasSecureFilePermissions:
+    type: string
+    default: 'false'
   # DEPRECATED options for compatibility with older versions
   CinderNetappEseriesHostType:
     type: string
@@ -133,5 +139,7 @@ outputs:
         cinder::backend::netapp::netapp_storage_pools: {get_param: CinderNetappStoragePools}
         cinder::backend::netapp::netapp_host_type: {get_param: CinderNetappHostType}
         cinder::backend::netapp::netapp_webservice_path: {get_param: CinderNetappWebservicePath}
+        cinder::backend::netapp::nas_secure_file_operations: {get_param: CinderNetappNasSecureFileOperations}
+        cinder::backend::netapp::nas_secure_file_permissions: {get_param: CinderNetappNasSecureFilePermissions}
       step_config: |
         include ::tripleo::profile::base::cinder::volume
diff --git a/puppet/services/cinder-volume.yaml b/puppet/services/cinder-volume.yaml
index fe95222..1f8c345 100644 (file)
@@ -40,6 +40,20 @@ parameters:
       NFS servers used by Cinder NFS backend. Effective when
       CinderEnableNfsBackend is true.
     type: comma_delimited_list
+  CinderNasSecureFileOperations:
+    default: false
+    description: >
+      Controls whether security enhanced NFS file operations are enabled.
+      Valid values are 'auto', 'true' or 'false'. Effective when
+      CinderEnableNfsBackend is true.
+    type: string
+  CinderNasSecureFilePermissions:
+    default: false
+    description: >
+      Controls whether security enhanced NFS file permissions are enabled.
+      Valid values are 'auto', 'true' or 'false'. Effective when
+      CinderEnableNfsBackend is true.
+    type: string
   CinderRbdPoolName:
     default: volumes
     type: string
@@ -105,6 +119,8 @@ outputs:
             tripleo::profile::base::cinder::volume::cinder_enable_rbd_backend: {get_param: CinderEnableRbdBackend}
             tripleo::profile::base::cinder::volume::nfs::cinder_nfs_mount_options: {get_param: CinderNfsMountOptions}
             tripleo::profile::base::cinder::volume::nfs::cinder_nfs_servers: {get_param: CinderNfsServers}
+            tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_operations: {get_param: CinderNasSecureFileOperations}
+            tripleo::profile::base::cinder::volume::nfs::cinder_nas_secure_file_permissions: {get_param: CinderNasSecureFilePermissions}
             tripleo::profile::base::cinder::volume::iscsi::cinder_lvm_loop_device_size: {get_param: CinderLVMLoopDeviceSize}
             tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_helper: {get_param: CinderISCSIHelper}
             tripleo::profile::base::cinder::volume::iscsi::cinder_iscsi_protocol: {get_param: CinderISCSIProtocol}
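
Note: the two NAS security parameters only take effect when
CinderEnableNfsBackend is true. A minimal environment sketch ('auto' defers
the decision to the driver):

    parameter_defaults:
      CinderEnableNfsBackend: true
      CinderNasSecureFileOperations: 'auto'
      CinderNasSecureFilePermissions: 'auto'
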
diff --git a/puppet/services/gnocchi-base.yaml b/puppet/services/gnocchi-base.yaml
index f4067ef..b4af7e8 100644 (file)
@@ -34,6 +34,10 @@ parameters:
     default: 30
     description: Delay between processing metrics.
     type: number
+  NumberOfStorageSacks:
+    default: 128
+    description: Number of storage sacks to create.
+    type: number
   GnocchiPassword:
     description: The password for the gnocchi service and db account.
     type: string
@@ -87,7 +91,11 @@ outputs:
             query:
               read_default_file: /etc/my.cnf.d/tripleo.cnf
               read_default_group: tripleo
-        gnocchi::db::sync::extra_opts: ''
+        gnocchi::db::sync::extra_opts:
+          str_replace:
+            template: " --sacks-number NUM_SACKS"
+            params:
+              NUM_SACKS: {get_param: NumberOfStorageSacks}
         gnocchi::storage::metric_processing_delay: {get_param: MetricProcessingDelay}
         gnocchi::storage::swift::swift_user: 'service:gnocchi'
         gnocchi::storage::swift::swift_auth_version: 3
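
Note: with the str_replace above, the configured sack count is passed through
to the database sync as an extra option. For example, overriding the default:

    parameter_defaults:
      NumberOfStorageSacks: 64

    # resulting hieradata:
    gnocchi::db::sync::extra_opts: " --sacks-number 64"
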
diff --git a/puppet/services/horizon.yaml b/puppet/services/horizon.yaml
index 93bced8..092d072 100644 (file)
@@ -55,7 +55,7 @@ parameters:
   HorizonSecureCookies:
     description: Set CSRF_COOKIE_SECURE / SESSION_COOKIE_SECURE in Horizon
     type: boolean
-    default: true
+    default: false
   MemcachedIPv6:
     default: false
     description: Enable IPv6 features in Memcached.
diff --git a/puppet/services/ironic-conductor.yaml b/puppet/services/ironic-conductor.yaml
index b167671..0e8c8e1 100644 (file)
@@ -164,6 +164,12 @@ outputs:
             ironic::my_ip: {get_param: [ServiceNetMap, IronicNetwork]}
             ironic::pxe::common::http_port: {get_param: IronicIPXEPort}
             # Credentials to access other services
+            ironic::cinder::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
+            ironic::cinder::username: 'ironic'
+            ironic::cinder::password: {get_param: IronicPassword}
+            ironic::cinder::project_name: 'service'
+            ironic::cinder::user_domain_name: 'Default'
+            ironic::cinder::project_domain_name: 'Default'
             ironic::glance::auth_url: {get_param: [EndpointMap, KeystoneInternal, uri_no_suffix]}
             ironic::glance::username: 'ironic'
             ironic::glance::password: {get_param: IronicPassword}
diff --git a/puppet/services/pacemaker_remote.yaml b/puppet/services/pacemaker_remote.yaml
index 74aaf59..c49b084 100644 (file)
@@ -34,6 +34,42 @@ parameters:
   MonitoringSubscriptionPacemakerRemote:
     default: 'overcloud-pacemaker_remote'
     type: string
+  EnableFencing:
+    default: false
+    description: Whether to enable fencing in Pacemaker or not.
+    type: boolean
+  FencingConfig:
+    default: {}
+    description: |
+      Pacemaker fencing configuration. The JSON should have
+      the following structure:
+        {
+          "devices": [
+            {
+              "agent": "AGENT_NAME",
+              "host_mac": "HOST_MAC_ADDRESS",
+              "params": {"PARAM_NAME": "PARAM_VALUE"}
+            }
+          ]
+        }
+      For instance:
+        {
+          "devices": [
+            {
+              "agent": "fence_xvm",
+              "host_mac": "52:54:00:aa:bb:cc",
+              "params": {
+                "multicast_address": "225.0.0.12",
+                "port": "baremetal_0",
+                "manage_fw": true,
+                "manage_key_file": true,
+                "key_file": "/etc/fence_xvm.key",
+                "key_file_password": "abcdef"
+              }
+            }
+          ]
+        }
+    type: json
   PacemakerRemoteLoggingSource:
     type: json
     default:
@@ -60,6 +96,8 @@ outputs:
             proto: 'tcp'
             dport:
               - 3121
+        tripleo::fencing::config: {get_param: FencingConfig}
+        enable_fencing: {get_param: EnableFencing}
         tripleo::profile::base::pacemaker_remote::remote_authkey: {get_param: PacemakerRemoteAuthkey}
       step_config: |
         include ::tripleo::profile::base::pacemaker_remote
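
Note: enabling fencing for pacemaker-remote nodes is then just a matter of
setting the new parameters, e.g. reusing the fence_xvm example from the
FencingConfig description above (values are illustrative):

    parameter_defaults:
      EnableFencing: true
      FencingConfig:
        devices:
          - agent: fence_xvm
            host_mac: "52:54:00:aa:bb:cc"
            params:
              multicast_address: 225.0.0.12
              port: baremetal_0
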
diff --git a/releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml b/releasenotes/notes/add-cinder-nas-secure-parameters-53f9d6a6e9bc129b.yaml
new file mode 100644 (file)
index 0000000..73b9f9c
--- /dev/null
@@ -0,0 +1,5 @@
+---
+features:
+  - Add parameters to control the Cinder NAS security settings associated
+    with the NFS and NetApp Cinder back ends. The settings are disabled
+    by default.
diff --git a/releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml b/releasenotes/notes/derive-params-custom-plan-env-3a810ff58a68e0ad.yaml
new file mode 100644 (file)
index 0000000..d8fcbfe
--- /dev/null
@@ -0,0 +1,4 @@
+---
+features:
+  - Added a custom plan-environment file for providing workflow specific
+    inputs for the derived parameters workflow.
diff --git a/releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml b/releasenotes/notes/num-storage-sacks-f640be5fcd374a6b.yaml
new file mode 100644 (file)
index 0000000..e5adb6a
--- /dev/null
@@ -0,0 +1,3 @@
+---
+features:
+  - Add support to configure number of sacks in gnocchi.
diff --git a/roles/BlockStorage.yaml b/roles/BlockStorage.yaml
index 6d77247..b011740 100644 (file)
@@ -4,6 +4,10 @@
 - name: BlockStorage
   description: |
     Cinder Block Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   ServicesDefault:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::BlockStorageCinderVolume
diff --git a/roles/CephStorage.yaml b/roles/CephStorage.yaml
index d3de6ba..647c4d5 100644 (file)
@@ -4,6 +4,9 @@
 - name: CephStorage
   description: |
     Ceph OSD Storage node role
+  networks:
+    - Storage
+    - StorageMgmt
   ServicesDefault:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::CACerts
diff --git a/roles/Compute.yaml b/roles/Compute.yaml
index c7e2b27..75a6f60 100644 (file)
@@ -5,6 +5,10 @@
   description: |
     Basic Compute Node role
   CountDefault: 1
+  networks:
+    - InternalApi
+    - Tenant
+    - Storage
   HostnameFormatDefault: '%stackname%-novacompute-%index%'
   disable_upgrade_deployment: True
   ServicesDefault:
diff --git a/roles/Controller.yaml b/roles/Controller.yaml
index 36c46c8..b0a1313 100644 (file)
@@ -9,6 +9,12 @@
   tags:
     - primary
     - controller
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
   HostnameFormatDefault: '%stackname%-controller-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AodhApi
diff --git a/roles/ControllerOpenstack.yaml b/roles/ControllerOpenstack.yaml
index 2d1702e..6cf2120 100644 (file)
@@ -9,6 +9,12 @@
   tags:
     - primary
     - controller
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
   HostnameFormatDefault: '%stackname%-controller-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AodhApi
diff --git a/roles/Database.yaml b/roles/Database.yaml
index 3ef751a..75b26a8 100644 (file)
@@ -4,6 +4,8 @@
 - name: Database
   description: |
     Standalone database role with the database being managed via Pacemaker
+  networks:
+    - InternalApi
   HostnameFormatDefault: '%stackname%-database-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AuditD
diff --git a/roles/Messaging.yaml b/roles/Messaging.yaml
index cbef61a..5b06063 100644 (file)
@@ -4,6 +4,8 @@
 - name: Messaging
   description: |
     Standalone messaging role with RabbitMQ being managed via Pacemaker
+  networks:
+    - InternalApi
   HostnameFormatDefault: '%stackname%-messaging-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AuditD
diff --git a/roles/Networker.yaml b/roles/Networker.yaml
index b393fa7..a28eaa6 100644 (file)
@@ -5,6 +5,8 @@
   description: |
     Standalone networking role to run Neutron services their own. Includes
     Pacemaker integration via PacemakerRemote
+  networks:
+    - InternalApi
   HostnameFormatDefault: '%stackname%-networker-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AuditD
diff --git a/roles/ObjectStorage.yaml b/roles/ObjectStorage.yaml
index 3741ca6..27dc123 100644 (file)
@@ -4,6 +4,10 @@
 - name: ObjectStorage
   description: |
     Swift Object Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::AuditD
diff --git a/roles/README.rst b/roles/README.rst
index 6c74233..cd1fcb4 100644 (file)
@@ -58,6 +58,10 @@ Role Options
 * description: (string) as few sentences describing the role and information
   pertaining to the usage of the role.
 
+ * networks: (list), optional list of networks which the role will have
+   access to when network isolation is enabled. The names should match
+   those defined in network_data.yaml.
+
 Working with Roles
 ==================
 The tripleoclient provides a series of commands that can be used to view
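
Note: the role files updated in this change show the new option in use; a role
that only needs ports on the internal API network declares, for example:

    - name: Telemetry
      description: |
        Telemetry role that has all the telemetry services.
      networks:
        - InternalApi
      HostnameFormatDefault: '%stackname%-telemetry-%index%'
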
diff --git a/roles/Telemetry.yaml b/roles/Telemetry.yaml
index 0f60364..d23ab6e 100644 (file)
@@ -4,6 +4,8 @@
 - name: Telemetry
   description: |
     Telemetry role that has all the telemetry services.
+  networks:
+    - InternalApi
   HostnameFormatDefault: '%stackname%-telemetry-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AodhApi
diff --git a/roles_data.yaml b/roles_data.yaml
index a00f89b..f96e562 100644 (file)
   tags:
     - primary
     - controller
+  networks:
+    - External
+    - InternalApi
+    - Storage
+    - StorageMgmt
+    - Tenant
   HostnameFormatDefault: '%stackname%-controller-%index%'
   ServicesDefault:
     - OS::TripleO::Services::AodhApi
   description: |
     Basic Compute Node role
   CountDefault: 1
+  networks:
+    - InternalApi
+    - Tenant
+    - Storage
   HostnameFormatDefault: '%stackname%-novacompute-%index%'
   disable_upgrade_deployment: True
   ServicesDefault:
 - name: BlockStorage
   description: |
     Cinder Block Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   ServicesDefault:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::BlockStorageCinderVolume
 - name: ObjectStorage
   description: |
     Swift Object Storage node role
+  networks:
+    - InternalApi
+    - Storage
+    - StorageMgmt
   disable_upgrade_deployment: True
   ServicesDefault:
     - OS::TripleO::Services::AuditD
 - name: CephStorage
   description: |
     Ceph OSD Storage node role
+  networks:
+    - Storage
+    - StorageMgmt
   ServicesDefault:
     - OS::TripleO::Services::AuditD
     - OS::TripleO::Services::CACerts