Merge "Specify the Ceph packages to be installed"
author Jenkins <jenkins@review.openstack.org>
Fri, 7 Oct 2016 00:26:06 +0000 (00:26 +0000)
committer Gerrit Code Review <review@openstack.org>
Fri, 7 Oct 2016 00:26:06 +0000 (00:26 +0000)
171 files changed:
capabilities-map.yaml
environments/cinder-netapp-config.yaml
environments/hyperconverged-ceph.yaml [new file with mode: 0644]
environments/logging-environment.yaml [new file with mode: 0644]
environments/low-memory-usage.yaml [new file with mode: 0644]
environments/major-upgrade-aodh-migration.yaml
environments/major-upgrade-pacemaker-converge.yaml
environments/major-upgrade-pacemaker-init.yaml
environments/major-upgrade-pacemaker.yaml
environments/major-upgrade-remove-sahara.yaml [new file with mode: 0644]
environments/manage-firewall.yaml [deleted file]
environments/manila-cephfsnative-config.yaml [new file with mode: 0644]
environments/manila-generic-config.yaml
environments/manila-netapp-config.yaml [new file with mode: 0644]
environments/monitoring-environment.yaml
environments/neutron-opendaylight-l3.yaml
environments/neutron-opendaylight.yaml
extraconfig/all_nodes/mac_hostname.j2.yaml [moved from extraconfig/all_nodes/mac_hostname.yaml with 65% similarity]
extraconfig/all_nodes/random_string.j2.yaml [moved from extraconfig/all_nodes/random_string.yaml with 84% similarity]
extraconfig/all_nodes/swap-partition.j2.yaml [new file with mode: 0644]
extraconfig/all_nodes/swap-partition.yaml [deleted file]
extraconfig/all_nodes/swap.j2.yaml [new file with mode: 0644]
extraconfig/all_nodes/swap.yaml [deleted file]
extraconfig/tasks/major_upgrade_ceph_mon.sh
extraconfig/tasks/major_upgrade_ceph_storage.sh
extraconfig/tasks/major_upgrade_check.sh [new file with mode: 0755]
extraconfig/tasks/major_upgrade_compute.sh
extraconfig/tasks/major_upgrade_controller_pacemaker_1.sh
extraconfig/tasks/major_upgrade_controller_pacemaker_2.sh
extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh [new file with mode: 0755]
extraconfig/tasks/major_upgrade_object_storage.sh
extraconfig/tasks/major_upgrade_pacemaker.yaml
extraconfig/tasks/major_upgrade_pacemaker_init.j2.yaml [moved from extraconfig/tasks/major_upgrade_pacemaker_init.yaml with 52% similarity]
extraconfig/tasks/major_upgrade_pacemaker_migrations.sh
extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
extraconfig/tasks/pacemaker_common_functions.sh
extraconfig/tasks/pacemaker_resource_restart.sh
j2_excludes.yaml [new file with mode: 0644]
network/external.yaml
network/external_v6.yaml
network/internal_api.yaml
network/internal_api_v6.yaml
network/management.yaml
network/ports/external_from_pool_v6.yaml
network/ports/from_service.yaml
network/ports/from_service_v6.yaml
network/ports/internal_api_from_pool_v6.yaml
network/ports/management_from_pool_v6.yaml
network/ports/net_ip_list_map.yaml
network/ports/storage_from_pool_v6.yaml
network/ports/storage_mgmt_from_pool_v6.yaml
network/ports/tenant_from_pool_v6.yaml
network/service_net_map.j2.yaml [moved from network/service_net_map.yaml with 79% similarity]
network/storage.yaml
network/storage_mgmt.yaml
network/storage_mgmt_v6.yaml
network/storage_v6.yaml
network/tenant.yaml
network/tenant_v6.yaml
overcloud-resource-registry-puppet.j2.yaml [moved from overcloud-resource-registry-puppet.yaml with 70% similarity]
overcloud.j2.yaml
puppet/all-nodes-config.yaml
puppet/blockstorage-role.yaml [moved from puppet/cinder-storage.yaml with 92% similarity]
puppet/cephstorage-config.yaml [deleted file]
puppet/cephstorage-role.yaml [moved from puppet/ceph-storage.yaml with 92% similarity]
puppet/compute-config.yaml [deleted file]
puppet/compute-role.yaml [moved from puppet/compute.yaml with 93% similarity]
puppet/config.role.j2.yaml [moved from puppet/blockstorage-config.yaml with 64% similarity]
puppet/controller-config.yaml [deleted file]
puppet/controller-role.yaml [moved from puppet/controller.yaml with 93% similarity]
puppet/extraconfig/all_nodes/neutron-midonet-all-nodes.yaml
puppet/extraconfig/all_nodes/neutron-ml2-cisco-nexus-ucsm.yaml
puppet/extraconfig/pre_deploy/compute/neutron-opencontrail.yaml
puppet/extraconfig/pre_deploy/controller/cinder-netapp.yaml
puppet/extraconfig/tls/tls-cert-inject.yaml
puppet/manifests/overcloud_cephstorage.pp [deleted file]
puppet/manifests/overcloud_compute.pp [deleted file]
puppet/manifests/overcloud_object.pp [deleted file]
puppet/manifests/overcloud_role.pp [moved from puppet/manifests/overcloud_controller.pp with 71% similarity]
puppet/manifests/overcloud_volume.pp [deleted file]
puppet/objectstorage-config.yaml [deleted file]
puppet/objectstorage-role.yaml [moved from puppet/swift-storage.yaml with 92% similarity]
puppet/post.j2.yaml [new file with mode: 0644]
puppet/post.yaml [deleted file]
puppet/role.role.j2.yaml [new file with mode: 0644]
puppet/services/aodh-api.yaml
puppet/services/aodh-base.yaml
puppet/services/apache.yaml
puppet/services/ceilometer-agent-central.yaml
puppet/services/ceilometer-agent-notification.yaml
puppet/services/ceilometer-api.yaml
puppet/services/ceilometer-base.yaml
puppet/services/ceilometer-collector.yaml
puppet/services/ceph-mon.yaml
puppet/services/ceph-rgw.yaml
puppet/services/cinder-api.yaml
puppet/services/cinder-base.yaml
puppet/services/cinder-scheduler.yaml
puppet/services/cinder-volume.yaml
puppet/services/database/mongodb.yaml
puppet/services/database/mysql.yaml
puppet/services/glance-api.yaml
puppet/services/glance-registry.yaml
puppet/services/gnocchi-api.yaml
puppet/services/gnocchi-base.yaml
puppet/services/gnocchi-metricd.yaml
puppet/services/heat-api-cfn.yaml
puppet/services/heat-api-cloudwatch.yaml
puppet/services/heat-api.yaml
puppet/services/heat-engine.yaml
puppet/services/ironic-api.yaml
puppet/services/ironic-base.yaml
puppet/services/keepalived.yaml
puppet/services/keystone.yaml
puppet/services/logging/fluentd-base.yaml [new file with mode: 0644]
puppet/services/logging/fluentd-client.yaml [new file with mode: 0644]
puppet/services/logging/fluentd-config.yaml [new file with mode: 0644]
puppet/services/manila-api.yaml
puppet/services/manila-backend-cephfs.yaml [new file with mode: 0644]
puppet/services/manila-backend-generic.yaml [new file with mode: 0644]
puppet/services/manila-backend-netapp.yaml [new file with mode: 0644]
puppet/services/manila-base.yaml
puppet/services/manila-scheduler.yaml
puppet/services/neutron-api.yaml
puppet/services/neutron-dhcp.yaml
puppet/services/neutron-l3-compute-dvr.yaml
puppet/services/neutron-l3.yaml
puppet/services/neutron-metadata.yaml
puppet/services/neutron-ovs-agent.yaml
puppet/services/nova-api.yaml
puppet/services/nova-base.yaml
puppet/services/nova-compute.yaml
puppet/services/nova-conductor.yaml
puppet/services/nova-consoleauth.yaml
puppet/services/nova-metadata.yaml [new file with mode: 0644]
puppet/services/nova-scheduler.yaml
puppet/services/nova-vnc-proxy.yaml
puppet/services/pacemaker.yaml
puppet/services/pacemaker/cinder-api.yaml
puppet/services/pacemaker/cinder-scheduler.yaml
puppet/services/pacemaker/cinder-volume.yaml
puppet/services/pacemaker/database/mongodb.yaml
puppet/services/pacemaker/database/mysql.yaml
puppet/services/pacemaker/database/redis.yaml
puppet/services/pacemaker/glance-api.yaml
puppet/services/pacemaker/glance-registry.yaml
puppet/services/pacemaker/heat-api-cfn.yaml
puppet/services/pacemaker/heat-api-cloudwatch.yaml
puppet/services/pacemaker/heat-api.yaml
puppet/services/pacemaker/heat-engine.yaml
puppet/services/pacemaker/keystone.yaml
puppet/services/pacemaker/neutron-dhcp.yaml
puppet/services/pacemaker/neutron-l3.yaml
puppet/services/pacemaker/neutron-metadata.yaml
puppet/services/pacemaker/neutron-ovs-agent.yaml
puppet/services/pacemaker/nova-api.yaml
puppet/services/pacemaker/nova-conductor.yaml
puppet/services/pacemaker/nova-consoleauth.yaml
puppet/services/pacemaker/nova-scheduler.yaml
puppet/services/pacemaker/nova-vnc-proxy.yaml
puppet/services/pacemaker/sahara-api.yaml
puppet/services/pacemaker/sahara-engine.yaml
puppet/services/rabbitmq.yaml
puppet/services/sahara-api.yaml
puppet/services/sahara-base.yaml
puppet/services/sahara-engine.yaml
puppet/services/services.yaml
puppet/services/swift-proxy.yaml
puppet/services/time/ntp.yaml
puppet/services/tripleo-firewall.yaml
roles_data.yaml

index 962dfb9..ae74762 100644 (file)
 root_template: overcloud.yaml
 root_environment: overcloud-resource-registry-puppet.yaml
 topics:
-  - title: Basic Configuration
+  - title: Base Resources Configuration
     description:
     environment_groups:
       - title:
-        description: Enable basic configuration required for OpenStack Deployment
+        description: Enable base configuration for all resources required for OpenStack Deployment
         environments:
           - file: overcloud-resource-registry-puppet.yaml
-            title: Default Configuration
+            title: Base resources configuration
             description:
 
-  - title: Deployment options
+  - title: Deployment Options
     description:
     environment_groups:
       - title: High Availability
@@ -62,6 +62,15 @@ topics:
             description: Enable configuration of an Overcloud controller with Pacemaker
             requires:
               - overcloud-resource-registry-puppet.yaml
+      - title: Pacemaker options
+        description:
+        environments:
+          - file: environments/puppet-pacemaker-no-restart.yaml
+            title: Pacemaker No Restart
+            description:
+            requires:
+              - environments/puppet-pacemaker.yaml
+              - overcloud-resource-registry-puppet.yaml
       - title: Docker RDO
         description: >
           Docker container with heat agents for containerized compute node
@@ -71,26 +80,114 @@ topics:
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
+      - title: Enable TLS
+        description: >
+        environments:
+          - file: environments/enable-tls.yaml
+            title: TLS
+            description: >
+              Use this option to pass in certificates for SSL deployments.
+              For these values to take effect, one of the TLS endpoints
+              environments must also be used.
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: TLS Endpoints
+        description: >
+        environments:
+          - file: environments/tls-endpoints-public-dns.yaml
+            title: SSL-enabled deployment with DNS name as public endpoint
+            description: >
+              Use this environment when deploying an SSL-enabled overcloud where the public
+              endpoint is a DNS name.
+            requires:
+              - environments/enable-tls.yaml
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/tls-endpoints-public-ip.yaml
+            title: SSL-enabled deployment with IP address as public endpoint
+            description: >
+              Use this environment when deploying an SSL-enabled overcloud where the public
+              endpoint is an IP address.
+            requires:
+              - environments/enable-tls.yaml
+              - overcloud-resource-registry-puppet.yaml
+      - title: External load balancer
+        description: >
+          Enable external load balancer
+        environments:
+          - file: environments/external-loadbalancer-vip-v6.yaml
+            title: External load balancer IPv6
+            description: >
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/external-loadbalancer-vip.yaml
+            title: External load balancer IPv4
+            description: >
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+
+  - title: Additional Services
+    description: Deploy additional Overcloud services
+    environment_groups:
+      - title: Manila
+        description:
+        environments:
+          - file: environments/manila-generic-config.yaml
+            title: Manila
+            description: Enable Manila generic driver backend
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Sahara
+        description:
+        environments:
+          - file: environments/services/sahara.yaml
+            title: Sahara
+            description: Deploy Sahara service
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Ironic
+        description:
+        environments:
+          - file: environments/services/ironic.yaml
+            title: Ironic
+            description: Deploy Ironic service
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Mistral
+        description:
+        environments:
+          - file: environments/services/mistral.yaml
+            title: Mistral
+            description: Deploy Mistral service
+            requires:
+              - overcloud-resource-registry-puppet.yaml
 
   # - title: Network Interface Configuration
   #   description:
   #   environment_groups:
 
-  - title: Overlay network Configuration
+  - title: Overlay Network Configuration
     description:
     environment_groups:
       - title: Network Isolation
-        description: >
-          Enable the creation of Neutron networks for
-          isolated Overcloud traffic and configure each role to assign ports
-          (related to that role) on these networks.
+        description:
         environments:
           - file: environments/network-isolation.yaml
             title: Network Isolation
-            description: Enable Network Isolation
+            description: >
+              Enable the creation of Neutron networks for
+              isolated Overcloud traffic and configure each role to assign ports
+              (related to that role) on these networks.
             requires:
               - overcloud-resource-registry-puppet.yaml
-      - title: Single nic or Bonding
+          - file: environments/network-isolation-v6.yaml
+            title: Network Isolation IPv6
+            description: >
+              Enable the creation of IPv6 Neutron networks for isolated Overcloud
+              traffic and configure each role to assign ports (related
+              to that role) on these networks.
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+      - title: Single NIC or Bonding
         description: >
           Configure roles to use pair of bonded nics or to use Vlans on a
           single nic. This option assumes use of Network Isolation.
@@ -104,23 +201,105 @@ topics:
             requires:
               - environments/network-isolation.yaml
               - overcloud-resource-registry-puppet.yaml
+          - file: environments/net-bond-with-vlans-no-external.yaml
+            title: Bond with Vlans No External Ports
+            description: >
+              Configure each role to use a pair of bonded nics (nic2 and
+              nic3) and configures an IP address on each relevant isolated network
+              for each role. This option assumes use of Network Isolation.
+              Sets external ports to noop.
+            requires:
+              - environments/network-isolation.yaml
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/net-bond-with-vlans-v6.yaml
+            title: Bond with Vlans IPv6
+            description: >
+              Configure each role to use a pair of bonded nics (nic2 and
+              nic3) and configures an IP address on each relevant isolated network
+              for each role, with IPv6 on the External network.
+              This option assumes use of Network Isolation IPv6.
+            requires:
+              - environments/network-isolation-v6.yaml
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/net-multiple-nics.yaml
+            title: Multiple NICs
+            description: >
+              Configures each role to use a separate NIC for
+              each isolated network.
+              This option assumes use of Network Isolation.
+            requires:
+              - environments/network-isolation.yaml
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/net-multiple-nics-v6.yaml
+            title: Multiple NICs IPv6
+            description: >
+              Configure each role to use a separate NIC for
+              each isolated network with IPv6 on the External network.
+              This option assumes use of Network Isolation IPv6.
+            requires:
+              - environments/network-isolation-v6.yaml
+              - overcloud-resource-registry-puppet.yaml
           - file: environments/net-single-nic-with-vlans.yaml
-            title: Single nic with Vlans
+            title: Single NIC with Vlans
             description: >
-              Configure each role to use Vlans on a single nic for
+              Configure each role to use Vlans on a single NIC for
               each isolated network. This option assumes use of Network Isolation.
             requires:
               - environments/network-isolation.yaml
               - overcloud-resource-registry-puppet.yaml
+          - file: environments/net-single-nic-with-vlans-no-external.yaml
+            title: Single NIC with Vlans No External Ports
+            description: >
+              Configure each role to use Vlans on a single NIC for
+              each isolated network. This option assumes use of Network Isolation.
+              Sets external ports to noop.
+            requires:
+              - environments/network-isolation.yaml
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/net-single-nic-linux-bridge-with-vlans.yaml
+            title: Single NIC with Linux Bridge Vlans
+            description: >
+              Configure each role to use Vlans on a single NIC for
+              each isolated network. This option assumes use of Network Isolation.
+            requires:
+              - environments/network-isolation.yaml
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/net-single-nic-with-vlans-v6.yaml
+            title: Single NIC with Vlans IPv6
+            description: >
+              Configures each role to use Vlans on a single NIC for
+              each isolated network with IPv6 on the External network.
+              This option assumes use of Network Isolation IPv6
+            requires:
+              - environments/network-isolation-v6.yaml
+              - overcloud-resource-registry-puppet.yaml
+      - title: Management Network
+        description: >
+          Enable the creation of a system management network. This
+          creates a Neutron network for isolated Overcloud
+          system management traffic and configures each role to
+          assign a port (related to that role) on that network.
+        environments:
+          - file: environments/network-management.yaml
+            title: Management Network
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/network-management-v6.yaml
+            title: Management Network IPv6
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
 
   - title: Neutron Plugin Configuration
     description:
     environment_groups:
-      - title: BigSwitch extensions or Cisco N1KV backend
-        description:
+      - title: Neutron Plugins
+        description: >
+          Enable various Neutron plugins and backends
         environments:
           - file: environments/neutron-ml2-bigswitch.yaml
-            title: BigSwitch extensions
+            title: BigSwitch Extensions
             description: >
               Enable Big Switch extensions, configured via puppet
             requires:
@@ -131,28 +310,101 @@ topics:
               Enable a Cisco N1KV backend, configured via puppet
             requires:
               - overcloud-resource-registry-puppet.yaml
-      - title: Cisco Neutron plugin
-        description: >
-          Enable a Cisco Neutron plugin
-        environments:
           - file: environments/neutron-ml2-cisco-nexus-ucsm.yaml
             title: Cisco Neutron plugin
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-midonet.yaml
+            title: Deploy MidoNet Services
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-nuage-config.yaml
+            title: Neutron Nuage backend
+            description: Enables Neutron Nuage backend on the controller
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-opencontrail.yaml
+            title: OpenContrail Extensions
+            description: Enables OpenContrail extensions
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-opendaylight.yaml
+            title: OpenDaylight
+            description: Enables OpenDaylight
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-opendaylight-l3.yaml
+            title: OpenDaylight with L3 DVR
+            description: Enables OpenDaylight with L3 DVR
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-ovs-dpdk.yaml
+            title: DPDK with OVS
+            description: Deploy DPDK with OVS
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-ovs-dvr.yaml
+            title: DVR
+            description: Enables DVR in the Overcloud
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/neutron-plumgrid.yaml
+            title: PLUMgrid extensions
+            description: Enables PLUMgrid extensions
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+
+  - title: Nova Extensions
+    description:
+    environment_groups:
+      - title: Nova Extensions
+        description:
+        environments:
+          - file: environments/nova-nuage-config.yaml
+            title: Nuage backend
+            description: >
+              Enables Nuage backend on the Compute
+            requires:
+              - overcloud-resource-registry-puppet.yaml
 
   - title: Storage
     description:
     environment_groups:
-      - title: Cinder NetApp backend
+      - title: Cinder backup service
+        description:
+        environments:
+          - file: environments/cinder-backup.yaml
+            title: Cinder backup service
+            description: >
+              OpenStack Cinder Backup service with Pacemaker configured
+              with Puppet
+            requires:
+              - environments/puppet-pacemaker.yaml
+              - overcloud-resource-registry-puppet.yaml
+      - title: Cinder backend
         description: >
-          Enable a Cinder NetApp backend, configured via puppet
+          Enable various Cinder backends
         environments:
           - file: environments/cinder-netapp-config.yaml
             title: Cinder NetApp backend
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
+          - file: environments/cinder-dellsc-config.yaml
+            title: Cinder Dell Storage Center ISCSI backend
+            description: >
+              Enables a Cinder Dell Storage Center ISCSI backend, configured
+              via puppet
+            requires:
+              - overcloud-resource-registry-puppet.yaml
+          - file: environments/cinder-eqlx-config.yaml
+            title: Cinder EQLX backend
+            description: >
+              Enables a Cinder EQLX backend, configured via puppet
+            requires:
+              - overcloud-resource-registry-puppet.yaml
       - title: Externally managed Ceph
         description: >
           Enable the use of an externally managed Ceph cluster
@@ -224,6 +476,14 @@ topics:
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
+      - title: Manage Firewall
+        description:
+        environments:
+          - file: environments/manage-firewall.yaml
+            title: Manage Firewall
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
 
   - title: Operational Tools
     description:
@@ -236,3 +496,11 @@ topics:
             description:
             requires:
               - overcloud-resource-registry-puppet.yaml
+      - title: Centralized logging support
+        description: Enable centralized logging clients (fluentd)
+        environments:
+          - file: environments/logging-environment.yaml
+            title: Enable fluentd client
+            description:
+            requires:
+              - overcloud-resource-registry-puppet.yaml
index 0437cc6..b9a8434 100644 (file)
@@ -25,5 +25,5 @@ parameter_defaults:
   CinderNetappControllerIps: ''
   CinderNetappSaPassword: ''
   CinderNetappStoragePools: ''
-  CinderNetappEseriesHostType: 'linux_dm_mp'
+  CinderNetappHostType: ''
   CinderNetappWebservicePath: '/devmgr/v2'
diff --git a/environments/hyperconverged-ceph.yaml b/environments/hyperconverged-ceph.yaml
new file mode 100644 (file)
index 0000000..cee4ae4
--- /dev/null
@@ -0,0 +1,11 @@
+# If not using an isolated StorageMgmt network, the following registry mapping
+# should be commented out.
+resource_registry:
+  OS::TripleO::Compute::Ports::StorageMgmtPort: ../network/ports/storage_mgmt.yaml
+
+parameter_defaults:
+  ComputeServices:
+    - OS::TripleO::Services::CephOSD
+
+parameter_merge_strategies:
+  ComputeServices: merge
\ No newline at end of file
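For context, the "merge" strategy above appends the listed entries to the Compute role's default service list instead of replacing it. A minimal sketch of the effective result after merging (only a couple of the role defaults are shown; the exact default list comes from roles_data.yaml):

  # Illustrative effective value of ComputeServices after the merge:
  ComputeServices:
    - OS::TripleO::Services::NovaCompute    # existing Compute role default (example)
    # ... remaining Compute role defaults, unchanged ...
    - OS::TripleO::Services::CephOSD        # appended by this environment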
diff --git a/environments/logging-environment.yaml b/environments/logging-environment.yaml
new file mode 100644 (file)
index 0000000..c583ca7
--- /dev/null
@@ -0,0 +1,29 @@
+## A Heat environment file which can be used to set up
+## logging agents
+
+resource_registry:
+  OS::TripleO::Services::FluentdClient: ../puppet/services/logging/fluentd-client.yaml
+
+#parameter_defaults:
+
+## Simple configuration
+#
+# LoggingServers:
+#   - host: log0.example.com
+#     port: 24224
+#   - host: log1.example.com
+#     port: 24224
+#
+## Example SSL configuration
+## (note the use of port 24284 for ssl connections)
+#
+# LoggingServers:
+#   - host: 192.0.2.11
+#     port: 24284
+# LoggingUsesSSL: true
+# LoggingSharedKey: secret
+# LoggingSSLCertificate: |
+#   -----BEGIN CERTIFICATE-----
+#   ...certificate data here...
+#   -----END CERTIFICATE-----
+
diff --git a/environments/low-memory-usage.yaml b/environments/low-memory-usage.yaml
new file mode 100644 (file)
index 0000000..ad42868
--- /dev/null
@@ -0,0 +1,15 @@
+# Lower the memory usage of overcloud.
+parameter_defaults:
+  CeilometerWorkers: 1
+  CinderWorkers: 1
+  GlanceWorkers: 1
+  HeatWorkers: 1
+  KeystoneWorkers: 1
+  NeutronWorkers: 1
+  NovaWorkers: 1
+  SaharaWorkers: 1
+  SwiftWorkers: 1
+  GnocchiMetricdWorkers: 1
+
+  ApacheMaxRequestWorkers: 32
+  ApacheServerLimit: 32
index c1dbde4..9d6ce73 100644 (file)
@@ -3,8 +3,4 @@ resource_registry:
   OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/mitaka_to_newton_aodh_data_migration.yaml
 
   # no-op the rest
-  OS::TripleO::ControllerPostDeployment: OS::Heat::None
-  OS::TripleO::ComputePostDeployment: OS::Heat::None
-  OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None
-  OS::TripleO::BlockStoragePostDeployment: OS::Heat::None
-  OS::TripleO::CephStoragePostDeployment: OS::Heat::None
+  OS::TripleO::PostDeploySteps: OS::Heat::None
index f023cb3..e9a5f9b 100644 (file)
@@ -1,2 +1,6 @@
 parameter_defaults:
   UpgradeLevelNovaCompute: ''
+
+resource_registry:
+  OS::TripleO::Services::SaharaApi: ../puppet/services/sahara-api.yaml
+  OS::TripleO::Services::SaharaEngine: ../puppet/services/sahara-engine.yaml
index d97f8fc..f4f361d 100644 (file)
@@ -3,8 +3,4 @@ parameter_defaults:
 
 resource_registry:
   OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker_init.yaml
-  OS::TripleO::ControllerPostDeployment: OS::Heat::None
-  OS::TripleO::ComputePostDeployment: OS::Heat::None
-  OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None
-  OS::TripleO::BlockStoragePostDeployment: OS::Heat::None
-  OS::TripleO::CephStoragePostDeployment: OS::Heat::None
+  OS::TripleO::PostDeploySteps: OS::Heat::None
index 95f0966..9fb51a4 100644 (file)
@@ -3,8 +3,4 @@ parameter_defaults:
 
 resource_registry:
   OS::TripleO::Tasks::UpdateWorkflow: ../extraconfig/tasks/major_upgrade_pacemaker.yaml
-  OS::TripleO::ControllerPostDeployment: OS::Heat::None
-  OS::TripleO::ComputePostDeployment: OS::Heat::None
-  OS::TripleO::ObjectStoragePostDeployment: OS::Heat::None
-  OS::TripleO::BlockStoragePostDeployment: OS::Heat::None
-  OS::TripleO::CephStoragePostDeployment: OS::Heat::None
+  OS::TripleO::PostDeploySteps: OS::Heat::None
diff --git a/environments/major-upgrade-remove-sahara.yaml b/environments/major-upgrade-remove-sahara.yaml
new file mode 100644 (file)
index 0000000..e0aaf13
--- /dev/null
@@ -0,0 +1,6 @@
+parameter_defaults:
+  KeepSaharaServicesOnUpgrade: false
+resource_registry:
+  OS::TripleO::Services::SaharaApi: OS::Heat::None
+  OS::TripleO::Services::SaharaEngine: OS::Heat::None
+
diff --git a/environments/manage-firewall.yaml b/environments/manage-firewall.yaml
deleted file mode 100644 (file)
index 5d48698..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-parameter_defaults:
-  ManageFirewall: true
diff --git a/environments/manila-cephfsnative-config.yaml b/environments/manila-cephfsnative-config.yaml
new file mode 100644 (file)
index 0000000..c2f6580
--- /dev/null
@@ -0,0 +1,18 @@
+# A Heat environment file which can be used to enable a
+# Manila CephFS Native driver backend.
+resource_registry:
+  OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
+  OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+  # Only manila-share is pacemaker managed:
+  OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+  OS::Tripleo::Services::ManilaBackendCephFs: ../puppet/services/manila-backend-cephfs.yaml
+
+
+parameter_defaults:
+  ManilaCephFSNativeEnableBackend: true
+  ManilaCephFSNativeBackendName: cephfsnative
+  ManilaCephFSNativeDriverHandlesShareServers: false
+  ManilaCephFSNativeCephFSConfPath: '/etc/ceph/ceph.conf'
+  ManilaCephFSNativeCephFSAuthId: 'manila'
+  ManilaCephFSNativeCephFSClusterName: 'ceph'
+  ManilaCephFSNativeCephFSEnableSnapshots: true
index 74011c6..a847a02 100644 (file)
@@ -1,13 +1,16 @@
-# A Heat environment file which can be used to enable a
-# a Manila generic driver backend.
+# This environment file enables Manila with the Generic backend.
 resource_registry:
   OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
   OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
   # Only manila-share is pacemaker managed:
   OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
-
+  OS::Tripleo::Services::ManilaBackendGeneric: ../puppet/services/manila-backend-generic.yaml
 
 parameter_defaults:
+  ManilaServiceInstanceUser: ''
+  ManilaServiceInstancePassword: ''
+  ManilaServiceInstanceFlavorId: 2
+  ManilaServiceNetworkCidr: '172.16.0.0/16'
   ManilaGenericEnableBackend: true
   ManilaGenericBackendName: tripleo_generic
   ManilaGenericDriverHandlesShareServers: true
@@ -20,7 +23,3 @@ parameter_defaults:
   ManilaGenericServiceInstanceSmbConfigPath: '$share_mount_path/smb.conf'
   ManilaGenericShareVolumeFsType: 'ext4'
   ManilaGenericCinderVolumeType: ''
-  ManilaGenericServiceInstanceUser: ''
-  ManilaGenericServiceInstancePassword: ''
-  ManilaGenericServiceInstanceFlavorId: 2
-  ManilaGenericServiceNetworkCidr: '172.16.0.0/16'
diff --git a/environments/manila-netapp-config.yaml b/environments/manila-netapp-config.yaml
new file mode 100644 (file)
index 0000000..98de6ad
--- /dev/null
@@ -0,0 +1,30 @@
+# This environment file enables Manila with the Netapp backend.
+resource_registry:
+  OS::Tripleo::Services::ManilaApi: ../puppet/services/manila-api.yaml
+  OS::Tripleo::Services::ManilaScheduler: ../puppet/services/manila-scheduler.yaml
+  # Only manila-share is pacemaker managed:
+  OS::Tripleo::Services::ManilaShare: ../puppet/services/pacemaker/manila-share.yaml
+  OS::Tripleo::Services::ManilaBackendNetapp: ../puppet/services/manila-backend-netapp.yaml
+
+parameter_defaults:
+  ManilaNetappEnableBackend: true
+  ManilaNetappBackendName: tripleo_netapp
+  ManilaNetappDriverHandlesShareServers: true
+  ManilaNetappLogin: ''
+  ManilaNetappPassword: ''
+  ManilaNetappServerHostname: ''
+  ManilaNetappTransportType: 'http'
+  ManilaNetappStorageFamily: 'ontap_cluster'
+  ManilaNetappServerPort: 80
+  ManilaNetappVolumeNameTemplate: 'share_%(share_id)s'
+  ManilaNetappVserver: ''
+  ManilaNetappVserverNameTemplate: 'os_%s'
+  ManilaNetappLifNameTemplate: 'os_%(net_allocation_id)s'
+  ManilaNetappAggrNameSearchPattern: '(.*)'
+  ManilaNetappRootVolumeAggr: ''
+  ManilaNetappRootVolume: 'root'
+  ManilaNetappPortNameSearchPattern: '(.*)'
+  ManilaNetappTraceFlags: ''
+  ManilaNetappEnabledShareProtocols: 'nfs3, nfs4.0'
+  ManilaNetappVolumeSnapshotReservePercent: 5
+  ManilaNetappSnapmirrorQuiesceTimeout: 3600
index a8ad208..62ab06d 100644 (file)
@@ -4,7 +4,7 @@
 resource_registry:
   OS::TripleO::Services::SensuClient: ../puppet/services/monitoring/sensu-client.yaml
 
-parameter_defaults:
+#parameter_defaults:
   #### Sensu settings ####
   ##MonitoringRabbitHost: 10.10.10.10
   ##MonitoringRabbitPort: 5672
index d61270b..da915bd 100644 (file)
@@ -2,8 +2,8 @@
 resource_registry:
   OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
-  OS::TripleO::Services::OpenDaylight: puppet/services/opendaylight-api.yaml
-  OS::TripleO::Services::OpenDaylightOvs: puppet/services/opendaylight-ovs.yaml
+  OS::TripleO::Services::OpenDaylight: ../puppet/services/opendaylight-api.yaml
+  OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
   OS::TripleO::Services::NeutronL3Agent: OS::Heat::None
 
 parameter_defaults:
index 8fa2e54..88240ed 100644 (file)
@@ -2,8 +2,8 @@
 resource_registry:
   OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
-  OS::TripleO::Services::OpenDaylight: puppet/services/opendaylight-api.yaml
-  OS::TripleO::Services::OpenDaylightOvs: puppet/services/opendaylight-ovs.yaml
+  OS::TripleO::Services::OpenDaylight: ../puppet/services/opendaylight-api.yaml
+  OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
 
 parameter_defaults:
   EnableOpenDaylightOnController: true
similarity index 65%
rename from extraconfig/all_nodes/mac_hostname.yaml
rename to extraconfig/all_nodes/mac_hostname.j2.yaml
index 7d8704e..75ffc9e 100644 (file)
@@ -9,15 +9,7 @@ description: >
 # out-of-tree templates they may require additional parameters if the
 # in-tree templates add a new role.
 parameters:
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
+  servers:
     type: json
 # Note extra parameters can be defined, then passed data via the
 # environment parameter_defaults, without modifying the parent template
@@ -37,47 +29,17 @@ resources:
   # FIXME(shardy): Long term it'd be better if Heat SoftwareDeployments accepted
   # list instead of a map, then we could join the lists of servers into one
   # deployment instead of requiring one deployment per-role.
-  CollectMacDeploymentsController:
+{% for role in roles %}
+  CollectMacDeployments{{role.name}}:
     type: OS::Heat::SoftwareDeployments
     properties:
       name: CollectMacDeploymentsController
-      servers:  {get_param: controller_servers}
-      config: {get_resource: CollectMacConfig}
-      actions: ['CREATE'] # Only do this on CREATE
-
-  CollectMacDeploymentsCompute:
-    type: OS::Heat::SoftwareDeployments
-    properties:
-      name: CollectMacDeploymentsCompute
-      servers:  {get_param: compute_servers}
-      config: {get_resource: CollectMacConfig}
-      actions: ['CREATE'] # Only do this on CREATE
-
-  CollectMacDeploymentsBlockStorage:
-    type: OS::Heat::SoftwareDeployments
-    properties:
-      name: CollectMacDeploymentsBlockStorage
-      servers:  {get_param: blockstorage_servers}
-      config: {get_resource: CollectMacConfig}
-      actions: ['CREATE'] # Only do this on CREATE
-
-  CollectMacDeploymentsObjectStorage:
-    type: OS::Heat::SoftwareDeployments
-    properties:
-      name: CollectMacDeploymentsObjectStorage
-      servers:  {get_param: objectstorage_servers}
-      config: {get_resource: CollectMacConfig}
-      actions: ['CREATE'] # Only do this on CREATE
-
-  CollectMacDeploymentsCephStorage:
-    type: OS::Heat::SoftwareDeployments
-    properties:
-      name: CollectMacDeploymentsCephStorage
-      servers:  {get_param: cephstorage_servers}
+      servers:  {get_param: [servers, {{role.name}}]}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
+{% endfor %}
 
-  # Now we distribute all-the-macs to all nodes
+  # Now we distribute all-the-macs to all Controller nodes
   DistributeMacConfig:
     type: OS::Heat::SoftwareConfig
     properties:
@@ -101,7 +63,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     properties:
       name: DistributeMacDeploymentsController
-      servers:  {get_param: controller_servers}
+      servers:  {get_param: [servers, Controller]}
       config: {get_resource: DistributeMacConfig}
       input_values:
         # FIXME(shardy): It'd be more convenient if we could join these
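For reference, the per-role *_servers parameters are collapsed into a single "servers" json map keyed by role name, which is what the {get_param: [servers, Controller]} lookups above index into. A rough, schematic sketch of its shape (role names come from roles_data.yaml; the nested values are whatever per-role server mapping the parent template passes in, shown here only as placeholders):

  servers:
    Controller:
      overcloud-controller-0: <server id>    # placeholder value
    Compute:
      overcloud-novacompute-0: <server id>   # placeholder value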
similarity index 84%
rename from extraconfig/all_nodes/random_string.yaml
rename to extraconfig/all_nodes/random_string.j2.yaml
index d38701e..9ce2ca8 100644 (file)
@@ -10,15 +10,7 @@ description: >
 # out-of-tree templates they may require additional parameters if the
 # in-tree templates add a new role.
 parameters:
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
+  servers:
     type: json
 # Note extra parameters can be defined, then passed data via the
 # environment parameter_defaults, without modifying the parent template
@@ -42,7 +34,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     properties:
       name: RandomDeploymentsController
-      servers:  {get_param: controller_servers}
+      servers:  {get_param: [servers, Controller]}
       config: {get_resource: RandomConfig}
       actions: ['CREATE'] # Only do this on CREATE
       input_values:
@@ -52,7 +44,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     properties:
       name: RandomDeploymentsCompute
-      servers:  {get_param: compute_servers}
+      servers:  {get_param: [servers, Compute]}
       config: {get_resource: RandomConfig}
       actions: ['CREATE'] # Only do this on CREATE
       input_values:
diff --git a/extraconfig/all_nodes/swap-partition.j2.yaml b/extraconfig/all_nodes/swap-partition.j2.yaml
new file mode 100644 (file)
index 0000000..36076b0
--- /dev/null
@@ -0,0 +1,44 @@
+heat_template_version: 2014-10-16
+
+description: >
+  Extra config to add swap space to nodes.
+
+# Parameters passed from the parent template - note if you maintain
+# out-of-tree templates they may require additional parameters if the
+# in-tree templates add a new role.
+parameters:
+  servers:
+    type: json
+  swap_partition_label:
+    type: string
+    description: Swap partition label
+    default: 'swap1'
+
+
+resources:
+
+  SwapConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: |
+        #!/bin/bash
+        set -eux
+        swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label)
+        swapon $swap_partition
+        echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
+      inputs:
+        - name: swap_partition_label
+          description: Swap partition label
+          default: 'swap1'
+
+{% for role in roles %}
+  {{role.name}}SwapDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      config: {get_resource: SwapConfig}
+      servers: {get_param: [servers, {{role.name}}]}
+      input_values:
+        swap_partition_label: {get_param: swap_partition_label}
+      actions: ["CREATE"]
+{% endfor %}
diff --git a/extraconfig/all_nodes/swap-partition.yaml b/extraconfig/all_nodes/swap-partition.yaml
deleted file mode 100644 (file)
index e6fa9ec..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  Extra config to add swap space to nodes.
-
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
-parameters:
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
-    type: json
-  swap_partition_label:
-    type: string
-    description: Swap partition label
-    default: 'swap1'
-
-
-resources:
-
-  SwapConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: |
-        #!/bin/bash
-        set -eux
-        swap_partition=$(realpath /dev/disk/by-label/$swap_partition_label)
-        swapon $swap_partition
-        echo "$swap_partition swap swap defaults 0 0" >> /etc/fstab
-      inputs:
-        - name: swap_partition_label
-          description: Swap partition label
-          default: 'swap1'
-
-  ControllerSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: controller_servers}
-      input_values:
-        swap_partition_label: {get_param: swap_partition_label}
-      actions: ["CREATE"]
-
-  ComputeSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: compute_servers}
-      input_values:
-        swap_partition_label: {get_param: swap_partition_label}
-      actions: ["CREATE"]
-
-  BlockStorageSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: blockstorage_servers}
-      input_values:
-        swap_partition_label: {get_param: swap_partition_label}
-      actions: ["CREATE"]
-
-  ObjectStorageSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: objectstorage_servers}
-      input_values:
-        swap_partition_label: {get_param: swap_partition_label}
-      actions: ["CREATE"]
-
-  CephStorageSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: cephstorage_servers}
-      input_values:
-        swap_partition_label: {get_param: swap_partition_label}
-      actions: ["CREATE"]
diff --git a/extraconfig/all_nodes/swap.j2.yaml b/extraconfig/all_nodes/swap.j2.yaml
new file mode 100644 (file)
index 0000000..ce65dac
--- /dev/null
@@ -0,0 +1,58 @@
+heat_template_version: 2014-10-16
+
+description: >
+  Extra config to add swap space to nodes.
+
+# Parameters passed from the parent template - note if you maintain
+# out-of-tree templates they may require additional parameters if the
+# in-tree templates add a new role.
+parameters:
+  servers:
+    type: json
+  swap_size_megabytes:
+    type: string
+    description: Amount of swap space to allocate in megabytes
+    default: '4096'
+  swap_path:
+    type: string
+    description: Full path to location of swap file
+    default: '/swap'
+
+
+resources:
+
+  SwapConfig:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: |
+        #!/bin/bash
+        set -eux
+        if [ ! -f $swap_path ]; then
+          dd if=/dev/zero of=$swap_path count=$swap_size_megabytes bs=1M
+          chmod 0600 $swap_path
+          mkswap $swap_path
+          swapon $swap_path
+        else
+          echo "$swap_path already exists"
+        fi
+        echo "$swap_path swap swap defaults 0 0" >> /etc/fstab
+      inputs:
+        - name: swap_size_megabytes
+          description: Amount of swap space to allocate in megabytes
+          default: '4096'
+        - name: swap_path
+          description: Full path to location of swap file
+          default: '/swap'
+
+{% for role in roles %}
+  {{role.name}}SwapDeployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      config: {get_resource: SwapConfig}
+      servers: {get_param: [servers, {{role.name}}]}
+      input_values:
+        swap_size_megabytes: {get_param: swap_size_megabytes}
+        swap_path: {get_param: swap_path}
+      actions: ["CREATE"]
+{% endfor %}
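As a sanity check, this is what the jinja2 loop above renders for a single role, assuming a role named Compute in roles_data.yaml; it mirrors the hand-written per-role blocks removed from swap.yaml below:

  ComputeSwapDeployment:
    type: OS::Heat::SoftwareDeploymentGroup
    properties:
      config: {get_resource: SwapConfig}
      servers: {get_param: [servers, Compute]}
      input_values:
        swap_size_megabytes: {get_param: swap_size_megabytes}
        swap_path: {get_param: swap_path}
      actions: ["CREATE"]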
diff --git a/extraconfig/all_nodes/swap.yaml b/extraconfig/all_nodes/swap.yaml
deleted file mode 100644 (file)
index 5383ffc..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-heat_template_version: 2014-10-16
-
-description: >
-  Extra config to add swap space to nodes.
-
-# Parameters passed from the parent template - note if you maintain
-# out-of-tree templates they may require additional parameters if the
-# in-tree templates add a new role.
-parameters:
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
-    type: json
-  swap_size_megabytes:
-    type: string
-    description: Amount of swap space to allocate in megabytes
-    default: '4096'
-  swap_path:
-    type: string
-    description: Full path to location of swap file
-    default: '/swap'
-
-
-resources:
-
-  SwapConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: script
-      config: |
-        #!/bin/bash
-        set -eux
-        if [ ! -f $swap_path ]; then
-          dd if=/dev/zero of=$swap_path count=$swap_size_megabytes bs=1M
-          chmod 0600 $swap_path
-          mkswap $swap_path
-          swapon $swap_path
-        else
-          echo "$swap_path already exists"
-        fi
-        echo "$swap_path swap swap defaults 0 0" >> /etc/fstab
-      inputs:
-        - name: swap_size_megabytes
-          description: Amount of swap space to allocate in megabytes
-          default: '4096'
-        - name: swap_path
-          description: Full path to location of swap file
-          default: '/swap'
-
-  ControllerSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: controller_servers}
-      input_values:
-        swap_size_megabytes: {get_param: swap_size_megabytes}
-        swap_path: {get_param: swap_path}
-      actions: ["CREATE"]
-
-  ComputeSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: compute_servers}
-      input_values:
-        swap_size_megabytes: {get_param: swap_size_megabytes}
-        swap_path: {get_param: swap_path}
-      actions: ["CREATE"]
-
-  BlockStorageSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: blockstorage_servers}
-      input_values:
-        swap_size_megabytes: {get_param: swap_size_megabytes}
-        swap_path: {get_param: swap_path}
-      actions: ["CREATE"]
-
-  ObjectStorageSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: objectstorage_servers}
-      input_values:
-        swap_size_megabytes: {get_param: swap_size_megabytes}
-        swap_path: {get_param: swap_path}
-      actions: ["CREATE"]
-
-  CephStorageSwapDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      config: {get_resource: SwapConfig}
-      servers: {get_param: cephstorage_servers}
-      input_values:
-        swap_size_megabytes: {get_param: swap_size_megabytes}
-        swap_path: {get_param: swap_path}
-      actions: ["CREATE"]
index b76dd7c..e0d160f 100755 (executable)
@@ -5,7 +5,7 @@ set -o pipefail
 echo INFO: starting $(basename "$0")
 
 # Exit if not running
-if ! pidof ceph-mon; then
+if ! pidof ceph-mon &> /dev/null; then
     echo INFO: ceph-mon is not running, skipping
     exit 0
 fi
@@ -18,13 +18,13 @@ if ! [[ "$INSTALLED_VERSION" =~ ^0\.94.* ]]; then
 fi
 
 CEPH_STATUS=$(ceph health | awk '{print $1}')
-if [ ${CEPH_STATUS} = HEALTH_ERR ]; do
+if [ ${CEPH_STATUS} = HEALTH_ERR ]; then
     echo ERROR: Ceph cluster status is HEALTH_ERR, cannot be upgraded
     exit 1
 fi
 
 # Useful when upgrading with OSDs num < replica size
-if [ $ignore_ceph_upgrade_warnings != "true" ]; then
+if [[ ${ignore_ceph_upgrade_warnings:-False} != [Tt]rue ]]; then
     timeout 300 bash -c "while [ ${CEPH_STATUS} != HEALTH_OK ]; do
       echo WARNING: Waiting for Ceph cluster status to go HEALTH_OK;
       sleep 30;
@@ -44,7 +44,7 @@ timeout 60 bash -c "while kill -0 ${MON_PID} 2> /dev/null; do
 done"
 
 # Update to Jewel
-yum -y -q update ceph-mon
+yum -y -q update ceph-mon ceph
 
 # Restart/Exit if not on Jewel, only in that case we need the changes
 UPDATED_VERSION=$(ceph --version | awk '{print $3}')
@@ -54,7 +54,7 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
 elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
     # RPM could own some of these but we can't take risks on the pre-existing files
     for d in /var/lib/ceph/mon /var/log/ceph /var/run/ceph /etc/ceph; do
-        chown -R ceph:ceph $d
+        chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed
     done
 
     # Replay udev events with newer rules
@@ -71,6 +71,10 @@ elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
       sleep 10;
     done"
 
+    # if tunables become legacy, cluster status will be HEALTH_WARN causing
+    # upgrade to fail on following node
+    ceph osd crush tunables default
+
     echo INFO: Ceph was upgraded to Jewel
 else
     echo ERROR: Ceph was upgraded to an unknown release, daemon is stopped, need manual intervention
index 03a1c1c..56b54e2 100644 (file)
@@ -18,7 +18,7 @@ set -eu
 echo INFO: starting $(basename "$0")
 
 # Exit if not running
-if ! pidof ceph-osd; then
+if ! pidof ceph-osd &> /dev/null; then
     echo INFO: ceph-osd is not running, skipping
     exit 0
 fi
@@ -63,12 +63,22 @@ if [[ "$UPDATED_VERSION" =~ ^0\.94.* ]]; then
 elif [[ "$UPDATED_VERSION" =~ ^10\.2.* ]]; then
     # RPM could own some of these but we can't take risks on the pre-existing files
     for d in /var/lib/ceph/osd /var/log/ceph /var/run/ceph /etc/ceph; do
-        chown -R ceph:ceph $d
+        chown -L -R ceph:ceph $d || echo WARNING: chown of $d failed
     done
 
     # Replay udev events with newer rules
     udevadm trigger && udevadm settle
 
+    # If on ext4, we need to enforce lower values for name and namespace len
+    # or ceph-osd will refuse to start, see: http://tracker.ceph.com/issues/16187
+    for OSD_ID in $OSD_IDS; do
+      OSD_FS=$(findmnt -n -o FSTYPE -T /var/lib/ceph/osd/ceph-${OSD_ID})
+      if [ ${OSD_FS} = ext4 ]; then
+        crudini --set /etc/ceph/ceph.conf global osd_max_object_name_len 256
+        crudini --set /etc/ceph/ceph.conf global osd_max_object_namespace_len 64
+      fi
+    done
+
     # Enable systemd unit
     systemctl enable ceph-osd.target
     for OSD_ID in $OSD_IDS; do
diff --git a/extraconfig/tasks/major_upgrade_check.sh b/extraconfig/tasks/major_upgrade_check.sh
new file mode 100755 (executable)
index 0000000..b65f691
--- /dev/null
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -eu
+
+check_cluster()
+{
+    if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
+        echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
+        exit 1
+    fi
+}
+
+check_pcsd()
+{
+    if pcs status 2>&1 | grep -E 'Offline'; then
+        echo_error "ERROR: upgrade cannot start with some pcsd daemon offline"
+        exit 1
+    fi
+}
+
+check_disk_for_mysql_dump()
+{
+    # Where to backup current database if mysql need to be upgraded
+    MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
+    MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
+    # Spare disk ratio for extra safety
+    MYSQL_BACKUP_SIZE_RATIO=1.2
+
+    # Shall we upgrade mysql data directory during the stack upgrade?
+    if [ "$mariadb_do_major_upgrade" = "auto" ]; then
+        ret=$(is_mysql_upgrade_needed)
+        if [ $ret = "1" ]; then
+            DO_MYSQL_UPGRADE=1
+        else
+            DO_MYSQL_UPGRADE=0
+        fi
+        echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
+    elif [ "$mariadb_do_major_upgrade" = "no" ]; then
+        DO_MYSQL_UPGRADE=0
+    else
+        DO_MYSQL_UPGRADE=1
+    fi
+
+    if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+        if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
+
+            if [ -d "$MYSQL_BACKUP_DIR" ]; then
+                echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
+                exit 1
+            fi
+            mkdir "$MYSQL_BACKUP_DIR"
+            if [ $? -ne 0 ]; then
+                echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
+                exit 1
+            fi
+
+            # the /root/.my.cnf is needed because we set the mysql root
+            # password from liberty onwards
+            backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
+            # While not ideal, this step allows us to calculate exactly how much space the dump
+            # will need. Our main goal here is avoiding any chance of corruption due to disk space
+            # exhaustion
+            backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
+            database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
+            free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
+
+            # we need at least space for a new mysql database + dump of the existing one,
+            # times a small factor for additional safety room
+            # note: bash doesn't do floating point math or floats in if statements,
+            # so use python to apply the ratio and cast it back to integer
+            required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
+            if [ $required_space -ge $free_space ]; then
+                echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
+                exit 1
+            fi
+        fi
+    fi
+}
+
+check_python_rpm()
+{
+    # If for some reason rpm-python are missing we want to error out early enough
+    if ! rpm -q rpm-python &> /dev/null; then
+        echo_error "ERROR: upgrade cannot start without rpm-python installed"
+        exit 1
+    fi
+}
+
+check_clean_cluster()
+{
+    if pcs status | grep -q Stopped:; then
+        echo_error "ERROR: upgrade cannot start with stopped resources on the cluster. Make sure that all the resources are up and running."
+        exit 1
+    fi
+}
+
+check_galera_root_password()
+{
+    # BZ: 1357112
+    if [ ! -e /root/.my.cnf ]; then
+        echo_error "ERROR: upgrade cannot be started, the galera password is missing. The overcloud needs update."
+        exit 1
+    fi
+}
index 78628c8..a1df695 100644 (file)
@@ -12,6 +12,8 @@ cat > $UPGRADE_SCRIPT << ENDOFCAT
 ### This file is automatically delivered to the compute nodes as part of the
 ### tripleo upgrades workflow
 
+set -eu
+
 # pin nova to kilo (messaging +-1) for the nova-compute service
 
 crudini  --set /etc/nova/nova.conf upgrade_levels compute $upgrade_level_nova_compute
index 0b70263..23074fc 100755 (executable)
@@ -4,11 +4,14 @@ set -eu
 
 cluster_sync_timeout=1800
 
-if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
-    echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
-    exit 1
+check_cluster
+check_pcsd
+if [[ -n $(is_bootstrap_node) ]]; then
+    check_clean_cluster
 fi
-
+check_python_rpm
+check_galera_root_password
+check_disk_for_mysql_dump
 
 # We want to disable fencing during the cluster --stop as it might fence
 # nodes where a service fails to stop, which could be fatal during an upgrade
@@ -17,12 +20,43 @@ fi
 STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
 pcs property set stonith-enabled=false
 
-# If for some reason rpm-python are missing we want to error out early enough
-if ! rpm -q rpm-python &> /dev/null; then
-    echo_error "ERROR: upgrade cannot start without rpm-python installed"
-    exit 1
+# Migrate to HA NG and fix up rabbitmq queues
+# We fix up the rabbitmq ha queues after the migration because it will
+# restart the rabbitmq resource. Doing it after the migration means no other
+# services will be restarted as there are no other constraints
+if [[ -n $(is_bootstrap_node) ]]; then
+    migrate_full_to_ng_ha
+    rabbitmq_mitaka_newton_upgrade
 fi
 
+# After migrating the cluster to HA-NG the services not under pacemaker's control
+# are still up and running. We need to stop them explicitly, otherwise during the yum
+# upgrade the rpm %post sections will try to do a systemctl try-restart <service>, which
+# is going to take a long time because rabbit is down. With the services stopped,
+# systemctl try-restart is a no-op
+
+for service in $(services_to_migrate); do
+    manage_systemd_service stop "${service%%-clone}"
+    # The reason for not reusing check_resource_systemd is that systemctl
+    # is-active has been observed returning "unknown" for at least one
+    # service that was stopped (see LP 1627254)
+    timeout=600
+    tstart=$(date +%s)
+    tend=$(( $tstart + $timeout ))
+    check_interval=3
+    while (( $(date +%s) < $tend )); do
+      if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
+        echo "$service still active, sleeping $check_interval seconds."
+        sleep $check_interval
+      else
+        # we do not care if it is inactive, unknown or failed as long as it is
+        # not running
+        break
+      fi
+
+    done
+done
+
 # In case the mysql package is updated, the database on disk must be
 # upgraded as well. This typically needs to happen during major
 # version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1...)
@@ -35,77 +69,20 @@ fi
 # on mysql package versioning, but this can be overridden manually
 # to support specific upgrade scenarios
 
-# Where to backup current database if mysql need to be upgraded
-MYSQL_BACKUP_DIR=/var/tmp/mysql_upgrade_osp
-MYSQL_TEMP_UPGRADE_BACKUP_DIR=/var/lib/mysql-temp-upgrade-backup
-# Spare disk ratio for extra safety
-MYSQL_BACKUP_SIZE_RATIO=1.2
-
-# Shall we upgrade mysql data directory during the stack upgrade?
-if [ "$mariadb_do_major_upgrade" = "auto" ]; then
-    ret=$(is_mysql_upgrade_needed)
-    if [ $ret = "1" ]; then
-        DO_MYSQL_UPGRADE=1
-    else
-        DO_MYSQL_UPGRADE=0
-    fi
-    echo "mysql upgrade required: $DO_MYSQL_UPGRADE"
-elif [ "$mariadb_do_major_upgrade" = "no" ]; then
-    DO_MYSQL_UPGRADE=0
-else
-    DO_MYSQL_UPGRADE=1
-fi
-
-if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+if [[ -n $(is_bootstrap_node) ]]; then
     if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
-        if [ -d "$MYSQL_BACKUP_DIR" ]; then
-            echo_error "Error: $MYSQL_BACKUP_DIR exists already. Likely an upgrade failed previously"
-            exit 1
-        fi
-        mkdir "$MYSQL_BACKUP_DIR"
-        if [ $? -ne 0 ]; then
-                echo_error "Error: could not create temporary backup directory $MYSQL_BACKUP_DIR"
-                exit 1
-        fi
-
-        # the /root/.my.cnf is needed because we set the mysql root
-        # password from liberty onwards
-        backup_flags="--defaults-extra-file=/root/.my.cnf -u root --flush-privileges --all-databases --single-transaction"
-        # While not ideal, this step allows us to calculate exactly how much space the dump
-        # will need. Our main goal here is avoiding any chance of corruption due to disk space
-        # exhaustion
-        backup_size=$(mysqldump $backup_flags 2>/dev/null | wc -c)
-        database_size=$(du -cb /var/lib/mysql | tail -1 | awk '{ print $1 }')
-        free_space=$(df -B1 --output=avail "$MYSQL_BACKUP_DIR" | tail -1)
-
-        # we need at least space for a new mysql database + dump of the existing one,
-        # times a small factor for additional safety room
-        # note: bash doesn't do floating point math or floats in if statements,
-        # so use python to apply the ratio and cast it back to integer
-        required_space=$(python -c "from __future__ import print_function; print(\"%d\" % int((($database_size + $backup_size) * $MYSQL_BACKUP_SIZE_RATIO)))")
-        if [ $required_space -ge $free_space ]; then
-                echo_error "Error: not enough free space in $MYSQL_BACKUP_DIR ($required_space bytes required)"
-                exit 1
-        fi
-
         mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
         cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
     fi
 
-    pcs resource disable httpd
-    check_resource httpd stopped 1800
-    pcs resource disable openstack-core
-    check_resource openstack-core stopped 1800
     pcs resource disable redis
     check_resource redis stopped 600
-    pcs resource disable mongod
-    check_resource mongod stopped 600
     pcs resource disable rabbitmq
     check_resource rabbitmq stopped 600
-    pcs resource disable memcached
-    check_resource memcached stopped 600
     pcs resource disable galera
     check_resource galera stopped 600
+    pcs resource disable openstack-cinder-volume
+    check_resource openstack-cinder-volume stopped 600
     # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
     #   https://bugzilla.redhat.com/show_bug.cgi?id=1330688
     for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
@@ -115,7 +92,8 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
     pcs cluster stop --all
 fi
 
-# Swift isn't controled by pacemaker
+
+# Swift isn't controlled by pacemaker
 systemctl_swift stop
 
 tstart=$(date +%s)
@@ -206,3 +184,7 @@ crudini  --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
 crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
 # LP: 1615035, required only for M/N upgrade.
 crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
+# LP: 1627450, required only for M/N upgrade
+crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
+
+crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
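
For readers unfamiliar with crudini, a brief sketch of what one of the calls above does, using the scheduler_driver line as the example (crudini creates the section and key if they do not exist yet):

    crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler
    # nova.conf now contains, among everything else:
    #   [DEFAULT]
    #   scheduler_driver = filter_scheduler
    crudini --get /etc/nova/nova.conf DEFAULT scheduler_driver   # -> filter_scheduler
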
index bc708cc..b3a0098 100755 (executable)
@@ -3,10 +3,10 @@
 set -eu
 
 cluster_form_timeout=600
-cluster_settle_timeout=600
+cluster_settle_timeout=1800
 galera_sync_timeout=600
 
-if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+if [[ -n $(is_bootstrap_node) ]]; then
     pcs cluster start --all
 
     tstart=$(date +%s)
@@ -26,14 +26,23 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
 
     for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Stopped | awk '{ print $1 }'); do
       pcs resource enable $vip
-      check_resource $vip started 60
+      check_resource_pacemaker $vip started 60
     done
+fi
 
-    pcs resource enable galera
-    check_resource galera started 600
-    pcs resource enable mongod
-    check_resource mongod started 600
+start_or_enable_service galera
+check_resource galera started 600
+start_or_enable_service redis
+check_resource redis started 600
+# We need mongod, which is now a systemd service, up and running before calling
+# ceilometer-dbsync. There is still a race here: mongod might not be up on all nodes yet,
+# so ceilometer-dbsync may fail a couple of times before it succeeds. As it retries
+# indefinitely, we should be fine.
+# Due to LP Bug https://bugs.launchpad.net/tripleo/+bug/1627254 we use systemctl directly for now
+systemctl start mongod
+check_resource mongod started 600
 
+if [[ -n $(is_bootstrap_node) ]]; then
     tstart=$(date +%s)
     while ! clustercheck; do
         sleep 5
@@ -54,18 +63,7 @@ if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)
     neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini upgrade head
     nova-manage db sync
     nova-manage api_db sync
-
-    pcs resource enable memcached
-    check_resource memcached started 600
-    pcs resource enable rabbitmq
-    check_resource rabbitmq started 600
-    pcs resource enable redis
-    check_resource redis started 600
-    pcs resource enable openstack-core
-    check_resource openstack-core started 1800
-    pcs resource enable httpd
-    check_resource httpd started 1800
+    nova-manage db online_data_migrations
+    gnocchi-upgrade
+    sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
 fi
-
-# Swift isn't controled by heat
-systemctl_swift start
diff --git a/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh b/extraconfig/tasks/major_upgrade_controller_pacemaker_3.sh
new file mode 100755 (executable)
index 0000000..49c045b
--- /dev/null
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+set -eu
+
+start_or_enable_service rabbitmq
+check_resource rabbitmq started 600
+start_or_enable_service redis
+check_resource redis started 600
+start_or_enable_service openstack-cinder-volume
+check_resource openstack-cinder-volume started 600
+
+
+# Swift isn't controlled by pacemaker
+systemctl_swift start
+
+# We need to start the systemd services we explicitly stopped in step _1.sh
+# FIXME: Should we let puppet during the convergence step do the service enabling or
+# should we add it here?
+services=$(services_to_migrate)
+if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
+    services=${services%%openstack-sahara*}
+fi
+for service in $services; do
+    manage_systemd_service start "${service%%-clone}"
+    check_resource_systemd "${service%%-clone}" started 600
+done
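
A quick sketch of the `${services%%openstack-sahara*}` expansion used above, with an abbreviated service list (illustrative only):

    # "%%pattern" strips the longest matching suffix, i.e. everything from the first
    # "openstack-sahara" onwards. Both sahara entries are dropped only because they
    # come last in the list returned by services_to_migrate.
    services="httpd-clone openstack-nova-api-clone openstack-sahara-api-clone openstack-sahara-engine-clone"
    echo "${services%%openstack-sahara*}"
    # -> "httpd-clone openstack-nova-api-clone "
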
index 931f4f4..f82457c 100644 (file)
@@ -12,6 +12,7 @@ cat > $UPGRADE_SCRIPT << ENDOFCAT
 ### This file is automatically delivered to the swift-storage nodes as part of the
 ### tripleo upgrades workflow
 
+set -eu
 
 function systemctl_swift {
     action=\$1
index 598d22d..7c78d5a 100644 (file)
@@ -1,16 +1,8 @@
-heat_template_version: 2014-10-16
+heat_template_version: 2016-10-14
 description: 'Upgrade for Pacemaker deployments'
 
 parameters:
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
+  servers:
     type: json
   input_values:
     type: json
@@ -30,6 +22,11 @@ parameters:
     type: boolean
     default: false
     description: If enabled, Ceph upgrade will be forced even though cluster or PGs status is not clean
+  KeepSaharaServicesOnUpgrade:
+    type: boolean
+    default: true
+    description: Whether to keep Sahara services when upgrading controller nodes from mitaka to newton
+
 
 resources:
   # TODO(jistr): for Mitaka->Newton upgrades and further we can use
@@ -54,9 +51,10 @@ resources:
   CephMonUpgradeDeployment:
     type: OS::Heat::SoftwareDeploymentGroup
     properties:
-      servers: {get_param: controller_servers}
+      servers: {get_param: [servers, Controller]}
       config: {get_resource: CephMonUpgradeConfig}
       input_values: {get_param: input_values}
+    update_policy:
       batch_create:
         max_batch_size: 1
       rolling_update:
@@ -82,6 +80,7 @@ resources:
               params:
                 MYSQL_MAJOR_UPGRADE: {get_param: MySqlMajorUpgrade}
           - get_file: pacemaker_common_functions.sh
+          - get_file: major_upgrade_check.sh
           - get_file: major_upgrade_pacemaker_migrations.sh
           - get_file: major_upgrade_controller_pacemaker_1.sh
 
@@ -89,7 +88,7 @@ resources:
     type: OS::Heat::SoftwareDeploymentGroup
     depends_on: CephMonUpgradeDeployment
     properties:
-      servers:  {get_param: controller_servers}
+      servers:  {get_param: [servers, Controller]}
       config: {get_resource: ControllerPacemakerUpgradeConfig_Step1}
       input_values: {get_param: input_values}
 
@@ -103,7 +102,7 @@ resources:
   BlockStorageUpgradeDeployment:
     type: OS::Heat::SoftwareDeploymentGroup
     properties:
-      servers:  {get_param: blockstorage_servers}
+      servers:  {get_param: [servers, BlockStorage]}
       config: {get_resource: BlockStorageUpgradeConfig}
       input_values: {get_param: input_values}
 
@@ -122,7 +121,32 @@ resources:
     type: OS::Heat::SoftwareDeploymentGroup
     depends_on: BlockStorageUpgradeDeployment
     properties:
-      servers:  {get_param: controller_servers}
+      servers:  {get_param: [servers, Controller]}
       config: {get_resource: ControllerPacemakerUpgradeConfig_Step2}
       input_values: {get_param: input_values}
 
+  ControllerPacemakerUpgradeConfig_Step3:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config:
+        list_join:
+        - ''
+        - - str_replace:
+              template: |
+                #!/bin/bash
+                keep_sahara_services_on_upgrade='KEEP_SAHARA_SERVICES_ON_UPGRADE'
+              params:
+                KEEP_SAHARA_SERVICES_ON_UPGRADE: {get_param: KeepSaharaServicesOnUpgrade}
+          - get_file: pacemaker_common_functions.sh
+          - get_file: major_upgrade_pacemaker_migrations.sh
+          - get_file: major_upgrade_controller_pacemaker_3.sh
+
+  ControllerPacemakerUpgradeDeployment_Step3:
+    type: OS::Heat::SoftwareDeploymentGroup
+    depends_on: ControllerPacemakerUpgradeDeployment_Step2
+    properties:
+      servers:  {get_param: [servers, Controller]}
+      config: {get_resource: ControllerPacemakerUpgradeConfig_Step3}
+      input_values: {get_param: input_values}
+
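
A hedged sketch of the script that ControllerPacemakerUpgradeConfig_Step3 produces once Heat resolves list_join, str_replace and get_file (file bodies abbreviated; the exact spelling of the substituted boolean depends on Heat, which is why the step-3 script matches [Ff]alse):

    #!/bin/bash
    keep_sahara_services_on_upgrade='True'
    # ... contents of pacemaker_common_functions.sh ...
    # ... contents of major_upgrade_pacemaker_migrations.sh ...
    # ... contents of major_upgrade_controller_pacemaker_3.sh ...
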
@@ -3,15 +3,7 @@ description: 'Upgrade for Pacemaker deployments'
 
 parameters:
 
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
+  servers:
     type: json
   input_values:
     type: json
@@ -43,45 +35,12 @@ resources:
           - "if [[ -f /etc/resolv.conf.save ]] ; then rm /etc/resolv.conf.save; fi\n\n"
           - get_param: UpgradeInitCommand
 
-  UpgradeInitControllerDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: controller_servers}
-      config: {get_resource: UpgradeInitConfig}
-      input_values: {get_param: input_values}
-
-  UpgradeInitComputeDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: compute_servers}
-      config: {get_resource: UpgradeInitConfig}
-      input_values: {get_param: input_values}
-
-  UpgradeInitBlockStorageDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: blockstorage_servers}
-      config: {get_resource: UpgradeInitConfig}
-      input_values: {get_param: input_values}
-
-  UpgradeInitObjectStorageDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: objectstorage_servers}
-      config: {get_resource: UpgradeInitConfig}
-      input_values: {get_param: input_values}
-
-  UpgradeInitCephStorageDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: cephstorage_servers}
-      config: {get_resource: UpgradeInitConfig}
-      input_values: {get_param: input_values}
-
   # TODO(jistr): for Mitaka->Newton upgrades and further we can use
   # map_merge with input_values instead of feeding params into scripts
   # via str_replace on bash snippets
 
+  # FIXME(shardy) we have hard-coded per-role *ScriptConfig's here
+  # Would be better to have a common config for all roles
   ComputeDeliverUpgradeScriptConfig:
     type: OS::Heat::SoftwareConfig
     properties:
@@ -97,35 +56,32 @@ resources:
                 UPGRADE_LEVEL_NOVA_COMPUTE: {get_param: UpgradeLevelNovaCompute}
           - get_file: major_upgrade_compute.sh
 
-  ComputeDeliverUpgradeScriptDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: compute_servers}
-      config: {get_resource: ComputeDeliverUpgradeScriptConfig}
-      input_values: {get_param: input_values}
-
   ObjectStorageDeliverUpgradeScriptConfig:
     type: OS::Heat::SoftwareConfig
     properties:
       group: script
       config: {get_file: major_upgrade_object_storage.sh}
 
-  ObjectStorageDeliverUpgradeScriptDeployment:
-    type: OS::Heat::SoftwareDeploymentGroup
-    properties:
-      servers:  {get_param: objectstorage_servers}
-      config: {get_resource: ObjectStorageDeliverUpgradeScriptConfig}
-      input_values: {get_param: input_values}
-
   CephStorageDeliverUpgradeScriptConfig:
     type: OS::Heat::SoftwareConfig
     properties:
       group: script
       config: {get_file: major_upgrade_ceph_storage.sh}
 
-  CephStorageDeliverUpgradeScriptDeployment:
+{% for role in roles %}
+  UpgradeInit{{role.name}}Deployment:
+    type: OS::Heat::SoftwareDeploymentGroup
+    properties:
+      servers:  {get_param: [servers, {{role.name}}]}
+      config: {get_resource: UpgradeInitConfig}
+      input_values: {get_param: input_values}
+
+  {% if not role.name in ['Controller', 'BlockStorage'] %}
+  {{role.name}}DeliverUpgradeScriptDeployment:
     type: OS::Heat::SoftwareDeploymentGroup
     properties:
-      servers:  {get_param: cephstorage_servers}
-      config: {get_resource: CephStorageDeliverUpgradeScriptConfig}
+      servers:  {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}DeliverUpgradeScriptConfig}
       input_values: {get_param: input_values}
+  {% endif %}
+{% endfor %}
index 7ed7012..df87c93 100644 (file)
@@ -56,3 +56,135 @@ function is_mysql_upgrade_needed {
     fi
     echo "1"
 }
+
+# This function returns the list of services to be migrated away from pacemaker
+# and to systemd. These services live in a separate function because the list is
+# needed in three different places: major_upgrade_controller_pacemaker_{1,2}
+# and in the function to migrate the cluster from full HA to HA NG
+function services_to_migrate {
+    # The following PCMK resources the ones the we are going to delete
+    PCMK_RESOURCE_TODELETE="
+    httpd-clone
+    memcached-clone
+    mongod-clone
+    neutron-dhcp-agent-clone
+    neutron-l3-agent-clone
+    neutron-metadata-agent-clone
+    neutron-netns-cleanup-clone
+    neutron-openvswitch-agent-clone
+    neutron-ovs-cleanup-clone
+    neutron-server-clone
+    openstack-aodh-evaluator-clone
+    openstack-aodh-listener-clone
+    openstack-aodh-notifier-clone
+    openstack-ceilometer-api-clone
+    openstack-ceilometer-central-clone
+    openstack-ceilometer-collector-clone
+    openstack-ceilometer-notification-clone
+    openstack-cinder-api-clone
+    openstack-cinder-scheduler-clone
+    openstack-glance-api-clone
+    openstack-glance-registry-clone
+    openstack-gnocchi-metricd-clone
+    openstack-gnocchi-statsd-clone
+    openstack-heat-api-cfn-clone
+    openstack-heat-api-clone
+    openstack-heat-api-cloudwatch-clone
+    openstack-heat-engine-clone
+    openstack-nova-api-clone
+    openstack-nova-conductor-clone
+    openstack-nova-consoleauth-clone
+    openstack-nova-novncproxy-clone
+    openstack-nova-scheduler-clone
+    openstack-sahara-api-clone
+    openstack-sahara-engine-clone
+    "
+    echo $PCMK_RESOURCE_TODELETE
+}
+
+# This function will migrate a mitaka system where all the resources are managed
+# via pacemaker to a newton setup where only a few services will be managed by pacemaker
+# On a high-level it will operate as follows:
+# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
+#    during the conversion
+# 2. Remove all the colocation constraints and then the ordering constraints, except the
+#    ones related to haproxy/VIPs which exist in Newton as well
+# 3. Take the cluster out of maintenance-mode
+# 4. Remove all the resources that won't be managed by pacemaker in newton. The
+#    outcome is that they are stopped and removed from pacemaker's control
+# 5. Do a resource cleanup to make sure the cluster is in a clean state
+function migrate_full_to_ng_ha {
+    if [[ -n $(pcmk_running) ]]; then
+        pcs property set maintenance-mode=true
+
+        # First we go through all the colocation constraints (except the ones
+        # we want to keep, i.e. the haproxy/ip ones) and we remove those
+        COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
+        for constraint in $COL_CONSTRAINTS; do
+            log_debug "Deleting colocation constraint $constraint from CIB"
+            pcs constraint remove "$constraint"
+        done
+
+        # Now we kill all the ordering constraints (except the haproxy/ip ones)
+        ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:"  | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
+        for constraint in $ORD_CONSTRAINTS; do
+            log_debug "Deleting ordering constraint $constraint from CIB"
+            pcs constraint remove "$constraint"
+        done
+        # At this stage all the constraints (except the haproxy/VIP ones) have
+        # been removed from the CIB. Once we remove the maintenance-mode the
+        # underlying systemd services keep running; they will be enabled in
+        # systemd via the puppet converge step later on
+        pcs property set maintenance-mode=false
+
+        # At this stage there are no constraints whatsoever except the haproxy/ip ones
+        # which we want to keep. We now disable and then delete each resource
+        # that will move to systemd.
+        # We want the systemd resources to be stopped before doing "yum update",
+        # that way "systemctl try-restart <service>" is a no-op because the
+        # service is already down
+        PCS_STATUS_OUTPUT="$(pcs status)"
+        for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
+             if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
+                 log_debug "Deleting $resource from the CIB"
+                 if ! pcs resource disable "$resource" --wait=600; then
+                     echo_error "ERROR: resource $resource failed to be disabled"
+                     exit 1
+                 fi
+                 pcs resource delete --force "$resource"
+             else
+                 log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
+             fi
+        done
+
+        # We need to do a pcs resource cleanup here + crm_resource --wait to
+        # make sure the cluster is in a clean state before we stop everything,
+        # upgrade and restart everything
+        pcs resource cleanup
+        # We are making sure here that the cluster is stable before proceeding
+        if ! timeout -k 10 600 crm_resource --wait; then
+            echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
+            exit 1
+        fi
+    fi
+}
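
To make the constraint extraction above concrete, here is a hypothetical fragment of `pcs config show` output run through the same colocation pipeline (resource names and constraint ids are invented):

    sample=$(printf '%s\n' \
      'Colocation Constraints:' \
      '  openstack-nova-api-clone with memcached-clone (score:INFINITY) (id:colocation-nova-api-memcached-INFINITY)' \
      '  ip-192.0.2.10 with haproxy-clone (score:INFINITY) (id:colocation-ip-haproxy-INFINITY)')
    echo "$sample" | sed -n '/^Colocation Constraints:$/,/^$/p' \
      | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" \
      | awk '{print $NF}' | cut -f2 -d: | cut -f1 -d\)
    # Prints: colocation-nova-api-memcached-INFINITY
    # The haproxy/VIP colocation is deliberately left alone.
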
+
+# This function will make sure that the rabbitmq ha policies are converted from mitaka to newton
+# In mitaka we had: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
+# In newton we want: Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
+# The nr "2" should be CEIL(N/2) where N is the number of Controllers (i.e. rabbit instances)
+# Note that changing an attribute like this makes the rabbitmq resource restart
+function rabbitmq_mitaka_newton_upgrade {
+    if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
+        # The number of controllers is obtained by counting how many hostnames
+        # there are in the controller_node_names hiera key
+        nr_controllers=$(($(hiera controller_node_names | grep -o "," |wc -l) + 1))
+        nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
+        if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
+            echo_error "ERROR: The nr. of HA queues during the M/N upgrade is out of range $nr_queues"
+            exit 1
+        fi
+        pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600
+    fi
+}
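
A quick sanity check of the CEIL(N/2) integer arithmetic used above (illustrative, not part of the upgrade flow):

    for nr_controllers in 1 2 3 4 5; do
        echo "$nr_controllers controllers -> $(($nr_controllers / 2 + ($nr_controllers % 2))) ha-params"
    done
    # prints 1 -> 1, 2 -> 1, 3 -> 2, 4 -> 2, 5 -> 3
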
index 9414ac1..b9a87d3 100644 (file)
@@ -4,15 +4,7 @@ description: >
   Software-config for performing aodh data migration
 
 parameters:
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
+  servers:
     type: json
   input_values:
     type: json
@@ -28,6 +20,6 @@ resources:
   AodhMysqlMigrationScriptDeployment:
     type: OS::Heat::SoftwareDeploymentGroup
     properties:
-      servers:  {get_param: controller_servers}
+      servers:  {get_param: [servers, Controller]}
       config: {get_resource: AodhMysqlMigrationScriptConfig}
       input_values: {get_param: input_values}
index 7d794c9..4f17b69 100755 (executable)
 
 set -eu
 
-function check_resource {
+DEBUG="true" # set false if the verbosity is a problem
+SCRIPT_NAME=$(basename $0)
+function log_debug {
+  if [[ $DEBUG = "true" ]]; then
+    echo "`date` $SCRIPT_NAME tripleo-upgrade $(facter hostname) $1"
+  fi
+}
+
+function is_bootstrap_node {
+  if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
+    log_debug "Node is bootstrap"
+    echo "true"
+  fi
+}
 
+function check_resource_pacemaker {
   if [ "$#" -ne 3 ]; then
-      echo_error "ERROR: check_resource function expects 3 parameters, $# given"
-      exit 1
+    echo_error "ERROR: check_resource function expects 3 parameters, $# given"
+    exit 1
   fi
 
-  service=$1
-  state=$2
-  timeout=$3
+  local service=$1
+  local state=$2
+  local timeout=$3
+
+  if [[ -z $(is_bootstrap_node) ]] ; then
+    log_debug "Node isn't bootstrap, skipping check for $service to be $state here "
+    return
+  else
+    log_debug "Node is bootstrap checking $service to be $state here"
+  fi
 
   if [ "$state" = "stopped" ]; then
-      match_for_incomplete='Started'
+    match_for_incomplete='Started'
   else # started
-      match_for_incomplete='Stopped'
+    match_for_incomplete='Stopped'
   fi
 
   nodes_local=$(pcs status  | grep ^Online | sed 's/.*\[ \(.*\) \]/\1/g' | sed 's/ /\|/g')
   if timeout -k 10 $timeout crm_resource --wait; then
-      node_states=$(pcs status --full | grep "$service" | grep -v Clone | { egrep "$nodes_local" || true; } )
-      if echo "$node_states" | grep -q "$match_for_incomplete"; then
-          echo_error "ERROR: cluster finished transition but $service was not in $state state, exiting."
-          exit 1
-      else
-        echo "$service has $state"
-      fi
-  else
-      echo_error "ERROR: cluster remained unstable for more than $timeout seconds, exiting."
+    node_states=$(pcs status --full | grep "$service" | grep -v Clone | { egrep "$nodes_local" || true; } )
+    if echo "$node_states" | grep -q "$match_for_incomplete"; then
+      echo_error "ERROR: cluster finished transition but $service was not in $state state, exiting."
       exit 1
+    else
+      echo "$service has $state"
+    fi
+  else
+    echo_error "ERROR: cluster remained unstable for more than $timeout seconds, exiting."
+    exit 1
+  fi
+
+}
+
+function pcmk_running {
+  if [[ $(systemctl is-active pacemaker) = "active" ]] ; then
+    echo "true"
+  fi
+}
+
+function is_systemd_unknown {
+  local service=$1
+  if [[ $(systemctl is-active "$service") = "unknown" ]]; then
+    log_debug "$service found to be unkown to systemd"
+    echo "true"
+  fi
+}
+
+function grep_is_cluster_controlled {
+  local service=$1
+  if [[ -n $(systemctl status $service -l | grep Drop-In -A 5 | grep pacemaker) ||
+      -n $(systemctl status $service -l | grep "Cluster Controlled $service") ]] ; then
+    log_debug "$service is pcmk managed from systemctl grep"
+    echo "true"
+  fi
+}
+
+
+function is_systemd_managed {
+  local service=$1
+  # if we have pcmk, check to see if it is managed there
+  if [[ -n $(pcmk_running) ]]; then
+    if [[ -z $(pcs status --full | grep $service)  && -z $(is_systemd_unknown $service) ]] ; then
+      log_debug "$service found to be systemd managed from pcs status"
+      echo "true"
+    fi
+  else
+    # if it is "unknown" to systemd, then it is pacemaker managed
+    if [[  -n $(is_systemd_unknown $service) ]] ; then
+      return
+    elif [[ -z $(grep_is_cluster_controlled $service) ]] ; then
+      echo "true"
+    fi
+  fi
+}
+
+function is_pacemaker_managed {
+  local service=$1
+  # if we have pcmk, check to see if it is managed there
+  if [[ -n $(pcmk_running) ]]; then
+    if [[ -n $(pcs status --full | grep $service) ]]; then
+      log_debug "$service found to be pcmk managed from pcs status"
+      echo "true"
+    fi
+  else
+    # if it is unknown to systemd, then it is pcmk managed
+    if [[ -n $(is_systemd_unknown $service) ]]; then
+      echo "true"
+    elif [[ -n $(grep_is_cluster_controlled $service) ]] ; then
+      echo "true"
+    fi
+  fi
+}
+
+function is_managed {
+  local service=$1
+  if [[ -n $(is_pacemaker_managed $service) || -n $(is_systemd_managed $service) ]]; then
+    echo "true"
+  fi
+}
+
+function check_resource_systemd {
+
+  if [ "$#" -ne 3 ]; then
+    echo_error "ERROR: check_resource function expects 3 parameters, $# given"
+    exit 1
   fi
 
+  local service=$1
+  local state=$2
+  local timeout=$3
+  local check_interval=3
+
+  if [ "$state" = "stopped" ]; then
+    match_for_incomplete='active'
+  else # started
+    match_for_incomplete='inactive'
+  fi
+
+  log_debug "Going to check_resource_systemd for $service to be $state"
+
+  # sanity check: is the service systemd managed?
+  if [[ -z $(is_systemd_managed $service) ]]; then
+    echo "ERROR - $service not found to be systemd managed."
+    exit 1
+  fi
+
+  tstart=$(date +%s)
+  tend=$(( $tstart + $timeout ))
+  while (( $(date +%s) < $tend )); do
+    if [[ "$(systemctl is-active $service)" = $match_for_incomplete ]]; then
+      echo "$service not yet $state, sleeping $check_interval seconds."
+      sleep $check_interval
+    else
+      echo "$service is $state"
+      return
+    fi
+  done
+
+  echo "Timed out waiting for $service to go to $state after $timeout seconds"
+  exit 1
+}
+
+
+function check_resource {
+  local service=$1
+  local pcmk_managed=$(is_pacemaker_managed $service)
+  local systemd_managed=$(is_systemd_managed $service)
+
+  if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+    log_debug "ERROR $service managed by both systemd and pcmk - SKIPPING"
+    return
+  fi
+
+  if [[ -n $pcmk_managed ]]; then
+    check_resource_pacemaker $@
+    return
+  elif [[ -n $systemd_managed ]]; then
+    check_resource_systemd $@
+    return
+  fi
+  log_debug "ERROR cannot check_resource for $service, not managed here?"
+}
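
For reference, this is how the controller upgrade steps above consume these helpers (the calls already appear in the step-2/step-3 scripts; nothing new is introduced here):

    start_or_enable_service galera      # pcs resource enable on the bootstrap node,
                                        # or systemctl start when galera is systemd managed
    check_resource galera started 600   # dispatches to check_resource_pacemaker or
                                        # check_resource_systemd depending on who owns it
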
+
+function manage_systemd_service {
+  local action=$1
+  local service=$2
+  log_debug "Going to systemctl $action $service"
+  systemctl $action $service
+}
+
+function manage_pacemaker_service {
+  local action=$1
+  local service=$2
+  # not if pacemaker isn't running!
+  if [[ -z $(pcmk_running) ]]; then
+    echo "$(facter hostname) pacemaker not active, skipping $action $service here"
+  elif [[ -n $(is_bootstrap_node) ]]; then
+    log_debug "Going to pcs resource $action $service"
+    pcs resource $action $service
+  fi
+}
+
+function stop_or_disable_service {
+  local service=$1
+  local pcmk_managed=$(is_pacemaker_managed $service)
+  local systemd_managed=$(is_systemd_managed $service)
+
+  if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+    log_debug "Skipping stop_or_disable $service due to management conflict"
+    return
+  fi
+
+  log_debug "Stopping or disabling $service"
+  if [[ -n $pcmk_managed ]]; then
+    manage_pacemaker_service disable $service
+    return
+  elif [[ -n $systemd_managed ]]; then
+    manage_systemd_service stop $service
+    return
+  fi
+  log_debug "ERROR: $service not managed here?"
+}
+
+function start_or_enable_service {
+  local service=$1
+  local pcmk_managed=$(is_pacemaker_managed $service)
+  local systemd_managed=$(is_systemd_managed $service)
+
+  if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+    log_debug "Skipping start_or_enable $service due to management conflict"
+    return
+  fi
+
+  log_debug "Starting or enabling $service"
+  if [[ -n $pcmk_managed ]]; then
+    manage_pacemaker_service enable $service
+    return
+  elif [[ -n $systemd_managed ]]; then
+    manage_systemd_service start $service
+    return
+  fi
+  log_debug "ERROR $service not managed here?"
+}
+
+function restart_service {
+  local service=$1
+  local pcmk_managed=$(is_pacemaker_managed $service)
+  local systemd_managed=$(is_systemd_managed $service)
+
+  if [[ -n $pcmk_managed && -n $systemd_managed ]] ; then
+    log_debug "ERROR $service managed by both systemd and pcmk - SKIPPING"
+    return
+  fi
+
+  log_debug "Restarting $service"
+  if [[ -n $pcmk_managed ]]; then
+    manage_pacemaker_service restart $service
+    return
+  elif [[ -n $systemd_managed ]]; then
+    manage_systemd_service restart $service
+    return
+  fi
+  log_debug "ERROR $service not managed here?"
 }
 
 function echo_error {
     echo "$@" | tee /dev/fd2
 }
 
+# Swift is a special case because it is/was never handled by pacemaker.
+# When stand-alone swift is used, only swift-proxy runs on the controllers
 function systemctl_swift {
     services=( openstack-swift-account-auditor openstack-swift-account-reaper openstack-swift-account-replicator openstack-swift-account \
                openstack-swift-container-auditor openstack-swift-container-replicator openstack-swift-container-updater openstack-swift-container \
                openstack-swift-object-auditor openstack-swift-object-replicator openstack-swift-object-updater openstack-swift-object openstack-swift-proxy )
-    action=$1
+    local action=$1
     case $action in
         stop)
-            services=$(systemctl | grep swift | grep running | awk '{print $1}')
+            services=$(systemctl | grep openstack-swift- | grep running | awk '{print $1}')
             ;;
         start)
             enable_swift_storage=$(hiera -c /etc/puppet/hiera.yaml 'enable_swift_storage')
@@ -54,9 +289,11 @@ function systemctl_swift {
                 services=( openstack-swift-proxy )
             fi
             ;;
-        *)  services=() ;;  # for safetly, should never happen
+        *)  echo "Unknown action $action passed to systemctl_swift"
+            exit 1
+            ;; # shouldn't ever happen...
     esac
-    for S in ${services[@]}; do
-        systemctl $action $S
+    for service in ${services[@]}; do
+        manage_systemd_service $action $service
     done
 }
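
To make the awk in the stop branch concrete, a hypothetical line of `systemctl` output and what ends up being stopped (the unit description is invented for the example):

    # systemctl list-units prints: UNIT  LOAD  ACTIVE  SUB  DESCRIPTION, e.g.
    #   openstack-swift-proxy.service  loaded  active  running  Swift proxy server
    # grep keeps the running swift units and awk '{print $1}' keeps the unit name, so:
    manage_systemd_service stop openstack-swift-proxy.service
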
index fd1fd0d..3da7efe 100755 (executable)
@@ -2,12 +2,9 @@
 
 set -eux
 
-pacemaker_status=$(systemctl is-active pacemaker)
-
 # Run if pacemaker is running, we're the bootstrap node,
 # and we're updating the deployment (not creating).
-if [ "$pacemaker_status" = "active" -a \
-     "$(hiera bootstrap_nodeid)" = "$(facter hostname)" ]; then
+if [[ -n $(pcmk_running) && -n $(is_bootstrap_node) ]]; then
 
     TIMEOUT=600
     SERVICES_TO_RESTART="$(ls /var/lib/tripleo/pacemaker-restarts)"
@@ -25,5 +22,4 @@ if [ "$pacemaker_status" = "active" -a \
         pcs resource restart --wait=$TIMEOUT $service
         rm -f /var/lib/tripleo/pacemaker-restarts/$service
     done
-
 fi
diff --git a/j2_excludes.yaml b/j2_excludes.yaml
new file mode 100644 (file)
index 0000000..063e63d
--- /dev/null
@@ -0,0 +1,10 @@
+# This template specifies which j2-rendered templates
+# should be excluded from the render process in
+# tripleo-common/tripleo_common/actions/templates.py
+
+name:
+  - puppet/controller-role.yaml
+  - puppet/compute-role.yaml
+  - puppet/blockstorage-role.yaml
+  - puppet/objectstorage-role.yaml
+  - puppet/cephstorage-role.yaml
index 3b24da7..4dfbc77 100644 (file)
@@ -37,6 +37,10 @@ parameters:
     default: [{'start': '10.0.0.4', 'end': '10.0.0.250'}]
     description: Ip allocation pool range for the external network.
     type: json
+  ExternalInterfaceDefaultRoute:
+    default: '10.0.0.1'
+    description: default route for the external network
+    type: string
 
 resources:
   ExternalNetwork:
@@ -55,6 +59,7 @@ resources:
       name: {get_param: ExternalSubnetName}
       network: {get_resource: ExternalNetwork}
       allocation_pools: {get_param: ExternalAllocationPools}
+      gateway_ip: {get_param: ExternalInterfaceDefaultRoute}
 
 outputs:
   OS::stack_id:
index 3e120f2..e0736ab 100644 (file)
@@ -42,6 +42,10 @@ parameters:
     default: dhcpv6-stateful
     description: Neutron subnet IPv6 router advertisement mode
     type: string
+  ExternalInterfaceDefaultRoute:
+    default: '2001:db8:fd00:1000::1'
+    description: default route for the external network
+    type: string
 
 resources:
   ExternalNetwork:
@@ -62,6 +66,7 @@ resources:
       name: {get_param: ExternalSubnetName}
       network: {get_resource: ExternalNetwork}
       allocation_pools: {get_param: ExternalAllocationPools}
+      gateway_ip: {get_param: ExternalInterfaceDefaultRoute}
 
 outputs:
   OS::stack_id:
index 6f8aa3a..090e38f 100644 (file)
@@ -55,6 +55,7 @@ resources:
       name: {get_param: InternalApiSubnetName}
       network: {get_resource: InternalApiNetwork}
       allocation_pools: {get_param: InternalApiAllocationPools}
+      gateway_ip: null
 
 outputs:
   OS::stack_id:
index 68c14fb..19d64b0 100644 (file)
@@ -62,6 +62,7 @@ resources:
       name: {get_param: InternalApiSubnetName}
       network: {get_resource: InternalApiNetwork}
       allocation_pools: {get_param: InternalApiAllocationPools}
+      gateway_ip: null
 
 outputs:
   OS::stack_id:
index 6878bac..6798e11 100644 (file)
@@ -13,7 +13,7 @@ parameters:
   ManagementNetValueSpecs:
     default: {'provider:physical_network': 'management', 'provider:network_type': 'flat'}
     description: Value specs for the management network.
-    type: json 
+    type: json
   ManagementNetAdminStateUp:
     default: false
     description: The admin state of the network.
@@ -38,6 +38,10 @@ parameters:
     default: [{'start': '10.0.1.4', 'end': '10.0.1.250'}]
     description: Ip allocation pool range for the management network.
     type: json
+  ManagementInterfaceDefaultRoute:
+    default: null
+    description: The default route of the management network.
+    type: string
 
 resources:
   ManagementNetwork:
@@ -56,6 +60,7 @@ resources:
       name: {get_param: ManagementSubnetName}
       network: {get_resource: ManagementNetwork}
       allocation_pools: {get_param: ManagementAllocationPools}
+      gateway_ip: {get_param: ManagementInterfaceDefaultRoute}
 
 outputs:
   OS::stack_id:
index baa544e..e541049 100644 (file)
@@ -49,4 +49,4 @@ outputs:
       - ''
       - - {get_param: [IPPool, {get_param: ExternalNetName}, {get_param: NodeIndex}]}
         - '/'
-        - {str_split: ['/', {get_attr: [ExternalPort, subnets, 0, cidr]}, 1]}
+        - {str_split: ['/', {get_param: ExternalNetCidr}, 1]}
index 3d61910..782b6b0 100644 (file)
@@ -24,6 +24,12 @@ parameters:
     description: The name of the undercloud Neutron control plane
     default: ctlplane
     type: string
+  FixedIPs: # Here for compatibility with ctlplane_vip.yaml
+    description: >
+        Control the IP allocation for the VIP port. E.g.
+        [{'ip_address':'1.2.3.4'}]
+    default: []
+    type: json
   ServiceVips:
     default: {}
     type: json
index 2dd0a0e..80060b5 100644 (file)
@@ -24,6 +24,12 @@ parameters:
     description: The name of the undercloud Neutron control plane
     default: ctlplane
     type: string
+  FixedIPs: # Here for compatibility with ctlplane_vip.yaml
+    description: >
+        Control the IP allocation for the VIP port. E.g.
+        [{'ip_address':'1.2.3.4'}]
+    default: []
+    type: json
   ServiceVips:
     default: {}
     type: json
index 8d0a91b..afb144b 100644 (file)
@@ -49,4 +49,4 @@ outputs:
       - ''
       - - {get_param: [IPPool, {get_param: InternalApiNetName}, {get_param: NodeIndex}]}
         - '/'
-        - {str_split: ['/', {get_attr: [InternalApiPort, subnets, 0, cidr]}, 1]}
+        - {str_split: ['/', {get_param: InternalApiNetCidr}, 1]}
index d9ac604..4c1cc21 100644 (file)
@@ -49,4 +49,4 @@ outputs:
       - ''
       - - {get_param: [IPPool, {get_param: ManagementNetName}, {get_param: NodeIndex}]}
         - '/'
-        - {str_split: ['/', {get_attr: [ManagementPort, subnets, 0, cidr]}, 1]}
+        - {str_split: ['/', {get_param: ManagementNetCidr}, 1]}
index 07e2de4..d7863e0 100644 (file)
@@ -31,6 +31,32 @@ parameters:
   ServiceHostnameList:
     default: []
     type: comma_delimited_list
+  NetworkHostnameMap:
+    default: []
+    type: json
+
+resources:
+   # This adds the extra "services" entries for keystone
+   # so that keystone_admin_api_network and
+   # keystone_public_api_network point to the correct
+   # network on the nodes running the "keystone" service
+  EnabledServicesValue:
+    type: OS::Heat::Value
+    properties:
+      type: comma_delimited_list
+      value:
+        yaql:
+          expression: let(root => $) -> $.data.extra_services.items().where($[0] in $root.data.enabled_services).select($[1]).flatten() + $root.data.enabled_services
+          data:
+            enabled_services: {get_param: EnabledServices}
+            extra_services:
+              # If anything other than keystone needs this
+              # then we should add an extra_networks interface
+              # to the service templates role_data but for
+              # now we hard-code the keystone special case
+              keystone:
+                - keystone_admin_api
+                - keystone_public_api
 
 outputs:
   net_ip_map:
@@ -64,7 +90,7 @@ outputs:
                         template:
                           SERVICE_node_ips: SERVICE_network
                         for_each:
-                          SERVICE: {get_param: EnabledServices}
+                          SERVICE: {get_attr: [EnabledServicesValue, value]}
                   - values: {get_param: ServiceNetMap}
               - values:
                   ctlplane: {get_param: ControlPlaneIpList}
@@ -77,6 +103,28 @@ outputs:
   service_hostnames:
     description: >
       Map of enabled services to a list of hostnames where they're running
+    value:
+      map_replace:
+        - yaql:
+            # This filters out any entries where the value hasn't been substituted for
+            # a list, e.g. it's still $service_network. This happens when there is
+            # no network defined for the service in the ServiceNetMap, which is OK
+            # as not all services have to be bound to a network, so we filter them out
+            expression: dict($.data.map.items().where(not $[1].endsWith("_network")))
+            data:
+              map:
+                map_replace:
+                  - map_merge:
+                      repeat:
+                        template:
+                          SERVICE_node_names: SERVICE_network
+                        for_each:
+                          SERVICE: {get_attr: [EnabledServicesValue, value]}
+                  - values: {get_param: ServiceNetMap}
+        - values: {get_param: NetworkHostnameMap}
+  short_service_hostnames:
+    description: >
+      Map of enabled services to a list of hostnames where they're running regardless of the network
     value:
       yaql:
         # If ServiceHostnameList is empty the role is deployed with zero nodes
@@ -87,6 +135,6 @@ outputs:
             map_merge:
               repeat:
                 template:
-                  SERVICE_node_names: {get_param: ServiceHostnameList}
+                  SERVICE_short_node_names: {get_param: ServiceHostnameList}
                 for_each:
-                    SERVICE: {get_param: EnabledServices}
+                    SERVICE: {get_attr: [EnabledServicesValue, value]}
index 328f838..18faf1b 100644 (file)
@@ -49,4 +49,4 @@ outputs:
       - ''
       - - {get_param: [IPPool, {get_param: StorageNetName}, {get_param: NodeIndex}]}
         - '/'
-        - {str_split: ['/', {get_attr: [StoragePort, subnets, 0, cidr]}, 1]}
+        - {str_split: ['/', {get_param: StorageNetCidr}, 1]}
index 50470c9..e1145a3 100644 (file)
@@ -49,4 +49,4 @@ outputs:
       - ''
       - - {get_param: [IPPool, {get_param: StorageMgmtNetName}, {get_param: NodeIndex}]}
         - '/'
-        - {str_split: ['/', {get_attr: [StorageMgmtPort, subnets, 0, cidr]}, 1]}
+        - {str_split: ['/', {get_param: StorageMgmtNetCidr}, 1]}
index bbe6f73..d4f0d29 100644 (file)
@@ -48,4 +48,4 @@ outputs:
       - ''
       - - {get_param: [IPPool, {get_param: TenantNetName}, {get_param: NodeIndex}]}
         - '/'
-        - {str_split: ['/', {get_attr: [TenantPort, subnets, 0, cidr]}, 1]}
+        - {str_split: ['/', {get_param: TenantNetCidr}, 1]}
similarity index 79%
rename from network/service_net_map.yaml
rename to network/service_net_map.j2.yaml
index 6e5c244..c4d86fb 100644 (file)
@@ -8,9 +8,17 @@ parameters:
     description: Mapping of service_name -> network name. Typically set
                  via parameter_defaults in the resource registry.  This
                  mapping overrides those in ServiceNetMapDefaults.
+                 Note that the key in this map must match the service_name
+                 in the service template, e.g. if the service_name is heat_api
+                 the key must be either heat_api_network, or optionally
+                 HeatApiNetwork (which will be converted internally from
+                 CamelCase to the underscore form).
     default: {}
     type: json
 
+  # Note that the key in this map must match the service_name
+  # see the description above about conversion from CamelCase to
+  # snake_case - the names must still match when converted
   ServiceNetMapDefaults:
     default:
       ApacheNetwork: internal_api
@@ -46,13 +54,14 @@ parameters:
       CephClusterNetwork: storage_mgmt
       CephMonNetwork: storage
       CephRgwNetwork: storage
-      ControllerHostnameResolveNetwork: internal_api
-      ComputeHostnameResolveNetwork: internal_api
-      BlockStorageHostnameResolveNetwork: internal_api
-      ObjectStorageHostnameResolveNetwork: internal_api
-      CephStorageHostnameResolveNetwork: storage
       PublicNetwork: external
-      OpenDaylightApiNetwork: internal_api
+      OpendaylightApiNetwork: internal_api
+      # We special-case the default ResolveNetwork for the CephStorage role
+      # for backwards compatibility; all other roles default to internal_api
+      CephStorageHostnameResolveNetwork: storage
+{% for role in roles if role.name != 'CephStorage' %}
+      {{role.name}}HostnameResolveNetwork: internal_api
+{% endfor %}
     description: Mapping of service_name -> network name. Typically set
                  via parameter_defaults in the resource registry.
     type: json
index dc9f35e..35dae17 100644 (file)
@@ -55,6 +55,7 @@ resources:
       name: {get_param: StorageSubnetName}
       network: {get_resource: StorageNetwork}
       allocation_pools: {get_param: StorageAllocationPools}
+      gateway_ip: null
 
 outputs:
   OS::stack_id:
index 59933c8..03cfd13 100644 (file)
@@ -55,6 +55,7 @@ resources:
       name: {get_param: StorageMgmtSubnetName}
       network: {get_resource: StorageMgmtNetwork}
       allocation_pools: {get_param: StorageMgmtAllocationPools}
+      gateway_ip: null
 
 outputs:
   OS::stack_id:
index f05644e..39c456d 100644 (file)
@@ -62,6 +62,7 @@ resources:
       name: {get_param: StorageMgmtSubnetName}
       network: {get_resource: StorageMgmtNetwork}
       allocation_pools: {get_param: StorageMgmtAllocationPools}
+      gateway_ip: null
 
 outputs:
   OS::stack_id:
index 36a6fae..5c8af9e 100644 (file)
@@ -62,6 +62,7 @@ resources:
       name: {get_param: StorageSubnetName}
       network: {get_resource: StorageNetwork}
       allocation_pools: {get_param: StorageAllocationPools}
+      gateway_ip: null
 
 outputs:
   OS::stack_id:
index 6fe9612..1045b81 100644 (file)
@@ -55,6 +55,7 @@ resources:
       name: {get_param: TenantSubnetName}
       network: {get_resource: TenantNetwork}
       allocation_pools: {get_param: TenantAllocationPools}
+      gateway_ip: null
 
 outputs:
   OS::stack_id:
index b653eaf..bf758a5 100644 (file)
@@ -62,6 +62,7 @@ resources:
       name: {get_param: TenantSubnetName}
       network: {get_resource: TenantNetwork}
       allocation_pools: {get_param: TenantAllocationPools}
+      gateway_ip: null
 
 outputs:
   OS::stack_id:
similarity index 70%
rename from overcloud-resource-registry-puppet.yaml
rename to overcloud-resource-registry-puppet.j2.yaml
index d5fdaa0..d4a5c6b 100644 (file)
@@ -1,39 +1,34 @@
 resource_registry:
-  OS::TripleO::BlockStorage: puppet/cinder-storage.yaml
-  OS::TripleO::BlockStorage::Net::SoftwareConfig: net-config-noop.yaml
-  OS::TripleO::Compute: puppet/compute.yaml
-  OS::TripleO::Compute::Net::SoftwareConfig: net-config-noop.yaml
+
   OS::TripleO::SoftwareDeployment: OS::Heat::StructuredDeployment
-  OS::TripleO::Controller: puppet/controller.yaml
-  OS::TripleO::Controller::Net::SoftwareConfig: net-config-bridge.yaml
-  OS::TripleO::ObjectStorage: puppet/swift-storage.yaml
-  OS::TripleO::ObjectStorage::Net::SoftwareConfig: net-config-noop.yaml
-  OS::TripleO::CephStorage: puppet/ceph-storage.yaml
-  OS::TripleO::CephStorage::Net::SoftwareConfig: net-config-noop.yaml
-  # set to controller-config-pacemaker.yaml to enable pacemaker
-  OS::TripleO::ControllerConfig: puppet/controller-config.yaml
   OS::TripleO::PostDeploySteps: puppet/post.yaml
-  OS::TripleO::ComputeConfig: puppet/compute-config.yaml
-  OS::TripleO::BlockStorageConfig: puppet/blockstorage-config.yaml
-  OS::TripleO::ObjectStorageConfig: puppet/objectstorage-config.yaml
-  OS::TripleO::CephStorageConfig: puppet/cephstorage-config.yaml
   OS::TripleO::AllNodes::SoftwareConfig: puppet/all-nodes-config.yaml
   OS::TripleO::DefaultPasswords: default_passwords.yaml
 
   # Tasks (for internal TripleO usage)
   OS::TripleO::Tasks::UpdateWorkflow: OS::Heat::None
   OS::TripleO::Tasks::PackageUpdate: extraconfig/tasks/yum_update.yaml
-  OS::TripleO::Tasks::ControllerPreConfig: OS::Heat::None
-  OS::TripleO::Tasks::ControllerPostConfig: OS::Heat::None
-  OS::TripleO::Tasks::ComputePreConfig: OS::Heat::None
-  OS::TripleO::Tasks::ComputePostConfig: OS::Heat::None
-  OS::TripleO::Tasks::BlockStoragePreConfig: OS::Heat::None
-  OS::TripleO::Tasks::BlockStoragePostConfig: OS::Heat::None
-  OS::TripleO::Tasks::ObjectStoragePreConfig: OS::Heat::None
-  OS::TripleO::Tasks::ObjectStoragePostConfig: OS::Heat::None
-  OS::TripleO::Tasks::CephStoragePreConfig: OS::Heat::None
-  OS::TripleO::Tasks::CephStoragePostConfig: OS::Heat::None
 
+{% for role in roles %}
+  OS::TripleO::{{role.name}}: puppet/{{role.name.lower()}}-role.yaml
+  OS::TripleO::{{role.name}}Config: puppet/{{role.name.lower()}}-config.yaml
+  OS::TripleO::Tasks::{{role.name}}PreConfig: OS::Heat::None
+  OS::TripleO::Tasks::{{role.name}}PostConfig: OS::Heat::None
+  OS::TripleO::{{role.name}}ExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
+  # Port assignments for the {{role.name}} role
+  OS::TripleO::{{role.name}}::Ports::ExternalPort: network/ports/noop.yaml
+  OS::TripleO::{{role.name}}::Ports::InternalApiPort: network/ports/noop.yaml
+  OS::TripleO::{{role.name}}::Ports::StoragePort: network/ports/noop.yaml
+  OS::TripleO::{{role.name}}::Ports::StorageMgmtPort: network/ports/noop.yaml
+  OS::TripleO::{{role.name}}::Ports::TenantPort: network/ports/noop.yaml
+  OS::TripleO::{{role.name}}::Ports::ManagementPort: network/ports/noop.yaml
+  OS::TripleO::{{role.name}}::Net::SoftwareConfig: net-config-noop.yaml
+
+{% endfor %}
+
+  # This resource registry entry will override the one generated by default
+  # in the jinja loop
+  OS::TripleO::Controller::Net::SoftwareConfig: net-config-bridge.yaml
 
   OS::TripleO::Server: OS::Nova::Server
 
@@ -49,9 +44,6 @@ resource_registry:
   OS::TripleO::NodeUserData: firstboot/userdata_default.yaml
   OS::TripleO::NodeTLSCAData: OS::Heat::None
   OS::TripleO::NodeTLSData: OS::Heat::None
-  OS::TripleO::ControllerExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
-  OS::TripleO::ComputeExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
-  OS::TripleO::CephStorageExtraConfigPre: puppet/extraconfig/pre_deploy/default.yaml
   OS::TripleO::NodeExtraConfig: puppet/extraconfig/pre_deploy/default.yaml
   OS::TripleO::NodeExtraConfigPost: extraconfig/post_deploy/default.yaml
 
@@ -85,46 +77,6 @@ resource_registry:
   OS::TripleO::Network::Ports::StorageMgmtVipPort: network/ports/noop.yaml
   OS::TripleO::Network::Ports::RedisVipPort: network/ports/ctlplane_vip.yaml
 
-  # Port assignments for the controller role
-  OS::TripleO::Controller::Ports::ExternalPort: network/ports/noop.yaml
-  OS::TripleO::Controller::Ports::InternalApiPort: network/ports/noop.yaml
-  OS::TripleO::Controller::Ports::StoragePort: network/ports/noop.yaml
-  OS::TripleO::Controller::Ports::StorageMgmtPort: network/ports/noop.yaml
-  OS::TripleO::Controller::Ports::TenantPort: network/ports/noop.yaml
-  OS::TripleO::Controller::Ports::ManagementPort: network/ports/noop.yaml
-
-  # Port assignments for the compute role
-  OS::TripleO::Compute::Ports::ExternalPort: network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::InternalApiPort: network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::StoragePort: network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::StorageMgmtPort: network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::TenantPort: network/ports/noop.yaml
-  OS::TripleO::Compute::Ports::ManagementPort: network/ports/noop.yaml
-
-  # Port assignments for the ceph storage role
-  OS::TripleO::CephStorage::Ports::ExternalPort: network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::InternalApiPort: network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::StoragePort: network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::TenantPort: network/ports/noop.yaml
-  OS::TripleO::CephStorage::Ports::ManagementPort: network/ports/noop.yaml
-
-  # Port assignments for the swift storage role
-  OS::TripleO::SwiftStorage::Ports::ExternalPort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::InternalApiPort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::StoragePort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::TenantPort: network/ports/noop.yaml
-  OS::TripleO::SwiftStorage::Ports::ManagementPort: network/ports/noop.yaml
-
-  # Port assignments for the block storage role
-  OS::TripleO::BlockStorage::Ports::ExternalPort: network/ports/noop.yaml
-  OS::TripleO::BlockStorage::Ports::InternalApiPort: network/ports/noop.yaml
-  OS::TripleO::BlockStorage::Ports::StoragePort: network/ports/noop.yaml
-  OS::TripleO::BlockStorage::Ports::StorageMgmtPort: network/ports/noop.yaml
-  OS::TripleO::BlockStorage::Ports::TenantPort: network/ports/noop.yaml
-  OS::TripleO::BlockStorage::Ports::ManagementPort: network/ports/noop.yaml
-
   # Service to network Mappings
   OS::TripleO::ServiceNetMap: network/service_net_map.yaml
 
@@ -147,6 +99,7 @@ resource_registry:
   OS::TripleO::Services::CinderBackup: OS::Heat::None
   OS::TripleO::Services::CinderScheduler: puppet/services/cinder-scheduler.yaml
   OS::TripleO::Services::CinderVolume: puppet/services/cinder-volume.yaml
+  OS::TripleO::Services::BlockStorageCinderVolume: puppet/services/cinder-volume.yaml
   OS::TripleO::Services::Core: OS::Heat::None
   OS::TripleO::Services::Keystone: puppet/services/keystone.yaml
   OS::TripleO::Services::GlanceApi: puppet/services/glance-api.yaml
@@ -188,6 +141,7 @@ resource_registry:
   OS::TripleO::Services::NovaConductor: puppet/services/nova-conductor.yaml
   OS::TripleO::Services::MongoDb: puppet/services/database/mongodb.yaml
   OS::TripleO::Services::NovaApi: puppet/services/nova-api.yaml
+  OS::TripleO::Services::NovaMetadata: puppet/services/nova-metadata.yaml
   OS::TripleO::Services::NovaScheduler: puppet/services/nova-scheduler.yaml
   OS::TripleO::Services::NovaConsoleauth: puppet/services/nova-consoleauth.yaml
   OS::TripleO::Services::NovaVncProxy: puppet/services/nova-vnc-proxy.yaml
@@ -212,9 +166,14 @@ resource_registry:
   OS::TripleO::Services::GnocchiStatsd: puppet/services/gnocchi-statsd.yaml
   OS::TripleO::Services::VipHosts: puppet/services/vip-hosts.yaml
   # Services that are disabled by default (use relevant environment files):
+  OS::TripleO::Services::FluentdClient: OS::Heat::None
+  OS::TripleO::LoggingConfiguration: puppet/services/logging/fluentd-config.yaml
   OS::Tripleo::Services::ManilaApi: OS::Heat::None
   OS::Tripleo::Services::ManilaScheduler: OS::Heat::None
   OS::Tripleo::Services::ManilaShare: OS::Heat::None
+  OS::Tripleo::Services::ManilaBackendGeneric: OS::Heat::None
+  OS::Tripleo::Services::ManilaBackendNetapp: OS::Heat::None
+  OS::Tripleo::Services::ManilaBackendCephFs: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronL3Agent: OS::Heat::None
   OS::TripleO::Services::ComputeNeutronMetadataAgent: OS::Heat::None
   OS::TripleO::Services::AodhApi: puppet/services/aodh-api.yaml
index fa160e4..bb50395 100644 (file)
@@ -121,16 +121,12 @@ parameters:
                  resource_registry) which represent nested stacks
                  for each service that should get installed on the {{role.name}} role.
     type: comma_delimited_list
-  {% if role.ServicesDefault %}
-    default: {{role.ServicesDefault}}
-  {% endif %}
+    default: {{role.ServicesDefault|default([])}}
 
   {{role.name}}Count:
     description: Number of {{role.name}} nodes to deploy
     type: number
-  {% if role.CountDefault %}
-    default: {{role.CountDefault}}
-  {% endif %}
+    default: {{role.CountDefault|default(0)}}
 
   {{role.name}}HostnameFormat:
     type: string
@@ -140,6 +136,8 @@ parameters:
       and %stackname% is replaced with the stack name e.g overcloud
   {% if role.HostnameFormatDefault %}
     default: "{{role.HostnameFormatDefault}}"
+  {% else %}
+    default: "%stackname%-{{role.name.lower()}}-%index%"
   {% endif %}
 
   {{role.name}}RemovalPolicies:
@@ -149,6 +147,15 @@ parameters:
       List of resources to be removed from {{role.name}} ResourceGroup when
       doing an update which requires removal of specific resources.
       Example format ComputeRemovalPolicies: [{'resource_list': ['0']}]
+
+{% if role.name != 'Compute' %}
+  {{role.name}}SchedulerHints:
+{% else %}
+  NovaComputeSchedulerHints:
+{% endif %}
+    type: json
+    description: Optional scheduler hints to pass to nova
+    default: {}
 {% endfor %}
 
   # Identifiers to trigger tasks on nodes
@@ -238,6 +245,15 @@ resources:
       EnabledServices: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
       ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
       ServiceHostnameList: {get_attr: [{{role.name}}, hostname]}
+      NetworkHostnameMap:
+        # Note (shardy) this somewhat complex yaql may be replaced
+        # with a map_deep_merge function in ocata.  It merges the
+        # list of maps, but appends to colliding lists so we can
+        # create a map of lists for all nodes for each network
+        yaql:
+          expression: dict($.data.where($ != null).flatten().selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+          data:
+            - {get_attr: [{{role.name}}, hostname_map]}
 
   {{role.name}}:
     type: OS::Heat::ResourceGroup
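
A rough Python equivalent of the NetworkHostnameMap yaql above (a reading aid, not part of the patch): each node in the role's ResourceGroup emits a hostname_map, and colliding network keys are collected into lists rather than overwritten.

    # Sketch of the groupBy/flatten yaql: merge the per-node hostname
    # maps of a role into one map of hostname lists per network.
    def merge_hostname_maps(node_maps):
        merged = {}
        for node_map in node_maps:
            if node_map is None:
                continue
            for network, hostname in node_map.items():
                merged.setdefault(network, []).append(hostname)
        return merged

    # example hostnames follow the <node>.<network>.<CloudDomain> pattern
    merge_hostname_maps([
        {'internal_api': 'overcloud-controller-0.internalapi.example.com'},
        {'internal_api': 'overcloud-controller-1.internalapi.example.com'},
    ])
    # -> {'internal_api': ['overcloud-controller-0.internalapi.example.com',
    #                      'overcloud-controller-1.internalapi.example.com']}
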
@@ -257,12 +273,32 @@ resources:
               params:
                 '%stackname%': {get_param: 'OS::stack_name'}
           NodeIndex: '%index%'
+  {% if role.name != 'Compute' %}
+          {{role.name}}SchedulerHints: {get_param: {{role.name}}SchedulerHints}
+  {% else %}
+          NovaComputeSchedulerHints: {get_param: NovaComputeSchedulerHints}
+  {% endif %}
           ServiceConfigSettings:
             map_merge:
               -  get_attr: [{{role.name}}ServiceChain, role_data, config_settings]
           {% for r in roles %}
               - get_attr: [{{r.name}}ServiceChain, role_data, global_config_settings]
           {% endfor %}
+              # This next step combines two yaql passes:
+              # - The inner one does a deep merge on the service_config_settings for all roles
+              # - The outer one filters the map based on the services enabled for the role
+              #   then merges the result into one map.
+              - yaql:
+                  expression: let(root => $) -> $.data.map.items().where($[0] in $root.data.services).select($[1]).reduce($1.mergeWith($2), {})
+                  data:
+                    map:
+                      yaql:
+                        expression: $.data.where($ != null).reduce($1.mergeWith($2), {})
+                        data:
+                        {% for r in roles %}
+                          - get_attr: [{{r.name}}ServiceChain, role_data, service_config_settings]
+                        {% endfor %}
+                    services: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
           ServiceNames: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
           MonitoringSubscriptions: {get_attr: [{{role.name}}ServiceChain, role_data, monitoring_subscriptions]}
 {% endfor %}
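
For readers less familiar with yaql, a loose Python analogue of the two passes described in the comment above (illustration only; yaql's mergeWith has its own list handling, while here nested dicts are merged and other values simply overridden):

    def deep_merge(a, b):
        # recursively merge nested dicts; later values win for scalars
        out = dict(a)
        for key, value in b.items():
            if isinstance(value, dict) and isinstance(out.get(key), dict):
                out[key] = deep_merge(out[key], value)
            else:
                out[key] = value
        return out

    def role_service_config_settings(per_role_settings, enabled_services):
        # inner pass: deep-merge service_config_settings from every role
        merged = {}
        for settings in per_role_settings:
            if settings:
                merged = deep_merge(merged, settings)
        # outer pass: keep only services enabled on this role, then merge
        result = {}
        for service, settings in merged.items():
            if service in enabled_services:
                result = deep_merge(result, settings)
        return result

    role_service_config_settings(
        [{'swift': {'a': 1}}, {'nova': {'b': 2}}, None],
        enabled_services=['nova'],
    )   # -> {'b': 2}
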
@@ -286,6 +322,24 @@ resources:
           - ','
 {% for role in roles %}
           - {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+{% endfor %}
+      logging_groups:
+        yaql:
+          expression: >
+            $.data.groups.flatten()
+          data:
+            groups:
+{% for role in roles %}
+              - {get_attr: [{{role.name}}ServiceChain, role_data, logging_groups]}
+{% endfor %}
+      logging_sources:
+        yaql:
+          expression: >
+            $.data.sources.flatten()
+          data:
+            sources:
+{% for role in roles %}
+              - {get_attr: [{{role.name}}ServiceChain, role_data, logging_sources]}
 {% endfor %}
       controller_ips: {get_attr: [Controller, ip_address]}
       controller_names: {get_attr: [Controller, hostname]}
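
The logging_groups/logging_sources aggregation above is a recursive flatten of the per-role lists before they reach the fluentd hiera keys in all-nodes-config; a tiny Python sketch, for illustration only:

    def flatten(nested):
        # recursively flatten nested lists, as yaql's flatten() does
        out = []
        for item in nested:
            if isinstance(item, list):
                out.extend(flatten(item))
            else:
                out.append(item)
        return out

    flatten([['swift'], ['nova', ['neutron']]])  # ['swift', 'nova', 'neutron']
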
@@ -308,11 +362,17 @@ resources:
             l:
 {% for role in roles %}
               - {get_attr: [{{role.name}}IpListMap, service_hostnames]}
+{% endfor %}
+      short_service_node_names:
+        yaql:
+          expression: dict($.data.l.where($ != null).selectMany($.items()).groupBy($[0], $[1], [$[0], $[1].flatten()]))
+          data:
+            l:
+{% for role in roles %}
+              - {get_attr: [{{role.name}}IpListMap, short_service_hostnames]}
 {% endfor %}
       # FIXME(shardy): These require further work to move into service_ips
       memcache_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, MemcachedNetwork]}]}
-      keystone_public_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystonePublicApiNetwork]}]}
-      keystone_admin_api_node_ips: {get_attr: [ControllerIpListMap, net_ip_map, {get_attr: [ServiceNetMap, service_net_map, KeystoneAdminApiNetwork]}]}
       NetVipMap: {get_attr: [VipMap, net_ip_map]}
       RedisVirtualIP: {get_attr: [RedisVirtualIP, ip_address]}
       ServiceNetMap: {get_attr: [ServiceNetMap, service_net_map_lower]}
@@ -428,11 +488,10 @@ resources:
   UpdateWorkflow:
     type: OS::TripleO::Tasks::UpdateWorkflow
     properties:
-      controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
-      compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
-      blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
-      objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
-      cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+      servers:
+{% for role in roles %}
+        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
       input_values:
         deploy_identifier: {get_param: DeployIdentifier}
         update_identifier: {get_param: UpdateIdentifier}
@@ -443,34 +502,26 @@ resources:
     type: OS::TripleO::AllNodesExtraConfig
     depends_on:
       - UpdateWorkflow
-      - ComputeAllNodesValidationDeployment
-      - BlockStorageAllNodesValidationDeployment
-      - ObjectStorageAllNodesValidationDeployment
-      - CephStorageAllNodesValidationDeployment
-      - ControllerAllNodesValidationDeployment
+{% for role in roles %}
+      - {{role.name}}AllNodesValidationDeployment
+{% endfor %}
     properties:
-      controller_servers: {get_attr: [Controller, attributes, nova_server_resource]}
-      compute_servers: {get_attr: [Compute, attributes, nova_server_resource]}
-      blockstorage_servers: {get_attr: [BlockStorage, attributes, nova_server_resource]}
-      objectstorage_servers: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
-      cephstorage_servers: {get_attr: [CephStorage, attributes, nova_server_resource]}
+{% for role in roles %}
+      servers: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
 
   # Post deployment steps for all roles
   AllNodesDeploySteps:
     type: OS::TripleO::PostDeploySteps
     properties:
       servers:
-        Controller: {get_attr: [Controller, attributes, nova_server_resource]}
-        Compute: {get_attr: [Compute, attributes, nova_server_resource]}
-        BlockStorage: {get_attr: [BlockStorage, attributes, nova_server_resource]}
-        ObjectStorage: {get_attr: [ObjectStorage, attributes, nova_server_resource]}
-        CephStorage: {get_attr: [CephStorage, attributes, nova_server_resource]}
+{% for role in roles %}
+        {{role.name}}: {get_attr: [{{role.name}}, attributes, nova_server_resource]}
+{% endfor %}
       role_data:
-        Controller: {get_attr: [ControllerServiceChain, role_data]}
-        Compute: {get_attr: [ComputeServiceChain, role_data]}
-        BlockStorage: {get_attr: [BlockStorageServiceChain, role_data]}
-        ObjectStorage: {get_attr: [ObjectStorageServiceChain, role_data]}
-        CephStorage: {get_attr: [CephStorageServiceChain, role_data]}
+{% for role in roles %}
+        {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data]}
+{% endfor %}
 
 outputs:
   ManagedEndpoints:
@@ -545,8 +596,6 @@ outputs:
   EnabledServices:
     description: The services enabled on each role
     value:
-      Controller: {get_attr: [ControllerServiceChain, role_data, service_names]}
-      Compute: {get_attr: [ComputeServiceChain, role_data, service_names]}
-      BlockStorage: {get_attr: [BlockStorageServiceChain, role_data, service_names]}
-      ObjectStorage: {get_attr: [ObjectStorageServiceChain, role_data, service_names]}
-      CephStorage: {get_attr: [CephStorageServiceChain, role_data, service_names]}
+{% for role in roles %}
+      {{role.name}}: {get_attr: [{{role.name}}ServiceChain, role_data, service_names]}
+{% endfor %}
index c764d4e..cae60aa 100644 (file)
@@ -20,18 +20,20 @@ parameters:
     type: string
   controller_ips:
     type: comma_delimited_list
+  logging_groups:
+    type: json
+  logging_sources:
+    type: json
   service_ips:
     type: json
   service_node_names:
     type: json
+  short_service_node_names:
+    type: json
   controller_names:
     type: comma_delimited_list
   memcache_node_ips:
     type: comma_delimited_list
-  keystone_public_api_node_ips:
-    type: comma_delimited_list
-  keystone_admin_api_node_ips:
-    type: comma_delimited_list
   NetVipMap:
     type: json
   RedisVirtualIP:
@@ -56,6 +58,12 @@ parameters:
       Heat action performed on the top-level stack.
     constraints:
     - allowed_values: ['CREATE', 'UPDATE']
+  # NOTE(jaosorior): This is being set as IPA as it's the first
+  # CA we'll actually be testing out. But we can change this if
+  # people request it.
+  CertmongerCA:
+    type: string
+    default: 'IPA'
 
 resources:
 
@@ -77,6 +85,8 @@ resources:
             all_nodes:
               mapped_data:
                 map_merge:
+                  - tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: logging_sources}
+                  - tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: logging_groups}
                   - enabled_services: {get_param: enabled_services}
                   # This writes out a mapping of service_name_enabled: 'true'
                   # For any services not enabled, hiera foo_enabled will
@@ -120,6 +130,7 @@ resources:
                   # provides a mapping of service_name_ips to a list of IPs
                   - {get_param: service_ips}
                   - {get_param: service_node_names}
+                  - {get_param: short_service_node_names}
                   - controller_node_ips:
                       list_join:
                       - ','
@@ -136,22 +147,6 @@ resources:
                             list_join:
                             - "]','inet6:["
                             - {get_param: memcache_node_ips}
-                    keystone_public_api_node_ips:
-                      str_replace:
-                        template: "['SERVERS_LIST']"
-                        params:
-                          SERVERS_LIST:
-                            list_join:
-                            - "','"
-                            - {get_param: keystone_public_api_node_ips}
-                    keystone_admin_api_node_ips:
-                      str_replace:
-                        template: "['SERVERS_LIST']"
-                        params:
-                          SERVERS_LIST:
-                            list_join:
-                            - "','"
-                            - {get_param: keystone_admin_api_node_ips}
 
                     deploy_identifier: {get_param: DeployIdentifier}
                     update_identifier: {get_param: UpdateIdentifier}
@@ -210,6 +205,8 @@ resources:
                     cloud_name_storage: {get_param: cloud_name_storage}
                     cloud_name_storage_mgmt: {get_param: cloud_name_storage_mgmt}
                     cloud_name_ctlplane: {get_param: cloud_name_ctlplane}
+                    # TLS parameters
+                    certmonger_ca: {get_param: CertmongerCA}
 
 outputs:
   config_id:
similarity index 92%
rename from puppet/cinder-storage.yaml
rename to puppet/blockstorage-role.yaml
index f5118c2..8b695ff 100644 (file)
@@ -307,6 +307,51 @@ outputs:
   hostname:
     description: Hostname of the server
     value: {get_attr: [BlockStorage, name]}
+  hostname_map:
+    description: Mapping of network names to hostnames
+    value:
+      external:
+        list_join:
+        - '.'
+        - - {get_attr: [BlockStorage, name]}
+          - external
+          - {get_param: CloudDomain}
+      internal_api:
+        list_join:
+        - '.'
+        - - {get_attr: [BlockStorage, name]}
+          - internalapi
+          - {get_param: CloudDomain}
+      storage:
+        list_join:
+        - '.'
+        - - {get_attr: [BlockStorage, name]}
+          - storage
+          - {get_param: CloudDomain}
+      storage_mgmt:
+        list_join:
+        - '.'
+        - - {get_attr: [BlockStorage, name]}
+          - storagemgmt
+          - {get_param: CloudDomain}
+      tenant:
+        list_join:
+        - '.'
+        - - {get_attr: [BlockStorage, name]}
+          - tenant
+          - {get_param: CloudDomain}
+      management:
+        list_join:
+        - '.'
+        - - {get_attr: [BlockStorage, name]}
+          - management
+          - {get_param: CloudDomain}
+      ctlplane:
+        list_join:
+        - '.'
+        - - {get_attr: [BlockStorage, name]}
+          - ctlplane
+          - {get_param: CloudDomain}
   hosts_entry:
     value:
       str_replace:
diff --git a/puppet/cephstorage-config.yaml b/puppet/cephstorage-config.yaml
deleted file mode 100644 (file)
index 4bad4a1..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
-  A software config which runs manifests/overcloud_cephstorage.pp
-
-parameters:
-  ConfigDebug:
-    default: false
-    description: Whether to run config management (e.g. Puppet) in debug mode.
-    type: boolean
-  StepConfig:
-     type: string
-     description: Config manifests that will be used to step through the deployment.
-     default: ''
-
-resources:
-
-  CephStoragePuppetConfigImpl:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: puppet
-      options:
-        enable_debug: {get_param: ConfigDebug}
-        enable_hiera: True
-        enable_facter: False
-        modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
-      outputs:
-      - name: result
-      config:
-        list_join:
-        - ''
-        - - get_file: manifests/overcloud_cephstorage.pp
-          - {get_param: StepConfig}
-
-outputs:
-  OS::stack_id:
-    description: The software config which runs overcloud_controller.pp
-    value: {get_resource: CephStoragePuppetConfigImpl}
similarity index 92%
rename from puppet/ceph-storage.yaml
rename to puppet/cephstorage-role.yaml
index 62748f9..55b2633 100644 (file)
@@ -317,6 +317,51 @@ outputs:
   hostname:
     description: Hostname of the server
     value: {get_attr: [CephStorage, name]}
+  hostname_map:
+    description: Mapping of network names to hostnames
+    value:
+      external:
+        list_join:
+        - '.'
+        - - {get_attr: [CephStorage, name]}
+          - external
+          - {get_param: CloudDomain}
+      internal_api:
+        list_join:
+        - '.'
+        - - {get_attr: [CephStorage, name]}
+          - internalapi
+          - {get_param: CloudDomain}
+      storage:
+        list_join:
+        - '.'
+        - - {get_attr: [CephStorage, name]}
+          - storage
+          - {get_param: CloudDomain}
+      storage_mgmt:
+        list_join:
+        - '.'
+        - - {get_attr: [CephStorage, name]}
+          - storagemgmt
+          - {get_param: CloudDomain}
+      tenant:
+        list_join:
+        - '.'
+        - - {get_attr: [CephStorage, name]}
+          - tenant
+          - {get_param: CloudDomain}
+      management:
+        list_join:
+        - '.'
+        - - {get_attr: [CephStorage, name]}
+          - management
+          - {get_param: CloudDomain}
+      ctlplane:
+        list_join:
+        - '.'
+        - - {get_attr: [CephStorage, name]}
+          - ctlplane
+          - {get_param: CloudDomain}
   hosts_entry:
     value:
       str_replace:
diff --git a/puppet/compute-config.yaml b/puppet/compute-config.yaml
deleted file mode 100644 (file)
index 9e128d3..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
-  A software config which runs manifests/overcloud_compute.pp
-
-parameters:
-  ConfigDebug:
-    default: false
-    description: Whether to run config management (e.g. Puppet) in debug mode.
-    type: boolean
-  StepConfig:
-     type: string
-     description: Config manifests that will be used to step through the deployment.
-     default: ''
-
-resources:
-
-  ComputePuppetConfigImpl:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: puppet
-      options:
-        enable_debug: {get_param: ConfigDebug}
-        enable_hiera: True
-        enable_facter: False
-        modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
-      outputs:
-      - name: result
-      config:
-        list_join:
-        - ''
-        - - get_file: manifests/overcloud_compute.pp
-          - {get_param: StepConfig}
-
-outputs:
-  OS::stack_id:
-    description: The software config which runs overcloud_controller.pp
-    value: {get_resource: ComputePuppetConfigImpl}
similarity index 93%
rename from puppet/compute.yaml
rename to puppet/compute-role.yaml
index 05b8d06..4d77d6d 100644 (file)
@@ -360,6 +360,51 @@ outputs:
   hostname:
     description: Hostname of the server
     value: {get_attr: [NovaCompute, name]}
+  hostname_map:
+    description: Mapping of network names to hostnames
+    value:
+      external:
+        list_join:
+        - '.'
+        - - {get_attr: [NovaCompute, name]}
+          - external
+          - {get_param: CloudDomain}
+      internal_api:
+        list_join:
+        - '.'
+        - - {get_attr: [NovaCompute, name]}
+          - internalapi
+          - {get_param: CloudDomain}
+      storage:
+        list_join:
+        - '.'
+        - - {get_attr: [NovaCompute, name]}
+          - storage
+          - {get_param: CloudDomain}
+      storage_mgmt:
+        list_join:
+        - '.'
+        - - {get_attr: [NovaCompute, name]}
+          - storagemgmt
+          - {get_param: CloudDomain}
+      tenant:
+        list_join:
+        - '.'
+        - - {get_attr: [NovaCompute, name]}
+          - tenant
+          - {get_param: CloudDomain}
+      management:
+        list_join:
+        - '.'
+        - - {get_attr: [NovaCompute, name]}
+          - management
+          - {get_param: CloudDomain}
+      ctlplane:
+        list_join:
+        - '.'
+        - - {get_attr: [NovaCompute, name]}
+          - ctlplane
+          - {get_param: CloudDomain}
   hosts_entry:
     description: >
       Server's IP address and hostname in the /etc/hosts format
similarity index 64%
rename from puppet/blockstorage-config.yaml
rename to puppet/config.role.j2.yaml
index 9b31b44..e59a021 100644 (file)
@@ -1,7 +1,7 @@
 heat_template_version: 2015-04-30
 
 description: >
-  A software config which runs manifests/overcloud_volume.pp
+  A software config which runs puppet on the {{role}} role
 
 parameters:
   ConfigDebug:
@@ -15,7 +15,7 @@ parameters:
 
 resources:
 
-  BlockStoragePuppetConfigImpl:
+  {{role}}PuppetConfigImpl:
     type: OS::Heat::SoftwareConfig
     properties:
       group: puppet
@@ -26,13 +26,19 @@ resources:
         modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
       outputs:
       - name: result
+      inputs:
+      - name: step
+        type: Number
       config:
         list_join:
         - ''
-        - - get_file: manifests/overcloud_volume.pp
+        - - str_replace:
+              template: {get_file: manifests/overcloud_role.pp}
+              params:
+                __ROLE__: {{role.lower()}}
           - {get_param: StepConfig}
 
 outputs:
   OS::stack_id:
-    description: The software config which runs overcloud_controller.pp
-    value: {get_resource: BlockStoragePuppetConfigImpl}
+    description: The software config which runs puppet on the {{role}} role
+    value: {get_resource: {{role}}PuppetConfigImpl}
diff --git a/puppet/controller-config.yaml b/puppet/controller-config.yaml
deleted file mode 100644 (file)
index 811c544..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
-  A software config which runs manifests/overcloud_controller.pp
-
-parameters:
-  ConfigDebug:
-    default: false
-    description: Whether to run config management (e.g. Puppet) in debug mode.
-    type: boolean
-  StepConfig:
-     type: string
-     description: Config manifests that will be used to step through the deployment.
-     default: ''
-
-resources:
-
-  ControllerPuppetConfigImpl:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: puppet
-      options:
-        enable_debug: {get_param: ConfigDebug}
-        enable_hiera: True
-        enable_facter: False
-        modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
-      outputs:
-      - name: result
-      config:
-        list_join:
-        - ''
-        - - get_file: manifests/overcloud_controller.pp
-          - {get_param: StepConfig}
-
-outputs:
-  OS::stack_id:
-    description: The software config which runs overcloud_controller.pp
-    value: {get_resource: ControllerPuppetConfigImpl}
similarity index 93%
rename from puppet/controller.yaml
rename to puppet/controller-role.yaml
index 33ed51c..b1433b0 100644 (file)
@@ -403,6 +403,51 @@ outputs:
   hostname:
     description: Hostname of the server
     value: {get_attr: [Controller, name]}
+  hostname_map:
+    description: Mapping of network names to hostnames
+    value:
+      external:
+        list_join:
+        - '.'
+        - - {get_attr: [Controller, name]}
+          - external
+          - {get_param: CloudDomain}
+      internal_api:
+        list_join:
+        - '.'
+        - - {get_attr: [Controller, name]}
+          - internalapi
+          - {get_param: CloudDomain}
+      storage:
+        list_join:
+        - '.'
+        - - {get_attr: [Controller, name]}
+          - storage
+          - {get_param: CloudDomain}
+      storage_mgmt:
+        list_join:
+        - '.'
+        - - {get_attr: [Controller, name]}
+          - storagemgmt
+          - {get_param: CloudDomain}
+      tenant:
+        list_join:
+        - '.'
+        - - {get_attr: [Controller, name]}
+          - tenant
+          - {get_param: CloudDomain}
+      management:
+        list_join:
+        - '.'
+        - - {get_attr: [Controller, name]}
+          - management
+          - {get_param: CloudDomain}
+      ctlplane:
+        list_join:
+        - '.'
+        - - {get_attr: [Controller, name]}
+          - ctlplane
+          - {get_param: CloudDomain}
   hosts_entry:
     description: >
       Server's IP address and hostname in the /etc/hosts format
index aa5c3c4..6a2ea4d 100644 (file)
@@ -4,15 +4,7 @@ description: Configure hieradata for all MidoNet nodes
 
 parameters:
   # Parameters passed from the parent template
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
+  servers:
     type: json
 
   EnableZookeeperOnController:
@@ -102,10 +94,10 @@ resources:
     type: OS::Heat::StructuredDeploymentGroup
     properties:
       config: {get_resource: NetworkMidoNetConfig}
-      servers: {get_param: controller_servers}
+      servers: {get_param: [servers, Controller]}
 
   NetworkMidonetDeploymentComputes:
     type: OS::Heat::StructuredDeploymentGroup
     properties:
       config: {get_resource: NetworkMidoNetConfig}
-      servers: {get_param: compute_servers}
+      servers: {get_param: [servers, Compute]}
index e924fc8..7bda0cd 100644 (file)
@@ -4,15 +4,7 @@ description: Configure hieradata for Network Cisco configuration
 
 parameters:
   # Parameters passed from the parent template
-  controller_servers:
-    type: json
-  compute_servers:
-    type: json
-  blockstorage_servers:
-    type: json
-  objectstorage_servers:
-    type: json
-  cephstorage_servers:
+  servers:
     type: json
 
   # extra parameters passed via parameter_defaults
@@ -140,7 +132,7 @@ resources:
     properties:
       name: NetworkCiscoDeployment
       config: {get_resource: NetworkCiscoConfig}
-      servers:  {get_param: controller_servers}
+      servers:  {get_param: [servers, Controller]}
       input_values:
         UCSM_ip: {get_param: NetworkUCSMIp}
         UCSM_username: {get_param: NetworkUCSMUsername}
@@ -187,7 +179,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     properties:
       name: CollectMacDeploymentsController
-      servers:  {get_param: controller_servers}
+      servers:  {get_param: [servers, Controller]}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
 
@@ -195,7 +187,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     properties:
       name: CollectMacDeploymentsCompute
-      servers:  {get_param: compute_servers}
+      servers:  {get_param: [servers, Compute]}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
 
@@ -203,7 +195,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     properties:
       name: CollectMacDeploymentsBlockStorage
-      servers:  {get_param: blockstorage_servers}
+      servers:  {get_param: [servers, BlockStorage]}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
 
@@ -211,7 +203,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     properties:
       name: CollectMacDeploymentsObjectStorage
-      servers:  {get_param: objectstorage_servers}
+      servers:  {get_param: [servers, ObjectStorage]}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
 
@@ -219,7 +211,7 @@ resources:
     type: OS::Heat::SoftwareDeployments
     properties:
       name: CollectMacDeploymentsCephStorage
-      servers:  {get_param: cephstorage_servers}
+      servers:  {get_param: [servers, CephStorage]}
       config: {get_resource: CollectMacConfig}
       actions: ['CREATE'] # Only do this on CREATE
 
@@ -294,7 +286,7 @@ resources:
     type: OS::Heat::SoftwareDeployment
     properties:
       name: MappingToNexusDeploymentsController
-      server:  {get_param: [controller_servers, '0']}
+      server:  {get_param: [servers, Controller, '0']}
       config: {get_resource: MappingToNexusConfig}
       input_values:
         # FIXME(shardy): It'd be more convenient if we could join these
@@ -338,7 +330,7 @@ resources:
     depends_on: MappingToNexusDeploymentsController
     properties:
       name: MappingToUCSMDeploymentsController
-      server:  {get_param: [controller_servers, '0']}
+      server:  {get_param: [servers, Controller, '0']}
       config: {get_resource: MappingToUCSMConfig}
       input_values:
         ucsm_config: {get_param: NetworkUCSMHostList}
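
The extraconfig templates above show the parameter reshaping applied throughout this change: the five per-role *_servers json parameters collapse into a single servers map keyed by role name, so nested templates index into it instead of taking one parameter per role. A minimal sketch of the lookup, with hypothetical data:

    # old interface: one parameter per role
    #   {get_param: controller_servers}
    # new interface: one 'servers' map keyed by role name
    #   {get_param: [servers, Controller]}
    servers = {
        'Controller': {'overcloud-controller-0': 'server-id-0'},
        'Compute': {'overcloud-novacompute-0': 'server-id-1'},
    }
    controller_servers = servers['Controller']
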
index e496553..f5b1f0e 100644 (file)
@@ -32,6 +32,18 @@ resources:
                 contrail::vrouter::provision_vrouter::keystone_admin_tenant_name: admin
                 contrail::vrouter::provision_vrouter::keystone_admin_password: '"%{::admin_password}"'
 
+                contrail::vnc_api::vnc_api_config:
+                  'auth/AUTHN_TYPE':
+                    value: keystone
+                  'auth/AUTHN_PROTOCOL':
+                    value: http
+                  'auth/AUTHN_SERVER':
+                    value: "%{hiera('keystone_admin_api_vip')}"
+                  'auth/AUTHN_PORT':
+                    value: 35357
+                  'auth/AUTHN_URL':
+                    value: '/v2.0/tokens'
+
   ComputeContrailDeployment:
     type: OS::Heat::StructuredDeployment
     properties:
index 6ff9088..48446e5 100644 (file)
@@ -70,12 +70,22 @@ parameters:
   CinderNetappStoragePools:
     type: string
     default: ''
-  CinderNetappEseriesHostType:
+  CinderNetappHostType:
     type: string
-    default: 'linux_dm_mp'
+    default: ''
   CinderNetappWebservicePath:
     type: string
     default: '/devmgr/v2'
+  # DEPRECATED options for compatibility with older versions
+  CinderNetappEseriesHostType:
+    type: string
+    default: 'linux_dm_mp'
+
+parameter_groups:
+- label: deprecated
+  description: Do not use deprecated params; they will be removed.
+  parameters:
+  - CinderNetappEseriesHostType
 
 resources:
   CinderNetappConfig:
@@ -108,7 +118,7 @@ resources:
                 cinder::backend::netapp::netapp_controller_ips: {get_input: NetappControllerIps}
                 cinder::backend::netapp::netapp_sa_password: {get_input: NetappSaPassword}
                 cinder::backend::netapp::netapp_storage_pools: {get_input: NetappStoragePools}
-                cinder::backend::netapp::netapp_eseries_host_type: {get_input: NetappEseriesHostType}
+                cinder::backend::netapp::netapp_host_type: {get_input: NetappHostType}
                 cinder::backend::netapp::netapp_webservice_path: {get_input: NetappWebservicePath}
 
   CinderNetappDeployment:
@@ -139,7 +149,7 @@ resources:
         NetappControllerIps: {get_param: CinderNetappControllerIps}
         NetappSaPassword: {get_param: CinderNetappSaPassword}
         NetappStoragePools: {get_param: CinderNetappStoragePools}
-        NetappEseriesHostType: {get_param: CinderNetappEseriesHostType}
+        NetappHostType: {get_param: CinderNetappHostType}
         NetappWebservicePath: {get_param: CinderNetappWebservicePath}
 
 outputs:
index e281ef5..49d8457 100644 (file)
@@ -64,11 +64,9 @@ resources:
           | openssl md5 | cut -c 10- \
           > ${heat_outputs_path}.key_modulus
         # We need to reload haproxy in case the certificate changed because
-        # puppet doesn't know the contents of the cert file.  The pacemaker
-        # case is handled separately in a pacemaker-specific resource.
-        pacemaker_status=$(systemctl is-active pacemaker)
+        # puppet doesn't know the contents of the cert file.
         haproxy_status=$(systemctl is-active haproxy)
-        if [ "$pacemaker_status" != "active" -a "$haproxy_status" = "active"]; then
+        if [ "$haproxy_status" = "active" ]; then
             systemctl reload haproxy
         fi
 
diff --git a/puppet/manifests/overcloud_cephstorage.pp b/puppet/manifests/overcloud_cephstorage.pp
deleted file mode 100644 (file)
index 2653bad..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
-  hiera_include('ceph_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_ceph', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/manifests/overcloud_compute.pp b/puppet/manifests/overcloud_compute.pp
deleted file mode 100644 (file)
index f96c193..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
-  hiera_include('compute_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_compute', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/manifests/overcloud_object.pp b/puppet/manifests/overcloud_object.pp
deleted file mode 100644 (file)
index 414a06b..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
-  hiera_include('object_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_object', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
similarity index 71%
rename from puppet/manifests/overcloud_controller.pp
rename to puppet/manifests/overcloud_role.pp
index 25bdbfb..1a59620 100644 (file)
 # License for the specific language governing permissions and limitations
 # under the License.
 
+# The content of this file will be used to generate
+# the puppet manifests for all roles; the placeholder
+# __ROLE__ will be replaced by 'controller', 'blockstorage',
+# 'cephstorage' and all the other deployed roles.
+
 if hiera('step') >= 4 {
-  hiera_include('controller_classes', [])
+  hiera_include('__ROLE___classes', [])
 }
 
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller', hiera('step')])
+$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud___ROLE__', hiera('step')])
 package_manifest{$package_manifest_name: ensure => present}
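
As the comment notes, puppet/config.role.j2.yaml specializes this single manifest per role via str_replace on __ROLE__; a quick Python illustration of that substitution (run from the repository root; not how Heat actually performs it):

    generic = open('puppet/manifests/overcloud_role.pp').read()
    cephstorage_manifest = generic.replace('__ROLE__', 'cephstorage')
    # yields hiera_include('cephstorage_classes', []) and a package
    # manifest path ending in installed-packages/overcloud_cephstorage<step>
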
diff --git a/puppet/manifests/overcloud_volume.pp b/puppet/manifests/overcloud_volume.pp
deleted file mode 100644 (file)
index e1cdadd..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2015 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-if hiera('step') >= 4 {
-  hiera_include('volume_classes', [])
-}
-
-$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_volume', hiera('step')])
-package_manifest{$package_manifest_name: ensure => present}
diff --git a/puppet/objectstorage-config.yaml b/puppet/objectstorage-config.yaml
deleted file mode 100644 (file)
index 1dee8e6..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-heat_template_version: 2015-04-30
-
-description: >
-  A software config which runs manifests/overcloud_object.pp
-
-parameters:
-  ConfigDebug:
-    default: false
-    description: Whether to run config management (e.g. Puppet) in debug mode.
-    type: boolean
-  StepConfig:
-     type: string
-     description: Config manifests that will be used to step through the deployment.
-     default: ''
-
-resources:
-
-  ObjectStoragePuppetConfigImpl:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: puppet
-      options:
-        enable_debug: {get_param: ConfigDebug}
-        enable_hiera: True
-        enable_facter: False
-        modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
-      outputs:
-      - name: result
-      config:
-        list_join:
-        - ''
-        - - get_file: manifests/overcloud_object.pp
-          - {get_param: StepConfig}
-
-outputs:
-  OS::stack_id:
-    description: The software config which runs overcloud_controller.pp
-    value: {get_resource: ObjectStoragePuppetConfigImpl}
similarity index 92%
rename from puppet/swift-storage.yaml
rename to puppet/objectstorage-role.yaml
index 9eb6631..d7681d1 100644 (file)
@@ -306,6 +306,51 @@ outputs:
   hostname:
     description: Hostname of the server
     value: {get_attr: [SwiftStorage, name]}
+  hostname_map:
+    description: Mapping of network names to hostnames
+    value:
+      external:
+        list_join:
+        - '.'
+        - - {get_attr: [SwiftStorage, name]}
+          - external
+          - {get_param: CloudDomain}
+      internal_api:
+        list_join:
+        - '.'
+        - - {get_attr: [SwiftStorage, name]}
+          - internalapi
+          - {get_param: CloudDomain}
+      storage:
+        list_join:
+        - '.'
+        - - {get_attr: [SwiftStorage, name]}
+          - storage
+          - {get_param: CloudDomain}
+      storage_mgmt:
+        list_join:
+        - '.'
+        - - {get_attr: [SwiftStorage, name]}
+          - storagemgmt
+          - {get_param: CloudDomain}
+      tenant:
+        list_join:
+        - '.'
+        - - {get_attr: [SwiftStorage, name]}
+          - tenant
+          - {get_param: CloudDomain}
+      management:
+        list_join:
+        - '.'
+        - - {get_attr: [SwiftStorage, name]}
+          - management
+          - {get_param: CloudDomain}
+      ctlplane:
+        list_join:
+        - '.'
+        - - {get_attr: [SwiftStorage, name]}
+          - ctlplane
+          - {get_param: CloudDomain}
   hosts_entry:
     value:
       str_replace:
diff --git a/puppet/post.j2.yaml b/puppet/post.j2.yaml
new file mode 100644 (file)
index 0000000..65c96ac
--- /dev/null
@@ -0,0 +1,139 @@
+heat_template_version: 2016-10-14
+
+description: >
+  Post-deploy configuration steps via puppet for all roles,
+  as defined in ../roles_data.yaml
+
+parameters:
+  servers:
+    type: json
+    description: Mapping of Role name e.g Controller to a list of servers
+
+  role_data:
+    type: json
+    description: Mapping of Role name e.g Controller to the per-role data
+
+  DeployIdentifier:
+    default: ''
+    type: string
+    description: >
+      Setting this to a unique value will re-run any deployment tasks which
+      perform configuration on a Heat stack-update.
+
+resources:
+
+{% for role in roles %}
+  # Post deployment steps for all roles
+  # A single config is re-applied with an incrementing step number
+  # {{role.name}} Role steps
+  {{role.name}}ArtifactsConfig:
+    type: deploy-artifacts.yaml
+
+  {{role.name}}ArtifactsDeploy:
+    type: OS::Heat::StructuredDeployments
+    properties:
+      servers:  {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}ArtifactsConfig}
+
+  {{role.name}}PreConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PreConfig
+    properties:
+      servers: {get_param: [servers, {{role.name}}]}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}Config:
+    type: OS::TripleO::{{role.name}}Config
+    properties:
+      StepConfig: {get_param: [role_data, {{role.name}}, step_config]}
+
+  # Step through a series of configuration steps
+  {{role.name}}Deployment_Step1:
+    type: OS::Heat::StructuredDeploymentGroup
+    depends_on: [{{role.name}}PreConfig, {{role.name}}ArtifactsDeploy]
+    properties:
+      name: {{role.name}}Deployment_Step1
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}Config}
+      input_values:
+        step: 1
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}Deployment_Step2:
+    type: OS::Heat::StructuredDeploymentGroup
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step1
+  {% endfor %}
+    properties:
+      name: {{role.name}}Deployment_Step2
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}Config}
+      input_values:
+        step: 2
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}Deployment_Step3:
+    type: OS::Heat::StructuredDeploymentGroup
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step2
+  {% endfor %}
+    properties:
+      name: {{role.name}}Deployment_Step3
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}Config}
+      input_values:
+        step: 3
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}Deployment_Step4:
+    type: OS::Heat::StructuredDeploymentGroup
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step3
+  {% endfor %}
+    properties:
+      name: {{role.name}}Deployment_Step4
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}Config}
+      input_values:
+        step: 4
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}Deployment_Step5:
+    type: OS::Heat::StructuredDeploymentGroup
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step4
+  {% endfor %}
+    properties:
+      name: {{role.name}}Deployment_Step5
+      servers: {get_param: [servers, {{role.name}}]}
+      config: {get_resource: {{role.name}}Config}
+      input_values:
+        step: 5
+        update_identifier: {get_param: DeployIdentifier}
+
+  {{role.name}}PostConfig:
+    type: OS::TripleO::Tasks::{{role.name}}PostConfig
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}Deployment_Step5
+  {% endfor %}
+    properties:
+      servers:  {get_param: servers}
+      input_values:
+        update_identifier: {get_param: DeployIdentifier}
+
+  # Note, this should come last, so use depends_on to ensure
+  # this is created after any other resources.
+  {{role.name}}ExtraConfigPost:
+    depends_on:
+  {% for dep in roles %}
+      - {{dep.name}}PostConfig
+  {% endfor %}
+    type: OS::TripleO::NodeExtraConfigPost
+    properties:
+        servers: {get_param: [servers, {{role.name}}]}
+{% endfor %}
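
The generated depends_on lists above keep every role in lockstep: no role starts step N+1 until all roles have finished step N, exactly as the hand-written post.yaml removed below did for the five built-in roles. A minimal Python sketch of that ordering (illustration only; within a step, Heat runs the roles' deployments concurrently):

    ROLES = ['Controller', 'Compute', 'BlockStorage', 'ObjectStorage', 'CephStorage']

    def run_post_deploy(apply_step, steps=5):
        for step in range(1, steps + 1):
            for role in ROLES:
                apply_step(role, step)   # step N starts only after every role did N-1

    run_post_deploy(lambda role, step: print(role, step))
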
diff --git a/puppet/post.yaml b/puppet/post.yaml
deleted file mode 100644 (file)
index 8f57b34..0000000
+++ /dev/null
@@ -1,644 +0,0 @@
-heat_template_version: 2016-10-14
-
-description: >
-  Post-deploy configuration steps via puppet for all roles,
-  Controller, Compute, BlockStorage, SwiftStorage and CephStorage.
-
-parameters:
-  servers:
-    type: json
-    description: Mapping of Role name e.g Controller to a list of servers
-
-  role_data:
-    type: json
-    description: Mapping of Role name e.g Controller to the per-role data
-
-  DeployIdentifier:
-    default: ''
-    type: string
-    description: >
-      Setting this to a unique value will re-run any deployment tasks which
-      perform configuration on a Heat stack-update.
-
-resources:
-  # Post deployment steps for all roles
-  # A single config is re-applied with an incrementing step number
-  # Controller Role steps
-  ControllerArtifactsConfig:
-    type: deploy-artifacts.yaml
-
-  ControllerArtifactsDeploy:
-    type: OS::Heat::StructuredDeployments
-    properties:
-      servers:  {get_param: [servers, Controller]}
-      config: {get_resource: ControllerArtifactsConfig}
-
-  ControllerPreConfig:
-    type: OS::TripleO::Tasks::ControllerPreConfig
-    properties:
-      servers: {get_param: [servers, Controller]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  ControllerConfig:
-    type: OS::TripleO::ControllerConfig
-    properties:
-      StepConfig: {get_param: [role_data, Controller, step_config]}
-
-  # Step through a series of configuration steps
-  ControllerDeployment_Step1:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on: [ControllerPreConfig, ControllerArtifactsDeploy]
-    properties:
-      name: ControllerDeployment_Step1
-      servers: {get_param: [servers, Controller]}
-      config: {get_resource: ControllerConfig}
-      input_values:
-        step: 1
-        update_identifier: {get_param: DeployIdentifier}
-
-  ControllerDeployment_Step2:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step1
-      - ComputeDeployment_Step1
-      - BlockStorageDeployment_Step1
-      - ObjectStorageDeployment_Step1
-      - CephStorageDeployment_Step1
-    properties:
-      name: ControllerDeployment_Step2
-      servers: {get_param: [servers, Controller]}
-      config: {get_resource: ControllerConfig}
-      input_values:
-        step: 2
-        update_identifier: {get_param: DeployIdentifier}
-
-  ControllerDeployment_Step3:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step2
-      - ComputeDeployment_Step2
-      - BlockStorageDeployment_Step2
-      - ObjectStorageDeployment_Step2
-      - CephStorageDeployment_Step2
-    properties:
-      name: ControllerDeployment_Step3
-      servers: {get_param: [servers, Controller]}
-      config: {get_resource: ControllerConfig}
-      input_values:
-        step: 3
-        update_identifier: {get_param: DeployIdentifier}
-
-  ControllerDeployment_Step4:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step3
-      - ComputeDeployment_Step3
-      - BlockStorageDeployment_Step3
-      - ObjectStorageDeployment_Step3
-      - CephStorageDeployment_Step3
-    properties:
-      name: ControllerDeployment_Step4
-      servers: {get_param: [servers, Controller]}
-      config: {get_resource: ControllerConfig}
-      input_values:
-        step: 4
-        update_identifier: {get_param: DeployIdentifier}
-
-  ControllerDeployment_Step5:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step4
-      - ComputeDeployment_Step4
-      - BlockStorageDeployment_Step4
-      - ObjectStorageDeployment_Step4
-      - CephStorageDeployment_Step4
-    properties:
-      name: ControllerDeployment_Step5
-      servers: {get_param: [servers, Controller]}
-      config: {get_resource: ControllerConfig}
-      input_values:
-        step: 5
-        update_identifier: {get_param: DeployIdentifier}
-
-  ControllerPostConfig:
-    type: OS::TripleO::Tasks::ControllerPostConfig
-    depends_on:
-      - ControllerDeployment_Step5
-      - ComputeDeployment_Step5
-      - BlockStorageDeployment_Step5
-      - ObjectStorageDeployment_Step5
-      - CephStorageDeployment_Step5
-    properties:
-      servers:  {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  ControllerExtraConfigPost:
-    depends_on:
-      - ControllerPostConfig
-      - ComputePostConfig
-      - BlockStoragePostConfig
-      - ObjectStoragePostConfig
-      - CephStoragePostConfig
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-        servers: {get_param: [servers, Controller]}
-
-  # Compute Role steps
-  ComputeArtifactsConfig:
-    type: deploy-artifacts.yaml
-
-  ComputeArtifactsDeploy:
-    type: OS::Heat::StructuredDeployments
-    properties:
-      servers:  {get_param: [servers, Compute]}
-      config: {get_resource: ComputeArtifactsConfig}
-
-  ComputePreConfig:
-    type: OS::TripleO::Tasks::ComputePreConfig
-    properties:
-      servers: {get_param: [servers, Compute]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  ComputeConfig:
-    type: OS::TripleO::ComputeConfig
-    properties:
-      StepConfig: {get_param: [role_data, Compute, step_config]}
-
-  # Step through a series of configuration steps
-  ComputeDeployment_Step1:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on: [ComputePreConfig, ComputeArtifactsDeploy]
-    properties:
-      name: ComputeDeployment_Step1
-      servers: {get_param: [servers, Compute]}
-      config: {get_resource: ComputeConfig}
-      input_values:
-        step: 1
-        update_identifier: {get_param: DeployIdentifier}
-
-  ComputeDeployment_Step2:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step1
-      - ComputeDeployment_Step1
-      - BlockStorageDeployment_Step1
-      - ObjectStorageDeployment_Step1
-      - CephStorageDeployment_Step1
-    properties:
-      name: ComputeDeployment_Step2
-      servers: {get_param: [servers, Compute]}
-      config: {get_resource: ComputeConfig}
-      input_values:
-        step: 2
-        update_identifier: {get_param: DeployIdentifier}
-
-  ComputeDeployment_Step3:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step2
-      - ComputeDeployment_Step2
-      - BlockStorageDeployment_Step2
-      - ObjectStorageDeployment_Step2
-      - CephStorageDeployment_Step2
-    properties:
-      name: ComputeDeployment_Step3
-      servers: {get_param: [servers, Compute]}
-      config: {get_resource: ComputeConfig}
-      input_values:
-        step: 3
-        update_identifier: {get_param: DeployIdentifier}
-
-  ComputeDeployment_Step4:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step3
-      - ComputeDeployment_Step3
-      - BlockStorageDeployment_Step3
-      - ObjectStorageDeployment_Step3
-      - CephStorageDeployment_Step3
-    properties:
-      name: ComputeDeployment_Step4
-      servers: {get_param: [servers, Compute]}
-      config: {get_resource: ComputeConfig}
-      input_values:
-        step: 4
-        update_identifier: {get_param: DeployIdentifier}
-
-  ComputeDeployment_Step5:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step4
-      - ComputeDeployment_Step4
-      - BlockStorageDeployment_Step4
-      - ObjectStorageDeployment_Step4
-      - CephStorageDeployment_Step4
-    properties:
-      name: ComputeDeployment_Step5
-      servers: {get_param: [servers, Compute]}
-      config: {get_resource: ComputeConfig}
-      input_values:
-        step: 5
-        update_identifier: {get_param: DeployIdentifier}
-
-  ComputePostConfig:
-    type: OS::TripleO::Tasks::ComputePostConfig
-    depends_on:
-      - ControllerDeployment_Step5
-      - ComputeDeployment_Step5
-      - BlockStorageDeployment_Step5
-      - ObjectStorageDeployment_Step5
-      - CephStorageDeployment_Step5
-    properties:
-      servers:  {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  ComputeExtraConfigPost:
-    depends_on:
-      - ControllerPostConfig
-      - ComputePostConfig
-      - BlockStoragePostConfig
-      - ObjectStoragePostConfig
-      - CephStoragePostConfig
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-      servers: {get_param: [servers, Compute]}
-
-  # BlockStorage Role steps
-  BlockStorageArtifactsConfig:
-    type: deploy-artifacts.yaml
-
-  BlockStorageArtifactsDeploy:
-    type: OS::Heat::StructuredDeployments
-    properties:
-      servers:  {get_param: [servers, BlockStorage]}
-      config: {get_resource: BlockStorageArtifactsConfig}
-
-  BlockStoragePreConfig:
-    type: OS::TripleO::Tasks::BlockStoragePreConfig
-    properties:
-      servers: {get_param: [servers, BlockStorage]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  BlockStorageConfig:
-    type: OS::TripleO::BlockStorageConfig
-    properties:
-      StepConfig: {get_param: [role_data, BlockStorage, step_config]}
-
-  # Step through a series of configuration steps
-  BlockStorageDeployment_Step1:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on: [BlockStoragePreConfig, BlockStorageArtifactsDeploy]
-    properties:
-      name: BlockStorageDeployment_Step1
-      servers: {get_param: [servers, BlockStorage]}
-      config: {get_resource: BlockStorageConfig}
-      input_values:
-        step: 1
-        update_identifier: {get_param: DeployIdentifier}
-
-  BlockStorageDeployment_Step2:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step1
-      - ComputeDeployment_Step1
-      - BlockStorageDeployment_Step1
-      - ObjectStorageDeployment_Step1
-      - CephStorageDeployment_Step1
-    properties:
-      name: BlockStorageDeployment_Step2
-      servers: {get_param: [servers, BlockStorage]}
-      config: {get_resource: BlockStorageConfig}
-      input_values:
-        step: 2
-        update_identifier: {get_param: DeployIdentifier}
-
-  BlockStorageDeployment_Step3:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step2
-      - ComputeDeployment_Step2
-      - BlockStorageDeployment_Step2
-      - ObjectStorageDeployment_Step2
-      - CephStorageDeployment_Step2
-    properties:
-      name: BlockStorageDeployment_Step3
-      servers: {get_param: [servers, BlockStorage]}
-      config: {get_resource: BlockStorageConfig}
-      input_values:
-        step: 3
-        update_identifier: {get_param: DeployIdentifier}
-
-  BlockStorageDeployment_Step4:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step3
-      - ComputeDeployment_Step3
-      - BlockStorageDeployment_Step3
-      - ObjectStorageDeployment_Step3
-      - CephStorageDeployment_Step3
-    properties:
-      name: BlockStorageDeployment_Step4
-      servers: {get_param: [servers, BlockStorage]}
-      config: {get_resource: BlockStorageConfig}
-      input_values:
-        step: 4
-        update_identifier: {get_param: DeployIdentifier}
-
-  BlockStorageDeployment_Step5:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step4
-      - ComputeDeployment_Step4
-      - BlockStorageDeployment_Step4
-      - ObjectStorageDeployment_Step4
-      - CephStorageDeployment_Step4
-    properties:
-      name: BlockStorageDeployment_Step5
-      servers: {get_param: [servers, BlockStorage]}
-      config: {get_resource: BlockStorageConfig}
-      input_values:
-        step: 5
-        update_identifier: {get_param: DeployIdentifier}
-
-  BlockStoragePostConfig:
-    type: OS::TripleO::Tasks::BlockStoragePostConfig
-    depends_on:
-      - ControllerDeployment_Step5
-      - ComputeDeployment_Step5
-      - BlockStorageDeployment_Step5
-      - ObjectStorageDeployment_Step5
-      - CephStorageDeployment_Step5
-    properties:
-      servers:  {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  BlockStorageExtraConfigPost:
-    depends_on:
-      - ControllerPostConfig
-      - ComputePostConfig
-      - BlockStoragePostConfig
-      - ObjectStoragePostConfig
-      - CephStoragePostConfig
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-        servers: {get_param: [servers, BlockStorage]}
-
-  # ObjectStorage Role steps
-  ObjectStorageArtifactsConfig:
-    type: deploy-artifacts.yaml
-
-  ObjectStorageArtifactsDeploy:
-    type: OS::Heat::StructuredDeployments
-    properties:
-      servers:  {get_param: [servers, ObjectStorage]}
-      config: {get_resource: ObjectStorageArtifactsConfig}
-
-  ObjectStoragePreConfig:
-    type: OS::TripleO::Tasks::ObjectStoragePreConfig
-    properties:
-      servers: {get_param: [servers, ObjectStorage]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  ObjectStorageConfig:
-    type: OS::TripleO::ObjectStorageConfig
-    properties:
-      StepConfig: {get_param: [role_data, ObjectStorage, step_config]}
-
-  # Step through a series of configuration steps
-  ObjectStorageDeployment_Step1:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on: [ObjectStoragePreConfig, ObjectStorageArtifactsDeploy]
-    properties:
-      name: ObjectStorageDeployment_Step1
-      servers: {get_param: [servers, ObjectStorage]}
-      config: {get_resource: ObjectStorageConfig}
-      input_values:
-        step: 1
-        update_identifier: {get_param: DeployIdentifier}
-
-  ObjectStorageDeployment_Step2:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step1
-      - ComputeDeployment_Step1
-      - BlockStorageDeployment_Step1
-      - ObjectStorageDeployment_Step1
-      - CephStorageDeployment_Step1
-    properties:
-      name: ObjectStorageDeployment_Step2
-      servers: {get_param: [servers, ObjectStorage]}
-      config: {get_resource: ObjectStorageConfig}
-      input_values:
-        step: 2
-        update_identifier: {get_param: DeployIdentifier}
-
-  ObjectStorageDeployment_Step3:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step2
-      - ComputeDeployment_Step2
-      - BlockStorageDeployment_Step2
-      - ObjectStorageDeployment_Step2
-      - CephStorageDeployment_Step2
-    properties:
-      name: ObjectStorageDeployment_Step3
-      servers: {get_param: [servers, ObjectStorage]}
-      config: {get_resource: ObjectStorageConfig}
-      input_values:
-        step: 3
-        update_identifier: {get_param: DeployIdentifier}
-
-  ObjectStorageDeployment_Step4:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step3
-      - ComputeDeployment_Step3
-      - BlockStorageDeployment_Step3
-      - ObjectStorageDeployment_Step3
-      - CephStorageDeployment_Step3
-    properties:
-      name: ObjectStorageDeployment_Step4
-      servers: {get_param: [servers, ObjectStorage]}
-      config: {get_resource: ObjectStorageConfig}
-      input_values:
-        step: 4
-        update_identifier: {get_param: DeployIdentifier}
-
-  ObjectStorageDeployment_Step5:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step4
-      - ComputeDeployment_Step4
-      - BlockStorageDeployment_Step4
-      - ObjectStorageDeployment_Step4
-      - CephStorageDeployment_Step4
-    properties:
-      name: ObjectStorageDeployment_Step5
-      servers: {get_param: [servers, ObjectStorage]}
-      config: {get_resource: ObjectStorageConfig}
-      input_values:
-        step: 5
-        update_identifier: {get_param: DeployIdentifier}
-
-  ObjectStoragePostConfig:
-    type: OS::TripleO::Tasks::ObjectStoragePostConfig
-    depends_on:
-      - ControllerDeployment_Step5
-      - ComputeDeployment_Step5
-      - BlockStorageDeployment_Step5
-      - ObjectStorageDeployment_Step5
-      - CephStorageDeployment_Step5
-    properties:
-      servers:  {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  ObjectStorageExtraConfigPost:
-    depends_on:
-      - ControllerPostConfig
-      - ComputePostConfig
-      - BlockStoragePostConfig
-      - ObjectStoragePostConfig
-      - CephStoragePostConfig
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-        servers: {get_param: [servers, ObjectStorage]}
-
-  # CephStorage Role steps
-  CephStorageArtifactsConfig:
-    type: deploy-artifacts.yaml
-
-  CephStorageArtifactsDeploy:
-    type: OS::Heat::StructuredDeployments
-    properties:
-      servers:  {get_param: [servers, CephStorage]}
-      config: {get_resource: CephStorageArtifactsConfig}
-
-  CephStoragePreConfig:
-    type: OS::TripleO::Tasks::CephStoragePreConfig
-    properties:
-      servers: {get_param: [servers, CephStorage]}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  CephStorageConfig:
-    type: OS::TripleO::CephStorageConfig
-    properties:
-      StepConfig: {get_param: [role_data, CephStorage, step_config]}
-
-  # Step through a series of configuration steps
-  CephStorageDeployment_Step1:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on: [CephStoragePreConfig, CephStorageArtifactsDeploy]
-    properties:
-      name: CephStorageDeployment_Step1
-      servers: {get_param: [servers, CephStorage]}
-      config: {get_resource: CephStorageConfig}
-      input_values:
-        step: 1
-        update_identifier: {get_param: DeployIdentifier}
-
-  CephStorageDeployment_Step2:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step1
-      - ComputeDeployment_Step1
-      - BlockStorageDeployment_Step1
-      - ObjectStorageDeployment_Step1
-      - CephStorageDeployment_Step1
-    properties:
-      name: CephStorageDeployment_Step2
-      servers: {get_param: [servers, CephStorage]}
-      config: {get_resource: CephStorageConfig}
-      input_values:
-        step: 2
-        update_identifier: {get_param: DeployIdentifier}
-
-  CephStorageDeployment_Step3:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step2
-      - ComputeDeployment_Step2
-      - BlockStorageDeployment_Step2
-      - ObjectStorageDeployment_Step2
-      - CephStorageDeployment_Step2
-    properties:
-      name: CephStorageDeployment_Step3
-      servers: {get_param: [servers, CephStorage]}
-      config: {get_resource: CephStorageConfig}
-      input_values:
-        step: 3
-        update_identifier: {get_param: DeployIdentifier}
-
-  CephStorageDeployment_Step4:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step3
-      - ComputeDeployment_Step3
-      - BlockStorageDeployment_Step3
-      - ObjectStorageDeployment_Step3
-      - CephStorageDeployment_Step3
-    properties:
-      name: CephStorageDeployment_Step4
-      servers: {get_param: [servers, CephStorage]}
-      config: {get_resource: CephStorageConfig}
-      input_values:
-        step: 4
-        update_identifier: {get_param: DeployIdentifier}
-
-  CephStorageDeployment_Step5:
-    type: OS::Heat::StructuredDeploymentGroup
-    depends_on:
-      - ControllerDeployment_Step4
-      - ComputeDeployment_Step4
-      - BlockStorageDeployment_Step4
-      - ObjectStorageDeployment_Step4
-      - CephStorageDeployment_Step4
-    properties:
-      name: CephStorageDeployment_Step5
-      servers: {get_param: [servers, CephStorage]}
-      config: {get_resource: CephStorageConfig}
-      input_values:
-        step: 5
-        update_identifier: {get_param: DeployIdentifier}
-
-  CephStoragePostConfig:
-    type: OS::TripleO::Tasks::CephStoragePostConfig
-    depends_on:
-      - ControllerDeployment_Step5
-      - ComputeDeployment_Step5
-      - BlockStorageDeployment_Step5
-      - ObjectStorageDeployment_Step5
-      - CephStorageDeployment_Step5
-    properties:
-      servers:  {get_param: servers}
-      input_values:
-        update_identifier: {get_param: DeployIdentifier}
-
-  # Note, this should come last, so use depends_on to ensure
-  # this is created after any other resources.
-  CephStorageExtraConfigPost:
-    depends_on:
-      - ControllerPostConfig
-      - ComputePostConfig
-      - BlockStoragePostConfig
-      - ObjectStoragePostConfig
-      - CephStoragePostConfig
-    type: OS::TripleO::NodeExtraConfigPost
-    properties:
-        servers: {get_param: [servers, CephStorage]}
diff --git a/puppet/role.role.j2.yaml b/puppet/role.role.j2.yaml
new file mode 100644 (file)
index 0000000..e430700
--- /dev/null
+++ b/puppet/role.role.j2.yaml
@@ -0,0 +1,452 @@
+heat_template_version: 2016-10-14
+description: 'OpenStack {{role}} node configured by Puppet'
+parameters:
+  Overcloud{{role}}Flavor:
+    description: Flavor for the {{role}} node.
+    default: baremetal
+    type: string
+    constraints:
+      - custom_constraint: nova.flavor
+  {{role}}Image:
+    type: string
+    default: overcloud-full
+    constraints:
+      - custom_constraint: glance.image
+  ImageUpdatePolicy:
+    default: 'REBUILD_PRESERVE_EPHEMERAL'
+    description: What policy to use when reconstructing instances. REBUILD for rebuilds, REBUILD_PRESERVE_EPHEMERAL to preserve /mnt.
+    type: string
+  KeyName:
+    description: Name of an existing Nova key pair to enable SSH access to the instances
+    type: string
+    default: default
+    constraints:
+      - custom_constraint: nova.keypair
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  UpdateIdentifier:
+    default: ''
+    type: string
+    description: >
+      Setting to a previously unused value during stack-update will trigger
+      package update on all nodes
+  Hostname:
+    type: string
+    default: '' # Defaults to Heat created hostname
+  HostnameMap:
+    type: json
+    default: {}
+    description: Optional mapping to override hostnames
+  ExtraConfig:
+    default: {}
+    description: |
+      Additional hiera configuration to inject into the cluster. Note
+      that {{role}}ExtraConfig takes precedence over ExtraConfig.
+    type: json
+  {{role}}ExtraConfig:
+    default: {}
+    description: |
+      Role specific additional hiera configuration to inject into the cluster.
+    type: json
+  {{role}}IPs:
+    default: {}
+    type: json
+  NetworkDeploymentActions:
+    type: comma_delimited_list
+    description: >
+      Heat actions on which to apply network configuration changes
+    default: ['CREATE']
+  SoftwareConfigTransport:
+    default: POLL_SERVER_CFN
+    description: |
+      How the server should receive the metadata required for software configuration.
+    type: string
+    constraints:
+    - allowed_values: [POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE]
+  CloudDomain:
+    type: string
+    description: >
+      The DNS domain used for the hosts. This should match the dhcp_domain
+      configured in the Undercloud neutron. Defaults to localdomain.
+  ServerMetadata:
+    default: {}
+    description: >
+      Extra properties or metadata passed to Nova for the created nodes in
+      the overcloud. It's accessible via the Nova metadata API.
+    type: json
+  {{role}}SchedulerHints:
+    type: json
+    description: Optional scheduler hints to pass to nova
+    default: {}
+  NodeIndex:
+    type: number
+    default: 0
+  ServiceConfigSettings:
+    type: json
+    default: {}
+  ServiceNames:
+    type: comma_delimited_list
+    default: []
+  MonitoringSubscriptions:
+    type: comma_delimited_list
+    default: []
+  ConfigCommand:
+    type: string
+    description: Command which will be run whenever configuration data changes
+    default: os-refresh-config --timeout 14400
+  LoggingSources:
+    type: json
+    default: []
+  LoggingGroups:
+    type: comma_delimited_list
+    default: []
+
+resources:
+  {{role}}:
+    type: OS::TripleO::Server
+    metadata:
+      os-collect-config:
+        command: {get_param: ConfigCommand}
+    properties:
+      image: {get_param: {{role}}Image}
+      image_update_policy: {get_param: ImageUpdatePolicy}
+      flavor: {get_param: Overcloud{{role}}Flavor}
+      key_name: {get_param: KeyName}
+      networks:
+        - network: ctlplane
+      user_data_format: SOFTWARE_CONFIG
+      user_data: {get_resource: UserData}
+      name:
+        str_replace:
+            template: {get_param: Hostname}
+            params: {get_param: HostnameMap}
+      software_config_transport: {get_param: SoftwareConfigTransport}
+      metadata: {get_param: ServerMetadata}
+      scheduler_hints: {get_param: {{role}}SchedulerHints}
+
+  # Combine the NodeAdminUserData and NodeUserData mime archives
+  UserData:
+    type: OS::Heat::MultipartMime
+    properties:
+      parts:
+      - config: {get_resource: NodeAdminUserData}
+        type: multipart
+      - config: {get_resource: NodeUserData}
+        type: multipart
+
+  # Creates the "heat-admin" user if configured via the environment
+  # Should return an OS::Heat::MultipartMime reference via OS::stack_id
+  NodeAdminUserData:
+    type: OS::TripleO::NodeAdminUserData
+
+  # For optional additional userdata supplied by the operator
+  # Should return an OS::Heat::MultipartMime reference via OS::stack_id
+  NodeUserData:
+    type: OS::TripleO::NodeUserData
+
+  ExternalPort:
+    type: OS::TripleO::{{role}}::Ports::ExternalPort
+    properties:
+      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role}}IPs}
+      NodeIndex: {get_param: NodeIndex}
+
+  InternalApiPort:
+    type: OS::TripleO::{{role}}::Ports::InternalApiPort
+    properties:
+      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role}}IPs}
+      NodeIndex: {get_param: NodeIndex}
+
+  StoragePort:
+    type: OS::TripleO::{{role}}::Ports::StoragePort
+    properties:
+      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role}}IPs}
+      NodeIndex: {get_param: NodeIndex}
+
+  StorageMgmtPort:
+    type: OS::TripleO::{{role}}::Ports::StorageMgmtPort
+    properties:
+      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role}}IPs}
+      NodeIndex: {get_param: NodeIndex}
+
+  TenantPort:
+    type: OS::TripleO::{{role}}::Ports::TenantPort
+    properties:
+      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role}}IPs}
+      NodeIndex: {get_param: NodeIndex}
+
+  ManagementPort:
+    type: OS::TripleO::{{role}}::Ports::ManagementPort
+    properties:
+      ControlPlaneIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      IPPool: {get_param: {{role}}IPs}
+      NodeIndex: {get_param: NodeIndex}
+
+  NetworkConfig:
+    type: OS::TripleO::{{role}}::Net::SoftwareConfig
+    properties:
+      ControlPlaneIp: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
+      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
+      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
+
+  NetIpMap:
+    type: OS::TripleO::Network::Ports::NetIpMap
+    properties:
+      ControlPlaneIp: {get_attr: [{{role}}, networks, ctlplane, 0]}
+      ExternalIp: {get_attr: [ExternalPort, ip_address]}
+      ExternalIpSubnet: {get_attr: [ExternalPort, ip_subnet]}
+      ExternalIpUri: {get_attr: [ExternalPort, ip_address_uri]}
+      InternalApiIp: {get_attr: [InternalApiPort, ip_address]}
+      InternalApiIpSubnet: {get_attr: [InternalApiPort, ip_subnet]}
+      InternalApiIpUri: {get_attr: [InternalApiPort, ip_address_uri]}
+      StorageIp: {get_attr: [StoragePort, ip_address]}
+      StorageIpSubnet: {get_attr: [StoragePort, ip_subnet]}
+      StorageIpUri: {get_attr: [StoragePort, ip_address_uri]}
+      StorageMgmtIp: {get_attr: [StorageMgmtPort, ip_address]}
+      StorageMgmtIpSubnet: {get_attr: [StorageMgmtPort, ip_subnet]}
+      StorageMgmtIpUri: {get_attr: [StorageMgmtPort, ip_address_uri]}
+      TenantIp: {get_attr: [TenantPort, ip_address]}
+      TenantIpSubnet: {get_attr: [TenantPort, ip_subnet]}
+      TenantIpUri: {get_attr: [TenantPort, ip_address_uri]}
+      ManagementIp: {get_attr: [ManagementPort, ip_address]}
+      ManagementIpSubnet: {get_attr: [ManagementPort, ip_subnet]}
+      ManagementIpUri: {get_attr: [ManagementPort, ip_address_uri]}
+
+  NetworkDeployment:
+    type: OS::TripleO::SoftwareDeployment
+    properties:
+      name: NetworkDeployment
+      config: {get_resource: NetworkConfig}
+      server: {get_resource: {{role}}}
+      actions: {get_param: NetworkDeploymentActions}
+
+  {{role}}Deployment:
+    type: OS::Heat::StructuredDeployment
+    depends_on: NetworkDeployment
+    properties:
+      name: {{role}}Deployment
+      config: {get_resource: {{role}}Config}
+      server: {get_resource: {{role}}}
+      input_values:
+        enable_package_upgrade: {get_attr: [UpdateDeployment, update_managed_packages]}
+
+  {{role}}Config:
+    type: OS::Heat::StructuredConfig
+    properties:
+      group: os-apply-config
+      config:
+        hiera:
+          hierarchy:
+            - '"%{::uuid}"'
+            - heat_config_%{::deploy_config_name}
+            - {{role.lower()}}_extraconfig
+            - extraconfig
+            - service_names
+            - service_configs
+            - bootstrap_node # provided by allNodesConfig
+            - all_nodes # provided by allNodesConfig
+            - vip_data # provided by allNodesConfig
+            - '"%{::osfamily}"'
+          merge_behavior: deeper
+          datafiles:
+            service_names:
+              mapped_data:
+                service_names: {get_param: ServiceNames}
+                sensu::subscriptions: {get_param: MonitoringSubscriptions}
+            service_configs:
+              mapped_data:
+                map_replace:
+                  - {get_param: ServiceConfigSettings}
+                  - values: {get_attr: [NetIpMap, net_ip_map]}
+            {{role.lower()}}_extraconfig:
+              mapped_data: {get_param: {{role}}ExtraConfig}
+            extraconfig:
+              mapped_data: {get_param: ExtraConfig}
+            {{role.lower()}}:
+              mapped_data:
+                tripleo::packages::enable_upgrade: {get_input: enable_package_upgrade}
+                tripleo::profile::base::logging::fluentd::fluentd_sources: {get_param: LoggingSources}
+                tripleo::profile::base::logging::fluentd::fluentd_groups: {get_param: LoggingGroups}
+
+  # Resource for site-specific injection of root certificate
+  NodeTLSCAData:
+    depends_on: {{role}}Deployment
+    type: OS::TripleO::NodeTLSCAData
+    properties:
+      server: {get_resource: {{role}}}
+
+  # Hook for site-specific additional pre-deployment config, e.g. extra hieradata
+  {{role}}ExtraConfigPre:
+    depends_on: {{role}}Deployment
+    type: OS::TripleO::{{role}}ExtraConfigPre
+    properties:
+        server: {get_resource: {{role}}}
+
+  # Hook for site-specific additional pre-deployment config,
+  # applying to all nodes, e.g. node registration/unregistration
+  NodeExtraConfig:
+    depends_on: [{{role}}ExtraConfigPre, NodeTLSCAData]
+    type: OS::TripleO::NodeExtraConfig
+    properties:
+        server: {get_resource: {{role}}}
+
+  UpdateConfig:
+    type: OS::TripleO::Tasks::PackageUpdate
+
+  UpdateDeployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      config: {get_resource: UpdateConfig}
+      server: {get_resource: {{role}}}
+      input_values:
+        update_identifier:
+          get_param: UpdateIdentifier
+
+outputs:
+  ip_address:
+    description: IP address of the server in the ctlplane network
+    value: {get_attr: [{{role}}, networks, ctlplane, 0]}
+  hostname:
+    description: Hostname of the server
+    value: {get_attr: [{{role}}, name]}
+  hostname_map:
+    description: Mapping of network names to hostnames
+    value:
+      external:
+        list_join:
+        - '.'
+        - - {get_attr: [{{role}}, name]}
+          - external
+          - {get_param: CloudDomain}
+      internal_api:
+        list_join:
+        - '.'
+        - - {get_attr: [{{role}}, name]}
+          - internalapi
+          - {get_param: CloudDomain}
+      storage:
+        list_join:
+        - '.'
+        - - {get_attr: [{{role}}, name]}
+          - storage
+          - {get_param: CloudDomain}
+      storage_mgmt:
+        list_join:
+        - '.'
+        - - {get_attr: [{{role}}, name]}
+          - storagemgmt
+          - {get_param: CloudDomain}
+      tenant:
+        list_join:
+        - '.'
+        - - {get_attr: [{{role}}, name]}
+          - tenant
+          - {get_param: CloudDomain}
+      management:
+        list_join:
+        - '.'
+        - - {get_attr: [{{role}}, name]}
+          - management
+          - {get_param: CloudDomain}
+      ctlplane:
+        list_join:
+        - '.'
+        - - {get_attr: [{{role}}, name]}
+          - ctlplane
+          - {get_param: CloudDomain}
+  hosts_entry:
+    value:
+      str_replace:
+        template: |
+          PRIMARYIP PRIMARYHOST.DOMAIN PRIMARYHOST
+          EXTERNALIP EXTERNALHOST.DOMAIN EXTERNALHOST
+          INTERNAL_APIIP INTERNAL_APIHOST.DOMAIN INTERNAL_APIHOST
+          STORAGEIP STORAGEHOST.DOMAIN STORAGEHOST
+          STORAGE_MGMTIP STORAGE_MGMTHOST.DOMAIN STORAGE_MGMTHOST
+          TENANTIP TENANTHOST.DOMAIN TENANTHOST
+          MANAGEMENTIP MANAGEMENTHOST.DOMAIN MANAGEMENTHOST
+          CTLPLANEIP CTLPLANEHOST.DOMAIN CTLPLANEHOST
+        params:
+          PRIMARYIP: {get_attr: [NetIpMap, net_ip_map, {get_param: [ServiceNetMap, {{role}}HostnameResolveNetwork]}]}
+          DOMAIN: {get_param: CloudDomain}
+          PRIMARYHOST: {get_attr: [{{role}}, name]}
+          EXTERNALIP: {get_attr: [ExternalPort, ip_address]}
+          EXTERNALHOST:
+            list_join:
+            - '.'
+            - - {get_attr: [{{role}}, name]}
+              - external
+          INTERNAL_APIIP: {get_attr: [InternalApiPort, ip_address]}
+          INTERNAL_APIHOST:
+            list_join:
+            - '.'
+            - - {get_attr: [{{role}}, name]}
+              - internalapi
+          STORAGEIP: {get_attr: [StoragePort, ip_address]}
+          STORAGEHOST:
+            list_join:
+            - '.'
+            - - {get_attr: [{{role}}, name]}
+              - storage
+          STORAGE_MGMTIP: {get_attr: [StorageMgmtPort, ip_address]}
+          STORAGE_MGMTHOST:
+            list_join:
+            - '.'
+            - - {get_attr: [{{role}}, name]}
+              - storagemgmt
+          TENANTIP: {get_attr: [TenantPort, ip_address]}
+          TENANTHOST:
+            list_join:
+            - '.'
+            - - {get_attr: [{{role}}, name]}
+              - tenant
+          MANAGEMENTIP: {get_attr: [ManagementPort, ip_address]}
+          MANAGEMENTHOST:
+            list_join:
+            - '.'
+            - - {get_attr: [{{role}}, name]}
+              - management
+          CTLPLANEIP: {get_attr: [{{role}}, networks, ctlplane, 0]}
+          CTLPLANEHOST:
+            list_join:
+            - '.'
+            - - {get_attr: [{{role}}, name]}
+              - ctlplane
+  nova_server_resource:
+    description: Heat resource handle for {{role}} server
+    value:
+      {get_resource: {{role}}}
+  external_ip_address:
+    description: IP address of the server in the external network
+    value: {get_attr: [ExternalPort, ip_address]}
+  internal_api_ip_address:
+    description: IP address of the server in the internal_api network
+    value: {get_attr: [InternalApiPort, ip_address]}
+  storage_ip_address:
+    description: IP address of the server in the storage network
+    value: {get_attr: [StoragePort, ip_address]}
+  storage_mgmt_ip_address:
+    description: IP address of the server in the storage_mgmt network
+    value: {get_attr: [StorageMgmtPort, ip_address]}
+  tenant_ip_address:
+    description: IP address of the server in the tenant network
+    value: {get_attr: [TenantPort, ip_address]}
+  management_ip_address:
+    description: IP address of the server in the management network
+    value: {get_attr: [ManagementPort, ip_address]}
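
The generic role.role.j2.yaml template above is intended to be rendered once per role, with the {{role}} placeholders substituted from the deployment's roles data. As a minimal sketch only (the role name and service list below are assumptions chosen for illustration, not taken from this change), a corresponding roles data entry might look like:

    - name: BlockStorage
      CountDefault: 1
      ServicesDefault:
        - OS::TripleO::Services::CinderVolume
        - OS::TripleO::Services::Kernel
        - OS::TripleO::Services::Ntp

Each such entry would yield a concrete per-role template with {{role}} and {{role.lower()}} expanded, which is why the hand-written per-role files can be dropped from the post-deployment steps above.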
index 65afffa..f4f5bad 100644 (file)
@@ -21,6 +21,11 @@ parameters:
   MonitoringSubscriptionAodhApi:
     default: 'overcloud-ceilometer-aodh-api'
     type: string
+  EnableCombinationAlarms:
+    default: false
+    description: Combination alarms are deprecated in Newton, hence disabled
+                 by default. To enable, set this parameter to true.
+    type: boolean
 
 resources:
   AodhBase:
@@ -48,6 +53,12 @@ outputs:
           - get_attr: [AodhBase, role_data, config_settings]
           - get_attr: [ApacheServiceBase, role_data, config_settings]
           - aodh::wsgi::apache::ssl: false
+            aodh::wsgi::apache::servername:
+              str_replace:
+                template:
+                  '"%{::fqdn_$NETWORK}"'
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, AodhApiNetwork]}
             aodh::api::service_name: 'httpd'
             tripleo.aodh_api.firewall_rules:
               '128 aodh-api':
@@ -62,5 +73,8 @@ outputs:
             # internal_api_subnet - > IP/CIDR
             aodh::api::host: {get_param: [ServiceNetMap, AodhApiNetwork]}
             aodh::wsgi::apache::bind_host: {get_param: [ServiceNetMap, AodhApiNetwork]}
+            tripleo::profile::base::aodh::api::enable_combination_alarms: {get_param: EnableCombinationAlarms}
+      service_config_settings:
+        get_attr: [AodhBase, role_data, service_config_settings]
       step_config: |
         include tripleo::profile::base::aodh::api
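
As a usage sketch for the new EnableCombinationAlarms parameter (the environment snippet below is illustrative and not part of this change), the deprecated combination alarms could be re-enabled with:

    parameter_defaults:
      EnableCombinationAlarms: true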
index 187345a..0e2410f 100644 (file)
@@ -59,14 +59,7 @@ outputs:
     value:
       service_name: aodh_base
       config_settings:
-        aodh::evaluator::coordination_url:
-          list_join:
-            - ''
-            - - 'redis://:'
-              - {get_param: RedisPassword}
-              - '@'
-              - "%{hiera('redis_vip')}"
-              - ':6379/'
+        aodh_redis_password: {get_param: RedisPassword}
         aodh::db::database_connection:
           list_join:
             - ''
@@ -87,18 +80,21 @@ outputs:
         aodh::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
         aodh::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
         aodh::auth::auth_password: {get_param: AodhPassword}
-        aodh::keystone::auth::public_url: {get_param: [EndpointMap, AodhPublic, uri]}
-        aodh::keystone::auth::internal_url: {get_param: [EndpointMap, AodhInternal, uri]}
-        aodh::keystone::auth::admin_url: {get_param: [EndpointMap, AodhAdmin, uri]}
-        aodh::keystone::auth::password: {get_param: AodhPassword}
-        aodh::keystone::auth::region: {get_param: KeystoneRegion}
-        aodh::keystone::auth::tenant: 'service'
-        aodh::db::mysql::user: aodh
-        aodh::db::mysql::password: {get_param: AodhPassword}
-        aodh::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        aodh::db::mysql::dbname: aodh
-        aodh::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
         aodh::auth::auth_region: 'regionOne'
         aodh::auth::auth_tenant_name: 'service'
+      service_config_settings:
+        keystone:
+          aodh::keystone::auth::public_url: {get_param: [EndpointMap, AodhPublic, uri]}
+          aodh::keystone::auth::internal_url: {get_param: [EndpointMap, AodhInternal, uri]}
+          aodh::keystone::auth::admin_url: {get_param: [EndpointMap, AodhAdmin, uri]}
+          aodh::keystone::auth::password: {get_param: AodhPassword}
+          aodh::keystone::auth::region: {get_param: KeystoneRegion}
+          aodh::keystone::auth::tenant: 'service'
+        mysql:
+          aodh::db::mysql::user: aodh
+          aodh::db::mysql::password: {get_param: AodhPassword}
+          aodh::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          aodh::db::mysql::dbname: aodh
+          aodh::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
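
The settings moved under service_config_settings are keyed by the service expected to consume them; the intent is that, for example, the keystone block lands in the hieradata of whichever role runs the keystone service rather than on the aodh nodes. A purely hypothetical rendering of that hieradata (endpoint values invented for illustration):

    aodh::keystone::auth::public_url: http://203.0.113.10:8042
    aodh::keystone::auth::region: regionOne
    aodh::keystone::auth::tenant: service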
index 7595e4c..c979201 100644 (file)
@@ -5,6 +5,14 @@ description: >
   automatically via other services which run via Apache.
 
 parameters:
+  ApacheMaxRequestWorkers:
+    default: 256
+    description: Maximum number of simultaneously processed requests.
+    type: number
+  ApacheServerLimit:
+    default: 256
+    description: Maximum number of Apache processes.
+    type: number
   ServiceNetMap:
     default: {}
     description: Mapping of service_name -> network name. Typically set
@@ -38,5 +46,7 @@ outputs:
             template: "NETWORK_subnet"
             params:
               NETWORK: {get_param: [ServiceNetMap, ApacheNetwork]}
+        apache::mod::prefork::maxclients: { get_param: ApacheMaxRequestWorkers }
+        apache::mod::prefork::serverlimit: { get_param: ApacheServerLimit }
         apache::mod::remoteip::proxy_ips:
           - "%{hiera('apache_remote_proxy_ips_network')}"
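
A hedged usage sketch for the two new Apache knobs: they could be raised on busier controllers through an environment file (the values are arbitrary examples, not recommendations):

    parameter_defaults:
      ApacheMaxRequestWorkers: 512
      ApacheServerLimit: 512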
index 5d980d7..c4abc30 100644 (file)
@@ -25,6 +25,11 @@ parameters:
   MonitoringSubscriptionCeilometerCentral:
     default: 'overcloud-ceilometer-agent-central'
     type: string
+  CeilometerAgentCentralLoggingSource:
+    type: json
+    default:
+      tag: openstack.ceilometer.agent.central
+      path: /var/log/ceilometer/central.log
 
 resources:
   CeilometerServiceBase:
@@ -40,16 +45,12 @@ outputs:
     value:
       service_name: ceilometer_agent_central
       monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCentral}
+      logging_source: {get_param: CeilometerAgentCentralLoggingSource}
+      logging_groups:
+        - ceilometer
       config_settings:
         map_merge:
           - get_attr: [CeilometerServiceBase, role_data, config_settings]
-          - ceilometer::agent::central::coordination_url:
-              list_join:
-                - ''
-                - - 'redis://:'
-                  - {get_param: RedisPassword}
-                  - '@'
-                  - "%{hiera('redis_vip')}"
-                  - ':6379/'
+          - ceilometer_redis_password: {get_param: RedisPassword}
       step_config: |
         include ::tripleo::profile::base::ceilometer::agent::central
index bedb8b0..ea403aa 100644 (file)
@@ -21,6 +21,11 @@ parameters:
   MonitoringSubscriptionCeilometerNotification:
     default: 'overcloud-ceilometer-agent-notification'
     type: string
+  CeilometerAgentNotificationLoggingSource:
+    type: json
+    default:
+      tag: openstack.ceilometer.agent.notification
+      path: /var/log/ceilometer/agent-notification.log
 
 
 resources:
@@ -37,6 +42,9 @@ outputs:
     value:
       service_name: ceilometer_agent_notification
       monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerNotification}
+      logging_source: {get_param: CeilometerAgentNotificationLoggingSource}
+      logging_groups:
+        - ceilometer
       config_settings:
         get_attr: [CeilometerServiceBase, role_data, config_settings]
       step_config: |
index 5df9f2b..ecea38b 100644 (file)
@@ -21,6 +21,11 @@ parameters:
   MonitoringSubscriptionCeilometerApi:
     default: 'overcloud-ceilometer-api'
     type: string
+  CeilometerApiLoggingSource:
+    type: json
+    default:
+      tag: openstack.ceilometer.api
+      path: /var/log/ceilometer/api.log
 
 
 resources:
@@ -44,6 +49,9 @@ outputs:
     value:
       service_name: ceilometer_api
       monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerApi}
+      logging_source: {get_param: CeilometerApiLoggingSource}
+      logging_groups:
+        - ceilometer
       config_settings:
         map_merge:
           - get_attr: [ApacheServiceBase, role_data, config_settings]
@@ -63,5 +71,13 @@ outputs:
             ceilometer::api::host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
             ceilometer::wsgi::apache::bind_host: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
             ceilometer::wsgi::apache::ssl: false
+            ceilometer::wsgi::apache::servername:
+              str_replace:
+                template:
+                  '"%{::fqdn_$NETWORK}"'
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, CeilometerApiNetwork]}
+      service_config_settings:
+        get_attr: [CeilometerServiceBase, role_data, service_config_settings]
       step_config: |
         include ::tripleo::profile::base::ceilometer::api
index 62fdd5c..4ace752 100644 (file)
@@ -101,29 +101,32 @@ outputs:
         ceilometer::agent::auth::auth_region: {get_param: KeystoneRegion}
         ceilometer::agent::auth::auth_tenant_name: 'service'
         ceilometer::agent::auth::auth_endpoint_type: 'internalURL'
-        ceilometer::db::mysql::password: {get_param: CeilometerPassword}
         ceilometer::collector::meter_dispatcher: {get_param: CeilometerMeterDispatcher}
         ceilometer::dispatcher::gnocchi::url: {get_param: [EndpointMap, GnocchiInternal, uri]}
         ceilometer::dispatcher::gnocchi::filter_project: 'service'
         ceilometer::dispatcher::gnocchi::archive_policy: 'low'
         ceilometer::dispatcher::gnocchi::resources_definition_file: 'gnocchi_resources.yaml'
-        ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
-        ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
-        ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
-        ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
-        ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
-        ceilometer::keystone::auth::tenant: 'service'
         ceilometer::rabbit_userid: {get_param: RabbitUserName}
         ceilometer::rabbit_password: {get_param: RabbitPassword}
         ceilometer::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         ceilometer::rabbit_port: {get_param: RabbitClientPort}
-        ceilometer::db::mysql::user: ceilometer
-        ceilometer::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        ceilometer::db::mysql::dbname: ceilometer
-        ceilometer::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
         ceilometer::rabbit_heartbeat_timeout_threshold: 60
         ceilometer::db::database_db_max_retries: -1
         ceilometer::db::database_max_retries: -1
         ceilometer::telemetry_secret: {get_param: CeilometerMeteringSecret}
+      service_config_settings:
+        keystone:
+          ceilometer::keystone::auth::public_url: {get_param: [EndpointMap, CeilometerPublic, uri]}
+          ceilometer::keystone::auth::internal_url: {get_param: [EndpointMap, CeilometerInternal, uri]}
+          ceilometer::keystone::auth::admin_url: {get_param: [EndpointMap, CeilometerAdmin, uri]}
+          ceilometer::keystone::auth::password: {get_param: CeilometerPassword}
+          ceilometer::keystone::auth::region: {get_param: KeystoneRegion}
+          ceilometer::keystone::auth::tenant: 'service'
+        mysql:
+          ceilometer::db::mysql::password: {get_param: CeilometerPassword}
+          ceilometer::db::mysql::user: ceilometer
+          ceilometer::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          ceilometer::db::mysql::dbname: ceilometer
+          ceilometer::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index 9dbb275..e3f1ef4 100644 (file)
@@ -21,6 +21,11 @@ parameters:
   MonitoringSubscriptionCeilometerCollector:
     default: 'overcloud-ceilometer-collector'
     type: string
+  CeilometerCollectorLoggingSource:
+    type: json
+    default:
+      tag: openstack.ceilometer.collector
+      path: /var/log/ceilometer/collector.log
 
 resources:
   CeilometerServiceBase:
@@ -30,13 +35,27 @@ resources:
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
 
+  MongoDbBase:
+    type: ./database/mongodb-base.yaml
+    properties:
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+      EndpointMap: {get_param: EndpointMap}
+
 outputs:
   role_data:
     description: Role data for the Ceilometer Collector role.
     value:
       service_name: ceilometer_collector
       monitoring_subscription: {get_param: MonitoringSubscriptionCeilometerCollector}
+      logging_source: {get_param: CeilometerCollectorLoggingSource}
+      logging_groups:
+        - ceilometer
       config_settings:
-        get_attr: [CeilometerServiceBase, role_data, config_settings]
+        map_merge:
+          - get_attr: [MongoDbBase, role_data, config_settings]
+          - get_attr: [CeilometerServiceBase, role_data, config_settings]
+      service_config_settings:
+        get_attr: [CeilometerServiceBase, role_data, service_config_settings]
       step_config: |
         include ::tripleo::profile::base::ceilometer::collector
index a2b3f13..552086a 100644 (file)
@@ -76,6 +76,9 @@ outputs:
           - get_attr: [CephBase, role_data, config_settings]
           - ceph::profile::params::ms_bind_ipv6: {get_param: CephIPv6}
             ceph::profile::params::mon_key: {get_param: CephMonKey}
+            ceph::profile::params::osd_pool_default_pg_num: 32
+            ceph::profile::params::osd_pool_default_pgp_num: 32
+            ceph::profile::params::osd_pool_default_size: 3
             # repeat returns items in a list, so we need to map_merge twice
             tripleo::profile::base::ceph::mon::ceph_pools:
               map_merge:
@@ -90,9 +93,9 @@ outputs:
                           - {get_param: GnocchiRbdPoolName}
                       template:
                         <%pool%>:
-                          pg_num: 32
-                          pgp_num: 32
-                          size: 3
+                          pg_num: "%{hiera('ceph::profile::params::osd_pool_default_pg_num')}"
+                          pgp_num: "%{hiera('ceph::profile::params::osd_pool_default_pgp_num')}"
+                          size: "%{hiera('ceph::profile::params::osd_pool_default_size')}"
                 - {get_param: CephPools}
             tripleo.ceph_mon.firewall_rules:
               '110 ceph_mon':
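
Since the pool defaults now come from hiera keys rather than literal values, they can be overridden without editing the template, for example via ExtraConfig (the pg counts below are illustrative only, not sizing advice):

    parameter_defaults:
      ExtraConfig:
        ceph::profile::params::osd_pool_default_pg_num: 64
        ceph::profile::params::osd_pool_default_pgp_num: 64
        ceph::profile::params::osd_pool_default_size: 3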
index 6bb4f6d..18a4b78 100644 (file)
@@ -67,11 +67,13 @@ outputs:
             tripleo.ceph_rgw.firewall_rules:
               '122 ceph rgw':
                 dport: {get_param: [EndpointMap, CephRgwInternal, port]}
-            ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]}
-            ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
-            ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
-            ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
-            ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
-            ceph::rgw::keystone::auth::tenant: 'service'
       step_config: |
         include ::tripleo::profile::base::ceph::rgw
+      service_config_settings:
+        keystone:
+          ceph::rgw::keystone::auth::public_url: {get_param: [EndpointMap, CephRgwPublic, uri]}
+          ceph::rgw::keystone::auth::internal_url: {get_param: [EndpointMap, CephRgwInternal, uri]}
+          ceph::rgw::keystone::auth::admin_url: {get_param: [EndpointMap, CephRgwAdmin, uri]}
+          ceph::rgw::keystone::auth::password: {get_param: SwiftPassword}
+          ceph::rgw::keystone::auth::region: {get_param: KeystoneRegion}
+          ceph::rgw::keystone::auth::tenant: 'service'
index 94c94a6..9c96acc 100644 (file)
@@ -34,6 +34,11 @@ parameters:
   MonitoringSubscriptionCinderApi:
     default: 'overcloud-cinder-api'
     type: string
+  CinderApiLoggingSource:
+    type: json
+    default:
+      tag: openstack.cinder.api
+      path: /var/log/cinder/cinder-api.log
 
 resources:
 
@@ -50,6 +55,9 @@ outputs:
     value:
       service_name: cinder_api
       monitoring_subscription: {get_param: MonitoringSubscriptionCinderApi}
+      logging_source: {get_param: CinderApiLoggingSource}
+      logging_groups:
+        - cinder
       config_settings:
         map_merge:
           - get_attr: [CinderBase, role_data, config_settings]
@@ -57,19 +65,8 @@ outputs:
             cinder::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
             cinder::keystone::authtoken::password: {get_param: CinderPassword}
             cinder::keystone::authtoken::project_name: 'service'
-            cinder::keystone::auth::tenant: 'service'
-            cinder::keystone::auth::public_url: {get_param: [EndpointMap, CinderPublic, uri]}
-            cinder::keystone::auth::internal_url: {get_param: [EndpointMap, CinderInternal, uri]}
-            cinder::keystone::auth::admin_url: {get_param: [EndpointMap, CinderAdmin, uri]}
-            cinder::keystone::auth::public_url_v2: {get_param: [EndpointMap, CinderV2Public, uri]}
-            cinder::keystone::auth::internal_url_v2: {get_param: [EndpointMap, CinderV2Internal, uri]}
-            cinder::keystone::auth::admin_url_v2: {get_param: [EndpointMap, CinderV2Admin, uri]}
-            cinder::keystone::auth::public_url_v3: {get_param: [EndpointMap, CinderV3Public, uri]}
-            cinder::keystone::auth::internal_url_v3: {get_param: [EndpointMap, CinderV3Internal, uri]}
-            cinder::keystone::auth::admin_url_v3: {get_param: [EndpointMap, CinderV3Admin, uri]}
-            cinder::keystone::auth::password: {get_param: CinderPassword}
-            cinder::keystone::auth::region: {get_param: KeystoneRegion}
             cinder::api::enable_proxy_headers_parsing: true
+
             cinder::api::nova_catalog_info: 'compute:Compute Service:internalURL'
             # TODO(emilien) move it to puppet-cinder
             cinder::config:
@@ -90,3 +87,25 @@ outputs:
             cinder::api::bind_host: {get_param: [ServiceNetMap, CinderApiNetwork]}
       step_config: |
         include ::tripleo::profile::base::cinder::api
+      service_config_settings:
+        keystone:
+          cinder::keystone::auth::tenant: 'service'
+          cinder::keystone::auth::public_url: {get_param: [EndpointMap, CinderPublic, uri]}
+          cinder::keystone::auth::internal_url: {get_param: [EndpointMap, CinderInternal, uri]}
+          cinder::keystone::auth::admin_url: {get_param: [EndpointMap, CinderAdmin, uri]}
+          cinder::keystone::auth::public_url_v2: {get_param: [EndpointMap, CinderV2Public, uri]}
+          cinder::keystone::auth::internal_url_v2: {get_param: [EndpointMap, CinderV2Internal, uri]}
+          cinder::keystone::auth::admin_url_v2: {get_param: [EndpointMap, CinderV2Admin, uri]}
+          cinder::keystone::auth::public_url_v3: {get_param: [EndpointMap, CinderV3Public, uri]}
+          cinder::keystone::auth::internal_url_v3: {get_param: [EndpointMap, CinderV3Internal, uri]}
+          cinder::keystone::auth::admin_url_v3: {get_param: [EndpointMap, CinderV3Admin, uri]}
+          cinder::keystone::auth::password: {get_param: CinderPassword}
+          cinder::keystone::auth::region: {get_param: KeystoneRegion}
+        mysql:
+          cinder::db::mysql::password: {get_param: CinderPassword}
+          cinder::db::mysql::user: cinder
+          cinder::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          cinder::db::mysql::dbname: cinder
+          cinder::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index 0db1718..59c9b84 100644 (file)
@@ -60,20 +60,12 @@ outputs:
               - '@'
               - {get_param: [EndpointMap, MysqlInternal, host]}
               - '/cinder'
-        cinder::db::mysql::password: {get_param: CinderPassword}
         cinder::debug: {get_param: Debug}
         cinder::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         cinder::rabbit_userid: {get_param: RabbitUserName}
         cinder::rabbit_password: {get_param: RabbitPassword}
         cinder::rabbit_port: {get_param: RabbitClientPort}
-        cinder::db::mysql::user: cinder
-        cinder::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        cinder::db::mysql::dbname: cinder
-        cinder::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
         cinder::rabbit_heartbeat_timeout_threshold: 60
-        cinder::host: hostgroup
         cinder::cron::db_purge::destination: '/dev/null'
         cinder::db::database_db_max_retries: -1
         cinder::db::database_max_retries: -1
index 1326e26..94c263e 100644 (file)
@@ -21,6 +21,11 @@ parameters:
   MonitoringSubscriptionCinderScheduler:
     default: 'overcloud-cinder-scheduler'
     type: string
+  CinderSchedulerLoggingSource:
+    type: json
+    default:
+      tag: openstack.cinder.scheduler
+      path: /var/log/cinder/cinder-scheduler.log
 
 resources:
 
@@ -37,6 +42,9 @@ outputs:
     value:
       service_name: cinder_scheduler
       monitoring_subscription: {get_param: MonitoringSubscriptionCinderScheduler}
+      logging_source: {get_param: CinderSchedulerLoggingSource}
+      logging_groups:
+        - cinder
       config_settings:
         map_merge:
           - get_attr: [CinderBase, role_data, config_settings]
index c84c784..82e16f3 100644 (file)
@@ -59,6 +59,11 @@ parameters:
   MonitoringSubscriptionCinderVolume:
     default: 'overcloud-cinder-volume'
     type: string
+  CinderVolumeLoggingSource:
+    type: json
+    default:
+      tag: openstack.cinder.volume
+      path: /var/log/cinder/cinder-volume.log
 
 resources:
 
@@ -75,6 +80,9 @@ outputs:
     value:
       service_name: cinder_volume
       monitoring_subscription: {get_param: MonitoringSubscriptionCinderVolume}
+      logging_source: {get_param: CinderVolumeLoggingSource}
+      logging_groups:
+        - cinder
       config_settings:
         map_merge:
           - get_attr: [CinderBase, role_data, config_settings]
index 36962a3..01daeaf 100644 (file)
@@ -19,6 +19,15 @@ parameters:
     description: Mapping of service endpoint -> protocol. Typically set
                  via parameter_defaults in the resource registry.
     type: json
+  MongoDbLoggingSource:
+    type: json
+    description: Fluentd logging configuration for mongodb.
+    default:
+      tag: database.mongodb
+      path: /var/log/mongodb/mongodb.log
+      format: >-
+        /(?<time>\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d+\+\d{4})
+        (?<message>.*)$/
 
 resources:
   MongoDbBase:
@@ -33,6 +42,9 @@ outputs:
     description: Service mongodb using composable services.
     value:
       service_name: mongodb
+      logging_groups:
+        - mongodb
+      logging_source: {get_param: MongoDbLoggingSource}
       config_settings:
         map_merge:
           - get_attr: [MongoDbBase, role_data, config_settings]
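
The various *LoggingSource parameters added throughout this change share the same shape (tag, path, optional format). A sketch of overriding one of them from an environment file (the path is an assumed example, not the package default):

    parameter_defaults:
      MongoDbLoggingSource:
        tag: database.mongodb
        path: /var/log/mongodb/mongod.log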
index b0eea48..094a7c9 100644 (file)
@@ -74,5 +74,11 @@ outputs:
         # internal_api_uri -> [IP]
         # internal_api_subnet - > IP/CIDR
         mysql_bind_host: {get_param: [ServiceNetMap, MysqlNetwork]}
+        tripleo::profile::base::database::mysql::bind_address:
+          str_replace:
+            template:
+              '"%{::fqdn_$NETWORK}"'
+            params:
+              $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
       step_config: |
         include ::tripleo::profile::base::database::mysql
index adc1b4c..80ba9ae 100644 (file)
@@ -45,9 +45,16 @@ parameters:
     constraints:
     - allowed_values: ['swift', 'file', 'rbd']
   GlanceWorkers:
-    default: 0
-    description: Number of workers for Glance service.
-    type: number
+    default: ''
+    description: |
+      Number of API worker processes for Glance. If left unset (empty string),
+      the option is omitted from the rendered configuration and a
+      system-dependent default is chosen (typically the number of processors).
+      Please note that on systems with a large number of CPUs this results in
+      a correspondingly large number of worker processes and excess memory
+      consumption. It is recommended that a suitable non-default value be
+      selected on such systems.
+    type: string
   GlanceRbdPoolName:
     default: images
     type: string
@@ -76,6 +83,11 @@ parameters:
   MonitoringSubscriptionGlanceApi:
     default: 'overcloud-glance-api'
     type: string
+  GlanceApiLoggingSource:
+    type: json
+    default:
+      tag: openstack.glance.api
+      path: /var/log/glance/api.log
 
 outputs:
   role_data:
@@ -83,6 +95,9 @@ outputs:
     value:
       service_name: glance_api
       monitoring_subscription: {get_param: MonitoringSubscriptionGlanceApi}
+      logging_source: {get_param: GlanceApiLoggingSource}
+      logging_groups:
+        - glance
       config_settings:
         glance::api::database_connection:
           list_join:
@@ -101,6 +116,7 @@ outputs:
             template: "'REGISTRY_HOST'"
             params:
               REGISTRY_HOST: {get_param: [EndpointMap, GlanceRegistryInternal, host]}
+        glance::api::registry_client_protocol: {get_param: [EndpointMap, GlanceRegistryInternal, protocol] }
         glance::api::authtoken::password: {get_param: GlancePassword}
         glance::api::enable_proxy_headers_parsing: true
         glance::api::debug: {get_param: Debug}
@@ -114,16 +130,10 @@ outputs:
         glance::backend::rbd::rbd_store_pool: {get_param: GlanceRbdPoolName}
         glance::backend::rbd::rbd_store_user: {get_param: CephClientUserName}
         glance_backend: {get_param: GlanceBackend}
-        glance::db::mysql::password: {get_param: GlancePassword}
         glance::notify::rabbitmq::rabbit_userid: {get_param: RabbitUserName}
         glance::notify::rabbitmq::rabbit_port: {get_param: RabbitClientPort}
         glance::notify::rabbitmq::rabbit_password: {get_param: RabbitPassword}
         glance::notify::rabbitmq::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
-        glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
-        glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
-        glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
-        glance::keystone::auth::password: {get_param: GlancePassword }
-        glance::keystone::auth::region: {get_param: KeystoneRegion}
         glance::registry::db::database_db_max_retries: -1
         glance::registry::db::database_max_retries: -1
         tripleo.glance_api.firewall_rules:
@@ -131,7 +141,6 @@ outputs:
             dport:
               - 9292
               - 13292
-        glance::keystone::auth::tenant: 'service'
         glance::api::authtoken::project_name: 'service'
         glance::api::pipeline: 'keystone'
         glance::api::show_image_direct_url: true
@@ -144,3 +153,11 @@ outputs:
         glance::api::bind_host: {get_param: [ServiceNetMap, GlanceApiNetwork]}
       step_config: |
         include ::tripleo::profile::base::glance::api
+      service_config_settings:
+        keystone:
+          glance::keystone::auth::public_url: {get_param: [EndpointMap, GlancePublic, uri]}
+          glance::keystone::auth::internal_url: {get_param: [EndpointMap, GlanceInternal, uri]}
+          glance::keystone::auth::admin_url: {get_param: [EndpointMap, GlanceAdmin, uri]}
+          glance::keystone::auth::password: {get_param: GlancePassword }
+          glance::keystone::auth::region: {get_param: KeystoneRegion}
+          glance::keystone::auth::tenant: 'service'
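
With GlanceWorkers now a string that defaults to unset, an explicit worker count can still be pinned where the per-CPU default would be excessive (the value is an example only):

    parameter_defaults:
      GlanceWorkers: '4'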
index d5f01d4..30df67f 100644 (file)
@@ -27,12 +27,24 @@ parameters:
     type: string
     hidden: true
   GlanceWorkers:
-    default: 0
-    description: Number of workers for Glance service.
-    type: number
+    default: ''
+    description: |
+      Number of worker processes for the Glance registry. If left unset (empty
+      string), the option is omitted from the rendered configuration and a
+      system-dependent default is chosen (typically the number of processors).
+      Please note that on systems with a large number of CPUs this results in
+      a correspondingly large number of worker processes and excess memory
+      consumption. It is recommended that a suitable non-default value be
+      selected on such systems.
+    type: string
   MonitoringSubscriptionGlanceRegistry:
     default: 'overcloud-glance-registry'
     type: string
+  GlanceRegistryLoggingSource:
+    type: json
+    default:
+      tag: openstack.glance.registry
+      path: /var/log/glance/registry.log
 
 outputs:
   role_data:
@@ -40,6 +52,9 @@ outputs:
     value:
       service_name: glance_registry
       monitoring_subscription: {get_param: MonitoringSubscriptionGlanceRegistry}
+      logging_source: {get_param: GlanceRegistryLoggingSource}
+      logging_groups:
+        - glance
       config_settings:
         glance::registry::database_connection:
           list_join:
@@ -57,12 +72,6 @@ outputs:
         glance::registry::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
         glance::registry::debug: {get_param: Debug}
         glance::registry::workers: {get_param: GlanceWorkers}
-        glance::db::mysql::user: glance
-        glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        glance::db::mysql::dbname: glance
-        glance::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
         glance::registry::db::database_db_max_retries: -1
         glance::registry::db::database_max_retries: -1
         tripleo.glance_registry.firewall_rules:
@@ -78,3 +87,12 @@ outputs:
         glance::registry::bind_host: {get_param: [ServiceNetMap, GlanceRegistryNetwork]}
       step_config: |
         include ::tripleo::profile::base::glance::registry
+      service_config_settings:
+        mysql:
+          glance::db::mysql::password: {get_param: GlancePassword}
+          glance::db::mysql::user: glance
+          glance::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          glance::db::mysql::dbname: glance
+          glance::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index 650865e..1512179 100644 (file)
@@ -36,6 +36,11 @@ parameters:
   MonitoringSubscriptionGnocchiApi:
     default: 'overcloud-gnocchi-api'
     type: string
+  GnocchiApiLoggingSource:
+    type: json
+    default:
+      tag: openstack.gnocchi.api
+      path: /var/log/gnocchi/app.log
 
 resources:
 
@@ -59,6 +64,9 @@ outputs:
     value:
       service_name: gnocchi_api
       monitoring_subscription: {get_param: MonitoringSubscriptionGnocchiApi}
+      logging_source: {get_param: GnocchiApiLoggingSource}
+      logging_groups:
+        - gnocchi
       config_settings:
         map_merge:
           - get_attr: [ApacheServiceBase, role_data, config_settings]
@@ -70,17 +78,17 @@ outputs:
                   - 13041
             gnocchi::api::enabled: true
             gnocchi::api::service_name: 'httpd'
-            gnocchi::keystone::auth::admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] }
-            gnocchi::keystone::auth::internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]}
-            gnocchi::keystone::auth::password: {get_param: GnocchiPassword}
-            gnocchi::keystone::auth::public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] }
-            gnocchi::keystone::auth::region: {get_param: KeystoneRegion}
-            gnocchi::keystone::auth::tenant: 'service'
             gnocchi::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
             gnocchi::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
             gnocchi::keystone::authtoken::password: {get_param: GnocchiPassword}
             gnocchi::keystone::authtoken::project_name: 'service'
             gnocchi::wsgi::apache::ssl: false
+            gnocchi::wsgi::apache::servername:
+              str_replace:
+                template:
+                  '"%{::fqdn_$NETWORK}"'
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, GnocchiApiNetwork]}
             tripleo::profile::base::gnocchi::api::gnocchi_backend: {get_param: GnocchiBackend}
             # NOTE: bind IP is found in Heat replacing the network name with the
             # local node IP for the given network; replacement examples
@@ -96,3 +104,19 @@ outputs:
             gnocchi::storage::swift::swift_authurl: {get_param: [EndpointMap, KeystoneInternal, uri]}
       step_config: |
         include ::tripleo::profile::base::gnocchi::api
+      service_config_settings:
+        keystone:
+          gnocchi::keystone::auth::admin_url: { get_param: [ EndpointMap, GnocchiAdmin, uri ] }
+          gnocchi::keystone::auth::internal_url: {get_param: [EndpointMap, GnocchiInternal, uri]}
+          gnocchi::keystone::auth::password: {get_param: GnocchiPassword}
+          gnocchi::keystone::auth::public_url: { get_param: [ EndpointMap, GnocchiPublic, uri ] }
+          gnocchi::keystone::auth::region: {get_param: KeystoneRegion}
+          gnocchi::keystone::auth::tenant: 'service'
+        mysql:
+          gnocchi::db::mysql::password: {get_param: GnocchiPassword}
+          gnocchi::db::mysql::user: gnocchi
+          gnocchi::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          gnocchi::db::mysql::dbname: gnocchi
+          gnocchi::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index 9f114ac..556baae 100644 (file)
@@ -56,6 +56,7 @@ outputs:
       service_name: gnocchi_base
       config_settings:
         #Gnocchi engine
+        gnocchi_redis_password: {get_param: RedisPassword}
         gnocchi::debug: {get_param: Debug}
         gnocchi::db::database_connection:
           list_join:
@@ -66,16 +67,7 @@ outputs:
               - '@'
               - {get_param: [EndpointMap, MysqlInternal, host]}
               - '/gnocchi'
-        gnocchi::db::mysql::password: {get_param: GnocchiPassword}
         gnocchi::db::sync::extra_opts: '--skip-storage --create-legacy-resource-types'
-        gnocchi::storage::coordination_url:
-          list_join:
-            - ''
-            - - 'redis://:'
-              - {get_param: RedisPassword}
-              - '@'
-              - "%{hiera('redis_vip')}"
-              - ':6379/'
         gnocchi::storage::swift::swift_user: 'service:gnocchi'
         gnocchi::storage::swift::swift_auth_version: 2
         gnocchi::storage::swift::swift_key: {get_param: GnocchiPassword}
@@ -94,9 +86,3 @@ outputs:
         gnocchi::statsd::project_id: '6c38cd8d-099a-4cb2-aecf-17be688e8616'
         gnocchi::statsd::flush_delay: 10
         gnocchi::statsd::archive_policy_name: 'low'
-        gnocchi::db::mysql::user: gnocchi
-        gnocchi::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        gnocchi::db::mysql::dbname: gnocchi
-        gnocchi::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
index ebdebd1..1400bc9 100644 (file)
@@ -21,6 +21,10 @@ parameters:
   MonitoringSubscriptionGnocchiMetricd:
     default: 'overcloud-gnocchi-metricd'
     type: string
+  GnocchiMetricdWorkers:
+    default: ''
+    description: Number of workers for Gnocchi MetricD
+    type: string
 
 resources:
   GnocchiServiceBase:
@@ -39,5 +43,6 @@ outputs:
       config_settings:
         map_merge:
           - get_attr: [GnocchiServiceBase, role_data, config_settings]
+          - gnocchi::metricd::workers: {get_param: GnocchiMetricdWorkers}
       step_config: |
         include ::tripleo::profile::base::gnocchi::metricd
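
The new GnocchiMetricdWorkers parameter is merged into the base settings as
gnocchi::metricd::workers. Leaving it at the empty-string default keeps the
puppet module's default; pinning it is a one-line override, as in this sketch
(illustrative value):

    parameter_defaults:
      GnocchiMetricdWorkers: '4'
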
index 61a6907..a47fec5 100644 (file)
@@ -33,6 +33,11 @@ parameters:
   MonitoringSubscriptionHeatApiCnf:
     default: 'overcloud-heat-api-cfn'
     type: string
+  HeatApiCfnLoggingSource:
+    type: json
+    default:
+      tag: openstack.heat.api.cfn
+      path: /var/log/heat/heat-api-cfn.log
 
 resources:
   HeatBase:
@@ -48,16 +53,13 @@ outputs:
     value:
       service_name: heat_api_cfn
       monitoring_subscription: {get_param: MonitoringSubscriptionHeatApiCnf}
+      logging_source: {get_param: HeatApiCfnLoggingSource}
+      logging_groups:
+        - heat
       config_settings:
         map_merge:
           - get_attr: [HeatBase, role_data, config_settings]
           - heat::api_cfn::workers: {get_param: HeatWorkers}
-            heat::keystone::auth_cfn::tenant: 'service'
-            heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
-            heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
-            heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
-            heat::keystone::auth_cfn::password: {get_param: HeatPassword}
-            heat::keystone::auth::region: {get_param: KeystoneRegion}
             tripleo.heat_api_cfn.firewall_rules:
               '125 heat_cfn':
                 dport:
@@ -72,3 +74,11 @@ outputs:
             heat::api_cfn::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
       step_config: |
         include ::tripleo::profile::base::heat::api_cfn
+      service_config_settings:
+        keystone:
+          heat::keystone::auth_cfn::tenant: 'service'
+          heat::keystone::auth_cfn::public_url: {get_param: [EndpointMap, HeatCfnPublic, uri]}
+          heat::keystone::auth_cfn::internal_url: {get_param: [EndpointMap, HeatCfnInternal, uri]}
+          heat::keystone::auth_cfn::admin_url: {get_param: [EndpointMap, HeatCfnAdmin, uri]}
+          heat::keystone::auth_cfn::password: {get_param: HeatPassword}
+          heat::keystone::auth::region: {get_param: KeystoneRegion}
index c12e56e..6dfeaaf 100644 (file)
@@ -25,6 +25,11 @@ parameters:
   MonitoringSubscriptionHeatApiCloudwatch:
     default: 'overcloud-heat-api-cloudwatch'
     type: string
+  HeatApiCloudwatchLoggingSource:
+    type: json
+    default:
+      tag: openstack.heat.api.cloudwatch
+      path: /var/log/heat/heat-api-cloudwatch.log
 
 resources:
   HeatBase:
@@ -40,6 +45,9 @@ outputs:
     value:
       service_name: heat_api_cloudwatch
       monitoring_subscription: {get_param: MonitoringSubscriptionHeatApiCloudwatch}
+      logging_source: {get_param: HeatApiCloudwatchLoggingSource}
+      logging_groups:
+        - heat
       config_settings:
         map_merge:
           - get_attr: [HeatBase, role_data, config_settings]
index 64b0c53..2ea96fc 100644 (file)
@@ -33,6 +33,11 @@ parameters:
   MonitoringSubscriptionHeatApi:
     default: 'overcloud-heat-api'
     type: string
+  HeatApiLoggingSource:
+    type: json
+    default:
+      tag: openstack.heat.api
+      path: /var/log/heat/heat-api.log
 
 resources:
   HeatBase:
@@ -48,16 +53,13 @@ outputs:
     value:
       service_name: heat_api
       monitoring_subscription: {get_param: MonitoringSubscriptionHeatApi}
+      logging_source: {get_param: HeatApiLoggingSource}
+      logging_groups:
+        - heat
       config_settings:
         map_merge:
           - get_attr: [HeatBase, role_data, config_settings]
           - heat::api::workers: {get_param: HeatWorkers}
-            heat::keystone::auth::tenant: 'service'
-            heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
-            heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
-            heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
-            heat::keystone::auth::password: {get_param: HeatPassword}
-            heat::keystone::auth::region: {get_param: KeystoneRegion}
             tripleo.heat_api.firewall_rules:
               '125 heat_api':
                 dport:
@@ -72,3 +74,11 @@ outputs:
             heat::api::bind_host: {get_param: [ServiceNetMap, HeatApiNetwork]}
       step_config: |
         include ::tripleo::profile::base::heat::api
+      service_config_settings:
+        keystone:
+          heat::keystone::auth::tenant: 'service'
+          heat::keystone::auth::public_url: {get_param: [EndpointMap, HeatPublic, uri]}
+          heat::keystone::auth::internal_url: {get_param: [EndpointMap, HeatInternal, uri]}
+          heat::keystone::auth::admin_url: {get_param: [EndpointMap, HeatAdmin, uri]}
+          heat::keystone::auth::password: {get_param: HeatPassword}
+          heat::keystone::auth::region: {get_param: KeystoneRegion}
index 089bf53..24c3636 100644 (file)
@@ -43,6 +43,11 @@ parameters:
   MonitoringSubscriptionHeatEngine:
     default: 'overcloud-heat-engine'
     type: string
+  HeatEngineLoggingSource:
+    type: json
+    default:
+      tag: openstack.heat.engine
+      path: /var/log/heat/heat-engine.log
 
 resources:
   HeatBase:
@@ -58,6 +63,9 @@ outputs:
     value:
       service_name: heat_engine
       monitoring_subscription: {get_param: MonitoringSubscriptionHeatEngine}
+      logging_source: {get_param: HeatEngineLoggingSource}
+      logging_groups:
+        - heat
       config_settings:
         map_merge:
           - get_attr: [HeatBase, role_data, config_settings]
@@ -75,14 +83,7 @@ outputs:
                   - {get_param: [EndpointMap, MysqlInternal, host]}
                   - '/heat'
             heat::keystone_ec2_uri: {get_param: [EndpointMap, KeystoneEC2, uri]}
-            heat::db::mysql::password: {get_param: HeatPassword}
             heat::keystone::domain::domain_password: {get_param: HeatStackDomainAdminPassword}
-            heat::db::mysql::user: heat
-            heat::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-            heat::db::mysql::dbname: heat
-            heat::db::mysql::allowed_hosts:
-              - '%'
-              - "%{hiera('mysql_bind_host')}"
             heat::engine::auth_encryption_key:
               yaql:
                 expression: $.data.passwords.where($ != '').first()
@@ -92,3 +93,13 @@ outputs:
                     - {get_param: [DefaultPasswords, heat_auth_encryption_key]}
       step_config: |
         include ::tripleo::profile::base::heat::engine
+
+      service_config_settings:
+        mysql:
+          heat::db::mysql::password: {get_param: HeatPassword}
+          heat::db::mysql::user: heat
+          heat::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          heat::db::mysql::dbname: heat
+          heat::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index 5c3f370..c8a2e83 100644 (file)
@@ -58,12 +58,6 @@ outputs:
             ironic::api::port: {get_param: [EndpointMap, IronicInternal, port]}
             # This is used to build links in responses
             ironic::api::public_endpoint: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
-            ironic::keystone::auth::admin_url: {get_param: [EndpointMap, IronicAdmin, uri_no_suffix]}
-            ironic::keystone::auth::internal_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
-            ironic::keystone::auth::public_url: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
-            ironic::keystone::auth::auth_name: 'ironic'
-            ironic::keystone::auth::password: {get_param: IronicPassword }
-            ironic::keystone::auth::tenant: 'service'
             tripleo.ironic_api.firewall_rules:
               '133 ironic api':
                 dport:
@@ -71,3 +65,19 @@ outputs:
                   - 13385
       step_config: |
         include ::tripleo::profile::base::ironic::api
+      service_config_settings:
+        keystone:
+          ironic::keystone::auth::admin_url: {get_param: [EndpointMap, IronicAdmin, uri_no_suffix]}
+          ironic::keystone::auth::internal_url: {get_param: [EndpointMap, IronicInternal, uri_no_suffix]}
+          ironic::keystone::auth::public_url: {get_param: [EndpointMap, IronicPublic, uri_no_suffix]}
+          ironic::keystone::auth::auth_name: 'ironic'
+          ironic::keystone::auth::password: {get_param: IronicPassword }
+          ironic::keystone::auth::tenant: 'service'
+        mysql:
+          ironic::db::mysql::password: {get_param: IronicPassword}
+          ironic::db::mysql::user: ironic
+          ironic::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          ironic::db::mysql::dbname: ironic
+          ironic::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index 2f242da..0ff393c 100644 (file)
@@ -65,12 +65,5 @@ outputs:
         ironic::rabbit_password: {get_param: RabbitPassword}
         ironic::rabbit_port: {get_param: RabbitClientPort}
         ironic::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
-        ironic::db::mysql::password: {get_param: IronicPassword}
-        ironic::db::mysql::user: ironic
-        ironic::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        ironic::db::mysql::dbname: ironic
-        ironic::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
       step_config: |
         include ::tripleo::profile::base::ironic
index 2b069d6..38cfbe2 100644 (file)
@@ -41,5 +41,8 @@ outputs:
       config_settings:
         tripleo::keepalived::control_virtual_interface: {get_param: ControlVirtualInterface}
         tripleo::keepalived::public_virtual_interface: {get_param: PublicVirtualInterface}
+        tripleo.keepalived.firewall_rules:
+          '106 keepalived vrrp':
+            proto: vrrp
       step_config: |
         include ::tripleo::profile::base::keepalived
index b321ecb..e353163 100644 (file)
@@ -93,6 +93,11 @@ parameters:
   KeystoneCredential1:
     type: string
     description: The second Keystone credential key. Must be a valid key.
+  KeystoneLoggingSource:
+    type: json
+    default:
+      tag: openstack.keystone
+      path: /var/log/keystone/keystone.log
 
 resources:
 
@@ -109,7 +114,9 @@ outputs:
     value:
       service_name: keystone
       monitoring_subscription: {get_param: MonitoringSubscriptionKeystone}
-      config_settings:
+      logging_source: {get_param: KeystoneLoggingSource}
+      logging_groups:
+        - keystone
       config_settings:
         map_merge:
           - get_attr: [ApacheServiceBase, role_data, config_settings]
@@ -134,7 +141,6 @@ outputs:
               '/etc/keystone/credential-keys/1':
                 content: {get_param: KeystoneCredential1}
             keystone::debug: {get_param: Debug}
-            keystone::db::mysql::password: {get_param: AdminToken}
             keystone::rabbit_userid: {get_param: RabbitUserName}
             keystone::rabbit_password: {get_param: RabbitPassword}
             keystone::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -148,13 +154,6 @@ outputs:
             keystone::endpoint::admin_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
             keystone::endpoint::region: {get_param: KeystoneRegion}
             keystone_enable_db_purge: {get_param: KeystoneEnableDBPurge}
-            keystone::public_endpoint: {get_param: [EndpointMap, KeystonePublic, uri_no_suffix]}
-            keystone::db::mysql::user: keystone
-            keystone::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-            keystone::db::mysql::dbname: keystone
-            keystone::db::mysql::allowed_hosts:
-              - '%'
-              - "%{hiera('mysql_bind_host')}"
             keystone::rabbit_heartbeat_timeout_threshold: 60
             keystone::cron::token_flush::maxdelay: 3600
             keystone::roles::admin::service_tenant: 'service'
@@ -165,7 +164,18 @@ outputs:
                 value: 'keystone.contrib.ec2.backends.sql.Ec2'
             keystone::service_name: 'httpd'
             keystone::wsgi::apache::ssl: false
-    
+            keystone::wsgi::apache::servername:
+              str_replace:
+                template:
+                  '"%{::fqdn_$NETWORK}"'
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, KeystonePublicApiNetwork]}
+            keystone::wsgi::apache::servername_admin:
+              str_replace:
+                template:
+                  '"%{::fqdn_$NETWORK}"'
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
             keystone::wsgi::apache::workers: {get_param: KeystoneWorkers}
             # override via extraconfig:
             keystone::wsgi::apache::threads: 1
@@ -191,3 +201,12 @@ outputs:
             keystone::wsgi::apache::admin_bind_host: {get_param: [ServiceNetMap, KeystoneAdminApiNetwork]}
       step_config: |
         include ::tripleo::profile::base::keystone
+      service_config_settings:
+        mysql:
+          keystone::db::mysql::password: {get_param: AdminToken}
+          keystone::db::mysql::user: keystone
+          keystone::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          keystone::db::mysql::dbname: keystone
+          keystone::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/logging/fluentd-base.yaml b/puppet/services/logging/fluentd-base.yaml
new file mode 100644 (file)
index 0000000..c8f6755
--- /dev/null
@@ -0,0 +1,37 @@
+heat_template_version: 2016-04-08
+
+description: Fluentd base service
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: >
+      Mapping of service endpoint -> protocol. Typically set
+      via parameter_defaults in the resource registry.
+    type: json
+
+
+outputs:
+  role_data:
+    description: Role data for the Fluentd role.
+    value:
+      service_name: fluentd_base
+      config_settings:
+        fluentd::package_name: fluentd
+        fluentd::service_name: fluentd
+        fluentd::config_file: /etc/fluentd/fluent.conf
+        fluentd::config_owner: fluentd
+        fluentd::config_group: fluentd
+        fluentd::config_path: /etc/fluentd/config.d
+        fluentd::plugin_provider: yum
+        fluentd::service_provider: systemd
+        fluentd::repo_install: false
diff --git a/puppet/services/logging/fluentd-client.yaml b/puppet/services/logging/fluentd-client.yaml
new file mode 100644 (file)
index 0000000..3ae7110
--- /dev/null
@@ -0,0 +1,64 @@
+heat_template_version: 2016-10-14
+
+description: Fluentd client configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: >
+      Mapping of service endpoint -> protocol. Typically set
+      via parameter_defaults in the resource registry.
+    type: json
+
+resources:
+  FluentdBase:
+    type: ./fluentd-base.yaml
+    properties:
+      EndpointMap: {get_param: EndpointMap}
+      ServiceNetMap: {get_param: ServiceNetMap}
+      DefaultPasswords: {get_param: DefaultPasswords}
+
+  LoggingConfiguration:
+    type: OS::TripleO::LoggingConfiguration
+
+outputs:
+  role_data:
+    description: Role data for the Fluentd client role.
+    value:
+      service_name: fluentd_client
+      config_settings:
+        map_merge:
+          - get_attr: [FluentdBase, role_data, config_settings]
+          - tripleo::profile::base::logging::fluentd::fluentd_servers:
+              get_attr: [LoggingConfiguration, LoggingServers]
+            tripleo::profile::base::logging::fluentd::fluentd_filters:
+              yaql:
+                expression: >
+                  $.data.filters.flatten().where($)
+                data:
+                  filters:
+                    - get_attr: [LoggingConfiguration, LoggingDefaultFilters]
+                    - get_attr: [LoggingConfiguration, LoggingExtraFilters]
+            tripleo::profile::base::logging::fluentd::fluentd_pos_file_path:
+              get_attr: [LoggingConfiguration, LoggingPosFilePath]
+            tripleo::profile::base::logging::fluentd::fluentd_use_ssl:
+              get_attr: [LoggingConfiguration, LoggingUsesSSL]
+            tripleo::profile::base::logging::fluentd::fluentd_ssl_certificate:
+              get_attr: [LoggingConfiguration, LoggingSSLCertificate]
+            tripleo::profile::base::logging::fluentd::fluentd_ssl_key:
+              get_attr: [LoggingConfiguration, LoggingSSLKey]
+            tripleo::profile::base::logging::fluentd::fluentd_ssl_key_passphrase:
+              get_attr: [LoggingConfiguration, LoggingSSLKeyPassphrase]
+            tripleo::profile::base::logging::fluentd::fluentd_shared_key:
+              get_attr: [LoggingConfiguration, LoggingSharedKey]
+      step_config: |
+        include ::tripleo::profile::base::logging::fluentd
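
The client template merges the fluentd base settings with the forwarding
configuration read from OS::TripleO::LoggingConfiguration. The per-service
inputs come from the logging_source and logging_groups outputs added to the
composable services throughout this change; each contribution has roughly the
shape sketched below (the example.* names are hypothetical, and the fluentd
source options ultimately emitted are determined by the puppet profile):

    logging_source:
      tag: openstack.example.api        # hypothetical service tag
      path: /var/log/example/api.log
    logging_groups:
      - example
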
diff --git a/puppet/services/logging/fluentd-config.yaml b/puppet/services/logging/fluentd-config.yaml
new file mode 100644 (file)
index 0000000..58b423f
--- /dev/null
@@ -0,0 +1,154 @@
+heat_template_version: 2016-10-14
+
+description: Fluentd logging configuration
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: >
+      Mapping of service endpoint -> protocol. Typically set
+      via parameter_defaults in the resource registry.
+    type: json
+  LoggingDefaultFormat:
+    description: >
+      Default format used to parse messages from log files.
+    type: string
+    default: >-
+      /(?<time>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d+)
+      (?<pid>\d+)
+      (?<priority>\S+)
+      (?<message>.*)$/
+  LoggingPosFilePath:
+    description: >
+      Directory in which to place fluentd pos_file files (used to track
+      file position for the 'tail' input type).
+    type: string
+    default: /var/cache/fluentd
+  LoggingDefaultGroups:
+    description: >
+      Make the fluentd user a member of these groups. Only override this parameter
+      if you want to modify the default list of groups.  Use
+      LoggingExtraGroups to add the fluentd user to additional groups.
+    type: comma_delimited_list
+    default:
+      - root
+  LoggingExtraGroups:
+    description: >
+      Make the fluentd user a member of these groups (in addition to
+      LoggingDefaultGroups and the groups provided by individual
+      composable services).
+    type: comma_delimited_list
+    default: []
+  LoggingServers:
+    description: |
+      A list of destinations to which fluentd will forward log messages.  Expects
+      a list of dictionaries of the form:
+
+          - host: loghost1.example.com
+            port: 24224
+          - host: loghost2.example.com
+            port: 24224
+    type: json
+    default: []
+  LoggingDefaultFilters:
+    description: >
+      A list of fluentd default filters. This will be passed verbatim
+      to the 'filter' key of a fluentd::config resource.  Only override this
+      if you do not want the default set of filters; use LoggingExtraFilters
+      if you just want to add additional filters.
+    type: json
+    default:
+      - tag_pattern: '**'
+        type: record_transformer
+        record:
+          host: '${hostname}'
+
+      - tag_pattern: 'openstack.**'
+        type: record_transformer
+        record:
+          component: '${tag_parts[1]}'
+  LoggingExtraFilters:
+    description: >
+      A list of additional fluentd filters. This will be passed
+      verbatim to the 'filter' key of a fluentd::config resource.
+    type: json
+    default: []
+  LoggingUsesSSL:
+    description: >
+      A boolean value indicating whether or not log messages should be
+      forwarded using the secure_forward plugin.
+    type: boolean
+    default: false
+  LoggingSSLCertificate:
+    description: >
+      PEM-encoded SSL CA certificate for fluentd.
+    type: string
+    default: ""
+  LoggingSSLKey:
+    description: >
+      PEM-encoded key for fluentd CA certificate (used by in_secure_forward).
+    type: string
+    default: ""
+  LoggingSSLKeyPassphrase:
+    description: >
+      Passphrase for LoggingSSLKey (used by in_secure_forward).
+    type: string
+    default: ""
+  LoggingSharedKey:
+    description: >
+      Shared secret for fluentd secure-forward plugin.
+    type: string
+    default: ""
+  LoggingDefaultSources:
+    description: >
+      A list of default logging sources for fluentd.  You should only override
+      this parameter if you wish to disable the default logging sources.  Use
+      LoggingExtraSources to define additional source configurations.
+    type: json
+    default: []
+  LoggingExtraSources:
+    description: >
+      A list of additional logging sources for fluentd.  These will be combined
+      with the LoggingDefaultSources and any logging sources defined by
+      composable services.
+    type: json
+    default: []
+
+outputs:
+  LoggingDefaultFormat:
+    value: {get_param: LoggingDefaultFormat}
+  LoggingDefaultFilters:
+    value: {get_param: LoggingDefaultFilters}
+  LoggingExtraFilters:
+    value: {get_param: LoggingExtraFilters}
+  LoggingDefaultGroups:
+    value: {get_param: LoggingDefaultGroups}
+  LoggingExtraGroups:
+    value: {get_param: LoggingExtraGroups}
+  LoggingPosFilePath:
+    value: {get_param: LoggingPosFilePath}
+  LoggingSSLCertificate:
+    value: {get_param: LoggingSSLCertificate}
+  LoggingSSLKey:
+    value: {get_param: LoggingSSLKey}
+  LoggingSSLKeyPassphrase:
+    value: {get_param: LoggingSSLKeyPassphrase}
+  LoggingServers:
+    value: {get_param: LoggingServers}
+  LoggingSharedKey:
+    value: {get_param: LoggingSharedKey}
+  LoggingUsesSSL:
+    value: {get_param: LoggingUsesSSL}
+  LoggingDefaultSources:
+    value: {get_param: LoggingDefaultSources}
+  LoggingExtraSources:
+    value: {get_param: LoggingExtraSources}
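
Taken together, the parameters above are what an operator would set in an
environment file to point fluentd at a log collector. A minimal sketch using
only parameters defined in this template (hostnames and key are placeholders):

    parameter_defaults:
      LoggingServers:
        - host: loghost1.example.com
          port: 24224
      LoggingUsesSSL: true
      LoggingSharedKey: 'secret'
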
index 1513ab3..4d3fd47 100644 (file)
@@ -51,14 +51,6 @@ outputs:
             manila::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri]}
             manila::keystone::authtoken::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
             manila::keystone::authtoken::project_name: 'service'
-            manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]}
-            manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]}
-            manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]}
-            manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]}
-            manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]}
-            manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]}
-            manila::keystone::auth::password: {get_param: ManilaPassword }
-            manila::keystone::auth::region: {get_param: KeystoneRegion }
             # NOTE: bind IP is found in Heat replacing the network name with the
             # local node IP for the given network; replacement examples
             # (eg. for internal_api):
@@ -69,4 +61,22 @@ outputs:
             manila::api::enable_proxy_headers_parsing: true
       step_config: |
         include ::tripleo::profile::base::manila::api
-
+      service_config_settings:
+        keystone:
+          manila::keystone::auth::tenant: 'service'
+          manila::keystone::auth::public_url: {get_param: [EndpointMap, ManilaV1Public, uri]}
+          manila::keystone::auth::internal_url: {get_param: [EndpointMap, ManilaV1Internal, uri]}
+          manila::keystone::auth::admin_url: {get_param: [EndpointMap, ManilaV1Admin, uri]}
+          manila::keystone::auth::public_url_v2: {get_param: [EndpointMap, ManilaPublic, uri]}
+          manila::keystone::auth::internal_url_v2: {get_param: [EndpointMap, ManilaInternal, uri]}
+          manila::keystone::auth::admin_url_v2: {get_param: [EndpointMap, ManilaAdmin, uri]}
+          manila::keystone::auth::password: {get_param: ManilaPassword}
+          manila::keystone::auth::region: {get_param: KeystoneRegion}
+        mysql:
+          manila::db::mysql::password: {get_param: ManilaPassword}
+          manila::db::mysql::user: manila
+          manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          manila::db::mysql::dbname: manila
+          manila::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
diff --git a/puppet/services/manila-backend-cephfs.yaml b/puppet/services/manila-backend-cephfs.yaml
new file mode 100644 (file)
index 0000000..37b0a1d
--- /dev/null
@@ -0,0 +1,61 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Manila CephFS backend
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  # CephFS Native backend params:
+  ManilaCephFSNativeEnableBackend:
+    type: boolean
+    default: false
+  ManilaCephFSNativeBackendName:
+    type: string
+    default: cephfsnative
+  ManilaCephFSNativeDriverHandlesShareServers:
+    type: boolean
+    default: false
+  ManilaCephFSNativeShareBackendName:
+    type: string
+    default: 'cephfs'
+  ManilaCephFSNativeCephFSConfPath:
+    type: string
+    default: '/etc/ceph/ceph.conf'
+  ManilaCephFSNativeCephFSAuthId:
+    type: string
+    default: 'manila'
+  ManilaCephFSNativeCephFSClusterName:
+    type: string
+    default: 'ceph'
+  ManilaCephFSNativeCephFSEnableSnapshots:
+    type: boolean
+    default: true
+
+outputs:
+  role_data:
+    description: Role data for the Manila Cephfs backend.
+    value:
+      service_name: manila_backend_cephfs
+      config_settings:
+        manila::backend::cephfsnative::enable_backend: {get_param: ManilaCephFSNativeEnableBackend}
+        manila::backend::cephfsnative::title: {get_param: ManilaCephFSNativeBackendName}
+        manila::backend::cephfsnative::driver_handles_share_servers: {get_param: ManilaCephFSNativeDriverHandlesShareServers}
+        manila::backend::cephfsnative::share_backend_name: {get_param: ManilaCephFSNativeShareBackendName}
+        manila::backend::cephfsnative::cephfs_conf_path: {get_param: ManilaCephFSNativeCephFSConfPath}
+        manila::backend::cephfsnative::cephfs_auth_id: {get_param: ManilaCephFSNativeCephFSAuthId}
+        manila::backend::cephfsnative::cephfs_cluster_name: {get_param: ManilaCephFSNativeCephFSClusterName}
+        manila::backend::cephfsnative::cephfs_enable_snapshots: {get_param: ManilaCephFSNativeCephFSEnableSnapshots}
+      step_config:
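
The CephFS backend is disabled by default; enabling it only requires flipping
the parameters defined above from an environment file. A sketch (the
non-boolean values shown are simply the template defaults, repeated for
clarity):

    parameter_defaults:
      ManilaCephFSNativeEnableBackend: true
      ManilaCephFSNativeBackendName: cephfsnative
      ManilaCephFSNativeCephFSAuthId: manila
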
diff --git a/puppet/services/manila-backend-generic.yaml b/puppet/services/manila-backend-generic.yaml
new file mode 100644 (file)
index 0000000..5c001c8
--- /dev/null
@@ -0,0 +1,93 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Manila generic backend.
+
+parameters:
+  ManilaGenericEnableBackend:
+    type: boolean
+    default: false
+  ManilaGenericBackendName:
+    type: string
+    default: tripleo_generic
+  ManilaGenericDriverHandlesShareServers:
+    type: string
+    default: true
+  ManilaGenericSmbTemplateConfigPath:
+    type: string
+    default: '$state_path/smb.conf'
+  ManilaGenericVolumeNameTemplate:
+    type: string
+    default: 'manila-share-%s'
+  ManilaGenericVolumeSnapshotNameTemplate:
+    type: string
+    default: 'manila-snapshot-%s'
+  ManilaGenericShareMountPath:
+    type: string
+    default: '/shares'
+  ManilaGenericMaxTimeToCreateVolume:
+    type: string
+    default: '180'
+  ManilaGenericMaxTimeToAttach:
+    type: string
+    default: '120'
+  ManilaGenericServiceInstanceSmbConfigPath:
+    type: string
+    default: '$share_mount_path/smb.conf'
+  ManilaGenericShareVolumeFsType:
+    type: string
+    default: 'ext4'
+  ManilaGenericCinderVolumeType:
+    type: string
+    default: ''
+  ManilaServiceInstanceUser:
+    type: string
+    default: ''
+  ManilaServiceInstancePassword: #SET THIS via parameter_defaults
+    type: string
+    hidden: true
+  ManilaServiceInstanceFlavorId:
+    type: number
+    default: 1
+  ManilaServiceNetworkCidr:
+    type: string
+    default: '172.16.0.0/16'
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    type: json
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+
+outputs:
+  role_data:
+    description: Role data for the Manila Generic backend.
+    value:
+      service_name: manila_backend_generic
+      config_settings:
+        manila_generic_enable_backend: {get_param: ManilaGenericEnableBackend}
+        manila::backend::generic::title: {get_param: ManilaGenericBackendName}
+        manila::backend::generic::driver_handles_share_servers: {get_param: ManilaGenericDriverHandlesShareServers}
+        manila::backend::generic::smb_template_config_path: {get_param: ManilaGenericSmbTemplateConfigPath}
+        manila::backend::generic::volume_name_template: {get_param: ManilaGenericVolumeNameTemplate}
+        manila::backend::generic::volume_snapshot_name_template: {get_param: ManilaGenericVolumeSnapshotNameTemplate}
+        manila::backend::generic::share_mount_path: {get_param: ManilaGenericShareMountPath}
+        manila::backend::generic::max_time_to_create_volume: {get_param: ManilaGenericMaxTimeToCreateVolume}
+        manila::backend::generic::max_time_to_attach: {get_param: ManilaGenericMaxTimeToAttach}
+        manila::backend::generic::service_instance_smb_config_path: {get_param: ManilaGenericServiceInstanceSmbConfigPath}
+        manila::backend::generic::share_volume_fstype: {get_param: ManilaGenericShareVolumeFsType}
+        manila::backend::generic::cinder_volume_type: {get_param: ManilaGenericCinderVolumeType}
+        manila::service_instance::service_instance_user: {get_param: ManilaServiceInstanceUser}
+        manila::service_instance::service_instance_password: {get_param: ManilaServiceInstancePassword}
+        manila::service_instance::service_instance_flavor_id: {get_param: ManilaServiceInstanceFlavorId}
+        manila::service_instance::service_network_cidr: {get_param: ManilaServiceNetworkCidr}
+
+      step_config:
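
Likewise, the generic backend stays off unless ManilaGenericEnableBackend is
set. Note that ManilaServiceInstancePassword has no default and is hidden, so
it must be supplied via parameter_defaults, as in this sketch (the password is
a placeholder):

    parameter_defaults:
      ManilaGenericEnableBackend: true
      ManilaServiceInstanceUser: manila
      ManilaServiceInstancePassword: 'ReplaceWithASecret'  # required, no default
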
diff --git a/puppet/services/manila-backend-netapp.yaml b/puppet/services/manila-backend-netapp.yaml
new file mode 100644 (file)
index 0000000..c95a8da
--- /dev/null
@@ -0,0 +1,112 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Manila NetApp backend.
+
+parameters:
+  ManilaNetappEnableBackend:
+    type: boolean
+    default: false
+  ManilaNetappDriverHandlesShareServers:
+    type: string
+    default: true
+  ManilaNetappBackendName:
+    type: string
+    default: tripleo_netapp
+  ManilaNetappLogin:
+    type: string
+    default: ''
+  ManilaNetappPassword:
+    type: string
+    default: ''
+  ManilaNetappServerHostname:
+    type: string
+    default: ''
+  ManilaNetappTransportType:
+    type: string
+    default: 'http'
+  ManilaNetappStorageFamily:
+    type: string
+    default: 'ontap_cluster'
+  ManilaNetappServerPort:
+    type: number
+    default: 80
+  ManilaNetappVolumeNameTemplate:
+    type: string
+    default: 'share_%(share_id)s'
+  ManilaNetappVserver:
+    type: string
+    default: ''
+  ManilaNetappVserverNameTemplate:
+    type: string
+    default: 'os_%s'
+  ManilaNetappLifNameTemplate:
+    type: string
+    default: 'os_%(net_allocation_id)s'
+  ManilaNetappAggrNameSearchPattern:
+    type: string
+    default: '(.*)'
+  ManilaNetappRootVolumeAggr:
+    type: string
+    default: ''
+  ManilaNetappRootVolume:
+    type: string
+    default: 'root'
+  ManilaNetappPortNameSearchPattern:
+    type: string
+    default: '(.*)'
+  ManilaNetappTraceFlags:
+    type: string
+    default: ''
+  ManilaNetappEnabledShareProtocols:
+    type: string
+    default: 'nfs3, nfs4.0'
+  ManilaNetappVolumeSnapshotReservePercent:
+    type: number
+    default: 5
+  ManilaNetappSnapmirrorQuiesceTimeout:
+    type: number
+    default: 3600
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    type: json
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+
+outputs:
+  role_data:
+    description: Role data for the Manila Netapp backend.
+    value:
+      service_name: manila_backend_netapp
+      config_settings:
+        manila_netapp_enable_backend: {get_param: ManilaNetappEnableBackend}
+        manila::backend::netapp::title: {get_param: ManilaNetappBackendName}
+        manila::backend::netapp::netapp_login: {get_param: ManilaNetappLogin}
+        manila::backend::netapp::driver_handles_share_servers: {get_param: ManilaNetappDriverHandlesShareServers}
+        manila::backend::netapp::netapp_password: {get_param: ManilaNetappPassword}
+        manila::backend::netapp::netapp_server_hostname: {get_param: ManilaNetappServerHostname}
+        manila::backend::netapp::netapp_transport_type: {get_param: ManilaNetappTransportType}
+        manila::backend::netapp::netapp_storage_family: {get_param: ManilaNetappStorageFamily}
+        manila::backend::netapp::netapp_server_port: {get_param: ManilaNetappServerPort}
+        manila::backend::netapp::netapp_volume_name_template: {get_param: ManilaNetappVolumeNameTemplate}
+        manila::backend::netapp::netapp_vserver: {get_param: ManilaNetappVserver}
+        manila::backend::netapp::netapp_vserver_name_template: {get_param: ManilaNetappVserverNameTemplate}
+        manila::backend::netapp::netapp_lif_name_template: {get_param: ManilaNetappLifNameTemplate}
+        manila::backend::netapp::netapp_aggregate_name_search_pattern: {get_param: ManilaNetappAggrNameSearchPattern}
+        manila::backend::netapp::netapp_root_volume_aggregate: {get_param: ManilaNetappRootVolumeAggr}
+        manila::backend::netapp::netapp_root_volume: {get_param: ManilaNetappRootVolume}
+        manila::backend::netapp::netapp_port_name_search_pattern: {get_param: ManilaNetappPortNameSearchPattern}
+        manila::backend::netapp::netapp_trace_flags: {get_param: ManilaNetappTraceFlags}
+        manila::backend::netapp::netapp_enabled_share_protocols: {get_param: ManilaNetappEnabledShareProtocols}
+        manila::backend::netapp::netapp_volume_snapshot_reserve_percent: {get_param: ManilaNetappVolumeSnapshotReservePercent}
+        manila::backend::netapp::netapp_snapmirror_quiesce_timeout: {get_param: ManilaNetappSnapmirrorQuiesceTimeout}
+      step_config:
index 78bf1c6..d228577 100644 (file)
@@ -40,55 +40,6 @@ parameters:
     default: 5672
     description: Set rabbit subscriber port, change this if using SSL
     type: number
-  # Config specific parameters, to be provided via parameter_defaults
-  ManilaGenericEnableBackend:
-    type: boolean
-    default: true
-  ManilaGenericBackendName:
-    type: string
-    default: tripleo_generic
-  ManilaGenericDriverHandlesShareServers:
-    type: string
-    default: true
-  ManilaGenericSmbTemplateConfigPath:
-    type: string
-    default: '$state_path/smb.conf'
-  ManilaGenericVolumeNameTemplate:
-    type: string
-    default: 'manila-share-%s'
-  ManilaGenericVolumeSnapshotNameTemplate:
-    type: string
-    default: 'manila-snapshot-%s'
-  ManilaGenericShareMountPath:
-    type: string
-    default: '/shares'
-  ManilaGenericMaxTimeToCreateVolume:
-    type: string
-    default: '180'
-  ManilaGenericMaxTimeToAttach:
-    type: string
-    default: '120'
-  ManilaGenericServiceInstanceSmbConfigPath:
-    type: string
-    default: '$share_mount_path/smb.conf'
-  ManilaGenericShareVolumeFsType:
-    type: string
-    default: 'ext4'
-  ManilaGenericCinderVolumeType:
-    type: string
-    default: ''
-  ManilaGenericServiceInstanceUser:
-    type: string
-    default: ''
-  ManilaGenericServiceInstancePassword: #SET THIS via parameter_defaults
-    type: string
-    hidden: true
-  ManilaGenericServiceInstanceFlavorId:
-    type: number
-    default: 1
-  ManilaGenericServiceNetworkCidr:
-    type: string
-    default: '172.16.0.0/16'
 
 outputs:
   role_data:
@@ -101,28 +52,5 @@ outputs:
         manila::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
         manila::rabbit_port: {get_param: RabbitClientPort}
         manila::debug: {get_param: Debug}
-        manila::db::mysql::user: manila
-        manila::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        manila::db::mysql::dbname: manila
         manila::db::database_db_max_retries: -1
         manila::db::database_max_retries: -1
-        manila_generic_enable_backend: {get_param: ManilaGenericEnableBackend}
-        manila::backend::generic::title: {get_param: ManilaGenericBackendName}
-        manila::backend::generic::driver_handles_share_servers: {get_param: ManilaGenericDriverHandlesShareServers}
-        manila::backend::generic::smb_template_config_path: {get_param: ManilaGenericSmbTemplateConfigPath}
-        manila::backend::generic::volume_name_template: {get_param: ManilaGenericVolumeNameTemplate}
-        manila::backend::generic::volume_snapshot_name_template: {get_param: ManilaGenericVolumeSnapshotNameTemplate}
-        manila::backend::generic::share_mount_path: {get_param: ManilaGenericShareMountPath}
-        manila::backend::generic::max_time_to_create_volume: {get_param: ManilaGenericMaxTimeToCreateVolume}
-        manila::backend::generic::max_time_to_attach: {get_param: ManilaGenericMaxTimeToAttach}
-        manila::backend::generic::service_instance_smb_config_path: {get_param: ManilaGenericServiceInstanceSmbConfigPath}
-        manila::backend::generic::share_volume_fstype: {get_param: ManilaGenericShareVolumeFsType}
-        manila::backend::generic::cinder_volume_type: {get_param: ManilaGenericCinderVolumeType}
-        manila::service_instance::service_instance_user: {get_param: ManilaGenericServiceInstanceUser}
-        manila::service_instance::service_instance_password: {get_param: ManilaGenericServiceInstancePassword}
-        manila::service_instance::service_instance_flavor_id: {get_param: ManilaGenericServiceInstanceFlavorId}
-        manila::service_instance::service_network_cidr: {get_param: ManilaGenericServiceNetworkCidr}
-        manila::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
-
index 28addd6..474cc24 100644 (file)
@@ -54,7 +54,6 @@ outputs:
           - manila::compute::nova::nova_admin_auth_url: {get_param: [EndpointMap, KeystoneInternal, uri]}
             manila::compute::nova::nova_admin_password: {get_param: NovaPassword}
             manila::compute::nova::nova_admin_tenant_name: 'service'
-            manila::db::mysql::password: {get_param: ManilaPassword}
             manila::network::neutron::neutron_url: {get_param: [EndpointMap, NeutronInternal, uri]}
             manila::network::neutron::neutron_admin_auth_url: {get_param: [EndpointMap, NeutronAdmin, uri]}
             manila::network::neutron::neutron_admin_password: {get_param: NeutronPassword}
index e4ca489..c2b6b6f 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: 2016-04-08
+heat_template_version: 2016-10-14
 
 description: >
   OpenStack Neutron Server configured with Puppet
@@ -37,10 +37,6 @@ parameters:
     default: 'True'
     description: Allow automatic l3-agent failover
     type: string
-  NeutronL3HA:
-    default: false
-    description: Whether to enable HA for virtual routers
-    type: boolean
   NovaPassword:
     description: The password for the nova service and db account, used by nova-api.
     type: string
@@ -56,6 +52,41 @@ parameters:
   MonitoringSubscriptionNeutronServer:
     default: 'overcloud-neutron-server'
     type: string
+  NeutronApiLoggingSource:
+    type: json
+    default:
+      tag: openstack.neutron.api
+      path: /var/log/neutron/server.log
+  ControllerCount:
+    description: |
+      Under normal conditions, this should not be overridden manually and is
+      set at deployment time. The default value is present to allow the
+      template to be used in environments that do not override it.
+    default: 1
+    type: number
+
+  # DEPRECATED: the following options are deprecated and are currently maintained
+  # for backwards compatibility. They will be removed in the Ocata cycle.
+  NeutronL3HA:
+    default: false
+    description: |
+      Whether to enable HA for virtual routers. Although the default value is
+      'false', L3 HA is enabled automatically when more than one node hosts
+      the controller configuration and DVR is disabled. This parameter is
+      deprecated in Newton and is scheduled for removal in Ocata, when L3 HA
+      will be enabled by default wherever it is appropriate for the deployment
+      type; alternate override mechanisms will be provided.
+    type: boolean
+
+parameter_groups:
+- label: deprecated
+  description: |
+   The following parameters are deprecated and will be removed. They should not
+   be relied on for new deployments. If you have concerns regarding deprecated
+   parameters, please contact the TripleO development team on IRC or the
+   OpenStack mailing list.
+  parameters:
+  - NeutronL3HA
 
 resources:
 
@@ -66,12 +97,27 @@ resources:
       DefaultPasswords: {get_param: DefaultPasswords}
       EndpointMap: {get_param: EndpointMap}
 
+conditions:
+
+  auto_enable_l3_ha:
+    and:
+      - not:
+          equals:
+            - get_param: ControllerCount
+            - 1
+      - equals:
+        - get_param: NeutronEnableDVR
+        - false
+
 outputs:
   role_data:
     description: Role data for the Neutron Server agent service.
     value:
       service_name: neutron_api
       monitoring_subscription: {get_param: MonitoringSubscriptionNeutronServer}
+      logging_source: {get_param: NeutronApiLoggingSource}
+      logging_groups:
+        - neutron
       config_settings:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
@@ -84,18 +130,12 @@ outputs:
                   - '@'
                   - {get_param: [EndpointMap, MysqlInternal, host]}
                   - '/ovs_neutron'
-            neutron::keystone::auth::tenant: 'service'
-            neutron::keystone::auth::public_url: {get_param: [EndpointMap, NeutronPublic, uri]}
-            neutron::keystone::auth::internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] }
-            neutron::keystone::auth::admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] }
-            neutron::keystone::auth::password: {get_param: NeutronPassword}
-            neutron::keystone::auth::region: {get_param: KeystoneRegion}
             neutron::keystone::authtoken::auth_uri: {get_param: [EndpointMap, KeystoneInternal, uri] }
             neutron::keystone::authtoken::auth_url: {get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix]}
             neutron::server::api_workers: {get_param: NeutronWorkers}
             neutron::server::rpc_workers: {get_param: NeutronWorkers}
             neutron::server::allow_automatic_l3agent_failover: {get_param: NeutronAllowL3AgentFailover}
-            neutron::server::l3_ha: {get_param: NeutronL3HA}
+            neutron::server::l3_ha: {if: ["auto_enable_l3_ha", true, {get_param: NeutronL3HA}]}
             neutron::keystone::authtoken::password: {get_param: NeutronPassword}
 
             neutron::server::notifications::nova_url: { get_param: [ EndpointMap, NovaInternal, uri ] }
@@ -105,23 +145,11 @@ outputs:
             neutron::server::notifications::password: {get_param: NovaPassword}
             neutron::keystone::authtoken::project_name: 'service'
             neutron::server::sync_db: true
-            neutron::db::mysql::password: {get_param: NeutronPassword}
-            neutron::db::mysql::user: neutron
-            neutron::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-            neutron::db::mysql::dbname: ovs_neutron
-            neutron::db::mysql::allowed_hosts:
-              - '%'
-              - "%{hiera('mysql_bind_host')}"
-            tripleo.neutron_server.firewall_rules:
-              '114 neutron server':
+            tripleo.neutron_api.firewall_rules:
+              '114 neutron api':
                 dport:
                   - 9696
                   - 13696
-              '118 neutron vxlan networks':
-                proto: 'udp'
-                dport: 4789
-              '106 vrrp':
-                proto: vrrp
             neutron::server::router_distributed: {get_param: NeutronEnableDVR}
             # NOTE: bind IP is found in Heat replacing the network name with the local node IP
             # for the given network; replacement examples (eg. for internal_api):
@@ -131,3 +159,19 @@ outputs:
             neutron::bind_host: {get_param: [ServiceNetMap, NeutronApiNetwork]}
       step_config: |
         include tripleo::profile::base::neutron::server
+      service_config_settings:
+        keystone:
+          neutron::keystone::auth::tenant: 'service'
+          neutron::keystone::auth::public_url: {get_param: [EndpointMap, NeutronPublic, uri]}
+          neutron::keystone::auth::internal_url: { get_param: [ EndpointMap, NeutronInternal, uri ] }
+          neutron::keystone::auth::admin_url: { get_param: [ EndpointMap, NeutronAdmin, uri ] }
+          neutron::keystone::auth::password: {get_param: NeutronPassword}
+          neutron::keystone::auth::region: {get_param: KeystoneRegion}
+        mysql:
+          neutron::db::mysql::password: {get_param: NeutronPassword}
+          neutron::db::mysql::user: neutron
+          neutron::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          neutron::db::mysql::dbname: ovs_neutron
+          neutron::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index b2ad5da..2cd08f9 100644 (file)
@@ -34,6 +34,11 @@ parameters:
   MonitoringSubscriptionNeutronDhcp:
     default: 'overcloud-neutron-dhcp'
     type: string
+  NeutronDhcpAgentLoggingSource:
+    type: json
+    default:
+      tag: openstack.neutron.agent.dhcp
+      path: /var/log/neutron/dhcp-agent.log
 
 resources:
 
@@ -50,6 +55,9 @@ outputs:
     value:
       service_name: neutron_dhcp
       monitoring_subscription: {get_param: MonitoringSubscriptionNeutronDhcp}
+      logging_source: {get_param: NeutronDhcpAgentLoggingSource}
+      logging_groups:
+        - neutron
       config_settings:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
index 5eb3e25..b6c2911 100644 (file)
@@ -29,6 +29,11 @@ parameters:
   MonitoringSubscriptionNeutronL3Dvr:
     default: 'overcloud-neutron-l3-dvr'
     type: string
+  NeutronL3ComputeAgentLoggingSource:
+    type: json
+    default:
+      tag: openstack.neutron.agent.l3-compute
+      path: /var/log/neutron/l3-agent.log
 
 resources:
 
@@ -45,6 +50,9 @@ outputs:
     value:
       service_name: neutron_l3_compute_dvr
       monitoring_subscription: {get_param: MonitoringSubscriptionNeutronL3Dvr}
+      logging_source: {get_param: NeutronL3ComputeAgentLoggingSource}
+      logging_groups:
+        - neutron
       config_settings:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
index de62a50..a89e3d7 100644 (file)
@@ -37,6 +37,11 @@ parameters:
   MonitoringSubscriptionNeutronL3:
     default: 'overcloud-neutron-l3-agent'
     type: string
+  NeutronL3AgentLoggingSource:
+    type: json
+    default:
+      tag: openstack.neutron.agent.l3
+      path: /var/log/neutron/l3-agent.log
 
 resources:
 
@@ -53,11 +58,17 @@ outputs:
     value:
       service_name: neutron_l3
       monitoring_subscription: {get_param: MonitoringSubscriptionNeutronL3}
+      logging_source: {get_param: NeutronL3AgentLoggingSource}
+      logging_groups:
+        - neutron
       config_settings:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
           - neutron::agents::l3::external_network_bridge: {get_param: NeutronExternalNetworkBridge}
             neutron::agents::l3::router_delete_namespaces: True
             neutron::agents::l3::agent_mode : {get_param: NeutronL3AgentMode}
+            tripleo.neutron_l3.firewall_rules:
+              '106 neutron_l3 vrrp':
+                proto: vrrp
       step_config: |
         include tripleo::profile::base::neutron::l3
index 320ae0c..8be4c6d 100644 (file)
@@ -23,9 +23,16 @@ parameters:
     type: string
     hidden: true
   NeutronWorkers:
-    default: 0
-    description: Number of workers for Neutron service.
-    type: number
+    default: ''
+    description: |
+      Sets the number of worker processes for the neutron metadata agent. If
+      left unset (empty string), the configuration is left unset and a
+      system-dependent default is chosen (usually the number of processors).
+      Please note that on systems with a large core count this can create a
+      large number of processes and excessive memory consumption. On such
+      systems it is recommended that a suitable non-default value be selected
+      to match the load requirements.
+    type: string
   NeutronPassword:
     description: The password for the neutron service and db account, used by neutron agents.
     type: string
@@ -33,6 +40,11 @@ parameters:
   MonitoringSubscriptionNeutronMetadata:
     default: 'overcloud-neutron-metadata'
     type: string
+  NeutronMetadataAgentLoggingSource:
+    type: json
+    default:
+      tag: openstack.neutron.agent.metadata
+      path: /var/log/neutron/metadata-agent.log
 
 resources:
 
@@ -49,6 +61,9 @@ outputs:
     value:
       service_name: neutron_metadata
       monitoring_subscription: {get_param: MonitoringSubscriptionNeutronMetadata}
+      logging_source: {get_param: NeutronMetadataAgentLoggingSource}
+      logging_groups:
+        - neutron
       config_settings:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
@@ -57,11 +72,6 @@ outputs:
             neutron::agents::metadata::auth_password: {get_param: NeutronPassword}
             neutron::agents::metadata::auth_url: { get_param: [EndpointMap, KeystoneAdmin, uri_no_suffix] }
             neutron::agents::metadata::auth_tenant: 'service'
-            # NOTE: bind IP is found in Heat replacing the network name with the local node IP
-            # for the given network; replacement examples (eg. for internal_api):
-            # internal_api -> IP
-            # internal_api_uri -> [IP]
-            # internal_api_subnet - > IP/CIDR
-            neutron::agents::metadata::metadata_ip: {get_param: [ServiceNetMap, NeutronApiNetwork]}
+            neutron::agents::metadata::metadata_ip: '"%{hiera(\"nova_metadata_vip\")}"'
       step_config: |
         include tripleo::profile::base::neutron::metadata
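Changing the NeutronWorkers default from the number 0 to an empty string means the option is simply left unset, letting the service fall back to its own system-dependent default, typically the processor count. A small sketch of that selection logic, assuming the CPU count as the fallback:

    import multiprocessing

    # Sketch of the "empty string means unset" behaviour described for
    # NeutronWorkers: an explicit value wins; otherwise fall back to a
    # system-dependent default (assumed here to be the CPU count).
    def effective_workers(neutron_workers=''):
        if str(neutron_workers).strip():
            return int(neutron_workers)
        return multiprocessing.cpu_count()

    print(effective_workers(''))   # system-dependent default
    print(effective_workers('4'))  # explicit operator-provided value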
index ade322e..cca0dee 100644 (file)
@@ -64,6 +64,11 @@ parameters:
       examples are: noop, openvswitch, iptables_hybrid. The default value of an
       empty string will result in a default supported configuration.
     type: string
+  NeutronOpenVswitchAgentLoggingSource:
+    type: json
+    default:
+      tag: openstack.neutron.agent.openvswitch
+      path: /var/log/neutron/openvswitch-agent.log
 
 resources:
 
@@ -80,6 +85,9 @@ outputs:
     value:
       service_name: neutron_ovs_agent
       monitoring_subscription: {get_param: MonitoringSubscriptionNeutronOvs}
+      logging_source: {get_param: NeutronOpenVswitchAgentLoggingSource}
+      logging_groups:
+        - neutron
       config_settings:
         map_merge:
           - get_attr: [NeutronBase, role_data, config_settings]
@@ -109,5 +117,11 @@ outputs:
             # internal_api_subnet - > IP/CIDR
             neutron::agents::ml2::ovs::local_ip: {get_param: [ServiceNetMap, NeutronTenantNetwork]}
             neutron::agents::ml2::ovs::firewall_driver: {get_param: NeutronOVSFirewallDriver}
+            tripleo.neutron_ovs_agent.firewall_rules:
+              '118 neutron vxlan networks':
+                proto: 'udp'
+                dport: 4789
+              '136 neutron gre networks':
+                proto: 'gre'
       step_config: |
         include ::tripleo::profile::base::neutron::ovs
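The tripleo.neutron_ovs_agent.firewall_rules hash feeds the tripleo::firewall Puppet profile, which opens the VXLAN UDP port and the GRE protocol for tenant tunnelling traffic. The sketch below only approximates the kind of iptables accept rules such entries imply; the real rules are generated by the Puppet code and carry more detail (ordering, chains, state matching).

    # Rough approximation only: convert a tripleo.*.firewall_rules hash into
    # iptables-style accept rules. The actual rules are managed by the
    # tripleo::firewall Puppet profile and differ in detail.
    def to_iptables(rules):
        commands = []
        for name, rule in sorted(rules.items()):
            cmd = ['iptables', '-A', 'INPUT', '-p', str(rule['proto'])]
            if 'dport' in rule:
                cmd += ['--dport', str(rule['dport'])]
            cmd += ['-m', 'comment', '--comment', '"%s"' % name, '-j', 'ACCEPT']
            commands.append(' '.join(cmd))
        return commands

    ovs_agent_rules = {
        '118 neutron vxlan networks': {'proto': 'udp', 'dport': 4789},
        '136 neutron gre networks': {'proto': 'gre'},
    }
    print('\n'.join(to_iptables(ovs_agent_rules)))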
index e1dbd8e..b2ec003 100644 (file)
@@ -46,6 +46,11 @@ parameters:
   MonitoringSubscriptionNovaApi:
     default: 'overcloud-nova-api'
     type: string
+  NovaApiLoggingSource:
+    type: json
+    default:
+      tag: openstack.nova.api
+      path: /var/log/nova/nova-api.log
 
 resources:
   NovaBase:
@@ -61,6 +66,9 @@ outputs:
     value:
       service_name: nova_api
       monitoring_subscription: {get_param: MonitoringSubscriptionNovaApi}
+      logging_source: {get_param: NovaApiLoggingSource}
+      logging_groups:
+        - nova
       config_settings:
         map_merge:
           - get_attr: [NovaBase, role_data, config_settings]
@@ -86,12 +94,6 @@ outputs:
             nova::api::default_floating_pool: 'public'
             nova::api::sync_db_api: true
             nova::api::enable_proxy_headers_parsing: true
-            nova::keystone::auth::tenant: 'service'
-            nova::keystone::auth::public_url: {get_param: [EndpointMap, NovaPublic, uri]}
-            nova::keystone::auth::internal_url: {get_param: [EndpointMap, NovaInternal, uri]}
-            nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
-            nova::keystone::auth::password: {get_param: NovaPassword}
-            nova::keystone::auth::region: {get_param: KeystoneRegion}
             # NOTE: bind IP is found in Heat replacing the network name with the local node IP
             # for the given network; replacement examples (eg. for internal_api):
             # internal_api -> IP
@@ -105,3 +107,26 @@ outputs:
 
       step_config: |
         include tripleo::profile::base::nova::api
+      service_config_settings:
+        keystone:
+          nova::keystone::auth::tenant: 'service'
+          nova::keystone::auth::public_url: {get_param: [EndpointMap, NovaPublic, uri]}
+          nova::keystone::auth::internal_url: {get_param: [EndpointMap, NovaInternal, uri]}
+          nova::keystone::auth::admin_url: {get_param: [EndpointMap, NovaAdmin, uri]}
+          nova::keystone::auth::password: {get_param: NovaPassword}
+          nova::keystone::auth::region: {get_param: KeystoneRegion}
+        mysql:
+          nova::db::mysql::password: {get_param: NovaPassword}
+          nova::db::mysql::user: nova
+          nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          nova::db::mysql::dbname: nova
+          nova::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
+          nova::db::mysql_api::password: {get_param: NovaPassword}
+          nova::db::mysql_api::user: nova_api
+          nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          nova::db::mysql_api::dbname: nova_api
+          nova::db::mysql_api::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index 24a63bb..8db00d8 100644 (file)
@@ -95,20 +95,6 @@ outputs:
               - '@'
               - {get_param: [EndpointMap, MysqlInternal, host]}
               - '/nova_api'
-        nova::db::mysql::password: {get_param: NovaPassword}
-        nova::db::mysql::user: nova
-        nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        nova::db::mysql::dbname: nova
-        nova::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
-        nova::db::mysql_api::password: {get_param: NovaPassword}
-        nova::db::mysql_api::user: nova_api
-        nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        nova::db::mysql_api::dbname: nova_api
-        nova::db::mysql_api::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
         nova::debug: {get_param: Debug}
         nova::purge_config: {get_param: EnableConfigPurge}
         nova::network::neutron::neutron_project_name: 'service'
@@ -123,18 +109,6 @@ outputs:
         nova::notify_on_state_change: 'vm_and_task_state'
         nova::notification_driver: messagingv2
         nova::network::neutron::neutron_auth_type: 'v3password'
-        nova::db::mysql::user: nova
-        nova::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        nova::db::mysql::dbname: nova
-        nova::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
-        nova::db::mysql_api::user: nova_api
-        nova::db::mysql_api::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        nova::db::mysql_api::dbname: nova_api
-        nova::db::mysql_api::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
         nova::db::database_db_max_retries: -1
         nova::db::database_max_retries: -1
         nova::glance_api_servers: {get_param: [EndpointMap, GlanceInternal, uri]}
index d1d7ae6..f7f2510 100644 (file)
@@ -70,6 +70,11 @@ parameters:
   MonitoringSubscriptionNovaCompute:
     default: 'overcloud-nova-compute'
     type: string
+  NovaComputeLoggingSource:
+    type: json
+    default:
+      tag: openstack.nova.compute
+      path: /var/log/nova/nova-compute.log
 
 resources:
   NovaBase:
@@ -85,6 +90,9 @@ outputs:
     value:
       service_name: nova_compute
       monitoring_subscription: {get_param: MonitoringSubscriptionNovaCompute}
+      logging_source: {get_param: NovaComputeLoggingSource}
+      logging_groups:
+        - nova
       config_settings:
         map_merge:
           - get_attr: [NovaBase, role_data, config_settings]
@@ -129,6 +137,9 @@ outputs:
             # internal_api_subnet - > IP/CIDR
             nova::compute::vncserver_proxyclient_address: {get_param: [ServiceNetMap, NovaVncProxyNetwork]}
             nova::compute::vncproxy_host: {get_param: [EndpointMap, NovaPublic, host_nobrackets]}
+            nova::vncproxy::common::vncproxy_protocol: {get_param: [EndpointMap, NovaVNCProxyPublic, protocol]}
+            nova::vncproxy::common::vncproxy_host: {get_param: [EndpointMap, NovaVNCProxyPublic, host_nobrackets]}
+            nova::vncproxy::common::vncproxy_port: {get_param: [EndpointMap, NovaVNCProxyPublic, port]}
       step_config: |
         # TODO(emilien): figure how to deal with libvirt profile.
         # We'll probably treat it like we do with Neutron plugins.
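The three nova::vncproxy::common settings split the public NovaVNCProxy endpoint into its protocol, host, and port so compute nodes can point console clients at the externally reachable proxy. Conceptually they recombine into a base URL; the host and port below are illustrative sample values, not taken from this change.

    # Sketch: recombine the nova::vncproxy::common::* pieces, taken from the
    # NovaVNCProxyPublic endpoint, into a base URL. The path Nova appends to
    # it is not shown here.
    def vncproxy_base_url(protocol, host, port):
        return '%s://%s:%s' % (protocol, host, port)

    print(vncproxy_base_url('https', 'overcloud.example.com', 13080))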
index 5dbc7ca..2671cdd 100644 (file)
@@ -25,6 +25,11 @@ parameters:
   MonitoringSubscriptionNovaConductor:
     default: 'overcloud-nova-conductor'
     type: string
+  NovaConductorLoggingSource:
+    type: json
+    default:
+      tag: openstack.nova.conductor
+      path: /var/log/nova/nova-conductor.log
 
 resources:
   NovaBase:
@@ -40,6 +45,9 @@ outputs:
     value:
       service_name: nova_conductor
       monitoring_subscription: {get_param: MonitoringSubscriptionNovaConductor}
+      logging_source: {get_param: NovaConductorLoggingSource}
+      logging_groups:
+        - nova
       config_settings:
         map_merge:
           - get_attr: [NovaBase, role_data, config_settings]
index 13e3a26..85e6042 100644 (file)
@@ -21,6 +21,11 @@ parameters:
   MonitoringSubscriptionNovaConsoleauth:
     default: 'overcloud-nova-consoleauth'
     type: string
+  NovaConsoleauthLoggingSource:
+    type: json
+    default:
+      tag: openstack.nova.consoleauth
+      path: /var/log/nova/nova-consoleauth.log
 
 resources:
   NovaBase:
@@ -36,6 +41,9 @@ outputs:
     value:
       service_name: nova_consoleauth
       monitoring_subscription: {get_param: MonitoringSubscriptionNovaConsoleauth}
+      logging_source: {get_param: NovaConsoleauthLoggingSource}
+      logging_groups:
+        - nova
       config_settings:
         get_attr: [NovaBase, role_data, config_settings]
       step_config: |
diff --git a/puppet/services/nova-metadata.yaml b/puppet/services/nova-metadata.yaml
new file mode 100644 (file)
index 0000000..92373c5
--- /dev/null
@@ -0,0 +1,34 @@
+heat_template_version: 2016-04-08
+
+description: >
+  OpenStack Nova API service configured with Puppet
+
+parameters:
+  ServiceNetMap:
+    default: {}
+    description: Mapping of service_name -> network name. Typically set
+                 via parameter_defaults in the resource registry.  This
+                 mapping overrides those in ServiceNetMapDefaults.
+    type: json
+  DefaultPasswords:
+    default: {}
+    type: json
+  EndpointMap:
+    default: {}
+    description: Mapping of service endpoint -> protocol. Typically set
+                 via parameter_defaults in the resource registry.
+    type: json
+  NovaWorkers:
+    default: 0
+    description: Number of workers for Nova API service.
+    type: number
+
+outputs:
+  role_data:
+    description: Role data for the Nova Metadata service.
+    value:
+      service_name: nova_metadata
+      config_settings:
+        nova::api::metadata_workers: {get_param: NovaWorkers}
+        nova::api::metadata_listen: {get_param: [ServiceNetMap, NovaMetadataNetwork]}
+      step_config: ""
index 3ffc9c5..d89e3e1 100644 (file)
@@ -32,6 +32,11 @@ parameters:
   MonitoringSubscriptionNovaScheduler:
     default: 'overcloud-nova-scheduler'
     type: string
+  NovaSchedulerLoggingSource:
+    type: json
+    default:
+      tag: openstack.nova.scheduler
+      path: /var/log/nova/nova-scheduler.log
 
 resources:
   NovaBase:
@@ -47,6 +52,9 @@ outputs:
     value:
       service_name: nova_scheduler
       monitoring_subscription: {get_param: MonitoringSubscriptionNovaScheduler}
+      logging_source: {get_param: NovaSchedulerLoggingSource}
+      logging_groups:
+        - nova
       config_settings:
         map_merge:
           - get_attr: [NovaBase, role_data, config_settings]
index 899fa35..85d59ae 100644 (file)
@@ -21,6 +21,11 @@ parameters:
   MonitoringSubscriptionNovaVNCProxy:
     default: 'overcloud-nova-vncproxy'
     type: string
+  NovaVncproxyLoggingSource:
+    type: json
+    default:
+      tag: openstack.nova.vncproxy
+      path: /var/log/nova/nova-vncproxy.log
 
 resources:
   NovaBase:
@@ -36,6 +41,9 @@ outputs:
     value:
       service_name: nova_vnc_proxy
       monitoring_subscription: {get_param: MonitoringSubscriptionNovaVNCProxy}
+      logging_source: {get_param: NovaVncproxyLoggingSource}
+      logging_groups:
+        - nova
       config_settings:
         map_merge:
           - get_attr: [NovaBase, role_data, config_settings]
index 5d1d666..abfb9c8 100644 (file)
@@ -66,6 +66,16 @@ parameters:
           ]
         }
     type: json
+  PacemakerLoggingSource:
+    type: json
+    default:
+      tag: system.pacemaker
+      path: /var/log/pacemaker.log,/var/log/cluster/corosync.log
+      format: >-
+        /^(?<time>[^ ]*\s*[^ ]* [^ ]*)
+        \[(?<pid>[^ ]*)\]
+        (?<host>[^ ]*)
+        (?<message>.*)$/
 
 outputs:
   role_data:
@@ -73,6 +83,9 @@ outputs:
     value:
       service_name: pacemaker
       monitoring_subscription: {get_param: MonitoringSubscriptionPacemaker}
+      logging_groups:
+        - haclient
+      logging_source: {get_param: PacemakerLoggingSource}
       config_settings:
         pacemaker::corosync::cluster_name: 'tripleo_cluster'
         pacemaker::corosync::manage_fw: false
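The PacemakerLoggingSource format is a fluentd (Ruby) regular expression with named captures for the timestamp, pid, host, and message; the YAML folded scalar joins the four lines with single spaces. The same pattern, translated to Python's named-group syntax, parses a typical pacemaker.log line as follows (the sample line is illustrative, not taken from this change):

    import re

    # The format string from PacemakerLoggingSource, with Ruby's (?<name>...)
    # captures rewritten as Python's (?P<name>...).
    PACEMAKER_LINE = re.compile(
        r'^(?P<time>[^ ]*\s*[^ ]* [^ ]*) \[(?P<pid>[^ ]*)\] '
        r'(?P<host>[^ ]*) (?P<message>.*)$'
    )

    # Illustrative sample in the usual pacemaker.log layout.
    sample = ('Oct 07 00:26:06 [12345] overcloud-controller-0 '
              'crmd: notice: State transition S_IDLE -> S_POLICY_ENGINE')
    fields = PACEMAKER_LINE.match(sample).groupdict()
    print(fields['time'], fields['pid'], fields['host'])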
index e4bcfc3..6823789 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: cinder_api
       monitoring_subscription: {get_attr: [CinderApiBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [CinderApiBase, role_data, logging_source]}
+      logging_groups: {get_attr: [CinderApiBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [CinderApiBase, role_data, config_settings]
index eb578e5..15e44be 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: cinder_scheduler
       monitoring_subscription: {get_attr: [CinderSchedulerBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [CinderSchedulerBase, role_data, logging_source]}
+      logging_groups: {get_attr: [CinderSchedulerBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [CinderSchedulerBase, role_data, config_settings]
index d5dedf3..d91a018 100644 (file)
@@ -34,10 +34,13 @@ outputs:
     value:
       service_name: cinder_volume
       monitoring_subscription: {get_attr: [CinderVolumeBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [CinderVolumeBase, role_data, logging_source]}
+      logging_groups: {get_attr: [CinderVolumeBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [CinderVolumeBase, role_data, config_settings]
           - cinder::volume::manage_service: false
             cinder::volume::enabled: false
+            cinder::host: hostgroup
       step_config:
         include ::tripleo::profile::pacemaker::cinder::volume
index 64ae2e9..982b606 100644 (file)
@@ -22,7 +22,7 @@ parameters:
 
 resources:
   MongoDbBase:
-    type: ../../database/mongodb-base.yaml
+    type: ../../database/mongodb.yaml
     properties:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
index d555ed0..7deaf0c 100644 (file)
@@ -35,6 +35,21 @@ outputs:
     value:
       service_name: mysql
       config_settings:
-        get_attr: [MysqlBase, role_data, config_settings]
+        map_merge:
+          - get_attr: [MysqlBase, role_data, config_settings]
+          - tripleo::profile::pacemaker::database::mysql::bind_address:
+              str_replace:
+                template:
+                  '"%{::fqdn_$NETWORK}"'
+                params:
+                  $NETWORK: {get_param: [ServiceNetMap, MysqlNetwork]}
+            # NOTE: bind IP is found in Heat replacing the network name with the
+            # local node IP for the given network; replacement examples
+            # (eg. for internal_api):
+            # internal_api -> IP
+            # internal_api_uri -> [IP]
+            # internal_api_subnet -> IP/CIDR
+            tripleo::profile::pacemaker::database::mysql::gmcast_listen_addr:
+              get_param: [ServiceNetMap, MysqlNetwork]
       step_config: |
         include ::tripleo::profile::pacemaker::database::mysql
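The bind_address for the Pacemaker-managed MySQL is built with str_replace so that a literal hiera interpolation string is written into the hieradata: for a MysqlNetwork of internal_api (assumed here) the value becomes "%{::fqdn_internal_api}", which hiera then resolves to the node's FQDN on that network. The substitution itself is simply:

    # Sketch of the str_replace above: substitute the network name into the
    # template while keeping the hiera interpolation literal, so hiera on each
    # node resolves it to that node's FQDN on the given network.
    def mysql_bind_address(network='internal_api'):
        template = '"%{::fqdn_$NETWORK}"'
        return template.replace('$NETWORK', network)

    print(mysql_bind_address())  # -> "%{::fqdn_internal_api}"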
index d9156e6..196754e 100644 (file)
@@ -21,7 +21,7 @@ parameters:
 
 resources:
   RedisBase:
-    type: ../../database/redis-base.yaml
+    type: ../../database/redis.yaml
     properties:
       ServiceNetMap: {get_param: ServiceNetMap}
       DefaultPasswords: {get_param: DefaultPasswords}
index 684785a..20a439f 100644 (file)
@@ -58,6 +58,8 @@ outputs:
     value:
       service_name: glance_api
       monitoring_subscription: {get_attr: [GlanceApiBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [GlanceApiBase, role_data, logging_source]}
+      logging_groups: {get_attr: [GlanceApiBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [GlanceApiBase, role_data, config_settings]
index 5bcabca..41f89fd 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: glance_registry
       monitoring_subscription: {get_attr: [GlanceRegistryBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [GlanceRegistryBase, role_data, logging_source]}
+      logging_groups: {get_attr: [GlanceRegistryBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [GlanceRegistryBase, role_data, config_settings]
index eae01b5..dd25905 100644 (file)
@@ -33,6 +33,8 @@ outputs:
     value:
       service_name: heat_api_cfn
       monitoring_subscription: {get_attr: [HeatApiCfnBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [HeatApiCfnBase, role_data, logging_source]}
+      logging_groups: {get_attr: [HeatApiCfnBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [HeatApiCfnBase, role_data, config_settings]
index 5608ae9..18d2a0d 100644 (file)
@@ -33,6 +33,8 @@ outputs:
     value:
       service_name: heat_api_cloudwatch
       monitoring_subscription: {get_attr: [HeatApiCloudwatchBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [HeatApiCloudwatchBase, role_data, logging_source]}
+      logging_groups: {get_attr: [HeatApiCloudwatchBase, role_data, logging_groups]}
       config_settings:
         map_merge:
            - get_attr: [HeatApiCloudwatchBase, role_data, config_settings]
index 6fd790c..43122cb 100644 (file)
@@ -33,6 +33,8 @@ outputs:
     value:
       service_name: heat_api
       monitoring_subscription: {get_attr: [HeatApiBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [HeatApiBase, role_data, logging_source]}
+      logging_groups: {get_attr: [HeatApiBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [HeatApiBase, role_data, config_settings]
index b8c962a..54bfdad 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: heat_engine
       monitoring_subscription: {get_attr: [HeatEngineBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [HeatEngineBase, role_data, logging_source]}
+      logging_groups: {get_attr: [HeatEngineBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [HeatEngineBase, role_data, config_settings]
index 0a479c9..908b9bb 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: keystone
       monitoring_subscription: {get_attr: [KeystoneServiceBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [KeystoneServiceBase, role_data, logging_source]}
+      logging_groups: {get_attr: [KeystoneServiceBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [KeystoneServiceBase, role_data, config_settings]
index 9b9e584..7fca73d 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: neutron_dhcp
       monitoring_subscription: {get_attr: [NeutronDhcpBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [NeutronDhcpBase, role_data, logging_source]}
+      logging_groups: {get_attr: [NeutronDhcpBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [NeutronDhcpBase, role_data, config_settings]
index 21ac02d..cdb87f5 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: neutron_l3
       monitoring_subscription: {get_attr: [NeutronL3Base, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [NeutronL3Base, role_data, logging_source]}
+      logging_groups: {get_attr: [NeutronL3Base, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [NeutronL3Base, role_data, config_settings]
index 8c22d42..49a31eb 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: neutron_metadata
       monitoring_subscription: {get_attr: [NeutronMetadataBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [NeutronMetadataBase, role_data, logging_source]}
+      logging_groups: {get_attr: [NeutronMetadataBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [NeutronMetadataBase, role_data, config_settings]
index 18d6073..a2bd7c8 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: neutron_ovs_agent
       monitoring_subscription: {get_attr: [NeutronOvsBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [NeutronOvsBase, role_data, logging_source]}
+      logging_groups: {get_attr: [NeutronOvsBase, role_data, logging_groups]}
       config_settings:
         get_attr: [NeutronOvsBase, role_data, config_settings]
       step_config: |
index 3d56534..b86e438 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: nova_api
       monitoring_subscription: {get_attr: [NovaApiBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [NovaApiBase, role_data, logging_source]}
+      logging_groups: {get_attr: [NovaApiBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [NovaApiBase, role_data, config_settings]
index 9d55a48..a0a766e 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: nova_conductor
       monitoring_subscription: {get_attr: [NovaConductorBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [NovaConductorBase, role_data, logging_source]}
+      logging_groups: {get_attr: [NovaConductorBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [NovaConductorBase, role_data, config_settings]
index 814505f..5d51eb4 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: nova_consoleauth
       monitoring_subscription: {get_attr: [NovaConsoleauthBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [NovaConsoleauthBase, role_data, logging_source]}
+      logging_groups: {get_attr: [NovaConsoleauthBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [NovaConsoleauthBase, role_data, config_settings]
index 2769226..8828ee1 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: nova_scheduler
       monitoring_subscription: {get_attr: [NovaSchedulerBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [NovaSchedulerBase, role_data, logging_source]}
+      logging_groups: {get_attr: [NovaSchedulerBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [NovaSchedulerBase, role_data, config_settings]
index d0c4f1d..ebe84a0 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: nova_vnc_proxy
       monitoring_subscription: {get_attr: [NovaVncproxyBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [NovaVncproxyBase, role_data, logging_source]}
+      logging_groups: {get_attr: [NovaVncproxyBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [NovaVncproxyBase, role_data, config_settings]
index 214e8db..3dfb7d9 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: sahara_api
       monitoring_subscription: {get_attr: [SaharaApiBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [SaharaApiBase, role_data, logging_source]}
+      logging_groups: {get_attr: [SaharaApiBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [SaharaApiBase, role_data, config_settings]
index aa85115..a06d11b 100644 (file)
@@ -34,6 +34,8 @@ outputs:
     value:
       service_name: sahara_engine
       monitoring_subscription: {get_attr: [SaharaEngineBase, role_data, monitoring_subscription]}
+      logging_source: {get_attr: [SaharaEngineBase, role_data, logging_source]}
+      logging_groups: {get_attr: [SaharaEngineBase, role_data, logging_groups]}
       config_settings:
         map_merge:
           - get_attr: [SaharaEngineBase, role_data, config_settings]
index a0669dc..5387529 100644 (file)
@@ -38,6 +38,13 @@ parameters:
     type: string
     default: ''
     hidden: true
+  RabbitHAQueues:
+    description:
+      The number of HA queues to be configured in rabbit. The default is 0, which will
+      be automatically overridden to CEIL(N/2), where N is the number of nodes running
+      rabbitmq.
+    default: 0
+    type: number
   MonitoringSubscriptionRabbitmq:
     default: 'overcloud-rabbitmq'
     type: string
@@ -58,7 +65,7 @@ outputs:
             dport:
               - 4369
               - 5672
-              - 35672
+              - 25672
         rabbitmq::delete_guest_user: false
         rabbitmq::wipe_db_on_cookie_change: true
         rabbitmq::port: '5672'
@@ -66,13 +73,14 @@ outputs:
         rabbitmq::repos_ensure: false
         rabbitmq_environment:
           RABBITMQ_NODENAME: "rabbit@%{::hostname}"
-          RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+          RABBITMQ_SERVER_ERL_ARGS: '"+K true +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
         rabbitmq_kernel_variables:
-          inet_dist_listen_min: '35672'
-          inet_dist_listen_max: '35672'
+          inet_dist_listen_min: '25672'
+          inet_dist_listen_max: '25672'
         rabbitmq_config_variables:
           tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
           cluster_partition_handling: 'pause_minority'
+          queue_master_locator: '<<"min-masters">>'
           loopback_users: '[]'
         rabbitmq::erlang_cookie:
           yaql:
@@ -88,5 +96,7 @@ outputs:
         # internal_api_uri -> [IP]
         # internal_api_subnet - > IP/CIDR
         rabbitmq::node_ip_address: {get_param: [ServiceNetMap, RabbitmqNetwork]}
+        rabbitmq::nr_ha_queues: {get_param: RabbitHAQueues}
+
       step_config: |
         include ::tripleo::profile::base::rabbitmq
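The RabbitHAQueues description documents the intended behaviour of the default: a value of 0 is replaced with CEIL(N/2), i.e. a bare-majority number of mirrors for N rabbitmq nodes, while any explicit value is used as-is. That override happens outside this template, but it amounts to:

    import math

    # Sketch of the default described for RabbitHAQueues: explicit values are
    # kept, and the default of 0 becomes CEIL(N/2) for N rabbitmq nodes. The
    # real override lives in the Puppet/profile code, not in this change.
    def effective_ha_queues(requested, rabbit_node_count):
        if requested > 0:
            return requested
        return int(math.ceil(rabbit_node_count / 2.0))

    for nodes in (1, 2, 3, 5):
        print(nodes, effective_ha_queues(0, nodes))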
index 7f15ca7..54e63df 100644 (file)
@@ -33,6 +33,11 @@ parameters:
   MonitoringSubscriptionSaharaApi:
     default: 'overcloud-sahara-api'
     type: string
+  SaharaApiLoggingSource:
+    type: json
+    default:
+      tag: openstack.sahara.api
+      path: /var/log/sahara/sahara-api.log
 
 resources:
   SaharaBase:
@@ -48,16 +53,14 @@ outputs:
     value:
       service_name: sahara_api
       monitoring_subscription: {get_param: MonitoringSubscriptionSaharaApi}
+      logging_source: {get_param: SaharaApiLoggingSource}
+      logging_groups:
+        - sahara
       config_settings:
         map_merge:
           - get_attr: [SaharaBase, role_data, config_settings]
           - sahara::port: {get_param: [EndpointMap, SaharaInternal, port]}
             sahara::service::api::api_workers: {get_param: SaharaWorkers}
-            sahara::keystone::auth::public_url: {get_param: [EndpointMap, SaharaPublic, uri]}
-            sahara::keystone::auth::internal_url: {get_param: [EndpointMap, SaharaInternal, uri]}
-            sahara::keystone::auth::admin_url: {get_param: [EndpointMap, SaharaAdmin, uri]}
-            sahara::keystone::auth::password: {get_param: SaharaPassword }
-            sahara::keystone::auth::region: {get_param: KeystoneRegion}
             # NOTE: bind IP is found in Heat replacing the network name with the local node IP
             # for the given network; replacement examples (eg. for internal_api):
             # internal_api -> IP
@@ -71,3 +74,19 @@ outputs:
                   - 13386
       step_config: |
         include ::tripleo::profile::base::sahara::api
+      service_config_settings:
+        keystone:
+          sahara::keystone::auth::tenant: 'service'
+          sahara::keystone::auth::public_url: {get_param: [EndpointMap, SaharaPublic, uri]}
+          sahara::keystone::auth::internal_url: {get_param: [EndpointMap, SaharaInternal, uri]}
+          sahara::keystone::auth::admin_url: {get_param: [EndpointMap, SaharaAdmin, uri]}
+          sahara::keystone::auth::password: {get_param: SaharaPassword }
+          sahara::keystone::auth::region: {get_param: KeystoneRegion}
+        mysql:
+          sahara::db::mysql::password: {get_param: SaharaPassword}
+          sahara::db::mysql::user: sahara
+          sahara::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
+          sahara::db::mysql::dbname: sahara
+          sahara::db::mysql::allowed_hosts:
+            - '%'
+            - "%{hiera('mysql_bind_host')}"
index c1ab8e8..5fc8ed6 100644 (file)
@@ -60,13 +60,6 @@ outputs:
               - '@'
               - {get_param: [EndpointMap, MysqlInternal, host]}
               - '/sahara'
-        sahara::db::mysql::password: {get_param: SaharaPassword}
-        sahara::db::mysql::user: sahara
-        sahara::db::mysql::host: {get_param: [EndpointMap, MysqlInternal, host_nobrackets]}
-        sahara::db::mysql::dbname: sahara
-        sahara::db::mysql::allowed_hosts:
-          - '%'
-          - "%{hiera('mysql_bind_host')}"
         sahara::rabbit_password: {get_param: RabbitPassword}
         sahara::rabbit_user: {get_param: RabbitUserName}
         sahara::rabbit_use_ssl: {get_param: RabbitClientUseSSL}
@@ -85,6 +78,5 @@ outputs:
           - storm
         sahara::rpc_backend: rabbit
         sahara::admin_tenant_name: 'service'
-        sahara::keystone::auth::tenant: 'service'
         sahara::db::database_db_max_retries: -1
         sahara::db::database_max_retries: -1
index 9224fd5..287c1c0 100644 (file)
@@ -21,6 +21,11 @@ parameters:
   MonitoringSubscriptionSaharaEngine:
     default: 'overcloud-sahara-engine'
     type: string
+  SaharaEngineLoggingSource:
+    type: json
+    default:
+      tag: openstack.sahara.engine
+      path: /var/log/sahara/sahara-engine.log
 
 resources:
   SaharaBase:
@@ -36,6 +41,9 @@ outputs:
     value:
       service_name: sahara_engine
       monitoring_subscription: {get_param: MonitoringSubscriptionSaharaEngine}
+      logging_source: {get_param: SaharaEngineLoggingSource}
+      logging_groups:
+        - sahara
       config_settings:
         map_merge:
           - get_attr: [SaharaBase, role_data, config_settings]
index b54a6d7..176fd23 100644 (file)
@@ -39,6 +39,9 @@ resources:
         EndpointMap: {get_param: EndpointMap}
         DefaultPasswords: {get_param: DefaultPasswords}
 
+  LoggingConfiguration:
+    type: OS::TripleO::LoggingConfiguration
+
 outputs:
   role_data:
     description: Combined Role data for this set of services.
@@ -51,12 +54,56 @@ outputs:
           data: {s_names: {get_attr: [ServiceChain, role_data, service_name]}}
       monitoring_subscriptions:
         yaql:
-          expression: list($.data.subscriptions.where($ != null))
-          data: {subscriptions: {get_attr: [ServiceChain, role_data, monitoring_subscription]}}
+          expression: list($.data.where($ != null).select($.get('monitoring_subscription')).where($ != null))
+          data: {get_attr: [ServiceChain, role_data]}
+      logging_sources:
+        # Transform the individual logging_source configuration from
+        # each service in the chain into a global list, adding some
+        # default configuration at the same time.
+        yaql:
+          expression: >
+            let(
+            default_format => $.data.default_format,
+            pos_file_path => $.data.pos_file_path,
+            sources => $.data.sources.flatten()
+            ) ->
+            $sources.where($ != null).select({
+            'type' => 'tail',
+            'tag' => $.tag,
+            'path' => $.path,
+            'format' => $.get('format', $default_format),
+            'pos_file' => $.get('pos_file', $pos_file_path + '/' + $.tag + '.pos')
+            })
+          data:
+            sources:
+              - {get_attr: [LoggingConfiguration, LoggingDefaultSources]}
+              - yaql:
+                  expression: list($.data.where($ != null).select($.get('logging_source')).where($ != null))
+                  data: {get_attr: [ServiceChain, role_data]}
+              - {get_attr: [LoggingConfiguration, LoggingExtraSources]}
+            default_format: {get_attr: [LoggingConfiguration, LoggingDefaultFormat]}
+            pos_file_path: {get_attr: [LoggingConfiguration, LoggingPosFilePath]}
+      logging_groups:
+        # Build a list of unique groups to which we should add the
+        # fluentd user.
+        yaql:
+          expression: >
+            set($.data.groups.flatten()).where($)
+          data:
+            groups:
+              - [{get_attr: [LoggingConfiguration, LoggingDefaultGroups]}]
+              - yaql:
+                  expression: list($.data.where($ != null).select($.get('logging_groups')).where($ != null))
+                  data: {get_attr: [ServiceChain, role_data]}
+              - [{get_attr: [LoggingConfiguration, LoggingExtraGroups]}]
       config_settings: {map_merge: {get_attr: [ServiceChain, role_data, config_settings]}}
       global_config_settings:
         map_merge:
           yaql:
-            expression: list($.data.configs.where($ != null))
-            data: {configs: {get_attr: [ServiceChain, role_data, global_config_settings]}}
+            expression: list($.data.where($ != null).select($.get('global_config_settings')).where($ != null))
+            data: {get_attr: [ServiceChain, role_data]}
+      service_config_settings:
+        yaql:
+          expression: $.data.where($ != null).select($.get('service_config_settings')).where($ != null).reduce($1.mergeWith($2), {})
+          data: {get_attr: [ServiceChain, role_data]}
       step_config: {list_join: ["\n", {get_attr: [ServiceChain, role_data, step_config]}]}
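The new yaql blocks walk the per-service role_data, pull out the optional logging_source and logging_groups entries, and normalize them: every source gets type tail, a format falling back to the LoggingConfiguration default, and a pos_file derived from its tag, while the groups are flattened and de-duplicated. A Python sketch of the same transformation follows; the default format and pos-file path are placeholders for whatever the OS::TripleO::LoggingConfiguration resource supplies.

    # Sketch of the yaql logic above: collect logging_source entries from
    # every service in the chain, apply defaults, and deduplicate the
    # logging_groups. Defaults below are placeholder assumptions.
    def build_logging(role_data, default_format='/(?<message>.*)/',
                      pos_file_path='/var/cache/fluentd'):
        sources, groups = [], []
        for svc in role_data:
            if not svc:
                continue
            src = svc.get('logging_source')
            if src:
                sources.append({
                    'type': 'tail',
                    'tag': src['tag'],
                    'path': src['path'],
                    'format': src.get('format', default_format),
                    'pos_file': src.get(
                        'pos_file',
                        '%s/%s.pos' % (pos_file_path, src['tag'])),
                })
            groups.extend(svc.get('logging_groups', []))
        return sources, sorted(set(groups))

    role_data = [
        {'service_name': 'nova_api',
         'logging_source': {'tag': 'openstack.nova.api',
                            'path': '/var/log/nova/nova-api.log'},
         'logging_groups': ['nova']},
        {'service_name': 'neutron_dhcp',
         'logging_source': {'tag': 'openstack.neutron.agent.dhcp',
                            'path': '/var/log/neutron/dhcp-agent.log'},
         'logging_groups': ['neutron']},
        None,
    ]
    sources, groups = build_logging(role_data)
    print(groups)
    print(sources[0]['pos_file'])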
index d7b0cd7..8b990bc 100644 (file)
@@ -66,25 +66,11 @@ outputs:
             swift::proxy::authtoken::project_name: 'service'
             swift::proxy::node_timeout: {get_param: SwiftProxyNodeTimeout}
             swift::proxy::workers: {get_param: SwiftWorkers}
-            swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
-            swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
-            swift::keystone::auth::admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]}
-            swift::keystone::auth::public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]}
-            swift::keystone::auth::internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]}
-            swift::keystone::auth::admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]}
-            swift::keystone::auth::password: {get_param: SwiftPassword}
-            swift::keystone::auth::region: {get_param: KeystoneRegion}
             tripleo.swift_proxy.firewall_rules:
               '122 swift proxy':
                 dport:
                   - 8080
                   - 13808
-            swift::keystone::auth::tenant: 'service'
-            swift::keystone::auth::configure_s3_endpoint: false
-            swift::keystone::auth::operator_roles:
-              - admin
-              - swiftoperator
-              - ResellerAdmin
             swift::proxy::keystone::operator_roles:
               - admin
               - swiftoperator
@@ -113,3 +99,19 @@ outputs:
             swift::proxy::proxy_local_net_ip: {get_param: [ServiceNetMap, SwiftProxyNetwork]}
       step_config: |
         include ::tripleo::profile::base::swift::proxy
+      service_config_settings:
+        keystone:
+          swift::keystone::auth::public_url: {get_param: [EndpointMap, SwiftPublic, uri]}
+          swift::keystone::auth::internal_url: {get_param: [EndpointMap, SwiftInternal, uri]}
+          swift::keystone::auth::admin_url: {get_param: [EndpointMap, SwiftAdmin, uri]}
+          swift::keystone::auth::public_url_s3: {get_param: [EndpointMap, SwiftS3Public, uri]}
+          swift::keystone::auth::internal_url_s3: {get_param: [EndpointMap, SwiftS3Internal, uri]}
+          swift::keystone::auth::admin_url_s3: {get_param: [EndpointMap, SwiftS3Admin, uri]}
+          swift::keystone::auth::password: {get_param: SwiftPassword}
+          swift::keystone::auth::region: {get_param: KeystoneRegion}
+          swift::keystone::auth::tenant: 'service'
+          swift::keystone::auth::configure_s3_endpoint: false
+          swift::keystone::auth::operator_roles:
+            - admin
+            - swiftoperator
+            - ResellerAdmin
index 7aa3706..eb5237f 100644 (file)
@@ -32,7 +32,7 @@ outputs:
     value:
       service_name: ntp
       config_settings:
-        ntp::ntpservers: {get_param: NtpServer}
+        ntp::servers: {get_param: NtpServer}
         tripleo.ntp.firewall_rules:
           '105 ntp':
             dport: 123
index f6ec458..7eb3990 100644 (file)
@@ -19,7 +19,7 @@ parameters:
                  via parameter_defaults in the resource registry.
     type: json
   ManageFirewall:
-    default: false
+    default: true
     description: Whether to manage IPtables rules.
     type: boolean
   PurgeFirewallRules:
index db0004c..f3b6447 100644 (file)
@@ -1,10 +1,30 @@
+# Specifies which roles (groups of nodes) will be deployed
+# Note this is used as an input to the various *.j2.yaml
+# jinja2 templates, so that they are converted into *.yaml
+# during the plan creation (via a mistral action/workflow).
+#
+# The format is a list of roles, where each role supports the following keys:
+#
+# * name: (string) mandatory, name of the role, must be unique
+#
+# * CountDefault: (number) optional, default number of nodes, defaults to 0
+#   sets the default for the {{role.name}}Count parameter in overcloud.yaml
+#
+# * HostnameFormatDefault: (string) optional default format string for hostname
+#   defaults to '%stackname%-{{role.name.lower()}}-%index%'
+#   sets the default for {{role.name}}HostnameFormat parameter in overcloud.yaml
+#
+# * ServicesDefault: (list) optional default list of services to be deployed
+#   on the role, defaults to an empty list. Sets the default for the
+#   {{role.name}}Services parameter in overcloud.yaml
+
 - name: Controller
   CountDefault: 1
-  HostnameFormatDefault: '%stackname%-controller-%index%'
   ServicesDefault:
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CephMon
     - OS::TripleO::Services::CephExternal
+    - OS::TripleO::Services::CephRgw
     - OS::TripleO::Services::CinderApi
     - OS::TripleO::Services::CinderBackup
     - OS::TripleO::Services::CinderScheduler
@@ -34,6 +54,7 @@
     - OS::TripleO::Services::NovaConductor
     - OS::TripleO::Services::MongoDb
     - OS::TripleO::Services::NovaApi
+    - OS::TripleO::Services::NovaMetadata
     - OS::TripleO::Services::NovaScheduler
     - OS::TripleO::Services::NovaConsoleauth
     - OS::TripleO::Services::NovaVncProxy
@@ -54,6 +75,9 @@
     - OS::TripleO::Services::GnocchiStatsd
     - OS::Tripleo::Services::ManilaApi
     - OS::Tripleo::Services::ManilaScheduler
+    - OS::Tripleo::Services::ManilaBackendGeneric
+    - OS::Tripleo::Services::ManilaBackendNetapp
+    - OS::Tripleo::Services::ManilaBackendCephFs
     - OS::Tripleo::Services::ManilaShare
     - OS::TripleO::Services::AodhApi
     - OS::TripleO::Services::AodhEvaluator
@@ -68,6 +92,7 @@
     - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::OpenDaylight
     - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::VipHosts
 
 - name: Compute
     - OS::TripleO::Services::NeutronSriovAgent
     - OS::TripleO::Services::OpenDaylightOvs
     - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::VipHosts
 
 - name: BlockStorage
-  CountDefault: 0
-  HostnameFormatDefault: '%stackname%-blockstorage-%index%'
   ServicesDefault:
     - OS::TripleO::Services::CACerts
-    - OS::TripleO::Services::CinderVolume
+    - OS::TripleO::Services::BlockStorageCinderVolume
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::Ntp
     - OS::TripleO::Services::Timezone
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::VipHosts
 
 - name: ObjectStorage
-  CountDefault: 0
-  HostnameFormatDefault: '%stackname%-objectstorage-%index%'
   ServicesDefault:
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::Kernel
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::VipHosts
 
 - name: CephStorage
-  CountDefault: 0
-  HostnameFormatDefault: '%stackname%-cephstorage-%index%'
   ServicesDefault:
     - OS::TripleO::Services::CACerts
     - OS::TripleO::Services::CephOSD
     - OS::TripleO::Services::TripleoPackages
     - OS::TripleO::Services::TripleoFirewall
     - OS::TripleO::Services::SensuClient
+    - OS::TripleO::Services::FluentdClient
     - OS::TripleO::Services::VipHosts
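As the header comments describe, a role entry only has to provide a name; CountDefault, HostnameFormatDefault, and ServicesDefault all have documented fallbacks that the j2 templates apply when overcloud.yaml is generated. A sketch of those fallbacks for a hypothetical extra role (not part of this change):

    # Sketch of the documented per-role defaults: CountDefault falls back to
    # 0, HostnameFormatDefault to '%stackname%-<lowercased name>-%index%',
    # and ServicesDefault to an empty list.
    def role_defaults(role):
        name = role['name']
        return {
            '%sCount' % name: role.get('CountDefault', 0),
            '%sHostnameFormat' % name: role.get(
                'HostnameFormatDefault',
                '%%stackname%%-%s-%%index%%' % name.lower()),
            '%sServices' % name: role.get('ServicesDefault', []),
        }

    # Hypothetical custom role used only for illustration.
    print(role_defaults({'name': 'Networker',
                         'ServicesDefault': ['OS::TripleO::Services::NeutronL3Agent']}))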