Merge "Create split out neutron networks via Heat."
author    Jenkins <jenkins@review.openstack.org>
          Wed, 13 May 2015 20:43:28 +0000 (20:43 +0000)
committer Gerrit Code Review <review@openstack.org>
          Wed, 13 May 2015 20:43:28 +0000 (20:43 +0000)
controller.yaml
overcloud-resource-registry-puppet.yaml
overcloud-without-mergepy.yaml
puppet/all-nodes-config.yaml
puppet/controller-config-pacemaker.yaml [new file with mode: 0644]
puppet/controller-config.yaml [new file with mode: 0644]
puppet/controller-post-puppet.yaml
puppet/controller-puppet.yaml
puppet/hieradata/controller.yaml
puppet/manifests/overcloud_controller.pp
puppet/manifests/overcloud_controller_pacemaker.pp [new file with mode: 0644]

diff --git a/controller.yaml b/controller.yaml
index d7ba1a1..bbac3a6 100644
@@ -208,6 +208,10 @@ parameters:
     description: Keystone key for signing tokens.
     type: string
     hidden: true
+  MysqlClustercheckPassword:
+    type: string
+    hidden: true
+    default: ''  # Has to be here because of the ignored empty value bug
   MysqlClusterUniquePart:
     description: A unique identifier of the MySQL cluster the controller is in.
     type: string
diff --git a/overcloud-resource-registry-puppet.yaml b/overcloud-resource-registry-puppet.yaml
index 2b9da71..744e115 100644
@@ -11,6 +11,8 @@ resource_registry:
   OS::TripleO::CephStorage: puppet/ceph-storage-puppet.yaml
   OS::TripleO::CephStorage::Net::SoftwareConfig: net-config-noop.yaml
   OS::TripleO::ControllerPostDeployment: puppet/controller-post-puppet.yaml
+  # set to puppet/controller-config-pacemaker.yaml to enable pacemaker
+  OS::TripleO::ControllerConfig: puppet/controller-config.yaml
   OS::TripleO::ComputePostDeployment: puppet/compute-post-puppet.yaml
   OS::TripleO::ObjectStoragePostDeployment: puppet/swift-storage-post.yaml
   OS::TripleO::BlockStoragePostDeployment: puppet/cinder-storage-post.yaml
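
Note: with the config implementation now resolved through the resource
registry, pacemaker can be enabled per deployment from a Heat environment
file instead of editing this mapping. A minimal override (file name
hypothetical) would be:

    resource_registry:
      OS::TripleO::ControllerConfig: puppet/controller-config-pacemaker.yaml

passed at stack creation time via `heat stack-create ... -e <file>`.
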
diff --git a/overcloud-without-mergepy.yaml b/overcloud-without-mergepy.yaml
index 70e197d..d3f8c59 100644
@@ -598,6 +598,7 @@ resources:
           MysqlClusterUniquePart: {get_attr: [MysqlClusterUniquePart, value]}
           MysqlInnodbBufferPoolSize: {get_param: MysqlInnodbBufferPoolSize}
           MysqlRootPassword: {get_attr: [MysqlRootPassword, value]}
+          MysqlClustercheckPassword: {get_attr: [MysqlClustercheckPassword, value]}
           NeutronPublicInterfaceIP: {get_param: NeutronPublicInterfaceIP}
           NeutronFlatNetworks: {get_param: NeutronFlatNetworks}
           NeutronBridgeMappings: {get_param: NeutronBridgeMappings}
@@ -760,6 +761,11 @@ resources:
     properties:
       length: 10
 
+  MysqlClustercheckPassword:
+    type: OS::Heat::RandomString
+    properties:
+      length: 10
+
   MysqlClusterUniquePart:
     type: OS::Heat::RandomString
     properties:
diff --git a/puppet/all-nodes-config.yaml b/puppet/all-nodes-config.yaml
index 35bd419..c6dcaec 100644
@@ -54,6 +54,14 @@ resources:
                   list_join:
                   - ','
                   - {get_param: controller_ips}
+                controller_node_names:
+                  list_join:
+                  - ','
+                  - {get_param: controller_names}
+                galera_node_names:
+                  list_join:
+                  - ','
+                  - {get_param: controller_names}
                 rabbit_node_ips:
                   list_join:
                   - ','
diff --git a/puppet/controller-config-pacemaker.yaml b/puppet/controller-config-pacemaker.yaml
new file mode 100644
index 0000000..4cec83a
--- /dev/null
@@ -0,0 +1,23 @@
+heat_template_version: 2014-10-16
+
+description: >
+  A software config which runs manifests/overcloud_controller_pacemaker.pp
+
+resources:
+
+  ControllerPuppetConfigImpl:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: puppet
+      options:
+        enable_hiera: True
+        enable_facter: False
+      outputs:
+      - name: result
+      config:
+        get_file: manifests/overcloud_controller_pacemaker.pp
+
+outputs:
+  OS::stack_id:
+    description: The software config which runs overcloud_controller_pacemaker.pp
+    value: {get_resource: ControllerPuppetConfigImpl}
diff --git a/puppet/controller-config.yaml b/puppet/controller-config.yaml
new file mode 100644
index 0000000..34c68ba
--- /dev/null
@@ -0,0 +1,23 @@
+heat_template_version: 2014-10-16
+
+description: >
+  A software config which runs manifests/overcloud_controller.pp
+
+resources:
+
+  ControllerPuppetConfigImpl:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: puppet
+      options:
+        enable_hiera: True
+        enable_facter: False
+      outputs:
+      - name: result
+      config:
+        get_file: manifests/overcloud_controller.pp
+
+outputs:
+  OS::stack_id:
+    description: The software config which runs overcloud_controller.pp
+    value: {get_resource: ControllerPuppetConfigImpl}
diff --git a/puppet/controller-post-puppet.yaml b/puppet/controller-post-puppet.yaml
index debd715..7aab7f5 100644
@@ -10,16 +10,7 @@ parameters:
 resources:
 
   ControllerPuppetConfig:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: puppet
-      options:
-        enable_hiera: True
-        enable_facter: False
-      outputs:
-      - name: result
-      config:
-        get_file: manifests/overcloud_controller.pp
+    type: OS::TripleO::ControllerConfig
 
   # Step through a series of Puppet runs using the same manifest.
   # NOTE(dprince): Heat breakpoints would make for a really cool way to step
@@ -72,10 +63,19 @@ resources:
       input_values:
         step: 3
 
+  ControllerDeploymentOvercloudServices_Step5:
+    type: OS::Heat::StructuredDeployments
+    depends_on: ControllerDeploymentOvercloudServices_Step4
+    properties:
+      servers: {get_param: servers}
+      config: {get_resource: ControllerPuppetConfig}
+      input_values:
+        step: 4
+
   # Note, this should come last, so use depends_on to ensure
   # this is created after any other resources.
   ExtraConfig:
-    depends_on: ControllerDeploymentOvercloudServices_Step4
+    depends_on: ControllerDeploymentOvercloudServices_Step5
     type: OS::TripleO::NodeExtraConfigPost
     properties:
         servers: {get_param: servers}
diff --git a/puppet/controller-puppet.yaml b/puppet/controller-puppet.yaml
index 64e7fc7..09a26d5 100644
@@ -226,6 +226,10 @@ parameters:
     type: string
     hidden: true
     default: ''  # Has to be here because of the ignored empty value bug
+  MysqlClustercheckPassword:
+    type: string
+    hidden: true
+    default: ''  # Has to be here because of the ignored empty value bug
   NeutronBridgeMappings:
     description: >
       The OVS logical->physical bridge mappings to use. See the Neutron
@@ -553,6 +557,7 @@ resources:
         enable_swift_storage: {get_param: EnableSwiftStorage}
         mysql_innodb_buffer_pool_size: {get_param: MysqlInnodbBufferPoolSize}
         mysql_root_password: {get_param: MysqlRootPassword}
+        mysql_clustercheck_password: {get_param: MysqlClustercheckPassword}
         mysql_cluster_name:
           str_replace:
             template: tripleo-CLUSTER
@@ -775,6 +780,7 @@ resources:
                 mysql_innodb_buffer_pool_size: {get_input: mysql_innodb_buffer_pool_size}
                 mysql::server::root_password: {get_input: mysql_root_password}
                 mysql_cluster_name: {get_input: mysql_cluster_name}
+                mysql_clustercheck_password: {get_input: mysql_clustercheck_password}
 
                 # Neutron
                 neutron::bind_host: {get_input: controller_host}
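
For reference, the clustercheck password plumbing across these templates:

    MysqlClustercheckPassword                        (OS::Heat::RandomString, overcloud-without-mergepy.yaml)
      -> {get_input: mysql_clustercheck_password}    (controller-puppet.yaml, above)
      -> hiera('mysql_clustercheck_password')        (overcloud_controller_pacemaker.pp, below)
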
diff --git a/puppet/hieradata/controller.yaml b/puppet/hieradata/controller.yaml
index 7648845..02b7c42 100644
@@ -5,11 +5,21 @@ nova::consoleauth::enabled: true
 nova::vncproxy::enabled: true
 nova::scheduler::enabled: true
 
+# rabbitmq
 rabbitmq::delete_guest_user: false
 rabbitmq::wipe_db_on_cookie_change: true
 rabbitmq::port: '5672'
 rabbitmq::package_source: undef
 rabbitmq::repos_ensure: false
+rabbitmq_environment:
+  RABBITMQ_NODENAME: "rabbit@%{::hostname}"
+  RABBITMQ_SERVER_ERL_ARGS: '"+K true +A30 +P 1048576 -kernel inet_default_connect_options [{nodelay,true},{raw,6,18,<<5000:64/native>>}] -kernel inet_default_listen_options [{raw,6,18,<<5000:64/native>>}]"'
+rabbitmq_kernel_variables:
+  inet_dist_listen_min: '35672'
+  inet_dist_listen_max: '35672'
+rabbitmq_config_variables:
+  tcp_listen_options: '[binary, {packet, raw}, {reuseaddr, true}, {backlog, 128}, {nodelay, true}, {exit_on_close, false}, {keepalive, true}]'
+  cluster_partition_handling: 'pause_minority'
 
 mongodb::server::replset: tripleo
 
@@ -84,6 +94,10 @@ horizon::allowed_hosts: '*'
 
 mysql::server::manage_config_file: true
 
+
+tripleo::loadbalancer::galera_master_ip: "%{hiera('bootstrap_nodeid_ip')}"
+tripleo::loadbalancer::galera_master_hostname: "%{hiera('bootstrap_nodeid')}"
+
 tripleo::loadbalancer::keystone_admin: true
 tripleo::loadbalancer::keystone_public: true
 tripleo::loadbalancer::neutron: true
diff --git a/puppet/manifests/overcloud_controller.pp b/puppet/manifests/overcloud_controller.pp
index 6079097..1b088a8 100644
@@ -24,53 +24,13 @@ if !str2bool(hiera('enable_package_install', 'false')) {
   }
 }
 
-$enable_pacemaker = str2bool(hiera('enable_pacemaker'))
-$enable_keepalived = !$enable_pacemaker
-if $::hostname == downcase(hiera('bootstrap_nodeid')) {
-  $pacemaker_master = true
-} else {
-  $pacemaker_master = false
-}
-
 if hiera('step') >= 1 {
 
   $controller_node_ips = split(hiera('controller_node_ips'), ',')
 
   class { '::tripleo::loadbalancer' :
     controller_hosts => $controller_node_ips,
-    manage_vip       => $enable_keepalived,
-  }
-
-  if $enable_pacemaker {
-    $pacemaker_cluster_members = regsubst(hiera('controller_node_ips'), ',', ' ', 'G')
-    user { 'hacluster':
-     ensure => present,
-    } ->
-    class { '::pacemaker':
-      hacluster_pwd => hiera('hacluster_pwd'),
-    } ->
-    class { '::pacemaker::corosync':
-      cluster_members => $pacemaker_cluster_members,
-      setup_cluster   => $pacemaker_master,
-    }
-    class { '::pacemaker::stonith':
-      disable => true,
-    }
-    if $pacemaker_master {
-      $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
-      pacemaker::resource::ip { 'control_vip':
-        ip_address => $control_vip,
-      }
-      $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
-      pacemaker::resource::ip { 'public_vip':
-        ip_address => $public_vip,
-      }
-      pacemaker::resource::systemd { 'haproxy':
-        clone => true,
-      }
-    }
-
-    Class['::pacemaker::corosync'] -> Pacemaker::Resource::Systemd <| |>
+    manage_vip       => true,
   }
 
 }
@@ -85,15 +45,7 @@ if hiera('step') >= 2 {
   if downcase(hiera('ceilometer_backend')) == 'mongodb' {
     include ::mongodb::globals
 
-    if $enable_pacemaker {
-      $mongodb_service_ensure = undef
-    } else {
-      $mongodb_service_ensure = 'running'
-    }
-
-    class {'::mongodb::server' :
-      service_ensure => $mongodb_service_ensure,
-    }
+    include ::mongodb::server
     $mongo_node_ips = split(hiera('mongo_node_ips'), ',')
     $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
     $mongo_node_string = join($mongo_node_ips_with_port, ',')
@@ -101,26 +53,6 @@ if hiera('step') >= 2 {
     $mongodb_replset = hiera('mongodb::server::replset')
     $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
     if downcase(hiera('bootstrap_nodeid')) == $::hostname {
-
-      if $enable_pacemaker  {
-        pacemaker::resource::systemd { 'mongod' :
-          options => "op start timeout=120s",
-          clone   => true,
-          before  => Exec['mongodb-ready'],
-        }
-        # NOTE (spredzy) : The replset can only be run
-        # once all the nodes have joined the cluster.
-        $mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ')
-        exec { 'mongodb-ready' :
-          command   => $mongodb_cluster_ready_command,
-          timeout   => 600,
-          tries     => 60,
-          try_sleep => 10,
-          before    => Mongodb_replset[$mongodb_replset],
-        }
-
-      }
-
       mongodb_replset { $mongodb_replset :
         members => $mongo_node_ips_with_port,
       }
@@ -226,46 +158,24 @@ if hiera('step') >= 2 {
     }
   }
 
-  if $enable_pacemaker {
-    # the module ignores erlang_cookie if cluster_config is false
-    file { '/var/lib/rabbitmq/.erlang.cookie':
-      ensure  => 'present',
-      owner   => 'rabbitmq',
-      group   => 'rabbitmq',
-      mode    => '0400',
-      content => hiera('rabbitmq::erlang_cookie'),
-      replace => true,
-    } ->
+  $rabbit_nodes = split(hiera('rabbit_node_ips'), ',')
+  if count($rabbit_nodes) > 1 {
     class { '::rabbitmq':
-      service_manage        => false,
-      environment_variables => {
-        'RABBITMQ_NODENAME' => "rabbit@$::hostname",
-      },
+      config_cluster          => true,
+      cluster_nodes           => $rabbit_nodes,
+      tcp_keepalive           => false,
+      config_kernel_variables => hiera('rabbitmq_kernel_variables'),
+      config_variables        => hiera('rabbitmq_config_variables'),
+      environment_variables   => hiera('rabbitmq_environment'),
     }
-    if $pacemaker_master {
-      pacemaker::resource::ocf { 'rabbitmq':
-        resource_name => 'heartbeat:rabbitmq-cluster',
-        options       => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
-        clone         => true,
-        require       => Class['::rabbitmq'],
-      }
+    rabbitmq_policy { 'ha-all@/':
+      pattern    => '^(?!amq\.).*',
+      definition => {
+        'ha-mode' => 'all',
+      },
     }
   } else {
-    $rabbit_nodes = split(hiera('rabbit_node_ips'), ',')
-    if count($rabbit_nodes) > 1 {
-      class { '::rabbitmq':
-        config_cluster  => true,
-        cluster_nodes   => $rabbit_nodes,
-      }
-      rabbitmq_policy { 'ha-all@/':
-        pattern    => '^(?!amq\.).*',
-        definition => {
-          'ha-mode' => 'all',
-        },
-      }
-    } else {
-      include ::rabbitmq
-    }
+    include ::rabbitmq
   }
 
   # pre-install swift here so we can build rings
diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp
new file mode 100644
index 0000000..d9618e8
--- /dev/null
@@ -0,0 +1,591 @@
+# Copyright 2015 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+if !str2bool(hiera('enable_package_install', 'false')) {
+  case $::osfamily {
+    'RedHat': {
+      Package { provider => 'norpm' } # provided by tripleo-puppet
+    }
+    default: {
+      warning('enable_package_install option not supported.')
+    }
+  }
+}
+
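+# The node whose hostname matches bootstrap_nodeid acts as the pacemaker
+# master: it alone runs setup_cluster and creates the cluster resources
+# (VIPs, haproxy, galera, rabbitmq) below.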
+if $::hostname == downcase(hiera('bootstrap_nodeid')) {
+  $pacemaker_master = true
+} else {
+  $pacemaker_master = false
+}
+
+if hiera('step') >= 1 {
+
+  $controller_node_ips = split(hiera('controller_node_ips'), ',')
+  $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
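+  # haproxy runs on every controller, but with manage_vip => false the
+  # VIPs are left to the pacemaker IPaddr2 resources created below rather
+  # than to keepalived.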
+  class { '::tripleo::loadbalancer' :
+    controller_hosts       => $controller_node_ips,
+    controller_hosts_names => $controller_node_names,
+    manage_vip             => false,
+  }
+
+  $pacemaker_cluster_members = regsubst(hiera('controller_node_ips'), ',', ' ', 'G')
+  user { 'hacluster':
+   ensure => present,
+  } ->
+  class { '::pacemaker':
+    hacluster_pwd => hiera('hacluster_pwd'),
+  } ->
+  class { '::pacemaker::corosync':
+    cluster_members => $pacemaker_cluster_members,
+    setup_cluster   => $pacemaker_master,
+  }
+  class { '::pacemaker::stonith':
+    disable => true,
+  }
+  if $pacemaker_master {
+    $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
+    pacemaker::resource::ip { 'control_vip':
+      ip_address => $control_vip,
+    }
+    $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
+    pacemaker::resource::ip { 'public_vip':
+      ip_address => $public_vip,
+    }
+    pacemaker::resource::systemd { 'haproxy':
+      clone => true,
+    }
+  }
+
+  Class['::pacemaker::corosync'] -> Pacemaker::Resource::Systemd <| |>
+
+}
+
+if hiera('step') >= 2 {
+
+  if count(hiera('ntp::servers')) > 0 {
+    include ::ntp
+  }
+
+  # MongoDB
+  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
+    include ::mongodb::globals
+
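+    # Puppet installs and configures mongod but does not manage the
+    # service (service_ensure => undef); the cloned pacemaker resource
+    # created below starts it on all nodes.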
+    class { '::mongodb::server' :
+      service_ensure => undef,
+    }
+    $mongo_node_ips = split(hiera('mongo_node_ips'), ',')
+    $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
+    $mongo_node_string = join($mongo_node_ips_with_port, ',')
+
+    $mongodb_replset = hiera('mongodb::server::replset')
+    $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
+    if downcase(hiera('bootstrap_nodeid')) == $::hostname {
+
+      pacemaker::resource::systemd { 'mongod' :
+        options => 'op start timeout=120s',
+        clone   => true,
+        before  => Exec['mongodb-ready'],
+      }
+      # NOTE (spredzy) : The replset can only be run
+      # once all the nodes have joined the cluster.
+      $mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ')
+      exec { 'mongodb-ready' :
+        command   => $mongodb_cluster_ready_command,
+        timeout   => 600,
+        tries     => 60,
+        try_sleep => 10,
+        before    => Mongodb_replset[$mongodb_replset],
+      }
+
+      mongodb_replset { $mongodb_replset :
+        members => $mongo_node_ips_with_port,
+      }
+    }
+  }
+
+  # Redis
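+  # The bootstrap node comes up as the initial redis master; the other
+  # controllers replicate from it, with sentinel handling failover when
+  # more than one node is deployed.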
+  $redis_node_ips = split(hiera('redis_node_ips'), ',')
+  $redis_master_hostname = downcase(hiera('bootstrap_nodeid'))
+
+  if $redis_master_hostname == $::hostname {
+    $slaveof = undef
+  } else {
+    $slaveof = "${redis_master_hostname} 6379"
+  }
+  class {'::redis' :
+    slaveof => $slaveof,
+  }
+
+  if count($redis_node_ips) > 1 {
+    Class['::tripleo::redis_notification'] -> Service['redis-sentinel']
+    include ::redis::sentinel
+    class {'::tripleo::redis_notification' :
+      haproxy_monitor_ip => hiera('tripleo::loadbalancer::controller_virtual_ip'),
+    }
+  }
+
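+  # Galera: puppet lays down the configuration only (service_manage =>
+  # false); cluster bootstrap and service management are delegated to the
+  # heartbeat:galera pacemaker resource agent.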
+  if str2bool(hiera('enable_galera', 'true')) {
+    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
+  } else {
+    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
+  }
+  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
+  $galera_nodes_count = count(split($galera_nodes, ','))
+  $clustercheck_password = hiera('mysql_clustercheck_password')
+  $mysql_root_password = hiera('mysql::server::root_password')
+
+  $mysqld_options = {
+    'mysqld' => {
+      'skip-name-resolve'             => '1',
+      'binlog_format'                 => 'ROW',
+      'default-storage-engine'        => 'innodb',
+      'innodb_autoinc_lock_mode'      => '2',
+      'innodb_locks_unsafe_for_binlog'=> '1',
+      'query_cache_size'              => '0',
+      'query_cache_type'              => '0',
+      'bind-address'                  => hiera('controller_host'),
+      'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
+      'wsrep_cluster_name'            => 'galera_cluster',
+      'wsrep_slave_threads'           => '1',
+      'wsrep_certify_nonPK'           => '1',
+      'wsrep_max_ws_rows'             => '131072',
+      'wsrep_max_ws_size'             => '1073741824',
+      'wsrep_debug'                   => '0',
+      'wsrep_convert_LOCK_to_trx'     => '0',
+      'wsrep_retry_autocommit'        => '1',
+      'wsrep_auto_increment_control'  => '1',
+      'wsrep_drupal_282555_workaround'=> '0',
+      'wsrep_causal_reads'            => '0',
+      'wsrep_notify_cmd'              => '',
+      'wsrep_sst_method'              => 'rsync',
+    }
+  }
+
+  class { '::mysql::server':
+    create_root_user   => false,
+    create_root_my_cnf => false,
+    config_file        => $mysql_config_file,
+    override_options   => $mysqld_options,
+    service_manage     => false,
+  }
+
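+  # Only the bootstrap node creates the galera resource and, once the
+  # cluster answers, the clustercheck user and the schema creation and
+  # db syncs gated by $sync_db.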
+  if $pacemaker_master {
+    $sync_db = true
+
+    pacemaker::resource::ocf { 'galera' :
+      resource_name => 'heartbeat:galera',
+      options       => "enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}' meta master-max=${galera_nodes_count} ordered=true op promote timeout=300s on-fail=block --master",
+      require       => Class['::mysql::server'],
+      before        => Exec['galera-ready'],
+    }
+
+    mysql_user { 'clustercheckuser@localhost' :
+      password_hash => mysql_password($clustercheck_password),
+      require       => Exec['galera-ready'],
+    }
+  } else {
+    $sync_db = false
+  }
+
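+  # Poll until this node reports read_only OFF, i.e. galera has finished
+  # state transfer and accepts writes.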
+  exec { 'galera-ready' :
+    command     => '/bin/mysql -e "SHOW GLOBAL VARIABLES LIKE \'read_only\'" | /bin/grep -i off',
+    timeout     => 3600,
+    tries       => 60,
+    try_sleep   => 60,
+    environment => 'HOME=/root',
+    require     => Class['::mysql::server'],
+  }
+
+  file { '/etc/sysconfig/clustercheck' :
+    ensure  => file,
+    content => "MYSQL_USERNAME=clustercheckuser\nMYSQL_PASSWORD=${clustercheck_password}\nMYSQL_HOST=localhost\n",
+    require => Exec['galera-ready'],
+  }
+
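+  # Expose clustercheck on tcp/9200 so the load balancer can health-check
+  # each galera node over HTTP.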
+  xinetd::service { 'galera-monitor' :
+    port           => '9200',
+    server         => '/usr/bin/clustercheck',
+    per_source     => 'UNLIMITED',
+    log_on_success => '',
+    log_on_failure => 'HOST',
+    flags          => 'REUSE',
+    service_type   => 'UNLISTED',
+    user           => 'root',
+    group          => 'root',
+    require        => File['/etc/sysconfig/clustercheck'],
+  }
+
+  # FIXME: this should only occur on the bootstrap host (ditto for db syncs)
+  # Create all the database schemas
+  # Example DSN format: mysql://user:password@host/dbname
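+  # (splitting that DSN on '[@:/?]' yields ['mysql', '', '', user, password,
+  # host, dbname], hence the [3]..[6] indexes below)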
+  if $sync_db {
+    $allowed_hosts = ['%',hiera('controller_host')]
+    $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]')
+    class { 'keystone::db::mysql':
+      user          => $keystone_dsn[3],
+      password      => $keystone_dsn[4],
+      host          => $keystone_dsn[5],
+      dbname        => $keystone_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]')
+    class { 'glance::db::mysql':
+      user          => $glance_dsn[3],
+      password      => $glance_dsn[4],
+      host          => $glance_dsn[5],
+      dbname        => $glance_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]')
+    class { 'nova::db::mysql':
+      user          => $nova_dsn[3],
+      password      => $nova_dsn[4],
+      host          => $nova_dsn[5],
+      dbname        => $nova_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]')
+    class { 'neutron::db::mysql':
+      user          => $neutron_dsn[3],
+      password      => $neutron_dsn[4],
+      host          => $neutron_dsn[5],
+      dbname        => $neutron_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]')
+    class { 'cinder::db::mysql':
+      user          => $cinder_dsn[3],
+      password      => $cinder_dsn[4],
+      host          => $cinder_dsn[5],
+      dbname        => $cinder_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]')
+    class { 'heat::db::mysql':
+      user          => $heat_dsn[3],
+      password      => $heat_dsn[4],
+      host          => $heat_dsn[5],
+      dbname        => $heat_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    if downcase(hiera('ceilometer_backend')) == 'mysql' {
+      $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]')
+      class { 'ceilometer::db::mysql':
+        user          => $ceilometer_dsn[3],
+        password      => $ceilometer_dsn[4],
+        host          => $ceilometer_dsn[5],
+        dbname        => $ceilometer_dsn[6],
+        allowed_hosts => $allowed_hosts,
+        require       => Exec['galera-ready'],
+      }
+    }
+  }
+
+  # the module ignores erlang_cookie if cluster_config is false
+  file { '/var/lib/rabbitmq/.erlang.cookie':
+    ensure  => 'present',
+    owner   => 'rabbitmq',
+    group   => 'rabbitmq',
+    mode    => '0400',
+    content => hiera('rabbitmq::erlang_cookie'),
+    replace => true,
+  } ->
+  class { '::rabbitmq':
+    service_manage          => false,
+    tcp_keepalive           => false,
+    config_kernel_variables => hiera('rabbitmq_kernel_variables'),
+    config_variables        => hiera('rabbitmq_config_variables'),
+    environment_variables   => hiera('rabbitmq_environment'),
+  }
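+  # rabbitmq itself is unmanaged by systemd (service_manage => false); the
+  # rabbitmq-cluster OCF agent assembles the cluster and, via set_policy,
+  # applies the ha-all mirroring policy on the bootstrap node.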
+  if $pacemaker_master {
+    pacemaker::resource::ocf { 'rabbitmq':
+      resource_name => 'heartbeat:rabbitmq-cluster',
+      options       => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
+      clone         => true,
+      require       => Class['::rabbitmq'],
+    }
+  }
+
+  # pre-install swift here so we can build rings
+  include ::swift
+
+  $cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false)
+  $enable_ceph = $cinder_enable_rbd_backend
+
+  if $enable_ceph {
+    class { 'ceph::profile::params':
+      mon_initial_members => downcase(hiera('ceph_mon_initial_members'))
+    }
+    include ::ceph::profile::mon
+  }
+
+  if str2bool(hiera('enable_ceph_storage', 'false')) {
+    include ::ceph::profile::client
+    include ::ceph::profile::osd
+  }
+
+} #END STEP 2
+
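+# API services are configured on the bootstrap node first (step 3) and on
+# the remaining controllers one step later (step 4), so one-time work such
+# as the db syncs runs exactly once before the other nodes converge.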
+if (hiera('step') >= 3 and $::hostname == downcase(hiera('bootstrap_nodeid')))
+   or hiera('step') >= 4 {
+
+  include ::keystone
+
+  #TODO: need a cleanup-keystone-tokens.sh solution here
+  keystone_config {
+    'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
+  }
+  file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
+    ensure  => 'directory',
+    owner   => 'keystone',
+    group   => 'keystone',
+    require => Package['keystone'],
+  }
+  file { '/etc/keystone/ssl/certs/signing_cert.pem':
+    content => hiera('keystone_signing_certificate'),
+    owner   => 'keystone',
+    group   => 'keystone',
+    notify  => Service['keystone'],
+    require => File['/etc/keystone/ssl/certs'],
+  }
+  file { '/etc/keystone/ssl/private/signing_key.pem':
+    content => hiera('keystone_signing_key'),
+    owner   => 'keystone',
+    group   => 'keystone',
+    notify  => Service['keystone'],
+    require => File['/etc/keystone/ssl/private'],
+  }
+  file { '/etc/keystone/ssl/certs/ca.pem':
+    content => hiera('keystone_ca_certificate'),
+    owner   => 'keystone',
+    group   => 'keystone',
+    notify  => Service['keystone'],
+    require => File['/etc/keystone/ssl/certs'],
+  }
+
+  $glance_backend = downcase(hiera('glance_backend', 'swift'))
+  case $glance_backend {
+      swift: { $glance_store = 'glance.store.swift.Store' }
+      file: { $glance_store = 'glance.store.filesystem.Store' }
+      rbd: { $glance_store = 'glance.store.rbd.Store' }
+      default: { fail('Unrecognized glance_backend parameter.') }
+  }
+
+  # TODO: notifications, scrubber, etc.
+  include ::glance
+  class { 'glance::api':
+    known_stores => [$glance_store]
+  }
+  class { '::glance::registry' :
+    sync_db => $sync_db,
+  }
+  include join(['::glance::backend::', $glance_backend])
+
+  class { 'nova':
+    glance_api_servers     => join([hiera('glance_protocol'), '://', hiera('controller_virtual_ip'), ':', hiera('glance_port')]),
+  }
+
+  class { '::nova::api' :
+    sync_db => $sync_db,
+  }
+  include ::nova::cert
+  include ::nova::conductor
+  include ::nova::consoleauth
+  include ::nova::network::neutron
+  include ::nova::vncproxy
+  include ::nova::scheduler
+
+  include ::neutron
+  class { '::neutron::server' :
+    sync_db => $sync_db,
+  }
+  include ::neutron::agents::dhcp
+  include ::neutron::agents::l3
+
+  file { '/etc/neutron/dnsmasq-neutron.conf':
+    content => hiera('neutron_dnsmasq_options'),
+    owner   => 'neutron',
+    group   => 'neutron',
+    notify  => Service['neutron-dhcp-service'],
+    require => Package['neutron'],
+  }
+
+  class { 'neutron::plugins::ml2':
+    flat_networks        => split(hiera('neutron_flat_networks'), ','),
+    tenant_network_types => [hiera('neutron_tenant_network_type')],
+    type_drivers         => [hiera('neutron_tenant_network_type')],
+  }
+
+  class { 'neutron::agents::ml2::ovs':
+    bridge_mappings  => split(hiera('neutron_bridge_mappings'), ','),
+    tunnel_types     => split(hiera('neutron_tunnel_types'), ','),
+  }
+
+  class { 'neutron::agents::metadata':
+    auth_url => join(['http://', hiera('controller_virtual_ip'), ':35357/v2.0']),
+  }
+
+  Service['neutron-server'] -> Service['neutron-dhcp-service']
+  Service['neutron-server'] -> Service['neutron-l3']
+  Service['neutron-server'] -> Service['neutron-ovs-agent-service']
+  Service['neutron-server'] -> Service['neutron-metadata']
+
+  include ::cinder
+  include ::cinder::api
+  include ::cinder::glance
+  include ::cinder::scheduler
+  include ::cinder::volume
+  class {'cinder::setup_test_volume':
+    size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
+  }
+
+  $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
+  if $cinder_enable_iscsi {
+    $cinder_iscsi_backend = 'tripleo_iscsi'
+
+    cinder::backend::iscsi { $cinder_iscsi_backend :
+      iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
+      iscsi_helper     => hiera('cinder_iscsi_helper'),
+    }
+  }
+
+  if $enable_ceph {
+
+    Ceph_pool {
+      pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
+      pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
+      size    => hiera('ceph::profile::params::osd_pool_default_size'),
+    }
+
+    $ceph_pools = hiera('ceph_pools')
+    ceph::pool { $ceph_pools : }
+  }
+
+  if $cinder_enable_rbd_backend {
+    $cinder_rbd_backend = 'tripleo_ceph'
+
+    cinder_config {
+      "${cinder_rbd_backend}/host": value => 'hostgroup';
+    }
+
+    cinder::backend::rbd { $cinder_rbd_backend :
+      rbd_pool        => 'volumes',
+      rbd_user        => 'openstack',
+      rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
+      require         => Ceph::Pool['volumes'],
+    }
+  }
+
+  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend])
+  class { '::cinder::backends' :
+    enabled_backends => $cinder_enabled_backends,
+  }
+
+  # swift proxy
+  include ::memcached
+  include ::swift::proxy
+  include ::swift::proxy::proxy_logging
+  include ::swift::proxy::healthcheck
+  include ::swift::proxy::cache
+  include ::swift::proxy::keystone
+  include ::swift::proxy::authtoken
+  include ::swift::proxy::staticweb
+  include ::swift::proxy::ceilometer
+  include ::swift::proxy::ratelimit
+  include ::swift::proxy::catch_errors
+  include ::swift::proxy::tempurl
+  include ::swift::proxy::formpost
+
+  # swift storage
+  if str2bool(hiera('enable_swift_storage', 'true')) {
+    class {'swift::storage::all':
+      mount_check => str2bool(hiera('swift_mount_check'))
+    }
+    if(!defined(File['/srv/node'])) {
+      file { '/srv/node':
+        ensure  => directory,
+        owner   => 'swift',
+        group   => 'swift',
+        require => Package['openstack-swift'],
+      }
+    }
+    $swift_components = ['account', 'container', 'object']
+    swift::storage::filter::recon { $swift_components : }
+    swift::storage::filter::healthcheck { $swift_components : }
+  }
+
+  # Ceilometer
+  $ceilometer_backend = downcase(hiera('ceilometer_backend'))
+  case $ceilometer_backend {
+    /mysql/ : {
+      $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
+    }
+    default : {
+      $ceilometer_database_connection = $ceilometer_mongodb_conn_string
+    }
+  }
+  include ::ceilometer
+  include ::ceilometer::api
+  include ::ceilometer::agent::notification
+  include ::ceilometer::agent::central
+  include ::ceilometer::alarm::notifier
+  include ::ceilometer::alarm::evaluator
+  include ::ceilometer::expirer
+  include ::ceilometer::collector
+  class { '::ceilometer::db' :
+    database_connection => $ceilometer_database_connection,
+    sync_db             => $sync_db,
+  }
+  class { 'ceilometer::agent::auth':
+    auth_url => join(['http://', hiera('controller_virtual_ip'), ':5000/v2.0']),
+  }
+
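+  # Stagger ceilometer-expirer across controllers with a random sleep of
+  # up to 24h so the nodes do not all expire samples simultaneously.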
+  Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
+
+  # Heat
+  class { '::heat' :
+    sync_db => $sync_db,
+  }
+  include ::heat::api
+  include ::heat::api_cfn
+  include ::heat::api_cloudwatch
+  include ::heat::engine
+
+  # Horizon
+  $vhost_params = { add_listen => false }
+  class { 'horizon':
+    cache_server_ip    => split(hiera('memcache_node_ips', '127.0.0.1'), ','),
+    vhost_extra_params => $vhost_params,
+  }
+
+  $snmpd_user = hiera('snmpd_readonly_user_name')
+  snmp::snmpv3_user { $snmpd_user:
+    authtype => 'MD5',
+    authpass => hiera('snmpd_readonly_user_password'),
+  }
+  class { 'snmp':
+    agentaddress => ['udp:161','udp6:[::1]:161'],
+    snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
+  }
+
+} #END STEP 3