Fix Redis bind setting to use redis_network
[apex-tripleo-heat-templates.git] / puppet / manifests / overcloud_controller_pacemaker.pp
index ff1ad88..1f91583 100644 (file)
 # License for the specific language governing permissions and limitations
 # under the License.
 
+Pcmk_resource <| |> {
+  tries     => 10,
+  try_sleep => 3,
+}
+
 if !str2bool(hiera('enable_package_install', 'false')) {
   case $::osfamily {
     'RedHat': {
@@ -26,20 +31,36 @@ if !str2bool(hiera('enable_package_install', 'false')) {
 
 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
   $pacemaker_master = true
+  $sync_db = true
 } else {
   $pacemaker_master = false
+  $sync_db = false
 }
 
+# When to start and enable services which haven't been Pacemakerized
+# FIXME: remove when we start all OpenStack services using Pacemaker
+# (occurrences of this variable will be gradually replaced with false)
+$non_pcmk_start = hiera('step') >= 4
+
 if hiera('step') >= 1 {
 
-  $controller_node_ips = split(hiera('controller_node_ips'), ',')
+  create_resources(sysctl::value, hiera('sysctl_settings'), {})
 
+  if count(hiera('ntp::servers')) > 0 {
+    include ::ntp
+  }
+
+  $controller_node_ips = split(hiera('controller_node_ips'), ',')
+  $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
   class { '::tripleo::loadbalancer' :
-    controller_hosts => $controller_node_ips,
-    manage_vip       => false,
+    controller_hosts       => $controller_node_ips,
+    controller_hosts_names => $controller_node_names,
+    redis                  => false,
+    manage_vip             => false,
+    haproxy_service_manage => false,
   }
 
-  $pacemaker_cluster_members = regsubst(hiera('controller_node_ips'), ',', ' ', 'G')
+  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
   user { 'hacluster':
    ensure => present,
   } ->
@@ -53,7 +74,101 @@ if hiera('step') >= 1 {
   class { '::pacemaker::stonith':
     disable => true,
   }
+
+  # Only configure RabbitMQ in this step, don't start it yet to
+  # avoid races where non-master nodes attempt to start without
+  # config (eg. binding on 0.0.0.0)
+  # The module ignores erlang_cookie if cluster_config is false
+  class { '::rabbitmq':
+    service_manage          => false,
+    tcp_keepalive           => false,
+    config_kernel_variables => hiera('rabbitmq_kernel_variables'),
+    config_variables        => hiera('rabbitmq_config_variables'),
+    environment_variables   => hiera('rabbitmq_environment'),
+  } ->
+  file { '/var/lib/rabbitmq/.erlang.cookie':
+    ensure  => 'present',
+    owner   => 'rabbitmq',
+    group   => 'rabbitmq',
+    mode    => '0400',
+    content => hiera('rabbitmq::erlang_cookie'),
+    replace => true,
+  }
+
+  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
+    include ::mongodb::globals
+    class { '::mongodb::server' :
+      service_manage => false,
+    }
+  }
+
+  # Memcached
+  class {'::memcached' :
+    service_manage => false,
+  }
+
+  # Redis
+  class { '::redis' :
+    service_manage => false,
+    notify_service => false,
+  }
+
+  # Galera
+  if str2bool(hiera('enable_galera', 'true')) {
+    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
+  } else {
+    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
+  }
+  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
+  $galera_nodes_count = count(split($galera_nodes, ','))
+
+  $mysqld_options = {
+    'mysqld' => {
+      'skip-name-resolve'             => '1',
+      'binlog_format'                 => 'ROW',
+      'default-storage-engine'        => 'innodb',
+      'innodb_autoinc_lock_mode'      => '2',
+      'innodb_locks_unsafe_for_binlog'=> '1',
+      'query_cache_size'              => '0',
+      'query_cache_type'              => '0',
+      'bind-address'                  => hiera('mysql_bind_host'),
+      'max_connections'               => '1024',
+      'open_files_limit'              => '-1',
+      'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
+      'wsrep_cluster_name'            => 'galera_cluster',
+      'wsrep_slave_threads'           => '1',
+      'wsrep_certify_nonPK'           => '1',
+      'wsrep_max_ws_rows'             => '131072',
+      'wsrep_max_ws_size'             => '1073741824',
+      'wsrep_debug'                   => '0',
+      'wsrep_convert_LOCK_to_trx'     => '0',
+      'wsrep_retry_autocommit'        => '1',
+      'wsrep_auto_increment_control'  => '1',
+      'wsrep_drupal_282555_workaround'=> '0',
+      'wsrep_causal_reads'            => '0',
+      'wsrep_notify_cmd'              => '',
+      'wsrep_sst_method'              => 'rsync',
+    }
+  }
+
+  class { '::mysql::server':
+    create_root_user   => false,
+    create_root_my_cnf => false,
+    config_file        => $mysql_config_file,
+    override_options   => $mysqld_options,
+    service_manage     => false,
+  }
+
+}
+
+if hiera('step') >= 2 {
+
   if $pacemaker_master {
+
+    # FIXME: we should not have to access tripleo::loadbalancer class
+    # parameters here to configure pacemaker VIPs. The configuration
+    # of pacemaker VIPs could move into puppet-tripleo or we should
+    # make use of less specific hiera parameters here for the settings.
     $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
     pacemaker::resource::ip { 'control_vip':
       ip_address => $control_vip,
@@ -62,185 +177,209 @@ if hiera('step') >= 1 {
     pacemaker::resource::ip { 'public_vip':
       ip_address => $public_vip,
     }
-    pacemaker::resource::systemd { 'haproxy':
-      clone => true,
-    }
-  }
-
-  Class['::pacemaker::corosync'] -> Pacemaker::Resource::Systemd <| |>
 
-}
-
-if hiera('step') >= 2 {
+    $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
+    if $internal_api_vip and $internal_api_vip != $control_vip {
+      pacemaker::resource::ip { 'internal_api_vip':
+        ip_address => $internal_api_vip,
+      }
+    }
 
-  if count(hiera('ntp::servers')) > 0 {
-    include ::ntp
-  }
+    $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
+    if $storage_vip and $storage_vip != $control_vip {
+      pacemaker::resource::ip { 'storage_vip':
+        ip_address => $storage_vip,
+      }
+    }
 
-  # MongoDB
-  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
-    include ::mongodb::globals
+    $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
+    if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
+      pacemaker::resource::ip { 'storage_mgmt_vip':
+        ip_address => $storage_mgmt_vip,
+      }
+    }
 
-    class {'::mongodb::server' :
-      service_ensure => undef
+    pacemaker::resource::service { 'haproxy':
+      clone_params => true,
+    }
+    pacemaker::resource::service { $::memcached::params::service_name :
+      clone_params => true,
+      require      => Class['::memcached'],
     }
-    $mongo_node_ips = split(hiera('mongo_node_ips'), ',')
-    $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
-    $mongo_node_string = join($mongo_node_ips_with_port, ',')
 
-    $mongodb_replset = hiera('mongodb::server::replset')
-    $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
-    if downcase(hiera('bootstrap_nodeid')) == $::hostname {
+    pacemaker::resource::ocf { 'rabbitmq':
+      ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
+      resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
+      clone_params    => 'ordered=true interleave=true',
+      require         => Class['::rabbitmq'],
+    }
 
-      pacemaker::resource::systemd { 'mongod' :
-        options => "op start timeout=120s",
-        clone   => true,
-        before  => Exec['mongodb-ready'],
+    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
+      pacemaker::resource::service { $::mongodb::params::service_name :
+        op_params    => 'start timeout=120s',
+        clone_params => true,
+        require      => Class['::mongodb::server'],
       }
       # NOTE (spredzy) : The replset can only be run
       # once all the nodes have joined the cluster.
-      $mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ')
-      exec { 'mongodb-ready' :
-        command   => $mongodb_cluster_ready_command,
-        timeout   => 600,
-        tries     => 60,
-        try_sleep => 10,
-        before    => Mongodb_replset[$mongodb_replset],
+      $mongo_node_ips = hiera('mongo_node_ips')
+      $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
+      $mongo_node_string = join($mongo_node_ips_with_port, ',')
+      $mongodb_pacemaker_resource = Pacemaker::Resource::Service[$::mongodb::params::service_name]
+      $mongodb_replset = hiera('mongodb::server::replset')
+      mongodb_conn_validator { $mongo_node_ips_with_port :
+        require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
+        before  => Mongodb_replset[$mongodb_replset],
       }
-
       mongodb_replset { $mongodb_replset :
         members => $mongo_node_ips_with_port,
       }
     }
-  }
 
-  # Redis
-  $redis_node_ips = split(hiera('redis_node_ips'), ',')
-  $redis_master_hostname = downcase(hiera('bootstrap_nodeid'))
+    pacemaker::resource::ocf { 'galera' :
+      ocf_agent_name  => 'heartbeat:galera',
+      op_params       => 'promote timeout=300s on-fail=block',
+      master_params   => '',
+      meta_params     => "master-max=${galera_nodes_count} ordered=true",
+      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
+      require         => Class['::mysql::server'],
+      before          => Exec['galera-ready'],
+    }
+
+    pacemaker::resource::ocf { 'redis':
+      ocf_agent_name  => 'heartbeat:redis',
+      master_params   => '',
+      meta_params     => 'notify=true ordered=true interleave=true',
+      resource_params => 'wait_last_known_master=true',
+      require         => Class['::redis'],
+    }
+    $redis_vip = hiera('redis_vip')
+    pacemaker::resource::ip { 'vip-redis':
+      ip_address => $redis_vip,
+    }
+    pacemaker::constraint::base { 'redis-master-then-vip-redis':
+      constraint_type => 'order',
+      first_resource  => 'redis-master',
+      second_resource => "ip-${redis_vip}",
+      first_action    => 'promote',
+      second_action   => 'start',
+      require => [Pacemaker::Resource::Ocf['redis'],
+                  Pacemaker::Resource::Ip['vip-redis']],
+    }
+    pacemaker::constraint::colocation { 'vip-redis-with-redis-master':
+      source  => "ip-${redis_vip}",
+      target  => 'redis-master',
+      score   => 'INFINITY',
+      require => [Pacemaker::Resource::Ocf['redis'],
+                  Pacemaker::Resource::Ip['vip-redis']],
+    }
 
-  if $redis_master_hostname == $::hostname {
-    $slaveof = undef
-  } else {
-    $slaveof = "${redis_master_hostname} 6379"
-  }
-  class {'::redis' :
-    slaveof => $slaveof,
   }
 
-  if count($redis_node_ips) > 1 {
-    Class['::tripleo::redis_notification'] -> Service['redis-sentinel']
-    include ::redis::sentinel
-    class {'::tripleo::redis_notification' :
-      haproxy_monitor_ip => hiera('tripleo::loadbalancer::controller_virtual_ip'),
-    }
+  exec { 'galera-ready' :
+    command     => '/usr/bin/clustercheck >/dev/null',
+    timeout     => 30,
+    tries       => 180,
+    try_sleep   => 10,
+    environment => ["AVAILABLE_WHEN_READONLY=0"],
+    require     => File['/etc/sysconfig/clustercheck'],
   }
 
-  if str2bool(hiera('enable_galera', 'true')) {
-    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
-  } else {
-    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
+  file { '/etc/sysconfig/clustercheck' :
+    ensure  => file,
+    content => "MYSQL_USERNAME=root\n
+MYSQL_PASSWORD=''\n
+MYSQL_HOST=localhost\n",
   }
-  # TODO Galara
-  class { 'mysql::server':
-    config_file => $mysql_config_file,
-    override_options => {
-      'mysqld' => {
-        'bind-address' => hiera('controller_host')
-      }
-    }
+
+  xinetd::service { 'galera-monitor' :
+    port           => '9200',
+    server         => '/usr/bin/clustercheck',
+    per_source     => 'UNLIMITED',
+    log_on_success => '',
+    log_on_failure => 'HOST',
+    flags          => 'REUSE',
+    service_type   => 'UNLISTED',
+    user           => 'root',
+    group          => 'root',
+    require        => File['/etc/sysconfig/clustercheck'],
   }
 
-  # FIXME: this should only occur on the bootstrap host (ditto for db syncs)
   # Create all the database schemas
   # Example DSN format: mysql://user:password@host/dbname
-  $allowed_hosts = ['%',hiera('controller_host')]
-  $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]')
-  class { 'keystone::db::mysql':
-    user          => $keystone_dsn[3],
-    password      => $keystone_dsn[4],
-    host          => $keystone_dsn[5],
-    dbname        => $keystone_dsn[6],
-    allowed_hosts => $allowed_hosts,
-  }
-  $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]')
-  class { 'glance::db::mysql':
-    user          => $glance_dsn[3],
-    password      => $glance_dsn[4],
-    host          => $glance_dsn[5],
-    dbname        => $glance_dsn[6],
-    allowed_hosts => $allowed_hosts,
-  }
-  $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]')
-  class { 'nova::db::mysql':
-    user          => $nova_dsn[3],
-    password      => $nova_dsn[4],
-    host          => $nova_dsn[5],
-    dbname        => $nova_dsn[6],
-    allowed_hosts => $allowed_hosts,
-  }
-  $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]')
-  class { 'neutron::db::mysql':
-    user          => $neutron_dsn[3],
-    password      => $neutron_dsn[4],
-    host          => $neutron_dsn[5],
-    dbname        => $neutron_dsn[6],
-    allowed_hosts => $allowed_hosts,
-  }
-  $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]')
-  class { 'cinder::db::mysql':
-    user          => $cinder_dsn[3],
-    password      => $cinder_dsn[4],
-    host          => $cinder_dsn[5],
-    dbname        => $cinder_dsn[6],
-    allowed_hosts => $allowed_hosts,
-  }
-  $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]')
-  class { 'heat::db::mysql':
-    user          => $heat_dsn[3],
-    password      => $heat_dsn[4],
-    host          => $heat_dsn[5],
-    dbname        => $heat_dsn[6],
-    allowed_hosts => $allowed_hosts,
-  }
-  if downcase(hiera('ceilometer_backend')) == 'mysql' {
-    $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]')
-    class { 'ceilometer::db::mysql':
-      user          => $ceilometer_dsn[3],
-      password      => $ceilometer_dsn[4],
-      host          => $ceilometer_dsn[5],
-      dbname        => $ceilometer_dsn[6],
+  if $sync_db {
+    $allowed_hosts = ['%',hiera('mysql_bind_host')]
+    $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]')
+    class { 'keystone::db::mysql':
+      user          => $keystone_dsn[3],
+      password      => $keystone_dsn[4],
+      host          => $keystone_dsn[5],
+      dbname        => $keystone_dsn[6],
       allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
     }
-  }
-
-  # the module ignores erlang_cookie if cluster_config is false
-  file { '/var/lib/rabbitmq/.erlang.cookie':
-    ensure  => 'present',
-    owner   => 'rabbitmq',
-    group   => 'rabbitmq',
-    mode    => '0400',
-    content => hiera('rabbitmq::erlang_cookie'),
-    replace => true,
-  } ->
-  class { '::rabbitmq':
-    service_manage          => false,
-    tcp_keepalive           => false,
-    config_kernel_variables => hiera('rabbitmq_kernel_variables'),
-    config_variables        => hiera('rabbitmq_config_variables'),
-    environment_variables   => hiera('rabbitmq_environment'),
-  }
-  if $pacemaker_master {
-    pacemaker::resource::ocf { 'rabbitmq':
-      resource_name => 'heartbeat:rabbitmq-cluster',
-      options       => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
-      clone         => true,
-      require       => Class['::rabbitmq'],
+    $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]')
+    class { 'glance::db::mysql':
+      user          => $glance_dsn[3],
+      password      => $glance_dsn[4],
+      host          => $glance_dsn[5],
+      dbname        => $glance_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]')
+    class { 'nova::db::mysql':
+      user          => $nova_dsn[3],
+      password      => $nova_dsn[4],
+      host          => $nova_dsn[5],
+      dbname        => $nova_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]')
+    class { 'neutron::db::mysql':
+      user          => $neutron_dsn[3],
+      password      => $neutron_dsn[4],
+      host          => $neutron_dsn[5],
+      dbname        => $neutron_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]')
+    class { 'cinder::db::mysql':
+      user          => $cinder_dsn[3],
+      password      => $cinder_dsn[4],
+      host          => $cinder_dsn[5],
+      dbname        => $cinder_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]')
+    class { 'heat::db::mysql':
+      user          => $heat_dsn[3],
+      password      => $heat_dsn[4],
+      host          => $heat_dsn[5],
+      dbname        => $heat_dsn[6],
+      allowed_hosts => $allowed_hosts,
+      require       => Exec['galera-ready'],
+    }
+    if downcase(hiera('ceilometer_backend')) == 'mysql' {
+      $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]')
+      class { 'ceilometer::db::mysql':
+        user          => $ceilometer_dsn[3],
+        password      => $ceilometer_dsn[4],
+        host          => $ceilometer_dsn[5],
+        dbname        => $ceilometer_dsn[6],
+        allowed_hosts => $allowed_hosts,
+        require       => Exec['galera-ready'],
+      }
     }
   }
 
   # pre-install swift here so we can build rings
   include ::swift
 
+  # Ceph
   $cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false)
   $enable_ceph = $cinder_enable_rbd_backend
 
@@ -256,11 +395,16 @@ if hiera('step') >= 2 {
     include ::ceph::profile::osd
   }
 
+
 } #END STEP 2
 
 if hiera('step') >= 3 {
 
-  include ::keystone
+  class { '::keystone':
+    sync_db => $sync_db,
+    manage_service => false,
+    enabled => false,
+  }
 
   #TODO: need a cleanup-keystone-tokens.sh solution here
   keystone_config {
@@ -305,28 +449,65 @@ if hiera('step') >= 3 {
   # TODO: notifications, scrubber, etc.
   include ::glance
   class { 'glance::api':
-    known_stores => [$glance_store]
+    known_stores => [$glance_store],
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::glance::registry' :
+    sync_db => $sync_db,
+    manage_service => false,
+    enabled => false,
   }
-  include ::glance::registry
   include join(['::glance::backend::', $glance_backend])
 
-  class { 'nova':
-    glance_api_servers     => join([hiera('glance_protocol'), '://', hiera('controller_virtual_ip'), ':', hiera('glance_port')]),
-  }
+  include ::nova
 
-  include ::nova::api
-  include ::nova::cert
-  include ::nova::conductor
-  include ::nova::consoleauth
+  class { '::nova::api' :
+    sync_db => $sync_db,
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::nova::cert' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::nova::conductor' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::nova::consoleauth' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::nova::vncproxy' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::nova::scheduler' :
+    manage_service => false,
+    enabled => false,
+  }
   include ::nova::network::neutron
-  include ::nova::vncproxy
-  include ::nova::scheduler
 
+  # Neutron class definitions
   include ::neutron
-  include ::neutron::server
-  include ::neutron::agents::dhcp
-  include ::neutron::agents::l3
-
+  class { '::neutron::server' :
+    sync_db => $sync_db,
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::neutron::agents::dhcp' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::neutron::agents::l3' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { 'neutron::agents::metadata':
+    manage_service => false,
+    enabled => false,
+  }
   file { '/etc/neutron/dnsmasq-neutron.conf':
     content => hiera('neutron_dnsmasq_options'),
     owner   => 'neutron',
@@ -334,32 +515,32 @@ if hiera('step') >= 3 {
     notify  => Service['neutron-dhcp-service'],
     require => Package['neutron'],
   }
-
   class { 'neutron::plugins::ml2':
-    flat_networks        => split(hiera('neutron_flat_networks'), ','),
+    flat_networks   => split(hiera('neutron_flat_networks'), ','),
     tenant_network_types => [hiera('neutron_tenant_network_type')],
-    type_drivers         => [hiera('neutron_tenant_network_type')],
   }
-
   class { 'neutron::agents::ml2::ovs':
+    # manage_service   => false # not implemented
+    enabled          => false,
     bridge_mappings  => split(hiera('neutron_bridge_mappings'), ','),
     tunnel_types     => split(hiera('neutron_tunnel_types'), ','),
   }
 
-  class { 'neutron::agents::metadata':
-    auth_url => join(['http://', hiera('controller_virtual_ip'), ':35357/v2.0']),
-  }
-
-  Service['neutron-server'] -> Service['neutron-dhcp-service']
-  Service['neutron-server'] -> Service['neutron-l3']
-  Service['neutron-server'] -> Service['neutron-ovs-agent-service']
-  Service['neutron-server'] -> Service['neutron-metadata']
-
   include ::cinder
-  include ::cinder::api
+  class { '::cinder::api':
+    sync_db => $sync_db,
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::cinder::scheduler' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::cinder::volume' :
+    manage_service => false,
+    enabled => false,
+  }
   include ::cinder::glance
-  include ::cinder::scheduler
-  include ::cinder::volume
   class {'cinder::setup_test_volume':
     size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
   }
@@ -401,14 +582,32 @@ if hiera('step') >= 3 {
     }
   }
 
-  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend])
+  if hiera('cinder_enable_netapp_backend', false) {
+    $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
+
+    cinder_config {
+      "${cinder_netapp_backend}/host": value => 'hostgroup';
+    }
+
+    if hiera('cinder_netapp_nfs_shares', undef) {
+      $cinder_netapp_nfs_shares = split(hiera('cinder_netapp_nfs_shares', undef), ',')
+    }
+
+    cinder::backend::netapp { $cinder_netapp_backend :
+      nfs_shares => $cinder_netapp_nfs_shares,
+    }
+  }
+
+  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend])
   class { '::cinder::backends' :
     enabled_backends => $cinder_enabled_backends,
   }
 
   # swift proxy
-  include ::memcached
-  include ::swift::proxy
+  class { '::swift::proxy' :
+    manage_service => $non_pcmk_start,
+    enabled => $non_pcmk_start,
+  }
   include ::swift::proxy::proxy_logging
   include ::swift::proxy::healthcheck
   include ::swift::proxy::cache
@@ -423,9 +622,21 @@ if hiera('step') >= 3 {
 
   # swift storage
   if str2bool(hiera('enable_swift_storage', 'true')) {
-    class {'swift::storage::all':
+    class {'::swift::storage::all':
       mount_check => str2bool(hiera('swift_mount_check'))
     }
+    class {'::swift::storage::account':
+      manage_service => $non_pcmk_start,
+      enabled => $non_pcmk_start,
+    }
+    class {'::swift::storage::container':
+      manage_service => $non_pcmk_start,
+      enabled => $non_pcmk_start,
+    }
+    class {'::swift::storage::object':
+      manage_service => $non_pcmk_start,
+      enabled => $non_pcmk_start,
+    }
     if(!defined(File['/srv/node'])) {
       file { '/srv/node':
         ensure  => directory,
@@ -446,37 +657,69 @@ if hiera('step') >= 3 {
       $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
     }
     default : {
-      $ceilometer_database_connection = $ceilometer_mongodb_conn_string
+      $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
     }
   }
   include ::ceilometer
-  include ::ceilometer::api
-  include ::ceilometer::agent::notification
-  include ::ceilometer::agent::central
-  include ::ceilometer::alarm::notifier
-  include ::ceilometer::alarm::evaluator
+  class { '::ceilometer::api' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::ceilometer::agent::notification' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::ceilometer::agent::central' :
+    manage_service => false,
+    enabled => false,
+    coordination_url => "redis://${redis_vip}:6379",
+  }
+  class { '::ceilometer::alarm::notifier' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::ceilometer::alarm::evaluator' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::ceilometer::collector' :
+    manage_service => false,
+    enabled => false,
+  }
   include ::ceilometer::expirer
-  include ::ceilometer::collector
   class { '::ceilometer::db' :
     database_connection => $ceilometer_database_connection,
+    sync_db             => $sync_db,
   }
-  class { 'ceilometer::agent::auth':
-    auth_url => join(['http://', hiera('controller_virtual_ip'), ':5000/v2.0']),
-  }
+  include ceilometer::agent::auth
 
   Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
 
   # Heat
-  include ::heat
-  include ::heat::api
-  include ::heat::api_cfn
-  include ::heat::api_cloudwatch
-  include ::heat::engine
+  class { '::heat' :
+    sync_db => $sync_db,
+  }
+  class { '::heat::api' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::heat::api_cfn' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::heat::api_cloudwatch' :
+    manage_service => false,
+    enabled => false,
+  }
+  class { '::heat::engine' :
+    manage_service => false,
+    enabled => false,
+  }
 
   # Horizon
   $vhost_params = { add_listen => false }
   class { 'horizon':
-    cache_server_ip    => split(hiera('memcache_node_ips', '127.0.0.1'), ','),
+    cache_server_ip    => hiera('memcache_node_ips', '127.0.0.1'),
     vhost_extra_params => $vhost_params,
   }
 
@@ -491,3 +734,563 @@ if hiera('step') >= 3 {
   }
 
 } #END STEP 3
+
+if hiera('step') >= 4 {
+  # Pacemaker resources and constraints only need to be created once per
+  # cluster, so creation is restricted to the bootstrap ($pacemaker_master)
+  # node.
+  if $pacemaker_master {
+
+    # Keystone
+    pacemaker::resource::service { $::keystone::params::service_name :
+      clone_params => "interleave=true",
+    }
+    # Cinder
+    # Start order enforced by the constraints below:
+    #   keystone -> cinder-api -> cinder-scheduler -> cinder-volume,
+    # with scheduler colocated with api and volume colocated with scheduler.
+    pacemaker::resource::service { $::cinder::params::api_service :
+      clone_params => "interleave=true",
+      require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
+    }
+    pacemaker::resource::service { $::cinder::params::scheduler_service :
+      clone_params => "interleave=true",
+    }
+    # No clone_params: cinder-volume runs as a single (non-cloned) resource.
+    pacemaker::resource::service { $::cinder::params::volume_service : }
+
+    pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::keystone::params::service_name}-clone",
+      second_resource => "${::cinder::params::api_service}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
+                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
+    }
+    pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
+      constraint_type => "order",
+      first_resource => "${::cinder::params::api_service}-clone",
+      second_resource => "${::cinder::params::scheduler_service}-clone",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
+                  Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
+    }
+    pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
+      source => "${::cinder::params::scheduler_service}-clone",
+      target => "${::cinder::params::api_service}-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
+                  Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
+    }
+    pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
+      constraint_type => "order",
+      first_resource => "${::cinder::params::scheduler_service}-clone",
+      second_resource => "${::cinder::params::volume_service}",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
+                  Pacemaker::Resource::Service[$::cinder::params::volume_service]],
+    }
+    pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
+      source => "${::cinder::params::volume_service}",
+      target => "${::cinder::params::scheduler_service}-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
+                  Pacemaker::Resource::Service[$::cinder::params::volume_service]],
+    }
+
+    # Glance
+    # Start order: keystone -> glance-registry -> glance-api, with the API
+    # colocated with the registry.
+    pacemaker::resource::service { $::glance::params::registry_service_name :
+      clone_params => "interleave=true",
+      require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
+    }
+    pacemaker::resource::service { $::glance::params::api_service_name :
+      clone_params => "interleave=true",
+    }
+
+    pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::keystone::params::service_name}-clone",
+      second_resource => "${::glance::params::registry_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
+                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
+    }
+    pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
+      constraint_type => "order",
+      first_resource  => "${::glance::params::registry_service_name}-clone",
+      second_resource => "${::glance::params::api_service_name}-clone",
+      first_action    => "start",
+      second_action   => "start",
+      require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
+                  Pacemaker::Resource::Service[$::glance::params::api_service_name]],
+    }
+    pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
+      source  => "${::glance::params::api_service_name}-clone",
+      target  => "${::glance::params::registry_service_name}-clone",
+      score   => "INFINITY",
+      require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
+                  Pacemaker::Resource::Service[$::glance::params::api_service_name]],
+    }
+
+    # Neutron
+    # Start order enforced by the constraints below:
+    #   keystone -> neutron-server -> neutron-scale -> ovs-cleanup ->
+    #   netns-cleanup -> openvswitch-agent -> dhcp-agent -> l3-agent ->
+    #   metadata-agent, each stage colocated with the previous one.
+    # NOTE(review): neutron-scale hard-codes clone-max=3 — verify this
+    # matches the number of controllers in the deployment.
+    pacemaker::resource::service { $::neutron::params::server_service:
+      op_params => "start timeout=90",
+      clone_params   => "interleave=true",
+      require => Pacemaker::Resource::Service[$::keystone::params::service_name]
+    }
+    pacemaker::resource::service { $::neutron::params::l3_agent_service:
+      clone_params   => "interleave=true",
+    }
+    pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
+      clone_params   => "interleave=true",
+    }
+    pacemaker::resource::service { $::neutron::params::ovs_agent_service:
+      clone_params => "interleave=true",
+    }
+    pacemaker::resource::service { $::neutron::params::metadata_agent_service:
+      clone_params => "interleave=true",
+    }
+    pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
+      ocf_agent_name => "neutron:OVSCleanup",
+      clone_params => "interleave=true",
+    }
+    pacemaker::resource::ocf { 'neutron-netns-cleanup':
+      ocf_agent_name => "neutron:NetnsCleanup",
+      clone_params => "interleave=true",
+    }
+    pacemaker::resource::ocf { 'neutron-scale':
+      ocf_agent_name => "neutron:NeutronScale",
+      clone_params => "globally-unique=true clone-max=3 interleave=true",
+    }
+    pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
+      constraint_type => "order",
+      first_resource => "${::keystone::params::service_name}-clone",
+      second_resource => "${::neutron::params::server_service}-clone",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
+                  Pacemaker::Resource::Service[$::neutron::params::server_service]],
+    }
+    pacemaker::constraint::base { 'neutron-server-to-neutron-scale-constraint':
+      constraint_type => "order",
+      first_resource => "${::neutron::params::server_service}-clone",
+      second_resource => "neutron-scale-clone",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
+                  Pacemaker::Resource::Ocf['neutron-scale']],
+    }
+    pacemaker::constraint::base { 'neutron-scale-to-ovs-cleanup-constraint':
+      constraint_type => "order",
+      first_resource => "neutron-scale-clone",
+      second_resource => "${::neutron::params::ovs_cleanup_service}-clone",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Ocf['neutron-scale'],
+                  Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]],
+    }
+    pacemaker::constraint::colocation { 'neutron-scale-to-ovs-cleanup-colocation':
+      source => "${::neutron::params::ovs_cleanup_service}-clone",
+      target => "neutron-scale-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Ocf['neutron-scale'],
+                  Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]],
+    }
+    pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
+      constraint_type => "order",
+      first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
+      second_resource => "neutron-netns-cleanup-clone",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
+                  Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
+    }
+    pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
+      source => "neutron-netns-cleanup-clone",
+      target => "${::neutron::params::ovs_cleanup_service}-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
+                  Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
+    }
+    pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
+      constraint_type => "order",
+      first_resource => "neutron-netns-cleanup-clone",
+      second_resource => "${::neutron::params::ovs_agent_service}-clone",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
+                  Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
+    }
+    pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
+      source => "${::neutron::params::ovs_agent_service}-clone",
+      target => "neutron-netns-cleanup-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
+                  Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
+    }
+    pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
+      constraint_type => "order",
+      first_resource => "${::neutron::params::ovs_agent_service}-clone",
+      second_resource => "${::neutron::params::dhcp_agent_service}-clone",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
+                  Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
+
+    }
+    pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
+      source => "${::neutron::params::dhcp_agent_service}-clone",
+      target => "${::neutron::params::ovs_agent_service}-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
+                  Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
+    }
+    pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
+      constraint_type => "order",
+      first_resource => "${::neutron::params::dhcp_agent_service}-clone",
+      second_resource => "${::neutron::params::l3_agent_service}-clone",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
+                  Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
+    }
+    pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
+      source => "${::neutron::params::l3_agent_service}-clone",
+      target => "${::neutron::params::dhcp_agent_service}-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
+                  Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
+    }
+    pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
+      constraint_type => "order",
+      first_resource => "${::neutron::params::l3_agent_service}-clone",
+      second_resource => "${::neutron::params::metadata_agent_service}-clone",
+      first_action => "start",
+      second_action => "start",
+      require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
+                  Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
+    }
+    pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
+      source => "${::neutron::params::metadata_agent_service}-clone",
+      target => "${::neutron::params::l3_agent_service}-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
+                  Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
+    }
+
+    # Nova
+    # Two ordered chains are defined below:
+    #   keystone -> nova-consoleauth -> nova-vncproxy
+    #   nova-api -> nova-scheduler -> nova-conductor
+    # The vncproxy -> api link is commented out (websockify < 0.6), so the
+    # second chain is currently not ordered after the first.
+    pacemaker::resource::service { $::nova::params::api_service_name :
+      clone_params    => "interleave=true",
+      op_params       => "monitor start-delay=10s",
+    }
+    pacemaker::resource::service { $::nova::params::conductor_service_name :
+      clone_params    => "interleave=true",
+      op_params       => "monitor start-delay=10s",
+    }
+    pacemaker::resource::service { $::nova::params::consoleauth_service_name :
+      clone_params    => "interleave=true",
+      op_params       => "monitor start-delay=10s",
+      require         => Pacemaker::Resource::Service[$::keystone::params::service_name],
+    }
+    pacemaker::resource::service { $::nova::params::vncproxy_service_name :
+      clone_params    => "interleave=true",
+      op_params       => "monitor start-delay=10s",
+    }
+    pacemaker::resource::service { $::nova::params::scheduler_service_name :
+      clone_params    => "interleave=true",
+      op_params       => "monitor start-delay=10s",
+    }
+
+    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::keystone::params::service_name}-clone",
+      second_resource => "${::nova::params::consoleauth_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
+                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
+    }
+    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
+      constraint_type => "order",
+      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
+      second_resource => "${::nova::params::vncproxy_service_name}-clone",
+      first_action    => "start",
+      second_action   => "start",
+      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
+                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
+    }
+    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
+      source => "${::nova::params::vncproxy_service_name}-clone",
+      target => "${::nova::params::consoleauth_service_name}-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
+                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
+    }
+    # FIXME(gfidente): novncproxy will not start unless websockify is updated to 0.6
+    # which is not the case for f20 nor f21; uncomment when it becomes available
+    #pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
+    #  constraint_type => "order",
+    #  first_resource  => "${::nova::params::vncproxy_service_name}-clone",
+    #  second_resource => "${::nova::params::api_service_name}-clone",
+    #  first_action    => "start",
+    #  second_action   => "start",
+    #  require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
+    #              Pacemaker::Resource::Service[$::nova::params::api_service_name]],
+    #}
+    #pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
+    #  source => "${::nova::params::api_service_name}-clone",
+    #  target => "${::nova::params::vncproxy_service_name}-clone",
+    #  score => "INFINITY",
+    #  require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
+    #              Pacemaker::Resource::Service[$::nova::params::api_service_name]],
+    #}
+    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
+      constraint_type => "order",
+      first_resource  => "${::nova::params::api_service_name}-clone",
+      second_resource => "${::nova::params::scheduler_service_name}-clone",
+      first_action    => "start",
+      second_action   => "start",
+      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
+                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
+    }
+    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
+      source => "${::nova::params::scheduler_service_name}-clone",
+      target => "${::nova::params::api_service_name}-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
+                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
+    }
+    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
+      constraint_type => "order",
+      first_resource  => "${::nova::params::scheduler_service_name}-clone",
+      second_resource => "${::nova::params::conductor_service_name}-clone",
+      first_action    => "start",
+      second_action   => "start",
+      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
+                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
+    }
+    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
+      source => "${::nova::params::conductor_service_name}-clone",
+      target => "${::nova::params::scheduler_service_name}-clone",
+      score => "INFINITY",
+      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
+                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
+    }
+
+    # Ceilometer
+    # Start order: central agent -> collector -> api -> delay ->
+    # alarm-evaluator (chain continues below). The 'delay' resource is a
+    # heartbeat:Delay with startdelay=10, acting as an explicit pause after
+    # the API starts before the alarm services come up.
+    # NOTE(review): $mongodb_pacemaker_resource is defined outside this
+    # excerpt — confirm it is set whenever the mongodb backend is in use.
+    pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
+      clone_params => 'interleave=true',
+      require      => [Pacemaker::Resource::Service[$::keystone::params::service_name],
+                       $mongodb_pacemaker_resource],
+    }
+    pacemaker::resource::service { $::ceilometer::params::collector_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::resource::service { $::ceilometer::params::api_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::resource::ocf { 'delay' :
+      ocf_agent_name  => 'heartbeat:Delay',
+      clone_params    => 'interleave=true',
+      resource_params => 'startdelay=10',
+    }
+    pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
+      second_resource => "${::ceilometer::params::collector_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
+                          Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
+    }
+    pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::ceilometer::params::collector_service_name}-clone",
+      second_resource => "${::ceilometer::params::api_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
+                          Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
+    }
+    pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
+      source  => "${::ceilometer::params::api_service_name}-clone",
+      target  => "${::ceilometer::params::collector_service_name}-clone",
+      score   => 'INFINITY',
+      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
+                  Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
+    }
+    pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::ceilometer::params::api_service_name}-clone",
+      second_resource => 'delay-clone',
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
+                          Pacemaker::Resource::Ocf['delay']],
+    }
+    pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
+      source  => 'delay-clone',
+      target  => "${::ceilometer::params::api_service_name}-clone",
+      score   => 'INFINITY',
+      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
+                  Pacemaker::Resource::Ocf['delay']],
+    }
+    pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint':
+      constraint_type => 'order',
+      first_resource  => 'delay-clone',
+      second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
+                          Pacemaker::Resource::Ocf['delay']],
+    }
+    # Colocate the alarm evaluator with the delay resource.
+    pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation':
+      source  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
+      target  => 'delay-clone',
+      score   => 'INFINITY',
+      # BUG FIX: 'require' previously referenced the ceilometer API service
+      # (copy/paste slip from the preceding constraint); the resources this
+      # colocation involves are the alarm evaluator and the delay resource.
+      require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
+                  Pacemaker::Resource::Ocf['delay']],
+    }
+    # Continuation of the ceilometer chain: alarm-evaluator ->
+    # alarm-notifier -> agent-notification, followed by the preconditions
+    # for the central agent (mongodb when used as backend, the redis VIP,
+    # and keystone).
+    pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
+      second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
+                          Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
+    }
+    pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation':
+      source  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
+      target  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
+      score   => 'INFINITY',
+      require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
+                  Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
+    }
+    pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
+      second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
+                          Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
+    }
+    pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation':
+      source  => "${::ceilometer::params::agent_notification_service_name}-clone",
+      target  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
+      score   => 'INFINITY',
+      require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
+                  Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
+    }
+    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
+      pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
+        constraint_type => 'order',
+        first_resource  => "${::mongodb::params::service_name}-clone",
+        second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
+        first_action    => 'start',
+        second_action   => 'start',
+        require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
+                            Pacemaker::Resource::Service[$::mongodb::params::service_name]],
+      }
+    }
+    # NOTE(review): Pacemaker::Resource::Ip['vip-redis'] is declared outside
+    # this excerpt — confirm the resource title matches.
+    pacemaker::constraint::base { 'vip-redis-then-ceilometer-central':
+      constraint_type => 'order',
+      first_resource  => "ip-${redis_vip}",
+      second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
+                  Pacemaker::Resource::Ip['vip-redis']],
+    }
+    pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::keystone::params::service_name}-clone",
+      second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
+                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
+    }
+
+    # Heat
+    # Start order: heat-api -> heat-api-cfn -> heat-api-cloudwatch ->
+    # heat-engine, each colocated with its predecessor; heat-api is also
+    # ordered after the ceilometer notification agent.
+    pacemaker::resource::service { $::heat::params::api_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::resource::service { $::heat::params::api_cfn_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::resource::service { $::heat::params::engine_service_name :
+      clone_params => 'interleave=true',
+    }
+    pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::heat::params::api_service_name}-clone",
+      second_resource => "${::heat::params::api_cfn_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
+                  Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
+    }
+    pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
+      source  => "${::heat::params::api_cfn_service_name}-clone",
+      target  => "${::heat::params::api_service_name}-clone",
+      score   => 'INFINITY',
+      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
+                  Pacemaker::Resource::Service[$::heat::params::api_service_name]],
+    }
+    pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::heat::params::api_cfn_service_name}-clone",
+      second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
+                  Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
+    }
+    pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
+      source  => "${::heat::params::api_cloudwatch_service_name}-clone",
+      target  => "${::heat::params::api_cfn_service_name}-clone",
+      score   => 'INFINITY',
+      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
+                  Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
+    }
+    pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
+      second_resource => "${::heat::params::engine_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
+                  Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
+    }
+    pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
+      source  => "${::heat::params::engine_service_name}-clone",
+      target  => "${::heat::params::api_cloudwatch_service_name}-clone",
+      score   => 'INFINITY',
+      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
+                  Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
+    }
+    pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
+      constraint_type => 'order',
+      first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
+      second_resource => "${::heat::params::api_service_name}-clone",
+      first_action    => 'start',
+      second_action   => 'start',
+      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
+                          Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
+    }
+
+  }
+
+} #END STEP 4