984601b93c65348f885ba1bd0c578fc79c360d67
[apex-tripleo-heat-templates.git] / puppet / manifests / overcloud_controller_pacemaker.pp
1 # Copyright 2015 Red Hat, Inc.
2 # All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
14 # under the License.
15
16 Pcmk_resource <| |> {
17   tries     => 10,
18   try_sleep => 3,
19 }
20
21 include ::tripleo::packages
22 include ::tripleo::firewall
23
24 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
25   $pacemaker_master = true
26   $sync_db = true
27 } else {
28   $pacemaker_master = false
29   $sync_db = false
30 }
31
32 $enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
33 $enable_load_balancer = hiera('enable_load_balancer', true)
34
35 # When to start and enable services which haven't been Pacemakerized
36 # FIXME: remove when we start all OpenStack services using Pacemaker
37 # (occurences of this variable will be gradually replaced with false)
38 $non_pcmk_start = hiera('step') >= 4
39
40 if hiera('step') >= 1 {
41
42   create_resources(kmod::load, hiera('kernel_modules'), {})
43   create_resources(sysctl::value, hiera('sysctl_settings'), {})
44   Exec <| tag == 'kmod::load' |>  -> Sysctl <| |>
45
46   include ::timezone
47
48   if count(hiera('ntp::servers')) > 0 {
49     include ::ntp
50   }
51
52   $controller_node_ips = split(hiera('controller_node_ips'), ',')
53   $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
54   if $enable_load_balancer {
55     class { '::tripleo::loadbalancer' :
56       controller_hosts       => $controller_node_ips,
57       controller_hosts_names => $controller_node_names,
58       manage_vip             => false,
59       mysql_clustercheck     => true,
60       haproxy_service_manage => false,
61     }
62   }
63
64   $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
65   $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
66   if $corosync_ipv6 {
67     $cluster_setup_extras = { '--ipv6' => '' }
68   } else {
69     $cluster_setup_extras = {}
70   }
71   user { 'hacluster':
72     ensure => present,
73   } ->
74   class { '::pacemaker':
75     hacluster_pwd => hiera('hacluster_pwd'),
76   } ->
77   class { '::pacemaker::corosync':
78     cluster_members      => $pacemaker_cluster_members,
79     setup_cluster        => $pacemaker_master,
80     cluster_setup_extras => $cluster_setup_extras,
81   }
82   class { '::pacemaker::stonith':
83     disable => !$enable_fencing,
84   }
85   if $enable_fencing {
86     include ::tripleo::fencing
87
88     # enable stonith after all fencing devices have been created
89     Class['tripleo::fencing'] -> Class['pacemaker::stonith']
90   }
91
92   # FIXME(gfidente): sets 200secs as default start timeout op
93   # param; until we can use pcmk global defaults we'll still
94   # need to add it to every resource which redefines op params
95   Pacemaker::Resource::Service {
96     op_params => 'start timeout=200s stop timeout=200s',
97   }
98
99   # Only configure RabbitMQ in this step, don't start it yet to
100   # avoid races where non-master nodes attempt to start without
101   # config (eg. binding on 0.0.0.0)
102   # The module ignores erlang_cookie if cluster_config is false
103   class { '::rabbitmq':
104     service_manage          => false,
105     tcp_keepalive           => false,
106     config_kernel_variables => hiera('rabbitmq_kernel_variables'),
107     config_variables        => hiera('rabbitmq_config_variables'),
108     environment_variables   => hiera('rabbitmq_environment'),
109   } ->
110   file { '/var/lib/rabbitmq/.erlang.cookie':
111     ensure  => file,
112     owner   => 'rabbitmq',
113     group   => 'rabbitmq',
114     mode    => '0400',
115     content => hiera('rabbitmq::erlang_cookie'),
116     replace => true,
117   }
118
119   if downcase(hiera('ceilometer_backend')) == 'mongodb' {
120     include ::mongodb::globals
121     class { '::mongodb::server' :
122       service_manage => false,
123     }
124   }
125
126   # Memcached
127   class {'::memcached' :
128     service_manage => false,
129   }
130
131   # Redis
132   class { '::redis' :
133     service_manage => false,
134     notify_service => false,
135   }
136
137   # Galera
138   if str2bool(hiera('enable_galera', true)) {
139     $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
140   } else {
141     $mysql_config_file = '/etc/my.cnf.d/server.cnf'
142   }
143   $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
144   $galera_nodes_count = count(split($galera_nodes, ','))
145
146   # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
147   # set bind-address to a hostname instead of an ip address; to move Mysql
148   # from internal_api on another network we'll have to customize both
149   # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
150   $mysql_bind_host = hiera('mysql_bind_host')
151   $mysqld_options = {
152     'mysqld' => {
153       'skip-name-resolve'             => '1',
154       'binlog_format'                 => 'ROW',
155       'default-storage-engine'        => 'innodb',
156       'innodb_autoinc_lock_mode'      => '2',
157       'innodb_locks_unsafe_for_binlog'=> '1',
158       'query_cache_size'              => '0',
159       'query_cache_type'              => '0',
160       'bind-address'                  => $::hostname,
161       'max_connections'               => hiera('mysql_max_connections'),
162       'open_files_limit'              => '-1',
163       'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
164       'wsrep_cluster_name'            => 'galera_cluster',
165       'wsrep_slave_threads'           => '1',
166       'wsrep_certify_nonPK'           => '1',
167       'wsrep_max_ws_rows'             => '131072',
168       'wsrep_max_ws_size'             => '1073741824',
169       'wsrep_debug'                   => '0',
170       'wsrep_convert_LOCK_to_trx'     => '0',
171       'wsrep_retry_autocommit'        => '1',
172       'wsrep_auto_increment_control'  => '1',
173       'wsrep_drupal_282555_workaround'=> '0',
174       'wsrep_causal_reads'            => '0',
175       'wsrep_sst_method'              => 'rsync',
176       'wsrep_provider_options'        => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
177     },
178   }
179
180   class { '::mysql::server':
181     create_root_user        => false,
182     create_root_my_cnf      => false,
183     config_file             => $mysql_config_file,
184     override_options        => $mysqld_options,
185     remove_default_accounts => $pacemaker_master,
186     service_manage          => false,
187     service_enabled         => false,
188   }
189
190 }
191
192 if hiera('step') >= 2 {
193
194   # NOTE(gfidente): the following vars are needed on all nodes so they
195   # need to stay out of pacemaker_master conditional
196   $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
197   $mongodb_replset = hiera('mongodb::server::replset')
198
199   if $pacemaker_master {
200
201     if $enable_load_balancer {
202
203       include ::pacemaker::resource_defaults
204
205       # FIXME: we should not have to access tripleo::loadbalancer class
206       # parameters here to configure pacemaker VIPs. The configuration
207       # of pacemaker VIPs could move into puppet-tripleo or we should
208       # make use of less specific hiera parameters here for the settings.
209       pacemaker::resource::service { 'haproxy':
210         clone_params => true,
211       }
212
213       $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
214       pacemaker::resource::ip { 'control_vip':
215         ip_address => $control_vip,
216       }
217       pacemaker::constraint::base { 'control_vip-then-haproxy':
218         constraint_type   => 'order',
219         first_resource    => "ip-${control_vip}",
220         second_resource   => 'haproxy-clone',
221         first_action      => 'start',
222         second_action     => 'start',
223         constraint_params => 'kind=Optional',
224         require           => [Pacemaker::Resource::Service['haproxy'],
225                               Pacemaker::Resource::Ip['control_vip']],
226       }
227       pacemaker::constraint::colocation { 'control_vip-with-haproxy':
228         source  => "ip-${control_vip}",
229         target  => 'haproxy-clone',
230         score   => 'INFINITY',
231         require => [Pacemaker::Resource::Service['haproxy'],
232                     Pacemaker::Resource::Ip['control_vip']],
233       }
234
235       $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
236       if $public_vip and $public_vip != $control_vip {
237         pacemaker::resource::ip { 'public_vip':
238           ip_address => $public_vip,
239         }
240         pacemaker::constraint::base { 'public_vip-then-haproxy':
241           constraint_type   => 'order',
242           first_resource    => "ip-${public_vip}",
243           second_resource   => 'haproxy-clone',
244           first_action      => 'start',
245           second_action     => 'start',
246           constraint_params => 'kind=Optional',
247           require           => [Pacemaker::Resource::Service['haproxy'],
248                                 Pacemaker::Resource::Ip['public_vip']],
249         }
250         pacemaker::constraint::colocation { 'public_vip-with-haproxy':
251           source  => "ip-${public_vip}",
252           target  => 'haproxy-clone',
253           score   => 'INFINITY',
254           require => [Pacemaker::Resource::Service['haproxy'],
255                       Pacemaker::Resource::Ip['public_vip']],
256         }
257       }
258
259       $redis_vip = hiera('redis_vip')
260       if $redis_vip and $redis_vip != $control_vip {
261         pacemaker::resource::ip { 'redis_vip':
262           ip_address => $redis_vip,
263         }
264         pacemaker::constraint::base { 'redis_vip-then-haproxy':
265           constraint_type   => 'order',
266           first_resource    => "ip-${redis_vip}",
267           second_resource   => 'haproxy-clone',
268           first_action      => 'start',
269           second_action     => 'start',
270           constraint_params => 'kind=Optional',
271           require           => [Pacemaker::Resource::Service['haproxy'],
272                                 Pacemaker::Resource::Ip['redis_vip']],
273         }
274         pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
275           source  => "ip-${redis_vip}",
276           target  => 'haproxy-clone',
277           score   => 'INFINITY',
278           require => [Pacemaker::Resource::Service['haproxy'],
279                       Pacemaker::Resource::Ip['redis_vip']],
280         }
281       }
282
283       $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
284       if $internal_api_vip and $internal_api_vip != $control_vip {
285         pacemaker::resource::ip { 'internal_api_vip':
286           ip_address => $internal_api_vip,
287         }
288         pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
289           constraint_type   => 'order',
290           first_resource    => "ip-${internal_api_vip}",
291           second_resource   => 'haproxy-clone',
292           first_action      => 'start',
293           second_action     => 'start',
294           constraint_params => 'kind=Optional',
295           require           => [Pacemaker::Resource::Service['haproxy'],
296                                 Pacemaker::Resource::Ip['internal_api_vip']],
297         }
298         pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
299           source  => "ip-${internal_api_vip}",
300           target  => 'haproxy-clone',
301           score   => 'INFINITY',
302           require => [Pacemaker::Resource::Service['haproxy'],
303                       Pacemaker::Resource::Ip['internal_api_vip']],
304         }
305       }
306
307       $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
308       if $storage_vip and $storage_vip != $control_vip {
309         pacemaker::resource::ip { 'storage_vip':
310           ip_address => $storage_vip,
311         }
312         pacemaker::constraint::base { 'storage_vip-then-haproxy':
313           constraint_type   => 'order',
314           first_resource    => "ip-${storage_vip}",
315           second_resource   => 'haproxy-clone',
316           first_action      => 'start',
317           second_action     => 'start',
318           constraint_params => 'kind=Optional',
319           require           => [Pacemaker::Resource::Service['haproxy'],
320                                 Pacemaker::Resource::Ip['storage_vip']],
321         }
322         pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
323           source  => "ip-${storage_vip}",
324           target  => 'haproxy-clone',
325           score   => 'INFINITY',
326           require => [Pacemaker::Resource::Service['haproxy'],
327                       Pacemaker::Resource::Ip['storage_vip']],
328         }
329       }
330
331       $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
332       if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
333         pacemaker::resource::ip { 'storage_mgmt_vip':
334           ip_address => $storage_mgmt_vip,
335         }
336         pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
337           constraint_type   => 'order',
338           first_resource    => "ip-${storage_mgmt_vip}",
339           second_resource   => 'haproxy-clone',
340           first_action      => 'start',
341           second_action     => 'start',
342           constraint_params => 'kind=Optional',
343           require           => [Pacemaker::Resource::Service['haproxy'],
344                                 Pacemaker::Resource::Ip['storage_mgmt_vip']],
345         }
346         pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
347           source  => "ip-${storage_mgmt_vip}",
348           target  => 'haproxy-clone',
349           score   => 'INFINITY',
350           require => [Pacemaker::Resource::Service['haproxy'],
351                       Pacemaker::Resource::Ip['storage_mgmt_vip']],
352         }
353       }
354
355     }
356
357     pacemaker::resource::service { $::memcached::params::service_name :
358       clone_params => 'interleave=true',
359       require      => Class['::memcached'],
360     }
361
362     pacemaker::resource::ocf { 'rabbitmq':
363       ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
364       resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
365       clone_params    => 'ordered=true interleave=true',
366       meta_params     => 'notify=true',
367       require         => Class['::rabbitmq'],
368     }
369
370     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
371       pacemaker::resource::service { $::mongodb::params::service_name :
372         op_params    => 'start timeout=370s stop timeout=200s',
373         clone_params => true,
374         require      => Class['::mongodb::server'],
375       }
376       # NOTE (spredzy) : The replset can only be run
377       # once all the nodes have joined the cluster.
378       mongodb_conn_validator { $mongo_node_ips_with_port :
379         timeout => '600',
380         require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
381         before  => Mongodb_replset[$mongodb_replset],
382       }
383       mongodb_replset { $mongodb_replset :
384         members => $mongo_node_ips_with_port,
385       }
386     }
387
388     pacemaker::resource::ocf { 'galera' :
389       ocf_agent_name  => 'heartbeat:galera',
390       op_params       => 'promote timeout=300s on-fail=block',
391       master_params   => '',
392       meta_params     => "master-max=${galera_nodes_count} ordered=true",
393       resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
394       require         => Class['::mysql::server'],
395       before          => Exec['galera-ready'],
396     }
397
398     pacemaker::resource::ocf { 'redis':
399       ocf_agent_name  => 'heartbeat:redis',
400       master_params   => '',
401       meta_params     => 'notify=true ordered=true interleave=true',
402       resource_params => 'wait_last_known_master=true',
403       require         => Class['::redis'],
404     }
405
406   }
407
408   exec { 'galera-ready' :
409     command     => '/usr/bin/clustercheck >/dev/null',
410     timeout     => 30,
411     tries       => 180,
412     try_sleep   => 10,
413     environment => ['AVAILABLE_WHEN_READONLY=0'],
414     require     => File['/etc/sysconfig/clustercheck'],
415   }
416
417   file { '/etc/sysconfig/clustercheck' :
418     ensure  => file,
419     content => "MYSQL_USERNAME=root\n
420 MYSQL_PASSWORD=''\n
421 MYSQL_HOST=localhost\n",
422   }
423
424   xinetd::service { 'galera-monitor' :
425     port           => '9200',
426     server         => '/usr/bin/clustercheck',
427     per_source     => 'UNLIMITED',
428     log_on_success => '',
429     log_on_failure => 'HOST',
430     flags          => 'REUSE',
431     service_type   => 'UNLISTED',
432     user           => 'root',
433     group          => 'root',
434     require        => File['/etc/sysconfig/clustercheck'],
435   }
436
437   # Create all the database schemas
438   if $sync_db {
439     class { '::keystone::db::mysql':
440       require => Exec['galera-ready'],
441     }
442     class { '::glance::db::mysql':
443       require => Exec['galera-ready'],
444     }
445     class { '::nova::db::mysql':
446       require => Exec['galera-ready'],
447     }
448     class { '::nova::db::mysql_api':
449       require => Exec['galera-ready'],
450     }
451     class { '::neutron::db::mysql':
452       require => Exec['galera-ready'],
453     }
454     class { '::cinder::db::mysql':
455       require => Exec['galera-ready'],
456     }
457     class { '::heat::db::mysql':
458       require => Exec['galera-ready'],
459     }
460
461     if downcase(hiera('ceilometer_backend')) == 'mysql' {
462       class { '::ceilometer::db::mysql':
463         require => Exec['galera-ready'],
464       }
465     }
466
467     class { '::sahara::db::mysql':
468       require       => Exec['galera-ready'],
469     }
470   }
471
472   # pre-install swift here so we can build rings
473   include ::swift
474
475   # Ceph
476   $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
477
478   if $enable_ceph {
479     class { '::ceph::profile::params':
480       mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
481     }
482     include ::ceph::conf
483     include ::ceph::profile::mon
484   }
485
486   if str2bool(hiera('enable_ceph_storage', false)) {
487     if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
488       exec { 'set selinux to permissive on boot':
489         command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
490         onlyif  => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
491         path    => ['/usr/bin', '/usr/sbin'],
492       }
493
494       exec { 'set selinux to permissive':
495         command => 'setenforce 0',
496         onlyif  => "which setenforce && getenforce | grep -i 'enforcing'",
497         path    => ['/usr/bin', '/usr/sbin'],
498       } -> Class['ceph::profile::osd']
499     }
500
501     include ::ceph::conf
502     include ::ceph::profile::osd
503   }
504
505   if str2bool(hiera('enable_external_ceph', false)) {
506     include ::ceph::conf
507     include ::ceph::profile::client
508   }
509
510
511 } #END STEP 2
512
513 if hiera('step') >= 3 {
514
515   class { '::keystone':
516     sync_db          => $sync_db,
517     manage_service   => false,
518     enabled          => false,
519     enable_bootstrap => $pacemaker_master,
520   }
521   include ::keystone::config
522
523   #TODO: need a cleanup-keystone-tokens.sh solution here
524
525   file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
526     ensure  => 'directory',
527     owner   => 'keystone',
528     group   => 'keystone',
529     require => Package['keystone'],
530   }
531   file { '/etc/keystone/ssl/certs/signing_cert.pem':
532     content => hiera('keystone_signing_certificate'),
533     owner   => 'keystone',
534     group   => 'keystone',
535     notify  => Service['keystone'],
536     require => File['/etc/keystone/ssl/certs'],
537   }
538   file { '/etc/keystone/ssl/private/signing_key.pem':
539     content => hiera('keystone_signing_key'),
540     owner   => 'keystone',
541     group   => 'keystone',
542     notify  => Service['keystone'],
543     require => File['/etc/keystone/ssl/private'],
544   }
545   file { '/etc/keystone/ssl/certs/ca.pem':
546     content => hiera('keystone_ca_certificate'),
547     owner   => 'keystone',
548     group   => 'keystone',
549     notify  => Service['keystone'],
550     require => File['/etc/keystone/ssl/certs'],
551   }
552
553   $glance_backend = downcase(hiera('glance_backend', 'swift'))
554   case $glance_backend {
555       'swift': { $backend_store = 'glance.store.swift.Store' }
556       'file': { $backend_store = 'glance.store.filesystem.Store' }
557       'rbd': { $backend_store = 'glance.store.rbd.Store' }
558       default: { fail('Unrecognized glance_backend parameter.') }
559   }
560   $http_store = ['glance.store.http.Store']
561   $glance_store = concat($http_store, $backend_store)
562
563   if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
564     $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
565     pacemaker::resource::filesystem { 'glance-fs':
566       device       => hiera('glance_file_pcmk_device'),
567       directory    => hiera('glance_file_pcmk_directory'),
568       fstype       => hiera('glance_file_pcmk_fstype'),
569       fsoptions    => join([$secontext, hiera('glance_file_pcmk_options', '')],','),
570       clone_params => '',
571     }
572   }
573
574   # TODO: notifications, scrubber, etc.
575   include ::glance
576   include ::glance::config
577   class { '::glance::api':
578     known_stores   => $glance_store,
579     manage_service => false,
580     enabled        => false,
581   }
582   class { '::glance::registry' :
583     sync_db        => $sync_db,
584     manage_service => false,
585     enabled        => false,
586   }
587   include ::glance::notify::rabbitmq
588   include join(['::glance::backend::', $glance_backend])
589
590   class { '::nova' :
591     memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'),
592   }
593
594   include ::nova::config
595
596   class { '::nova::api' :
597     sync_db        => $sync_db,
598     sync_db_api    => $sync_db,
599     manage_service => false,
600     enabled        => false,
601   }
602   class { '::nova::cert' :
603     manage_service => false,
604     enabled        => false,
605   }
606   class { '::nova::conductor' :
607     manage_service => false,
608     enabled        => false,
609   }
610   class { '::nova::consoleauth' :
611     manage_service => false,
612     enabled        => false,
613   }
614   class { '::nova::vncproxy' :
615     manage_service => false,
616     enabled        => false,
617   }
618   include ::nova::scheduler::filter
619   class { '::nova::scheduler' :
620     manage_service => false,
621     enabled        => false,
622   }
623   include ::nova::network::neutron
624
625   if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
626
627     # TODO(devvesa) provide non-controller ips for these services
628     $zookeeper_node_ips = hiera('neutron_api_node_ips')
629     $cassandra_node_ips = hiera('neutron_api_node_ips')
630
631     # Run zookeeper in the controller if configured
632     if hiera('enable_zookeeper_on_controller') {
633       class {'::tripleo::cluster::zookeeper':
634         zookeeper_server_ips => $zookeeper_node_ips,
635         # TODO: create a 'bind' hiera key for zookeeper
636         zookeeper_client_ip  => hiera('neutron::bind_host'),
637         zookeeper_hostnames  => split(hiera('controller_node_names'), ',')
638       }
639     }
640
641     # Run cassandra in the controller if configured
642     if hiera('enable_cassandra_on_controller') {
643       class {'::tripleo::cluster::cassandra':
644         cassandra_servers => $cassandra_node_ips,
645         # TODO: create a 'bind' hiera key for cassandra
646         cassandra_ip      => hiera('neutron::bind_host'),
647       }
648     }
649
650     class {'::tripleo::network::midonet::agent':
651       zookeeper_servers => $zookeeper_node_ips,
652       cassandra_seeds   => $cassandra_node_ips
653     }
654
655     class {'::tripleo::network::midonet::api':
656       zookeeper_servers    => $zookeeper_node_ips,
657       vip                  => hiera('tripleo::loadbalancer::public_virtual_ip'),
658       keystone_ip          => hiera('tripleo::loadbalancer::public_virtual_ip'),
659       keystone_admin_token => hiera('keystone::admin_token'),
660       # TODO: create a 'bind' hiera key for api
661       bind_address         => hiera('neutron::bind_host'),
662       admin_password       => hiera('admin_password')
663     }
664
665     # Configure Neutron
666     class {'::neutron':
667       service_plugins => []
668     }
669
670   }
671   else {
672     # Neutron class definitions
673     include ::neutron
674   }
675
676   include ::neutron::config
677   class { '::neutron::server' :
678     sync_db        => $sync_db,
679     manage_service => false,
680     enabled        => false,
681   }
682   include ::neutron::server::notifications
683   if  hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
684     include ::neutron::plugins::nuage
685   }
686   if  hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
687     include ::neutron::plugins::opencontrail
688   }
689   if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
690     class {'::neutron::plugins::midonet':
691       midonet_api_ip    => hiera('tripleo::loadbalancer::public_virtual_ip'),
692       keystone_tenant   => hiera('neutron::server::auth_tenant'),
693       keystone_password => hiera('neutron::server::auth_password')
694     }
695   }
696   if hiera('neutron::enable_dhcp_agent',true) {
697     class { '::neutron::agents::dhcp' :
698       manage_service => false,
699       enabled        => false,
700     }
701     file { '/etc/neutron/dnsmasq-neutron.conf':
702       content => hiera('neutron_dnsmasq_options'),
703       owner   => 'neutron',
704       group   => 'neutron',
705       notify  => Service['neutron-dhcp-service'],
706       require => Package['neutron'],
707     }
708   }
709   if hiera('neutron::enable_l3_agent',true) {
710     class { '::neutron::agents::l3' :
711       manage_service => false,
712       enabled        => false,
713     }
714   }
715   if hiera('neutron::enable_metadata_agent',true) {
716     class { '::neutron::agents::metadata':
717       manage_service => false,
718       enabled        => false,
719     }
720   }
721   include ::neutron::plugins::ml2
722   class { '::neutron::agents::ml2::ovs':
723     manage_service => false,
724     enabled        => false,
725   }
726
727   if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
728     include ::neutron::plugins::ml2::cisco::ucsm
729   }
730   if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
731     include ::neutron::plugins::ml2::cisco::nexus
732     include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
733   }
734   if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
735     include ::neutron::plugins::ml2::cisco::nexus1000v
736
737     class { '::neutron::agents::n1kv_vem':
738       n1kv_source  => hiera('n1kv_vem_source', undef),
739       n1kv_version => hiera('n1kv_vem_version', undef),
740     }
741
742     class { '::n1k_vsm':
743       n1kv_source  => hiera('n1kv_vsm_source', undef),
744       n1kv_version => hiera('n1kv_vsm_version', undef),
745     }
746   }
747
748   if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
749     include ::neutron::plugins::ml2::bigswitch::restproxy
750     include ::neutron::agents::bigswitch
751   }
752   neutron_l3_agent_config {
753     'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
754   }
755   neutron_dhcp_agent_config {
756     'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
757   }
758   neutron_config {
759     'DEFAULT/notification_driver': value => 'messaging';
760   }
761
762   include ::cinder
763   include ::cinder::config
764   include ::tripleo::ssl::cinder_config
765   class { '::cinder::api':
766     sync_db        => $sync_db,
767     manage_service => false,
768     enabled        => false,
769   }
770   class { '::cinder::scheduler' :
771     manage_service => false,
772     enabled        => false,
773   }
774   class { '::cinder::volume' :
775     manage_service => false,
776     enabled        => false,
777   }
778   include ::cinder::glance
779   include ::cinder::ceilometer
780   class { '::cinder::setup_test_volume':
781     size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
782   }
783
784   $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
785   if $cinder_enable_iscsi {
786     $cinder_iscsi_backend = 'tripleo_iscsi'
787
788     cinder::backend::iscsi { $cinder_iscsi_backend :
789       iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
790       iscsi_helper     => hiera('cinder_iscsi_helper'),
791     }
792   }
793
794   if $enable_ceph {
795
796     $ceph_pools = hiera('ceph_pools')
797     ceph::pool { $ceph_pools :
798       pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
799       pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
800       size    => hiera('ceph::profile::params::osd_pool_default_size'),
801     }
802
803     $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]
804
805   } else {
806     $cinder_pool_requires = []
807   }
808
809   if hiera('cinder_enable_rbd_backend', false) {
810     $cinder_rbd_backend = 'tripleo_ceph'
811
812     cinder::backend::rbd { $cinder_rbd_backend :
813       rbd_pool        => hiera('cinder_rbd_pool_name'),
814       rbd_user        => hiera('ceph_client_user_name'),
815       rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
816       require         => $cinder_pool_requires,
817     }
818   }
819
820   if hiera('cinder_enable_eqlx_backend', false) {
821     $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
822
823     cinder::backend::eqlx { $cinder_eqlx_backend :
824       volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
825       san_ip              => hiera('cinder::backend::eqlx::san_ip', undef),
826       san_login           => hiera('cinder::backend::eqlx::san_login', undef),
827       san_password        => hiera('cinder::backend::eqlx::san_password', undef),
828       san_thin_provision  => hiera('cinder::backend::eqlx::san_thin_provision', undef),
829       eqlx_group_name     => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
830       eqlx_pool           => hiera('cinder::backend::eqlx::eqlx_pool', undef),
831       eqlx_use_chap       => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
832       eqlx_chap_login     => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
833       eqlx_chap_password  => hiera('cinder::backend::eqlx::eqlx_san_password', undef),
834     }
835   }
836
837   if hiera('cinder_enable_dellsc_backend', false) {
838     $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
839
840     cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
841       volume_backend_name   => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
842       san_ip                => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
843       san_login             => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
844       san_password          => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
845       dell_sc_ssn           => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
846       iscsi_ip_address      => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
847       iscsi_port            => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
848       dell_sc_api_port      => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
849       dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
850       dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
851     }
852   }
853
854   if hiera('cinder_enable_netapp_backend', false) {
855     $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
856
857     if hiera('cinder::backend::netapp::nfs_shares', undef) {
858       $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
859     }
860
861     cinder::backend::netapp { $cinder_netapp_backend :
862       netapp_login                 => hiera('cinder::backend::netapp::netapp_login', undef),
863       netapp_password              => hiera('cinder::backend::netapp::netapp_password', undef),
864       netapp_server_hostname       => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
865       netapp_server_port           => hiera('cinder::backend::netapp::netapp_server_port', undef),
866       netapp_size_multiplier       => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
867       netapp_storage_family        => hiera('cinder::backend::netapp::netapp_storage_family', undef),
868       netapp_storage_protocol      => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
869       netapp_transport_type        => hiera('cinder::backend::netapp::netapp_transport_type', undef),
870       netapp_vfiler                => hiera('cinder::backend::netapp::netapp_vfiler', undef),
871       netapp_volume_list           => hiera('cinder::backend::netapp::netapp_volume_list', undef),
872       netapp_vserver               => hiera('cinder::backend::netapp::netapp_vserver', undef),
873       netapp_partner_backend_name  => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
874       nfs_shares                   => $cinder_netapp_nfs_shares,
875       nfs_shares_config            => hiera('cinder::backend::netapp::nfs_shares_config', undef),
876       netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
877       netapp_controller_ips        => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
878       netapp_sa_password           => hiera('cinder::backend::netapp::netapp_sa_password', undef),
879       netapp_storage_pools         => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
880       netapp_eseries_host_type     => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
881       netapp_webservice_path       => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
882     }
883   }
884
885   if hiera('cinder_enable_nfs_backend', false) {
886     $cinder_nfs_backend = 'tripleo_nfs'
887
888     if str2bool($::selinux) {
889       selboolean { 'virt_use_nfs':
890         value      => on,
891         persistent => true,
892       } -> Package['nfs-utils']
893     }
894
895     package { 'nfs-utils': } ->
896     cinder::backend::nfs { $cinder_nfs_backend:
897       nfs_servers       => hiera('cinder_nfs_servers'),
898       nfs_mount_options => hiera('cinder_nfs_mount_options',''),
899       nfs_shares_config => '/etc/cinder/shares-nfs.conf',
900     }
901   }
902
903   $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
904   class { '::cinder::backends' :
905     enabled_backends => $cinder_enabled_backends,
906   }
907
908   class { '::sahara':
909     sync_db => $sync_db,
910   }
911   class { '::sahara::service::api':
912     manage_service => false,
913     enabled        => false,
914   }
915   class { '::sahara::service::engine':
916     manage_service => false,
917     enabled        => false,
918   }
919
920   # swift proxy
921   class { '::swift::proxy' :
922     manage_service => $non_pcmk_start,
923     enabled        => $non_pcmk_start,
924   }
925   include ::swift::proxy::proxy_logging
926   include ::swift::proxy::healthcheck
927   include ::swift::proxy::cache
928   include ::swift::proxy::keystone
929   include ::swift::proxy::authtoken
930   include ::swift::proxy::staticweb
931   include ::swift::proxy::ratelimit
932   include ::swift::proxy::catch_errors
933   include ::swift::proxy::tempurl
934   include ::swift::proxy::formpost
935
936   # swift storage
937   if str2bool(hiera('enable_swift_storage', true)) {
938     class {'::swift::storage::all':
939       mount_check => str2bool(hiera('swift_mount_check')),
940     }
941     class {'::swift::storage::account':
942       manage_service => $non_pcmk_start,
943       enabled        => $non_pcmk_start,
944     }
945     class {'::swift::storage::container':
946       manage_service => $non_pcmk_start,
947       enabled        => $non_pcmk_start,
948     }
949     class {'::swift::storage::object':
950       manage_service => $non_pcmk_start,
951       enabled        => $non_pcmk_start,
952     }
953     if(!defined(File['/srv/node'])) {
954       file { '/srv/node':
955         ensure  => directory,
956         owner   => 'swift',
957         group   => 'swift',
958         require => Package['openstack-swift'],
959       }
960     }
961     $swift_components = ['account', 'container', 'object']
962     swift::storage::filter::recon { $swift_components : }
963     swift::storage::filter::healthcheck { $swift_components : }
964   }
965
966   # Ceilometer
967   case downcase(hiera('ceilometer_backend')) {
968     /mysql/: {
969       $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
970     }
971     default: {
972       $mongo_node_string = join($mongo_node_ips_with_port, ',')
973       $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
974     }
975   }
976   include ::ceilometer
977   include ::ceilometer::config
978   class { '::ceilometer::api' :
979     manage_service => false,
980     enabled        => false,
981   }
982   class { '::ceilometer::agent::notification' :
983     manage_service => false,
984     enabled        => false,
985   }
986   class { '::ceilometer::agent::central' :
987     manage_service => false,
988     enabled        => false,
989   }
990   class { '::ceilometer::collector' :
991     manage_service => false,
992     enabled        => false,
993   }
994   include ::ceilometer::expirer
995   class { '::ceilometer::db' :
996     database_connection => $ceilometer_database_connection,
997     sync_db             => $sync_db,
998   }
999   include ::ceilometer::agent::auth
1000
1001   Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
1002
1003   # Heat
1004   include ::heat::config
1005   class { '::heat' :
1006     sync_db             => $sync_db,
1007     notification_driver => 'messaging',
1008   }
1009   class { '::heat::api' :
1010     manage_service => false,
1011     enabled        => false,
1012   }
1013   class { '::heat::api_cfn' :
1014     manage_service => false,
1015     enabled        => false,
1016   }
1017   class { '::heat::api_cloudwatch' :
1018     manage_service => false,
1019     enabled        => false,
1020   }
1021   class { '::heat::engine' :
1022     manage_service => false,
1023     enabled        => false,
1024   }
1025
1026   # httpd/apache and horizon
1027   # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
1028   class { '::apache' :
1029     service_enable => false,
1030     # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
1031   }
1032   include ::keystone::wsgi::apache
1033   include ::apache::mod::status
1034   if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
1035     $_profile_support = 'cisco'
1036   } else {
1037     $_profile_support = 'None'
1038   }
1039   $neutron_options   = {'profile_support' => $_profile_support }
1040   class { '::horizon':
1041     cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
1042     neutron_options => $neutron_options,
1043   }
1044
1045   $snmpd_user = hiera('snmpd_readonly_user_name')
1046   snmp::snmpv3_user { $snmpd_user:
1047     authtype => 'MD5',
1048     authpass => hiera('snmpd_readonly_user_password'),
1049   }
1050   class { '::snmp':
1051     agentaddress => ['udp:161','udp6:[::1]:161'],
1052     snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
1053   }
1054
1055   hiera_include('controller_classes')
1056
1057 } #END STEP 3
1058
1059 if hiera('step') >= 4 {
1060   $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
1061   $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
1062   $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
1063   $heat_enable_db_purge = hiera('heat_enable_db_purge', true)
1064
1065   if $keystone_enable_db_purge {
1066     include ::keystone::cron::token_flush
1067   }
1068   if $nova_enable_db_purge {
1069     include ::nova::cron::archive_deleted_rows
1070   }
1071   if $cinder_enable_db_purge {
1072     include ::cinder::cron::db_purge
1073   }
1074   if $heat_enable_db_purge {
1075     include ::heat::cron::purge_deleted
1076   }
1077
1078   if $pacemaker_master {
1079
1080     if $enable_load_balancer {
1081       pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
1082         constraint_type => 'order',
1083         first_resource  => 'haproxy-clone',
1084         second_resource => "${::apache::params::service_name}-clone",
1085         first_action    => 'start',
1086         second_action   => 'start',
1087         require         => [Pacemaker::Resource::Service['haproxy'],
1088                             Pacemaker::Resource::Service[$::apache::params::service_name]],
1089       }
1090     }
1091     pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
1092       constraint_type => 'order',
1093       first_resource  => 'rabbitmq-clone',
1094       second_resource => "${::apache::params::service_name}-clone",
1095       first_action    => 'start',
1096       second_action   => 'start',
1097       require         => [Pacemaker::Resource::Ocf['rabbitmq'],
1098                           Pacemaker::Resource::Service[$::apache::params::service_name]],
1099     }
1100     pacemaker::constraint::base { 'memcached-then-keystone-constraint':
1101       constraint_type => 'order',
1102       first_resource  => 'memcached-clone',
1103       second_resource => "${::apache::params::service_name}-clone",
1104       first_action    => 'start',
1105       second_action   => 'start',
1106       require         => [Pacemaker::Resource::Service['memcached'],
1107                           Pacemaker::Resource::Service[$::apache::params::service_name]],
1108     }
1109     pacemaker::constraint::base { 'galera-then-keystone-constraint':
1110       constraint_type => 'order',
1111       first_resource  => 'galera-master',
1112       second_resource => "${::apache::params::service_name}-clone",
1113       first_action    => 'promote',
1114       second_action   => 'start',
1115       require         => [Pacemaker::Resource::Ocf['galera'],
1116                           Pacemaker::Resource::Service[$::apache::params::service_name]],
1117     }
1118
1119     # Cinder
1120     pacemaker::resource::service { $::cinder::params::api_service :
1121       clone_params => 'interleave=true',
1122       require      => Pacemaker::Resource::Service[$::apache::params::service_name],
1123     }
1124     pacemaker::resource::service { $::cinder::params::scheduler_service :
1125       clone_params => 'interleave=true',
1126     }
1127     pacemaker::resource::service { $::cinder::params::volume_service : }
1128
1129     pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
1130       constraint_type => 'order',
1131       first_resource  => "${::apache::params::service_name}-clone",
1132       second_resource => "${::cinder::params::api_service}-clone",
1133       first_action    => 'start',
1134       second_action   => 'start',
1135       require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
1136                           Pacemaker::Resource::Service[$::apache::params::service_name]],
1137     }
1138     pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
1139       constraint_type => 'order',
1140       first_resource  => "${::cinder::params::api_service}-clone",
1141       second_resource => "${::cinder::params::scheduler_service}-clone",
1142       first_action    => 'start',
1143       second_action   => 'start',
1144       require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
1145                           Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
1146     }
1147     pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
1148       source  => "${::cinder::params::scheduler_service}-clone",
1149       target  => "${::cinder::params::api_service}-clone",
1150       score   => 'INFINITY',
1151       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
1152                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
1153     }
1154     pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
1155       constraint_type => 'order',
1156       first_resource  => "${::cinder::params::scheduler_service}-clone",
1157       second_resource => $::cinder::params::volume_service,
1158       first_action    => 'start',
1159       second_action   => 'start',
1160       require         => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
1161                           Pacemaker::Resource::Service[$::cinder::params::volume_service]],
1162     }
1163     pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
1164       source  => $::cinder::params::volume_service,
1165       target  => "${::cinder::params::scheduler_service}-clone",
1166       score   => 'INFINITY',
1167       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
1168                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
1169     }
1170
1171     # Sahara
1172     pacemaker::resource::service { $::sahara::params::api_service_name :
1173       clone_params => 'interleave=true',
1174       require      => Pacemaker::Resource::Service[$::apache::params::service_name],
1175     }
1176     pacemaker::resource::service { $::sahara::params::engine_service_name :
1177       clone_params => 'interleave=true',
1178     }
1179     pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
1180       constraint_type => 'order',
1181       first_resource  => "${::apache::params::service_name}-clone",
1182       second_resource => "${::sahara::params::api_service_name}-clone",
1183       first_action    => 'start',
1184       second_action   => 'start',
1185       require         => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
1186                           Pacemaker::Resource::Service[$::apache::params::service_name]],
1187     }
1188
1189     # Glance
1190     pacemaker::resource::service { $::glance::params::registry_service_name :
1191       clone_params => 'interleave=true',
1192       require      => Pacemaker::Resource::Service[$::apache::params::service_name],
1193     }
1194     pacemaker::resource::service { $::glance::params::api_service_name :
1195       clone_params => 'interleave=true',
1196     }
1197
1198     pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
1199       constraint_type => 'order',
1200       first_resource  => "${::apache::params::service_name}-clone",
1201       second_resource => "${::glance::params::registry_service_name}-clone",
1202       first_action    => 'start',
1203       second_action   => 'start',
1204       require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1205                           Pacemaker::Resource::Service[$::apache::params::service_name]],
1206     }
1207     pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
1208       constraint_type => 'order',
1209       first_resource  => "${::glance::params::registry_service_name}-clone",
1210       second_resource => "${::glance::params::api_service_name}-clone",
1211       first_action    => 'start',
1212       second_action   => 'start',
1213       require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1214                           Pacemaker::Resource::Service[$::glance::params::api_service_name]],
1215     }
1216     pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
1217       source  => "${::glance::params::api_service_name}-clone",
1218       target  => "${::glance::params::registry_service_name}-clone",
1219       score   => 'INFINITY',
1220       require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1221                   Pacemaker::Resource::Service[$::glance::params::api_service_name]],
1222     }
1223
1224     if hiera('step') == 4 {
1225       # Neutron
1226       # NOTE(gfidente): Neutron will try to populate the database with some data
1227       # as soon as neutron-server is started; to avoid races we want to make this
1228       # happen only on one node, before normal Pacemaker initialization
1229       # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
1230       # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
1231       # will try to start the service while it's already started by Pacemaker
1232       # It would result to a deployment failure since systemd would return 1 to Puppet
1233       # and the overcloud would fail to deploy (6 would be returned).
1234       # This conditional prevents from a race condition during the deployment.
1235       # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
1236       exec { 'neutron-server-systemd-start-sleep' :
1237         command => 'systemctl start neutron-server && /usr/bin/sleep 5',
1238         path    => '/usr/bin',
1239         unless  => '/sbin/pcs resource show neutron-server',
1240       } ->
1241       pacemaker::resource::service { $::neutron::params::server_service:
1242         clone_params => 'interleave=true',
1243         require      => Pacemaker::Resource::Service[$::apache::params::service_name]
1244       }
1245     } else {
1246       pacemaker::resource::service { $::neutron::params::server_service:
1247         clone_params => 'interleave=true',
1248         require      => Pacemaker::Resource::Service[$::apache::params::service_name]
1249       }
1250     }
1251     if hiera('neutron::enable_l3_agent', true) {
1252       pacemaker::resource::service { $::neutron::params::l3_agent_service:
1253         clone_params => 'interleave=true',
1254       }
1255     }
1256     if hiera('neutron::enable_dhcp_agent', true) {
1257       pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
1258         clone_params => 'interleave=true',
1259       }
1260     }
1261     if hiera('neutron::enable_ovs_agent', true) {
1262       pacemaker::resource::service { $::neutron::params::ovs_agent_service:
1263         clone_params => 'interleave=true',
1264       }
1265     }
1266     if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
1267       pacemaker::resource::service {'tomcat':
1268         clone_params => 'interleave=true',
1269       }
1270     }
1271     if hiera('neutron::enable_metadata_agent', true) {
1272       pacemaker::resource::service { $::neutron::params::metadata_agent_service:
1273         clone_params => 'interleave=true',
1274       }
1275     }
1276     if hiera('neutron::enable_ovs_agent', true) {
1277       pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
1278         ocf_agent_name => 'neutron:OVSCleanup',
1279         clone_params   => 'interleave=true',
1280       }
1281       pacemaker::resource::ocf { 'neutron-netns-cleanup':
1282         ocf_agent_name => 'neutron:NetnsCleanup',
1283         clone_params   => 'interleave=true',
1284       }
1285
1286       # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
1287       pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
1288         constraint_type => 'order',
1289         first_resource  => "${::neutron::params::ovs_cleanup_service}-clone",
1290         second_resource => 'neutron-netns-cleanup-clone',
1291         first_action    => 'start',
1292         second_action   => 'start',
1293         require         => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1294                             Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1295       }
1296       pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
1297         source  => 'neutron-netns-cleanup-clone',
1298         target  => "${::neutron::params::ovs_cleanup_service}-clone",
1299         score   => 'INFINITY',
1300         require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1301                     Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1302       }
1303       pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
1304         constraint_type => 'order',
1305         first_resource  => 'neutron-netns-cleanup-clone',
1306         second_resource => "${::neutron::params::ovs_agent_service}-clone",
1307         first_action    => 'start',
1308         second_action   => 'start',
1309         require         => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1310                             Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1311       }
1312       pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
1313         source  => "${::neutron::params::ovs_agent_service}-clone",
1314         target  => 'neutron-netns-cleanup-clone',
1315         score   => 'INFINITY',
1316         require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1317                     Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1318       }
1319     }
1320     pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
1321       constraint_type   => 'order',
1322       first_resource    => "${::apache::params::service_name}-clone",
1323         second_resource => "${::neutron::params::server_service}-clone",
1324         first_action    => 'start',
1325         second_action   => 'start',
1326         require         => [Pacemaker::Resource::Service[$::apache::params::service_name],
1327                             Pacemaker::Resource::Service[$::neutron::params::server_service]],
1328       }
1329     if hiera('neutron::enable_ovs_agent',true) {
1330       pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
1331         constraint_type => 'order',
1332         first_resource  => "${::neutron::params::ovs_agent_service}-clone",
1333         second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1334         first_action    => 'start',
1335         second_action   => 'start',
1336         require         => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1337                             Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1338       }
1339     }
1340     if hiera('neutron::enable_dhcp_agent', true) and hiera('neutron::enable_ovs_agent', true) {
1341       pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
1342         constraint_type => 'order',
1343         first_resource  => "${::neutron::params::server_service}-clone",
1344         second_resource => "${::neutron::params::ovs_agent_service}-clone",
1345         first_action    => 'start',
1346         second_action   => 'start',
1347         require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1348                             Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1349       }
1350
1351       pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
1352         source  => "${::neutron::params::dhcp_agent_service}-clone",
1353         target  => "${::neutron::params::ovs_agent_service}-clone",
1354         score   => 'INFINITY',
1355         require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1356                     Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1357       }
1358     }
1359     if hiera('neutron::enable_dhcp_agent', true) and hiera('neutron::enable_l3_agent', true) {
1360       pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
1361         constraint_type => 'order',
1362         first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
1363         second_resource => "${::neutron::params::l3_agent_service}-clone",
1364         first_action    => 'start',
1365         second_action   => 'start',
1366         require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1367                             Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
1368       }
1369       pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
1370         source  => "${::neutron::params::l3_agent_service}-clone",
1371         target  => "${::neutron::params::dhcp_agent_service}-clone",
1372         score   => 'INFINITY',
1373         require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1374                     Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
1375       }
1376     }
1377     if hiera('neutron::enable_l3_agent', true) and hiera('neutron::enable_metadata_agent', true) {
1378       pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
1379         constraint_type => 'order',
1380         first_resource  => "${::neutron::params::l3_agent_service}-clone",
1381         second_resource => "${::neutron::params::metadata_agent_service}-clone",
1382         first_action    => 'start',
1383         second_action   => 'start',
1384         require         => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
1385                             Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1386       }
1387       pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
1388         source  => "${::neutron::params::metadata_agent_service}-clone",
1389         target  => "${::neutron::params::l3_agent_service}-clone",
1390         score   => 'INFINITY',
1391         require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
1392                     Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1393       }
1394     }
1395     if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
1396       # MidoNet chain: keystone --> neutron-server --> dhcp --> metadata --> tomcat
1397       pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
1398         constraint_type => 'order',
1399         first_resource  => "${::neutron::params::server_service}-clone",
1400         second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1401         first_action    => 'start',
1402         second_action   => 'start',
1403         require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1404                             Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1405       }
1406       pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
1407         constraint_type => 'order',
1408         first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
1409         second_resource => "${::neutron::params::metadata_agent_service}-clone",
1410         first_action    => 'start',
1411         second_action   => 'start',
1412         require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1413                             Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1414       }
1415       pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
1416         constraint_type => 'order',
1417         first_resource  => "${::neutron::params::metadata_agent_service}-clone",
1418         second_resource => 'tomcat-clone',
1419         first_action    => 'start',
1420         second_action   => 'start',
1421         require         => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
1422                             Pacemaker::Resource::Service['tomcat']],
1423       }
1424       pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
1425         source  => "${::neutron::params::metadata_agent_service}-clone",
1426         target  => "${::neutron::params::dhcp_agent_service}-clone",
1427         score   => 'INFINITY',
1428         require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1429                     Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1430       }
1431     }
1432
1433     # Nova
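    # Start order, with each nova service colocated with the one before it:
    # keystone (httpd) --> consoleauth --> vncproxy --> api --> scheduler --> conductor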
1434     pacemaker::resource::service { $::nova::params::api_service_name :
1435       clone_params => 'interleave=true',
1436     }
1437     pacemaker::resource::service { $::nova::params::conductor_service_name :
1438       clone_params => 'interleave=true',
1439     }
1440     pacemaker::resource::service { $::nova::params::consoleauth_service_name :
1441       clone_params => 'interleave=true',
1442       require      => Pacemaker::Resource::Service[$::apache::params::service_name],
1443     }
1444     pacemaker::resource::service { $::nova::params::vncproxy_service_name :
1445       clone_params => 'interleave=true',
1446     }
1447     pacemaker::resource::service { $::nova::params::scheduler_service_name :
1448       clone_params => 'interleave=true',
1449     }
1450
1451     pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
1452       constraint_type => 'order',
1453       first_resource  => "${::apache::params::service_name}-clone",
1454       second_resource => "${::nova::params::consoleauth_service_name}-clone",
1455       first_action    => 'start',
1456       second_action   => 'start',
1457       require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1458                           Pacemaker::Resource::Service[$::apache::params::service_name]],
1459     }
1460     pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
1461       constraint_type => 'order',
1462       first_resource  => "${::nova::params::consoleauth_service_name}-clone",
1463       second_resource => "${::nova::params::vncproxy_service_name}-clone",
1464       first_action    => 'start',
1465       second_action   => 'start',
1466       require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1467                           Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1468     }
1469     pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
1470       source  => "${::nova::params::vncproxy_service_name}-clone",
1471       target  => "${::nova::params::consoleauth_service_name}-clone",
1472       score   => 'INFINITY',
1473       require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1474                   Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1475     }
1476     pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
1477       constraint_type => 'order',
1478       first_resource  => "${::nova::params::vncproxy_service_name}-clone",
1479       second_resource => "${::nova::params::api_service_name}-clone",
1480       first_action    => 'start',
1481       second_action   => 'start',
1482       require         => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1483                           Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1484     }
1485     pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
1486       source  => "${::nova::params::api_service_name}-clone",
1487       target  => "${::nova::params::vncproxy_service_name}-clone",
1488       score   => 'INFINITY',
1489       require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1490                   Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1491     }
1492     pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
1493       constraint_type => 'order',
1494       first_resource  => "${::nova::params::api_service_name}-clone",
1495       second_resource => "${::nova::params::scheduler_service_name}-clone",
1496       first_action    => 'start',
1497       second_action   => 'start',
1498       require         => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1499                           Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1500     }
1501     pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
1502       source  => "${::nova::params::scheduler_service_name}-clone",
1503       target  => "${::nova::params::api_service_name}-clone",
1504       score   => 'INFINITY',
1505       require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1506                   Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1507     }
1508     pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
1509       constraint_type => 'order',
1510       first_resource  => "${::nova::params::scheduler_service_name}-clone",
1511       second_resource => "${::nova::params::conductor_service_name}-clone",
1512       first_action    => 'start',
1513       second_action   => 'start',
1514       require         => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1515                           Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1516     }
1517     pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
1518       source  => "${::nova::params::conductor_service_name}-clone",
1519       target  => "${::nova::params::scheduler_service_name}-clone",
1520       score   => 'INFINITY',
1521       require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1522                   Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1523     }
1524
1525     # Ceilometer
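    # The central agent waits for keystone (httpd) and, unless the backend is MySQL, for mongod;
    # start chain: agent-central --> collector --> api --> delay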
1526     case downcase(hiera('ceilometer_backend')) {
1527       /mysql/: {
1528         pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
1529           clone_params => 'interleave=true',
1530           require      => Pacemaker::Resource::Service[$::apache::params::service_name],
1531         }
1532       }
1533       default: {
1534         pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
1535           clone_params => 'interleave=true',
1536           require      => [Pacemaker::Resource::Service[$::apache::params::service_name],
1537                            Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1538         }
1539       }
1540     }
1541     pacemaker::resource::service { $::ceilometer::params::collector_service_name :
1542       clone_params => 'interleave=true',
1543     }
1544     pacemaker::resource::service { $::ceilometer::params::api_service_name :
1545       clone_params => 'interleave=true',
1546     }
1547     pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
1548       clone_params => 'interleave=true',
1549     }
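    # Cloned heartbeat:Delay resource (10s start delay), ordered after and colocated with ceilometer-api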
1550     pacemaker::resource::ocf { 'delay' :
1551       ocf_agent_name  => 'heartbeat:Delay',
1552       clone_params    => 'interleave=true',
1553       resource_params => 'startdelay=10',
1554     }
1555     # Fedora doesn't support the `require-all` constraint parameter yet
1556     if $::operatingsystem == 'Fedora' {
1557       $redis_ceilometer_constraint_params = undef
1558     } else {
1559       $redis_ceilometer_constraint_params = 'require-all=false'
1560     }
1561     pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
1562       constraint_type   => 'order',
1563       first_resource    => 'redis-master',
1564       second_resource   => "${::ceilometer::params::agent_central_service_name}-clone",
1565       first_action      => 'promote',
1566       second_action     => 'start',
1567       constraint_params => $redis_ceilometer_constraint_params,
1568       require           => [Pacemaker::Resource::Ocf['redis'],
1569                             Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
1570     }
1571     pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
1572       constraint_type => 'order',
1573       first_resource  => "${::apache::params::service_name}-clone",
1574       second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1575       first_action    => 'start',
1576       second_action   => 'start',
1577       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1578                           Pacemaker::Resource::Service[$::apache::params::service_name]],
1579     }
1580     pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
1581       constraint_type => 'order',
1582       first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
1583       second_resource => "${::ceilometer::params::collector_service_name}-clone",
1584       first_action    => 'start',
1585       second_action   => 'start',
1586       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1587                           Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1588     }
1589     pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
1590       constraint_type => 'order',
1591       first_resource  => "${::ceilometer::params::collector_service_name}-clone",
1592       second_resource => "${::ceilometer::params::api_service_name}-clone",
1593       first_action    => 'start',
1594       second_action   => 'start',
1595       require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
1596                           Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
1597     }
1598     pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
1599       source  => "${::ceilometer::params::api_service_name}-clone",
1600       target  => "${::ceilometer::params::collector_service_name}-clone",
1601       score   => 'INFINITY',
1602       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1603                   Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1604     }
1605     pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
1606       constraint_type => 'order',
1607       first_resource  => "${::ceilometer::params::api_service_name}-clone",
1608       second_resource => 'delay-clone',
1609       first_action    => 'start',
1610       second_action   => 'start',
1611       require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1612                           Pacemaker::Resource::Ocf['delay']],
1613     }
1614     pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
1615       source  => 'delay-clone',
1616       target  => "${::ceilometer::params::api_service_name}-clone",
1617       score   => 'INFINITY',
1618       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1619                   Pacemaker::Resource::Ocf['delay']],
1620     }
1621     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
1622       pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
1623         constraint_type => 'order',
1624         first_resource  => "${::mongodb::params::service_name}-clone",
1625         second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1626         first_action    => 'start',
1627         second_action   => 'start',
1628         require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1629                             Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1630       }
1631     }
1632
1633     # Heat
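    # Start chain: keystone (httpd) --> heat-api --> heat-api-cfn --> heat-api-cloudwatch --> heat-engine;
    # the ceilometer notification agent must also be up before heat-api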
1634     pacemaker::resource::service { $::heat::params::api_service_name :
1635       clone_params => 'interleave=true',
1636     }
1637     pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
1638       clone_params => 'interleave=true',
1639     }
1640     pacemaker::resource::service { $::heat::params::api_cfn_service_name :
1641       clone_params => 'interleave=true',
1642     }
1643     pacemaker::resource::service { $::heat::params::engine_service_name :
1644       clone_params => 'interleave=true',
1645     }
1646     pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
1647       constraint_type => 'order',
1648       first_resource  => "${::apache::params::service_name}-clone",
1649       second_resource => "${::heat::params::api_service_name}-clone",
1650       first_action    => 'start',
1651       second_action   => 'start',
1652       require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1653                           Pacemaker::Resource::Service[$::apache::params::service_name]],
1654     }
1655     pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
1656       constraint_type => 'order',
1657       first_resource  => "${::heat::params::api_service_name}-clone",
1658       second_resource => "${::heat::params::api_cfn_service_name}-clone",
1659       first_action    => 'start',
1660       second_action   => 'start',
1661       require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1662                           Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1663     }
1664     pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
1665       source  => "${::heat::params::api_cfn_service_name}-clone",
1666       target  => "${::heat::params::api_service_name}-clone",
1667       score   => 'INFINITY',
1668       require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
1669                   Pacemaker::Resource::Service[$::heat::params::api_service_name]],
1670     }
1671     pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
1672       constraint_type => 'order',
1673       first_resource  => "${::heat::params::api_cfn_service_name}-clone",
1674       second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
1675       first_action    => 'start',
1676       second_action   => 'start',
1677       require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1678                           Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1679     }
1680     pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
1681       source  => "${::heat::params::api_cloudwatch_service_name}-clone",
1682       target  => "${::heat::params::api_cfn_service_name}-clone",
1683       score   => 'INFINITY',
1684       require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
1685                   Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
1686     }
1687     pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
1688       constraint_type => 'order',
1689       first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
1690       second_resource => "${::heat::params::engine_service_name}-clone",
1691       first_action    => 'start',
1692       second_action   => 'start',
1693       require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1694                           Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1695     }
1696     pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
1697       source  => "${::heat::params::engine_service_name}-clone",
1698       target  => "${::heat::params::api_cloudwatch_service_name}-clone",
1699       score   => 'INFINITY',
1700       require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1701                   Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1702     }
1703     pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
1704       constraint_type => 'order',
1705       first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
1706       second_resource => "${::heat::params::api_service_name}-clone",
1707       first_action    => 'start',
1708       second_action   => 'start',
1709       require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1710                           Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
1711     }
1712
1713     # Horizon and Keystone
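    # Keystone and Horizon both run under the httpd clone; the Keystone PKI
    # certificate and key files must exist before the resource is created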
1714     pacemaker::resource::service { $::apache::params::service_name:
1715       clone_params     => 'interleave=true',
1716       verify_on_create => true,
1717       require          => [File['/etc/keystone/ssl/certs/ca.pem'],
1718                            File['/etc/keystone/ssl/private/signing_key.pem'],
1719                            File['/etc/keystone/ssl/certs/signing_cert.pem']],
1720     }
1721
1722     # VSM (Cisco Nexus 1000V Virtual Supervisor Module)
1723     if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
1724       pacemaker::resource::ocf { 'vsm-p' :
1725         ocf_agent_name  => 'heartbeat:VirtualDomain',
1726         resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
1727         require         => Class['n1k_vsm'],
1728         meta_params     => 'resource-stickiness=INFINITY',
1729       }
1730       if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
1731         pacemaker::resource::ocf { 'vsm-s' :
1732           ocf_agent_name  => 'heartbeat:VirtualDomain',
1733           resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
1734           require         => Class['n1k_vsm'],
1735           meta_params     => 'resource-stickiness=INFINITY',
1736         }
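        # A -INFINITY colocation keeps the primary and secondary VSM on different controllers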
1737         pacemaker::constraint::colocation { 'vsm-colocation-constraint':
1738           source  => 'vsm-p',
1739           target  => 'vsm-s',
1740           score   => '-INFINITY',
1741           require => [Pacemaker::Resource::Ocf['vsm-p'],
1742                       Pacemaker::Resource::Ocf['vsm-s']],
1743         }
1744       }
1745     }
1746
1747   }
1748
1749 } #END STEP 4
1750
1751 if hiera('step') >= 5 {
1752
1753   if $pacemaker_master {
1754
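    # Create the Keystone admin role and endpoints from the bootstrap node only,
    # and only once httpd is managed by Pacemaker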
1755     class { '::keystone::roles::admin':
1756       require => Pacemaker::Resource::Service[$::apache::params::service_name],
1757     } ->
1758     class { '::keystone::endpoint':
1759       require => Pacemaker::Resource::Service[$::apache::params::service_name],
1760     }
1761   }
1762
1763 } #END STEP 5
1764
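# Record a package manifest for this step (the step number is appended to the file path)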
1765 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
1766 package_manifest { $package_manifest_name: ensure => present }