# Source: apex-tripleo-heat-templates.git / puppet/manifests/overcloud_controller_pacemaker.pp
# Commit: db3d865225fbb357a7166588ed1ee58d9bfc65e4
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Default retry behaviour for every Pacemaker resource declared in this
# catalog: retry up to 10 times with a 3 second pause between attempts.
Pcmk_resource <| |> {
  tries     => 10,
  try_sleep => 3,
}

include ::tripleo::packages
include ::tripleo::firewall

# Only the bootstrap node assembles the cluster and runs one-shot tasks
# such as database schema creation and sync.
if downcase(hiera('bootstrap_nodeid')) == $::hostname {
  $pacemaker_master = true
  $sync_db          = true
} else {
  $pacemaker_master = false
  $sync_db          = false
}

# Fencing is only switched on at step 5, once every STONITH device has
# been configured.
$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
$enable_load_balancer = hiera('enable_load_balancer', true)

# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 4
if hiera('step') >= 1 {

  # Kernel modules and sysctl tunables come from hiera; make sure every
  # module is loaded before any sysctl value that may depend on it.
  create_resources(kmod::load, hiera('kernel_modules'), {})
  create_resources(sysctl::value, hiera('sysctl_settings'), {})
  Exec <| tag == 'kmod::load' |>  -> Sysctl <| |>

  include ::timezone

  if count(hiera('ntp::servers')) > 0 {
    include ::ntp
  }

  $controller_node_ips = split(hiera('controller_node_ips'), ',')
  $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
  if $enable_load_balancer {
    # HAProxy is configured here but not started: Pacemaker takes over
    # service management and the VIPs are created as cluster resources
    # in step 2.
    class { '::tripleo::loadbalancer' :
      controller_hosts       => $controller_node_ips,
      controller_hosts_names => $controller_node_names,
      manage_vip             => false,
      mysql_clustercheck     => true,
      haproxy_service_manage => false,
    }
  }

  # Corosync wants a space-separated member list.
  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
  $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
  if $corosync_ipv6 {
    $cluster_setup_extras = { '--ipv6' => '' }
  } else {
    $cluster_setup_extras = {}
  }

  # The hacluster account must exist before its password can be set, and
  # the cluster is only assembled once Pacemaker itself is in place.
  user { 'hacluster':
    ensure => present,
  } ->
  class { '::pacemaker':
    hacluster_pwd => hiera('hacluster_pwd'),
  } ->
  class { '::pacemaker::corosync':
    cluster_members      => $pacemaker_cluster_members,
    setup_cluster        => $pacemaker_master,
    cluster_setup_extras => $cluster_setup_extras,
  }
  class { '::pacemaker::stonith':
    disable => !$enable_fencing,
  }
  if $enable_fencing {
    include ::tripleo::fencing

    # Enable STONITH only after all fencing devices have been created.
    Class['tripleo::fencing'] -> Class['pacemaker::stonith']
  }

  # FIXME(gfidente): sets 200secs as default start timeout op
  # param; until we can use pcmk global defaults we'll still
  # need to add it to every resource which redefines op params
  Pacemaker::Resource::Service {
    op_params => 'start timeout=200s stop timeout=200s',
  }

  # Only configure RabbitMQ in this step, don't start it yet to
  # avoid races where non-master nodes attempt to start without
  # config (eg. binding on 0.0.0.0)
  # The module ignores erlang_cookie if cluster_config is false
  $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
  if $rabbit_ipv6 {
    $rabbit_env = merge(hiera('rabbitmq_environment'), {
      'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
    })
  } else {
    $rabbit_env = hiera('rabbitmq_environment')
  }

  # The erlang cookie must be identical on all cluster members; write it
  # only after the package/config is in place so the file is not clobbered.
  class { '::rabbitmq':
    service_manage          => false,
    tcp_keepalive           => false,
    config_kernel_variables => hiera('rabbitmq_kernel_variables'),
    config_variables        => hiera('rabbitmq_config_variables'),
    environment_variables   => $rabbit_env,
  } ->
  file { '/var/lib/rabbitmq/.erlang.cookie':
    ensure  => file,
    owner   => 'rabbitmq',
    group   => 'rabbitmq',
    mode    => '0400',
    content => hiera('rabbitmq::erlang_cookie'),
    replace => true,
  }

  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
    include ::mongodb::globals
    include ::mongodb::client
    class { '::mongodb::server' :
      service_manage => false,
    }
  }

  # Memcached: installed and configured only; Pacemaker starts it in step 2.
  class {'::memcached' :
    service_manage => false,
  }

  # Redis: likewise left for Pacemaker to manage.
  class { '::redis' :
    service_manage => false,
    notify_service => false,
  }

  # Galera
  if str2bool(hiera('enable_galera', true)) {
    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
  } else {
    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
  }
  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
  $galera_nodes_count = count(split($galera_nodes, ','))

  # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
  # set bind-address to a hostname instead of an ip address; to move Mysql
  # from internal_api on another network we'll have to customize both
  # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
  $mysql_bind_host = hiera('mysql_bind_host')
  $mysqld_options = {
    'mysqld' => {
      'skip-name-resolve'             => '1',
      'binlog_format'                 => 'ROW',
      'default-storage-engine'        => 'innodb',
      'innodb_autoinc_lock_mode'      => '2',
      'innodb_locks_unsafe_for_binlog'=> '1',
      'query_cache_size'              => '0',
      'query_cache_type'              => '0',
      'bind-address'                  => $::hostname,
      'max_connections'               => hiera('mysql_max_connections'),
      'open_files_limit'              => '-1',
      'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
      'wsrep_cluster_name'            => 'galera_cluster',
      'wsrep_slave_threads'           => '1',
      'wsrep_certify_nonPK'           => '1',
      'wsrep_max_ws_rows'             => '131072',
      'wsrep_max_ws_size'             => '1073741824',
      'wsrep_debug'                   => '0',
      'wsrep_convert_LOCK_to_trx'     => '0',
      'wsrep_retry_autocommit'        => '1',
      'wsrep_auto_increment_control'  => '1',
      'wsrep_drupal_282555_workaround'=> '0',
      'wsrep_causal_reads'            => '0',
      'wsrep_sst_method'              => 'rsync',
      'wsrep_provider_options'        => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
    },
  }

  # MySQL/Galera is configured but not started: the galera OCF resource
  # created in step 2 owns the service lifecycle.
  class { '::mysql::server':
    create_root_user        => false,
    create_root_my_cnf      => false,
    config_file             => $mysql_config_file,
    override_options        => $mysqld_options,
    remove_default_accounts => $pacemaker_master,
    service_manage          => false,
    service_enabled         => false,
  }

}
201
if hiera('step') >= 2 {

  # NOTE(gfidente): the following vars are needed on all nodes so they
  # need to stay out of pacemaker_master conditional.
  # The addresses mangling will hopefully go away when we'll be able to
  # configure the connection string via hostnames, until then, we need to pass
  # the list of IPv6 addresses *with* port and without the brackets as 'members'
  # argument for the 'mongodb_replset' resource.
  if str2bool(hiera('mongodb::server::ipv6', false)) {
    $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
    $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  } else {
    $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  }
  $mongodb_replset = hiera('mongodb::server::replset')

  if $pacemaker_master {

    if $enable_load_balancer {

      include ::pacemaker::resource_defaults

      # Create an openstack-core dummy resource. See RHBZ 1290121
      pacemaker::resource::ocf { 'openstack-core':
        ocf_agent_name => 'heartbeat:Dummy',
        clone_params   => true,
      }
      # FIXME: we should not have to access tripleo::loadbalancer class
      # parameters here to configure pacemaker VIPs. The configuration
      # of pacemaker VIPs could move into puppet-tripleo or we should
      # make use of less specific hiera parameters here for the settings.
      pacemaker::resource::service { 'haproxy':
        clone_params => true,
      }

      # Every VIP below follows the same pattern: create the IP resource
      # (IPv6 addresses get a /64 netmask, IPv4 a /32), order it before
      # haproxy-clone and colocate it with haproxy-clone. Non-control VIPs
      # are only created when they differ from the control VIP.
      $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
      if is_ipv6_address($control_vip) {
        $control_vip_netmask = '64'
      } else {
        $control_vip_netmask = '32'
      }
      pacemaker::resource::ip { 'control_vip':
        ip_address   => $control_vip,
        cidr_netmask => $control_vip_netmask,
      }
      pacemaker::constraint::base { 'control_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${control_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require           => [Pacemaker::Resource::Service['haproxy'],
                              Pacemaker::Resource::Ip['control_vip']],
      }
      pacemaker::constraint::colocation { 'control_vip-with-haproxy':
        source  => "ip-${control_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['control_vip']],
      }

      $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
      if is_ipv6_address($public_vip) {
        $public_vip_netmask = '64'
      } else {
        $public_vip_netmask = '32'
      }
      if $public_vip and $public_vip != $control_vip {
        pacemaker::resource::ip { 'public_vip':
          ip_address   => $public_vip,
          cidr_netmask => $public_vip_netmask,
        }
        pacemaker::constraint::base { 'public_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${public_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['public_vip']],
        }
        pacemaker::constraint::colocation { 'public_vip-with-haproxy':
          source  => "ip-${public_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['public_vip']],
        }
      }

      $redis_vip = hiera('redis_vip')
      if is_ipv6_address($redis_vip) {
        $redis_vip_netmask = '64'
      } else {
        $redis_vip_netmask = '32'
      }
      if $redis_vip and $redis_vip != $control_vip {
        pacemaker::resource::ip { 'redis_vip':
          ip_address   => $redis_vip,
          cidr_netmask => $redis_vip_netmask,
        }
        pacemaker::constraint::base { 'redis_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${redis_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['redis_vip']],
        }
        pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
          source  => "ip-${redis_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['redis_vip']],
        }
      }

      $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
      if is_ipv6_address($internal_api_vip) {
        $internal_api_vip_netmask = '64'
      } else {
        $internal_api_vip_netmask = '32'
      }
      if $internal_api_vip and $internal_api_vip != $control_vip {
        pacemaker::resource::ip { 'internal_api_vip':
          ip_address   => $internal_api_vip,
          cidr_netmask => $internal_api_vip_netmask,
        }
        pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${internal_api_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['internal_api_vip']],
        }
        pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
          source  => "ip-${internal_api_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['internal_api_vip']],
        }
      }

      $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
      if is_ipv6_address($storage_vip) {
        $storage_vip_netmask = '64'
      } else {
        $storage_vip_netmask = '32'
      }
      if $storage_vip and $storage_vip != $control_vip {
        pacemaker::resource::ip { 'storage_vip':
          ip_address   => $storage_vip,
          cidr_netmask => $storage_vip_netmask,
        }
        pacemaker::constraint::base { 'storage_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${storage_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['storage_vip']],
        }
        pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
          source  => "ip-${storage_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['storage_vip']],
        }
      }

      $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
      if is_ipv6_address($storage_mgmt_vip) {
        $storage_mgmt_vip_netmask = '64'
      } else {
        $storage_mgmt_vip_netmask = '32'
      }
      if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
        pacemaker::resource::ip { 'storage_mgmt_vip':
          ip_address   => $storage_mgmt_vip,
          cidr_netmask => $storage_mgmt_vip_netmask,
        }
        pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${storage_mgmt_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['storage_mgmt_vip']],
        }
        pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
          source  => "ip-${storage_mgmt_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['storage_mgmt_vip']],
        }
      }

    }

    # Services configured-but-not-started in step 1 are handed over to
    # Pacemaker here, on the bootstrap node only.
    pacemaker::resource::service { $::memcached::params::service_name :
      clone_params => 'interleave=true',
      require      => Class['::memcached'],
    }

    pacemaker::resource::ocf { 'rabbitmq':
      ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
      resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
      clone_params    => 'ordered=true interleave=true',
      meta_params     => 'notify=true',
      require         => Class['::rabbitmq'],
    }

    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::resource::service { $::mongodb::params::service_name :
        op_params    => 'start timeout=370s stop timeout=200s',
        clone_params => true,
        require      => Class['::mongodb::server'],
      }
      # NOTE (spredzy) : The replset can only be run
      # once all the nodes have joined the cluster.
      mongodb_conn_validator { $mongo_node_ips_with_port :
        timeout => '600',
        require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
        before  => Mongodb_replset[$mongodb_replset],
      }
      mongodb_replset { $mongodb_replset :
        members => $mongo_node_ips_with_port_nobr,
      }
    }

    pacemaker::resource::ocf { 'galera' :
      ocf_agent_name  => 'heartbeat:galera',
      op_params       => 'promote timeout=300s on-fail=block',
      master_params   => '',
      meta_params     => "master-max=${galera_nodes_count} ordered=true",
      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
      require         => Class['::mysql::server'],
      before          => Exec['galera-ready'],
    }

    pacemaker::resource::ocf { 'redis':
      ocf_agent_name  => 'heartbeat:redis',
      master_params   => '',
      meta_params     => 'notify=true ordered=true interleave=true',
      resource_params => 'wait_last_known_master=true',
      require         => Class['::redis'],
    }

  }

  # Poll clustercheck until Galera reports the local node synced; gates
  # every database schema task below.
  exec { 'galera-ready' :
    command     => '/usr/bin/clustercheck >/dev/null',
    timeout     => 30,
    tries       => 180,
    try_sleep   => 10,
    environment => ['AVAILABLE_WHEN_READONLY=0'],
    require     => File['/etc/sysconfig/clustercheck'],
  }

  file { '/etc/sysconfig/clustercheck' :
    ensure  => file,
    content => "MYSQL_USERNAME=root\n
MYSQL_PASSWORD=''\n
MYSQL_HOST=localhost\n",
  }

  # Expose clustercheck over xinetd so HAProxy can health-check Galera.
  xinetd::service { 'galera-monitor' :
    port           => '9200',
    server         => '/usr/bin/clustercheck',
    per_source     => 'UNLIMITED',
    log_on_success => '',
    log_on_failure => 'HOST',
    flags          => 'REUSE',
    service_type   => 'UNLISTED',
    user           => 'root',
    group          => 'root',
    require        => File['/etc/sysconfig/clustercheck'],
  }

  # Create all the database schemas
  if $sync_db {
    class { '::keystone::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::glance::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::nova::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::nova::db::mysql_api':
      require => Exec['galera-ready'],
    }
    class { '::neutron::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::cinder::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::heat::db::mysql':
      require => Exec['galera-ready'],
    }

    if downcase(hiera('ceilometer_backend')) == 'mysql' {
      class { '::ceilometer::db::mysql':
        require => Exec['galera-ready'],
      }
    }

    class { '::sahara::db::mysql':
      require => Exec['galera-ready'],
    }
  }

  # pre-install swift here so we can build rings
  include ::swift

  # Ceph
  $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)

  if $enable_ceph {
    $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
    if str2bool(hiera('ceph_ipv6', false)) {
      $mon_host = hiera('ceph_mon_host_v6')
    } else {
      $mon_host = hiera('ceph_mon_host')
    }
    class { '::ceph::profile::params':
      mon_initial_members => $mon_initial_members,
      mon_host            => $mon_host,
    }
    include ::ceph::conf
    include ::ceph::profile::mon
  }

  if str2bool(hiera('enable_ceph_storage', false)) {
    if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
      exec { 'set selinux to permissive on boot':
        command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
        onlyif  => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
        path    => ['/usr/bin', '/usr/sbin'],
      }

      exec { 'set selinux to permissive':
        command => 'setenforce 0',
        onlyif  => "which setenforce && getenforce | grep -i 'enforcing'",
        path    => ['/usr/bin', '/usr/sbin'],
      } -> Class['ceph::profile::osd']
    }

    include ::ceph::conf
    include ::ceph::profile::osd
  }

  # NOTE(review): if both $enable_ceph and enable_external_ceph are true,
  # $mon_host and ::ceph::profile::params would be declared twice, which
  # Puppet rejects — presumably the two options are mutually exclusive in
  # practice; confirm against the deployment templates.
  if str2bool(hiera('enable_external_ceph', false)) {
    if str2bool(hiera('ceph_ipv6', false)) {
      $mon_host = hiera('ceph_mon_host_v6')
    } else {
      $mon_host = hiera('ceph_mon_host')
    }
    class { '::ceph::profile::params':
      mon_host => $mon_host,
    }
    include ::ceph::conf
    include ::ceph::profile::client
  }


} #END STEP 2
589
590 if hiera('step') >= 3 {
591
592   class { '::keystone':
593     sync_db          => $sync_db,
594     manage_service   => false,
595     enabled          => false,
596     enable_bootstrap => $pacemaker_master,
597   }
598   include ::keystone::config
599
600   #TODO: need a cleanup-keystone-tokens.sh solution here
601
602   file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
603     ensure  => 'directory',
604     owner   => 'keystone',
605     group   => 'keystone',
606     require => Package['keystone'],
607   }
608   file { '/etc/keystone/ssl/certs/signing_cert.pem':
609     content => hiera('keystone_signing_certificate'),
610     owner   => 'keystone',
611     group   => 'keystone',
612     notify  => Service['keystone'],
613     require => File['/etc/keystone/ssl/certs'],
614   }
615   file { '/etc/keystone/ssl/private/signing_key.pem':
616     content => hiera('keystone_signing_key'),
617     owner   => 'keystone',
618     group   => 'keystone',
619     notify  => Service['keystone'],
620     require => File['/etc/keystone/ssl/private'],
621   }
622   file { '/etc/keystone/ssl/certs/ca.pem':
623     content => hiera('keystone_ca_certificate'),
624     owner   => 'keystone',
625     group   => 'keystone',
626     notify  => Service['keystone'],
627     require => File['/etc/keystone/ssl/certs'],
628   }
629
630   $glance_backend = downcase(hiera('glance_backend', 'swift'))
631   case $glance_backend {
632       'swift': { $backend_store = 'glance.store.swift.Store' }
633       'file': { $backend_store = 'glance.store.filesystem.Store' }
634       'rbd': { $backend_store = 'glance.store.rbd.Store' }
635       default: { fail('Unrecognized glance_backend parameter.') }
636   }
637   $http_store = ['glance.store.http.Store']
638   $glance_store = concat($http_store, $backend_store)
639
640   if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
641     $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
642     pacemaker::resource::filesystem { 'glance-fs':
643       device       => hiera('glance_file_pcmk_device'),
644       directory    => hiera('glance_file_pcmk_directory'),
645       fstype       => hiera('glance_file_pcmk_fstype'),
646       fsoptions    => join([$secontext, hiera('glance_file_pcmk_options', '')],','),
647       clone_params => '',
648     }
649   }
650
651   # TODO: notifications, scrubber, etc.
652   include ::glance
653   include ::glance::config
654   class { '::glance::api':
655     known_stores   => $glance_store,
656     manage_service => false,
657     enabled        => false,
658   }
659   class { '::glance::registry' :
660     sync_db        => $sync_db,
661     manage_service => false,
662     enabled        => false,
663   }
664   include ::glance::notify::rabbitmq
665   include join(['::glance::backend::', $glance_backend])
666
667   $nova_ipv6 = hiera('nova::use_ipv6', false)
668   if $nova_ipv6 {
669     $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
670   } else {
671     $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
672   }
673
674   class { '::nova' :
675     memcached_servers => $memcached_servers
676   }
677
678   include ::nova::config
679
680   class { '::nova::api' :
681     sync_db        => $sync_db,
682     sync_db_api    => $sync_db,
683     manage_service => false,
684     enabled        => false,
685   }
686   class { '::nova::cert' :
687     manage_service => false,
688     enabled        => false,
689   }
690   class { '::nova::conductor' :
691     manage_service => false,
692     enabled        => false,
693   }
694   class { '::nova::consoleauth' :
695     manage_service => false,
696     enabled        => false,
697   }
698   class { '::nova::vncproxy' :
699     manage_service => false,
700     enabled        => false,
701   }
702   include ::nova::scheduler::filter
703   class { '::nova::scheduler' :
704     manage_service => false,
705     enabled        => false,
706   }
707   include ::nova::network::neutron
708
709   if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
710
711     # TODO(devvesa) provide non-controller ips for these services
712     $zookeeper_node_ips = hiera('neutron_api_node_ips')
713     $cassandra_node_ips = hiera('neutron_api_node_ips')
714
715     # Run zookeeper in the controller if configured
716     if hiera('enable_zookeeper_on_controller') {
717       class {'::tripleo::cluster::zookeeper':
718         zookeeper_server_ips => $zookeeper_node_ips,
719         # TODO: create a 'bind' hiera key for zookeeper
720         zookeeper_client_ip  => hiera('neutron::bind_host'),
721         zookeeper_hostnames  => split(hiera('controller_node_names'), ',')
722       }
723     }
724
725     # Run cassandra in the controller if configured
726     if hiera('enable_cassandra_on_controller') {
727       class {'::tripleo::cluster::cassandra':
728         cassandra_servers => $cassandra_node_ips,
729         # TODO: create a 'bind' hiera key for cassandra
730         cassandra_ip      => hiera('neutron::bind_host'),
731       }
732     }
733
734     class {'::tripleo::network::midonet::agent':
735       zookeeper_servers => $zookeeper_node_ips,
736       cassandra_seeds   => $cassandra_node_ips
737     }
738
739     class {'::tripleo::network::midonet::api':
740       zookeeper_servers    => $zookeeper_node_ips,
741       vip                  => hiera('tripleo::loadbalancer::public_virtual_ip'),
742       keystone_ip          => hiera('tripleo::loadbalancer::public_virtual_ip'),
743       keystone_admin_token => hiera('keystone::admin_token'),
744       # TODO: create a 'bind' hiera key for api
745       bind_address         => hiera('neutron::bind_host'),
746       admin_password       => hiera('admin_password')
747     }
748
749     # Configure Neutron
750     class {'::neutron':
751       service_plugins => []
752     }
753
754   }
755   else {
756     # Neutron class definitions
757     include ::neutron
758   }
759
760   include ::neutron::config
761   class { '::neutron::server' :
762     sync_db        => $sync_db,
763     manage_service => false,
764     enabled        => false,
765   }
766   include ::neutron::server::notifications
767   if  hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
768     include ::neutron::plugins::nuage
769   }
770   if  hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
771     include ::neutron::plugins::opencontrail
772   }
773   if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
774     class {'::neutron::plugins::midonet':
775       midonet_api_ip    => hiera('tripleo::loadbalancer::public_virtual_ip'),
776       keystone_tenant   => hiera('neutron::server::auth_tenant'),
777       keystone_password => hiera('neutron::server::auth_password')
778     }
779   }
  # Neutron agents are configured here but left stopped/disabled
  # (manage_service => false, enabled => false): on the pacemaker variant
  # the cluster, not systemd, starts them at a later step.
  if hiera('neutron::enable_dhcp_agent',true) {
    class { '::neutron::agents::dhcp' :
      manage_service => false,
      enabled        => false,
    }
    # Extra dnsmasq options consumed by the DHCP agent.
    # NOTE(review): the notify target 'neutron-dhcp-service' must match a
    # Service resource declared elsewhere in this manifest -- confirm it
    # exists, otherwise catalog compilation fails on the dangling reference.
    file { '/etc/neutron/dnsmasq-neutron.conf':
      content => hiera('neutron_dnsmasq_options'),
      owner   => 'neutron',
      group   => 'neutron',
      notify  => Service['neutron-dhcp-service'],
      require => Package['neutron'],
    }
  }
  if hiera('neutron::enable_l3_agent',true) {
    class { '::neutron::agents::l3' :
      manage_service => false,
      enabled        => false,
    }
  }
  if hiera('neutron::enable_metadata_agent',true) {
    class { '::neutron::agents::metadata':
      manage_service => false,
      enabled        => false,
    }
  }
  # ML2 core plugin plus the OVS agent (also pacemaker-managed).
  include ::neutron::plugins::ml2
  class { '::neutron::agents::ml2::ovs':
    manage_service => false,
    enabled        => false,
  }

  # Optional vendor mechanism drivers, keyed off the configured ML2
  # mechanism_drivers list in hiera.
  if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::ucsm
  }
  if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::nexus
    include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
  }
  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::nexus1000v

    # Nexus 1000v VEM agent; source/version default to undef when not set.
    class { '::neutron::agents::n1kv_vem':
      n1kv_source  => hiera('n1kv_vem_source', undef),
      n1kv_version => hiera('n1kv_vem_version', undef),
    }

    # Nexus 1000v VSM (supervisor module).
    class { '::n1k_vsm':
      n1kv_source  => hiera('n1kv_vsm_source', undef),
      n1kv_version => hiera('n1kv_vsm_version', undef),
    }
  }

  if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::bigswitch::restproxy
    include ::neutron::agents::bigswitch
  }
  # Shared ovs_use_veth toggle pushed into both agent config files, and the
  # notification driver for neutron itself.
  neutron_l3_agent_config {
    'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
  }
  neutron_dhcp_agent_config {
    'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
  }
  neutron_config {
    'DEFAULT/notification_driver': value => 'messaging';
  }
845
  # Cinder block storage. API/scheduler/volume are configured but left
  # stopped/disabled; pacemaker starts them in a later step. Only the
  # bootstrap node ($sync_db) runs the database sync.
  include ::cinder
  include ::cinder::config
  include ::tripleo::ssl::cinder_config
  class { '::cinder::api':
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  class { '::cinder::scheduler' :
    manage_service => false,
    enabled        => false,
  }
  class { '::cinder::volume' :
    manage_service => false,
    enabled        => false,
  }
  include ::cinder::glance
  include ::cinder::ceilometer
  # Loopback LVM volume used by the iSCSI backend; size value from hiera is
  # joined with an 'M' (megabytes) suffix.
  class { '::cinder::setup_test_volume':
    size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
  }

  # Optional iSCSI (LVM) backend. The backend name set here is collected
  # into $cinder_enabled_backends further below.
  $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
  if $cinder_enable_iscsi {
    $cinder_iscsi_backend = 'tripleo_iscsi'

    cinder::backend::iscsi { $cinder_iscsi_backend :
      iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
      iscsi_helper     => hiera('cinder_iscsi_helper'),
    }
  }

  # With Ceph enabled, create the configured pools and make the RBD backend
  # below depend on the cinder volumes pool existing first.
  # NOTE(review): $enable_ceph is assigned earlier in this manifest,
  # outside this excerpt.
  if $enable_ceph {

    $ceph_pools = hiera('ceph_pools')
    ceph::pool { $ceph_pools :
      pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
      pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
      size    => hiera('ceph::profile::params::osd_pool_default_size'),
    }

    $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]

  } else {
    $cinder_pool_requires = []
  }

  # Optional Ceph RBD backend.
  if hiera('cinder_enable_rbd_backend', false) {
    $cinder_rbd_backend = 'tripleo_ceph'

    cinder::backend::rbd { $cinder_rbd_backend :
      rbd_pool        => hiera('cinder_rbd_pool_name'),
      rbd_user        => hiera('ceph_client_user_name'),
      rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
      require         => $cinder_pool_requires,
    }
  }
903
904   if hiera('cinder_enable_eqlx_backend', false) {
905     $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
906
907     cinder::backend::eqlx { $cinder_eqlx_backend :
908       volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
909       san_ip              => hiera('cinder::backend::eqlx::san_ip', undef),
910       san_login           => hiera('cinder::backend::eqlx::san_login', undef),
911       san_password        => hiera('cinder::backend::eqlx::san_password', undef),
912       san_thin_provision  => hiera('cinder::backend::eqlx::san_thin_provision', undef),
913       eqlx_group_name     => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
914       eqlx_pool           => hiera('cinder::backend::eqlx::eqlx_pool', undef),
915       eqlx_use_chap       => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
916       eqlx_chap_login     => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
917       eqlx_chap_password  => hiera('cinder::backend::eqlx::eqlx_san_password', undef),
918     }
919   }
920
  # Optional Dell Storage Center iSCSI backend; all parameters come from
  # the cinder::backend::dellsc_iscsi::* hiera namespace.
  if hiera('cinder_enable_dellsc_backend', false) {
    $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')

    cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
      volume_backend_name   => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
      san_ip                => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
      san_login             => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
      san_password          => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
      dell_sc_ssn           => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
      iscsi_ip_address      => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
      iscsi_port            => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
      dell_sc_api_port      => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
      dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
      dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
    }
  }

  # Optional NetApp backend.
  if hiera('cinder_enable_netapp_backend', false) {
    $cinder_netapp_backend = hiera('cinder::backend::netapp::title')

    if hiera('cinder::backend::netapp::nfs_shares', undef) {
      $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
    }

    # NOTE(review): when the nfs_shares hiera key is unset,
    # $cinder_netapp_nfs_shares is never assigned and nfs_shares below is
    # passed an undefined variable -- relies on Puppet treating that as
    # undef; confirm this is intentional.
    cinder::backend::netapp { $cinder_netapp_backend :
      netapp_login                 => hiera('cinder::backend::netapp::netapp_login', undef),
      netapp_password              => hiera('cinder::backend::netapp::netapp_password', undef),
      netapp_server_hostname       => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
      netapp_server_port           => hiera('cinder::backend::netapp::netapp_server_port', undef),
      netapp_size_multiplier       => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
      netapp_storage_family        => hiera('cinder::backend::netapp::netapp_storage_family', undef),
      netapp_storage_protocol      => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
      netapp_transport_type        => hiera('cinder::backend::netapp::netapp_transport_type', undef),
      netapp_vfiler                => hiera('cinder::backend::netapp::netapp_vfiler', undef),
      netapp_volume_list           => hiera('cinder::backend::netapp::netapp_volume_list', undef),
      netapp_vserver               => hiera('cinder::backend::netapp::netapp_vserver', undef),
      netapp_partner_backend_name  => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
      nfs_shares                   => $cinder_netapp_nfs_shares,
      nfs_shares_config            => hiera('cinder::backend::netapp::nfs_shares_config', undef),
      netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
      netapp_controller_ips        => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
      netapp_sa_password           => hiera('cinder::backend::netapp::netapp_sa_password', undef),
      netapp_storage_pools         => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
      netapp_eseries_host_type     => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
      netapp_webservice_path       => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
    }
  }

  # Optional generic NFS backend. On SELinux-enabled hosts, allow qemu to
  # access NFS mounts before nfs-utils (and then the backend) is set up.
  if hiera('cinder_enable_nfs_backend', false) {
    $cinder_nfs_backend = 'tripleo_nfs'

    if str2bool($::selinux) {
      selboolean { 'virt_use_nfs':
        value      => on,
        persistent => true,
      } -> Package['nfs-utils']
    }

    package { 'nfs-utils': } ->
    cinder::backend::nfs { $cinder_nfs_backend:
      nfs_servers       => hiera('cinder_nfs_servers'),
      nfs_mount_options => hiera('cinder_nfs_mount_options',''),
      nfs_shares_config => '/etc/cinder/shares-nfs.conf',
    }
  }

  # Collect every backend name assigned above (undef entries dropped for
  # backends that were not enabled) and merge with operator-supplied ones.
  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
  class { '::cinder::backends' :
    enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
  }
991
  # Sahara: configured but stopped/disabled; pacemaker starts the services
  # later. Only the bootstrap node syncs the database.
  class { '::sahara':
    sync_db => $sync_db,
  }
  class { '::sahara::service::api':
    manage_service => false,
    enabled        => false,
  }
  class { '::sahara::service::engine':
    manage_service => false,
    enabled        => false,
  }

  # swift proxy
  # Started via systemd only once $non_pcmk_start becomes true (step >= 4).
  class { '::swift::proxy' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  include ::swift::proxy::proxy_logging
  include ::swift::proxy::healthcheck
  include ::swift::proxy::cache
  include ::swift::proxy::keystone
  include ::swift::proxy::authtoken
  include ::swift::proxy::staticweb
  include ::swift::proxy::ratelimit
  include ::swift::proxy::catch_errors
  include ::swift::proxy::tempurl
  include ::swift::proxy::formpost

  # swift storage
  if str2bool(hiera('enable_swift_storage', true)) {
    class {'::swift::storage::all':
      mount_check => str2bool(hiera('swift_mount_check')),
    }
    class {'::swift::storage::account':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    class {'::swift::storage::container':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    class {'::swift::storage::object':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    # Guarded so the manifest stays valid if another class already
    # declared /srv/node.
    if(!defined(File['/srv/node'])) {
      file { '/srv/node':
        ensure  => directory,
        owner   => 'swift',
        group   => 'swift',
        require => Package['openstack-swift'],
      }
    }
    # recon and healthcheck middleware for each storage service type.
    $swift_components = ['account', 'container', 'object']
    swift::storage::filter::recon { $swift_components : }
    swift::storage::filter::healthcheck { $swift_components : }
  }
1049
  # Ceilometer
  # Pick the metering database: explicit MySQL connection string when the
  # backend matches /mysql/, otherwise a MongoDB replica-set URI.
  # NOTE(review): $mongo_node_ips_with_port and $mongodb_replset are
  # assigned earlier in this manifest, outside this excerpt.
  case downcase(hiera('ceilometer_backend')) {
    /mysql/: {
      $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
    }
    default: {
      $mongo_node_string = join($mongo_node_ips_with_port, ',')
      $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
    }
  }
  # All ceilometer services are stopped/disabled here; pacemaker manages
  # their lifecycle.
  include ::ceilometer
  include ::ceilometer::config
  class { '::ceilometer::api' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::notification' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::central' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::collector' :
    manage_service => false,
    enabled        => false,
  }
  include ::ceilometer::expirer
  class { '::ceilometer::db' :
    database_connection => $ceilometer_database_connection,
    sync_db             => $sync_db,
  }
  include ::ceilometer::agent::auth

  # Prefix the expirer cron command with a random sleep (0-86399 s) so the
  # controllers do not all run the expirer at the same moment.
  Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }

  # Heat
  # Services stopped/disabled; pacemaker starts them in a later step.
  include ::heat::config
  class { '::heat' :
    sync_db             => $sync_db,
    notification_driver => 'messaging',
  }
  class { '::heat::api' :
    manage_service => false,
    enabled        => false,
  }
  class { '::heat::api_cfn' :
    manage_service => false,
    enabled        => false,
  }
  class { '::heat::api_cloudwatch' :
    manage_service => false,
    enabled        => false,
  }
  class { '::heat::engine' :
    manage_service => false,
    enabled        => false,
  }
1109
  # httpd/apache and horizon
  # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
  class { '::apache' :
    service_enable => false,
    # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
  }
  include ::keystone::wsgi::apache
  include ::apache::mod::status
  # Horizon only exposes profile support when the Cisco N1KV mechanism
  # driver is configured.
  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    $_profile_support = 'cisco'
  } else {
    $_profile_support = 'None'
  }
  $neutron_options   = {'profile_support' => $_profile_support }
  class { '::horizon':
    cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
    neutron_options => $neutron_options,
  }

  # Read-only SNMPv3 user plus the snmpd configuration (same credentials
  # repeated in the raw snmpd_config lines below).
  $snmpd_user = hiera('snmpd_readonly_user_name')
  snmp::snmpv3_user { $snmpd_user:
    authtype => 'MD5',
    authpass => hiera('snmpd_readonly_user_password'),
  }
  class { '::snmp':
    agentaddress => ['udp:161','udp6:[::1]:161'],
    snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
  }

  # Operator extension point: extra classes listed in hiera.
  hiera_include('controller_classes')
1140
} #END STEP 3

if hiera('step') >= 4 {
  # Periodic database maintenance crons, each individually toggleable via
  # hiera (all default to enabled).
  $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
  $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
  $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
  $heat_enable_db_purge = hiera('heat_enable_db_purge', true)

  if $keystone_enable_db_purge {
    include ::keystone::cron::token_flush
  }
  if $nova_enable_db_purge {
    include ::nova::cron::archive_deleted_rows
  }
  if $cinder_enable_db_purge {
    include ::cinder::cron::db_purge
  }
  if $heat_enable_db_purge {
    include ::heat::cron::purge_deleted
  }
1161
  # Pacemaker resources and constraints are created once, from the
  # bootstrap (master) node only; pcs propagates them cluster-wide.
  if $pacemaker_master {

    # Base start-order chain: haproxy -> openstack-core, and rabbitmq /
    # memcached / galera each before openstack-core.
    if $enable_load_balancer {
      pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
        constraint_type => 'order',
        first_resource  => 'haproxy-clone',
        second_resource => 'openstack-core-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service['haproxy'],
                            Pacemaker::Resource::Ocf['openstack-core']],
      }
    }

    pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::apache::params::service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::apache::params::service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
      constraint_type => 'order',
      first_resource  => 'rabbitmq-clone',
      second_resource => 'openstack-core-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['rabbitmq'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'memcached-then-openstack-core-constraint':
      constraint_type => 'order',
      first_resource  => 'memcached-clone',
      second_resource => 'openstack-core-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service['memcached'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    # galera is a master/slave resource: openstack-core starts only after
    # galera is promoted, not merely started.
    pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
      constraint_type => 'order',
      first_resource  => 'galera-master',
      second_resource => 'openstack-core-clone',
      first_action    => 'promote',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['galera'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }

    # Cinder
    # Start chain api -> scheduler -> volume, with scheduler colocated on
    # api and volume colocated on scheduler. volume is not cloned.
    pacemaker::resource::service { $::cinder::params::api_service :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::cinder::params::scheduler_service :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::cinder::params::volume_service : }

    pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::cinder::params::api_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['openstack-core'],
                          Pacemaker::Resource::Service[$::cinder::params::api_service]],
    }
    pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::cinder::params::api_service}-clone",
      second_resource => "${::cinder::params::scheduler_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
                          Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
    }
    pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
      source  => "${::cinder::params::scheduler_service}-clone",
      target  => "${::cinder::params::api_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
                  Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
    }
    pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
      constraint_type => 'order',
      first_resource  => "${::cinder::params::scheduler_service}-clone",
      second_resource => $::cinder::params::volume_service,
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
                          Pacemaker::Resource::Service[$::cinder::params::volume_service]],
    }
    pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
      source  => $::cinder::params::volume_service,
      target  => "${::cinder::params::scheduler_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
                  Pacemaker::Resource::Service[$::cinder::params::volume_service]],
    }

    # Sahara
    # api after openstack-core; engine cloned alongside.
    pacemaker::resource::service { $::sahara::params::api_service_name :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::sahara::params::engine_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::sahara::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }

    # Glance
    # Start chain registry -> api, api colocated with registry.
    pacemaker::resource::service { $::glance::params::registry_service_name :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::glance::params::api_service_name :
      clone_params => 'interleave=true',
    }

    pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::glance::params::registry_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::glance::params::registry_service_name}-clone",
      second_resource => "${::glance::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                          Pacemaker::Resource::Service[$::glance::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
      source  => "${::glance::params::api_service_name}-clone",
      target  => "${::glance::params::registry_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                  Pacemaker::Resource::Service[$::glance::params::api_service_name]],
    }
1317
    # Neutron services under pacemaker. The step==4 branch works around a
    # DB-population race by pre-starting neutron-server via systemd (see
    # referenced bugzillas) before pacemaker takes over the resource.
    if hiera('step') == 4 {
      # Neutron
      # NOTE(gfidente): Neutron will try to populate the database with some data
      # as soon as neutron-server is started; to avoid races we want to make this
      # happen only on one node, before normal Pacemaker initialization
      # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
      # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
      # will try to start the service while it's already started by Pacemaker
      # It would result to a deployment failure since systemd would return 1 to Puppet
      # and the overcloud would fail to deploy (6 would be returned).
      # This conditional prevents from a race condition during the deployment.
      # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
      exec { 'neutron-server-systemd-start-sleep' :
        command => 'systemctl start neutron-server && /usr/bin/sleep 5',
        path    => '/usr/bin',
        unless  => '/sbin/pcs resource show neutron-server',
      } ->
      pacemaker::resource::service { $::neutron::params::server_service:
        clone_params => 'interleave=true',
        require      => Pacemaker::Resource::Ocf['openstack-core']
      }
    } else {
      pacemaker::resource::service { $::neutron::params::server_service:
        clone_params => 'interleave=true',
        require      => Pacemaker::Resource::Ocf['openstack-core']
      }
    }
    # Cloned agent resources, each gated on the same hiera toggles used at
    # configuration time in step 3.
    if hiera('neutron::enable_l3_agent', true) {
      pacemaker::resource::service { $::neutron::params::l3_agent_service:
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::enable_dhcp_agent', true) {
      pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::enable_ovs_agent', true) {
      pacemaker::resource::service { $::neutron::params::ovs_agent_service:
        clone_params => 'interleave=true',
      }
    }
    # MidoNet deployments additionally run tomcat under pacemaker.
    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
      pacemaker::resource::service {'tomcat':
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::enable_metadata_agent', true) {
      pacemaker::resource::service { $::neutron::params::metadata_agent_service:
        clone_params => 'interleave=true',
      }
    }
    # OVS/netns cleanup OCF resources plus their ordering/colocation chain.
    if hiera('neutron::enable_ovs_agent', true) {
      pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
        ocf_agent_name => 'neutron:OVSCleanup',
        clone_params   => 'interleave=true',
      }
      pacemaker::resource::ocf { 'neutron-netns-cleanup':
        ocf_agent_name => 'neutron:NetnsCleanup',
        clone_params   => 'interleave=true',
      }

      # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
      pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::ovs_cleanup_service}-clone",
        second_resource => 'neutron-netns-cleanup-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
                            Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
      }
      pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
        source  => 'neutron-netns-cleanup-clone',
        target  => "${::neutron::params::ovs_cleanup_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
                    Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
      }
      pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
        constraint_type => 'order',
        first_resource  => 'neutron-netns-cleanup-clone',
        second_resource => "${::neutron::params::ovs_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
                            Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
      }
      pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
        source  => "${::neutron::params::ovs_agent_service}-clone",
        target  => 'neutron-netns-cleanup-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
                    Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
      }
    }
    # neutron-server starts only after openstack-core (keystone).
    pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::neutron::params::server_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['openstack-core'],
                          Pacemaker::Resource::Service[$::neutron::params::server_service]],
    }
1423     if hiera('neutron::enable_ovs_agent',true) {
1424       pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
1425         constraint_type => 'order',
1426         first_resource  => "${::neutron::params::ovs_agent_service}-clone",
1427         second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1428         first_action    => 'start',
1429         second_action   => 'start',
1430         require         => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1431                             Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1432       }
1433     }
1434     if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) {
1435       pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
1436         constraint_type => 'order',
1437         first_resource  => "${::neutron::params::server_service}-clone",
1438         second_resource => "${::neutron::params::ovs_agent_service}-clone",
1439         first_action    => 'start',
1440         second_action   => 'start',
1441         require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1442                             Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1443     }
1444
1445       pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
1446         source  => "${::neutron::params::dhcp_agent_service}-clone",
1447         target  => "${::neutron::params::ovs_agent_service}-clone",
1448         score   => 'INFINITY',
1449         require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1450                     Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1451       }
1452     }
1453     if hiera('neutron::enable_dhcp_agent',true) and hiera('l3_agent_service',true) {
1454       pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
1455         constraint_type => 'order',
1456         first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
1457         second_resource => "${::neutron::params::l3_agent_service}-clone",
1458         first_action    => 'start',
1459         second_action   => 'start',
1460         require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1461                             Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
1462       }
1463       pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
1464         source  => "${::neutron::params::l3_agent_service}-clone",
1465         target  => "${::neutron::params::dhcp_agent_service}-clone",
1466         score   => 'INFINITY',
1467         require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1468                     Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
1469       }
1470     }
    # When both the L3 and metadata agents are enabled, start the L3 agent
    # first and pin the metadata agent to the same node.
    if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) {
      # Order: L3 agent clone starts before the metadata agent clone.
      pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::l3_agent_service}-clone",
        second_resource => "${::neutron::params::metadata_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
                            Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
      }
      # Colocation: metadata agent runs wherever the L3 agent runs.
      pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
        source  => "${::neutron::params::metadata_agent_service}-clone",
        target  => "${::neutron::params::l3_agent_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
                    Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
      }
    }
    # MidoNet-specific start ordering.  With the MidoNet core plugin the
    # DHCP/metadata agents depend on the MidoNet API served by Tomcat, so a
    # dedicated constraint chain is declared instead of the OVS-based one.
    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
      #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
      # Order: neutron-server clone before the DHCP agent clone.
      pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::server_service}-clone",
        second_resource => "${::neutron::params::dhcp_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
                            Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
      }
      # Order: DHCP agent clone before the metadata agent clone.
      pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
        second_resource => "${::neutron::params::metadata_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                            Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
      # Order: metadata agent clone before the Tomcat clone (MidoNet API).
      # 'tomcat-clone' / Service['tomcat'] are declared elsewhere in this
      # manifest, outside this chunk.
      pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::metadata_agent_service}-clone",
        second_resource => 'tomcat-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
                            Pacemaker::Resource::Service['tomcat']],
      }
      # Colocation: metadata agent runs on the same node as the DHCP agent.
      pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
        source  => "${::neutron::params::metadata_agent_service}-clone",
        target  => "${::neutron::params::dhcp_agent_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                    Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
    }
1526
    # Nova
    # Register the Nova control-plane services as interleaved Pacemaker
    # clones; start ordering and colocation between them is declared below.
    pacemaker::resource::service { $::nova::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::nova::params::conductor_service_name :
      clone_params => 'interleave=true',
    }
    # NOTE(review): only consoleauth declares an explicit require on the
    # openstack-core OCF resource here; the other Nova services are tied to
    # it transitively through the keystone-then-nova-consoleauth order
    # constraint chain below.
    pacemaker::resource::service { $::nova::params::consoleauth_service_name :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::nova::params::vncproxy_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::nova::params::scheduler_service_name :
      clone_params => 'interleave=true',
    }
1544
    # Nova start-order chain with pairwise colocation:
    #   openstack-core -> consoleauth -> vncproxy -> api -> scheduler -> conductor
    # Keystone (openstack-core) must be up before consoleauth starts.
    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::nova::params::consoleauth_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    # consoleauth before vncproxy, and vncproxy colocated with consoleauth.
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
      second_resource => "${::nova::params::vncproxy_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
      source  => "${::nova::params::vncproxy_service_name}-clone",
      target  => "${::nova::params::consoleauth_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    # vncproxy before api, and api colocated with vncproxy.
    pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::vncproxy_service_name}-clone",
      second_resource => "${::nova::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                          Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
      source  => "${::nova::params::api_service_name}-clone",
      target  => "${::nova::params::vncproxy_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                  Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    # api before scheduler, and scheduler colocated with api.
    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::api_service_name}-clone",
      second_resource => "${::nova::params::scheduler_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                          Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
      source  => "${::nova::params::scheduler_service_name}-clone",
      target  => "${::nova::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    # scheduler before conductor, and conductor colocated with scheduler.
    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::scheduler_service_name}-clone",
      second_resource => "${::nova::params::conductor_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                          Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
      source  => "${::nova::params::conductor_service_name}-clone",
      target  => "${::nova::params::scheduler_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
1618
    # Ceilometer
    # The central agent's dependencies vary with the metering backend: with
    # a MySQL backend it only needs keystone (openstack-core); any other
    # backend (i.e. MongoDB) additionally requires the mongod service.
    case downcase(hiera('ceilometer_backend')) {
      /mysql/: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => Pacemaker::Resource::Ocf['openstack-core'],
        }
      }
      default: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => [Pacemaker::Resource::Ocf['openstack-core'],
                          Pacemaker::Resource::Service[$::mongodb::params::service_name]],
        }
      }
    }
    pacemaker::resource::service { $::ceilometer::params::collector_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
      clone_params => 'interleave=true',
    }
    # A 10 s Delay OCF resource, ordered after ceilometer-api below;
    # presumably a grace period before dependent services proceed -- the
    # reason is not visible in this manifest.
    pacemaker::resource::ocf { 'delay' :
      ocf_agent_name  => 'heartbeat:Delay',
      clone_params    => 'interleave=true',
      resource_params => 'startdelay=10',
    }
    # Fedora doesn't know `require-all` parameter for constraints yet
    if $::operatingsystem == 'Fedora' {
      $redis_ceilometer_constraint_params = undef
    } else {
      $redis_ceilometer_constraint_params = 'require-all=false'
    }
    # Ceilometer start-order chain:
    #   redis (promote) / keystone -> central agent -> collector -> api -> delay
    # The redis constraint is advisory (require-all=false) except on Fedora,
    # where the parameter is unsupported (see variable set above).
    pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
      constraint_type   => 'order',
      first_resource    => 'redis-master',
      second_resource   => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_ceilometer_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
    }
    # Keystone (openstack-core) must be up before the central agent starts.
    pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    # central agent before collector.
    pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
      second_resource => "${::ceilometer::params::collector_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    # collector before api, with api colocated alongside the collector.
    pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::collector_service_name}-clone",
      second_resource => "${::ceilometer::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
      source  => "${::ceilometer::params::api_service_name}-clone",
      target  => "${::ceilometer::params::collector_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    # api before the Delay resource, with the delay colocated with the api.
    pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::api_service_name}-clone",
      second_resource => 'delay-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                          Pacemaker::Resource::Ocf['delay']],
    }
    pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
      source  => 'delay-clone',
      target  => "${::ceilometer::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Ocf['delay']],
    }
    # With a MongoDB backend, mongod must also start before the central agent.
    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
        constraint_type => 'order',
        first_resource  => "${::mongodb::params::service_name}-clone",
        second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                            Pacemaker::Resource::Service[$::mongodb::params::service_name]],
      }
    }
1726
    # Heat
    # Register the Heat services as interleaved Pacemaker clones, then chain
    # their startup:
    #   keystone -> heat-api -> heat-api-cfn -> heat-api-cloudwatch -> heat-engine
    # with each successive pair colocated on the same node.
    pacemaker::resource::service { $::heat::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cfn_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::engine_service_name :
      clone_params => 'interleave=true',
    }
    # Keystone (openstack-core) must be up before heat-api starts.
    pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    # heat-api before heat-api-cfn, colocated together.
    pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_service_name}-clone",
      second_resource => "${::heat::params::api_cfn_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
      source  => "${::heat::params::api_cfn_service_name}-clone",
      target  => "${::heat::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_service_name]],
    }
    # heat-api-cfn before heat-api-cloudwatch, colocated together.
    pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cfn_service_name}-clone",
      second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                          Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
      source  => "${::heat::params::api_cloudwatch_service_name}-clone",
      target  => "${::heat::params::api_cfn_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
    }
    # heat-api-cloudwatch before heat-engine, colocated together.
    pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
      second_resource => "${::heat::params::engine_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                          Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
      source  => "${::heat::params::engine_service_name}-clone",
      target  => "${::heat::params::api_cloudwatch_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                  Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
    # The ceilometer notification agent must start before heat-api.
    pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
    }
1806
1807     # Horizon and Keystone
1808     pacemaker::resource::service { $::apache::params::service_name:
1809       clone_params     => 'interleave=true',
1810       verify_on_create => true,
1811       require          => [File['/etc/keystone/ssl/certs/ca.pem'],
1812       File['/etc/keystone/ssl/private/signing_key.pem'],
1813       File['/etc/keystone/ssl/certs/signing_cert.pem']],
1814     }
1815
    #VSM
    # Cisco Nexus 1000V Virtual Supervisor Module: when the n1kv ML2
    # mechanism driver is enabled, run the primary VSM VM (and optionally
    # the secondary) as VirtualDomain OCF resources.  INFINITY stickiness
    # prevents Pacemaker from migrating the VMs once placed.
    if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
      pacemaker::resource::ocf { 'vsm-p' :
        ocf_agent_name  => 'heartbeat:VirtualDomain',
        resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
        require         => Class['n1k_vsm'],
        meta_params     => 'resource-stickiness=INFINITY',
      }
      if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
        pacemaker::resource::ocf { 'vsm-s' :
          ocf_agent_name  => 'heartbeat:VirtualDomain',
          resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
          require         => Class['n1k_vsm'],
          meta_params     => 'resource-stickiness=INFINITY',
        }
        # Anti-colocation (-INFINITY): keep the primary and secondary VSM
        # on different controller nodes for redundancy.
        pacemaker::constraint::colocation { 'vsm-colocation-contraint':
          source  => 'vsm-p',
          target  => 'vsm-s',
          score   => '-INFINITY',
          require => [Pacemaker::Resource::Ocf['vsm-p'],
                      Pacemaker::Resource::Ocf['vsm-s']],
        }
      }
    }
1840
1841   }
1842
1843 } #END STEP 4
1844
if hiera('step') >= 5 {

  if $pacemaker_master {

    # Keystone admin role and service endpoints are created only on the
    # bootstrap node, and only after Apache (which serves Keystone) is
    # managed by Pacemaker; roles are created before endpoints (chained).
    class {'::keystone::roles::admin' :
      require => Pacemaker::Resource::Service[$::apache::params::service_name],
    } ->
    class {'::keystone::endpoint' :
      require => Pacemaker::Resource::Service[$::apache::params::service_name],
    }
    # The Heat stack-user domain needs the Keystone admin role in place first.
    include ::heat::keystone::domain
    Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']

  } else {
    # On non-master controller we don't need to create Keystone resources again
    # (heat.conf still needs the domain settings, so include the class with
    # all creation disabled).
    class { '::heat::keystone::domain':
      manage_domain => false,
      manage_user   => false,
      manage_role   => false,
    }
  }

} #END STEP 5
1868
# Record the set of installed packages for this deployment step.  join()
# with a single array argument concatenates the elements with no separator,
# so the step number is appended directly to the path and each step writes
# its own manifest file.
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
package_manifest{$package_manifest_name: ensure => present}