Merge "Upgrades: initialization command/snippet"
[apex-tripleo-heat-templates.git] / puppet / manifests / overcloud_controller_pacemaker.pp
1 # Copyright 2015 Red Hat, Inc.
2 # All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
14 # under the License.
15
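# Default retry behaviour for every pcmk resource defined in this manifest:
# retry pcs operations (10 tries, 3s apart) to tolerate transient CIB/pcs
# errors while the cluster is still settling.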
16 Pcmk_resource <| |> {
17   tries     => 10,
18   try_sleep => 3,
19 }
20
21 include ::tripleo::packages
22 include ::tripleo::firewall
23
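# The node named in bootstrap_nodeid acts as the Pacemaker master: it creates
# the cluster and runs the one-off database syncs; other controllers just join.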
24 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
25   $pacemaker_master = true
26   $sync_db = true
27 } else {
28   $pacemaker_master = false
29   $sync_db = false
30 }
31
32 $enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
33 $enable_load_balancer = hiera('enable_load_balancer', true)
34
35 # When to start and enable services which haven't been Pacemakerized
36 # FIXME: remove when we start all OpenStack services using Pacemaker
37 # (occurrences of this variable will be gradually replaced with false)
38 $non_pcmk_start = hiera('step') >= 4
39
40 if hiera('step') >= 1 {
41
42   create_resources(kmod::load, hiera('kernel_modules'), {})
43   create_resources(sysctl::value, hiera('sysctl_settings'), {})
44   Exec <| tag == 'kmod::load' |>  -> Sysctl <| |>
45
46   include ::timezone
47
48   if count(hiera('ntp::servers')) > 0 {
49     include ::ntp
50   }
51
52   $controller_node_ips = split(hiera('controller_node_ips'), ',')
53   $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
54   if $enable_load_balancer {
55     class { '::tripleo::loadbalancer' :
56       controller_hosts       => $controller_node_ips,
57       controller_hosts_names => $controller_node_names,
58       manage_vip             => false,
59       mysql_clustercheck     => true,
60       haproxy_service_manage => false,
61     }
62   }
63
64   $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
65   $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
66   if $corosync_ipv6 {
67     $cluster_setup_extras = { '--ipv6' => '' }
68   } else {
69     $cluster_setup_extras = {}
70   }
71   user { 'hacluster':
72     ensure => present,
73   } ->
74   class { '::pacemaker':
75     hacluster_pwd => hiera('hacluster_pwd'),
76   } ->
77   class { '::pacemaker::corosync':
78     cluster_members      => $pacemaker_cluster_members,
79     setup_cluster        => $pacemaker_master,
80     cluster_setup_extras => $cluster_setup_extras,
81   }
82   class { '::pacemaker::stonith':
83     disable => !$enable_fencing,
84   }
85   if $enable_fencing {
86     include ::tripleo::fencing
87
88     # enable stonith after all fencing devices have been created
89     Class['tripleo::fencing'] -> Class['pacemaker::stonith']
90   }
91
92   # FIXME(gfidente): sets 200s as the default start/stop timeout op
93   # param; until we can use pcmk global defaults we'll still
94   # need to add it to every resource which redefines op params
95   Pacemaker::Resource::Service {
96     op_params => 'start timeout=200s stop timeout=200s',
97   }
98
99   # Only configure RabbitMQ in this step; don't start it yet, to
100   # avoid races where non-master nodes attempt to start without
101   # config (e.g. binding on 0.0.0.0)
102   # The module ignores erlang_cookie if cluster_config is false
103   $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
104   if $rabbit_ipv6 {
105       $rabbit_env = merge(hiera('rabbitmq_environment'), {
106         'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
107       })
108   } else {
109     $rabbit_env = hiera('rabbitmq_environment')
110   }
111
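  # The Erlang cookie written below must be identical on every controller so
  # that the rabbitmq-cluster OCF resource created at step 2 can join the nodes.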
112   class { '::rabbitmq':
113     service_manage          => false,
114     tcp_keepalive           => false,
115     config_kernel_variables => hiera('rabbitmq_kernel_variables'),
116     config_variables        => hiera('rabbitmq_config_variables'),
117     environment_variables   => $rabbit_env,
118   } ->
119   file { '/var/lib/rabbitmq/.erlang.cookie':
120     ensure  => file,
121     owner   => 'rabbitmq',
122     group   => 'rabbitmq',
123     mode    => '0400',
124     content => hiera('rabbitmq::erlang_cookie'),
125     replace => true,
126   }
127
128   if downcase(hiera('ceilometer_backend')) == 'mongodb' {
129     include ::mongodb::globals
130     class { '::mongodb::server' :
131       service_manage => false,
132     }
133   }
134
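  # Memcached and Redis are installed and configured here but left stopped;
  # Pacemaker takes over starting them (and promoting a Redis master) at step 2.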
135   # Memcached
136   class {'::memcached' :
137     service_manage => false,
138   }
139
140   # Redis
141   class { '::redis' :
142     service_manage => false,
143     notify_service => false,
144   }
145
146   # Galera
147   if str2bool(hiera('enable_galera', true)) {
148     $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
149   } else {
150     $mysql_config_file = '/etc/my.cnf.d/server.cnf'
151   }
152   $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
153   $galera_nodes_count = count(split($galera_nodes, ','))
154
155   # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
156   # set bind-address to a hostname instead of an IP address; to move MySQL
157   # from internal_api to another network we'll have to customize both
158   # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
159   $mysql_bind_host = hiera('mysql_bind_host')
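  # MariaDB/Galera is only configured at this step; the galera OCF resource
  # created at step 2 bootstraps the cluster using the wsrep settings below.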
160   $mysqld_options = {
161     'mysqld' => {
162       'skip-name-resolve'             => '1',
163       'binlog_format'                 => 'ROW',
164       'default-storage-engine'        => 'innodb',
165       'innodb_autoinc_lock_mode'      => '2',
166       'innodb_locks_unsafe_for_binlog'=> '1',
167       'query_cache_size'              => '0',
168       'query_cache_type'              => '0',
169       'bind-address'                  => $::hostname,
170       'max_connections'               => hiera('mysql_max_connections'),
171       'open_files_limit'              => '-1',
172       'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
173       'wsrep_cluster_name'            => 'galera_cluster',
174       'wsrep_slave_threads'           => '1',
175       'wsrep_certify_nonPK'           => '1',
176       'wsrep_max_ws_rows'             => '131072',
177       'wsrep_max_ws_size'             => '1073741824',
178       'wsrep_debug'                   => '0',
179       'wsrep_convert_LOCK_to_trx'     => '0',
180       'wsrep_retry_autocommit'        => '1',
181       'wsrep_auto_increment_control'  => '1',
182       'wsrep_drupal_282555_workaround'=> '0',
183       'wsrep_causal_reads'            => '0',
184       'wsrep_sst_method'              => 'rsync',
185       'wsrep_provider_options'        => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
186     },
187   }
188
189   class { '::mysql::server':
190     create_root_user        => false,
191     create_root_my_cnf      => false,
192     config_file             => $mysql_config_file,
193     override_options        => $mysqld_options,
194     remove_default_accounts => $pacemaker_master,
195     service_manage          => false,
196     service_enabled         => false,
197   }
198
199 }
200
201 if hiera('step') >= 2 {
202
203   # NOTE(gfidente): the following vars are needed on all nodes, so they
204   # need to stay out of the pacemaker_master conditional
205   $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
206   $mongodb_replset = hiera('mongodb::server::replset')
207
208   if $pacemaker_master {
209
210     if $enable_load_balancer {
211
212       include ::pacemaker::resource_defaults
213
214       # Create an openstack-core dummy resource. See RHBZ 1290121
215       pacemaker::resource::ocf { 'openstack-core':
216         ocf_agent_name => 'heartbeat:Dummy',
217         clone_params   => true,
218       }
219       # FIXME: we should not have to access tripleo::loadbalancer class
220       # parameters here to configure pacemaker VIPs. The configuration
221       # of pacemaker VIPs could move into puppet-tripleo or we should
222       # make use of less specific hiera parameters here for the settings.
223       pacemaker::resource::service { 'haproxy':
224         clone_params => true,
225       }
226
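      # Pattern used for every VIP below: create an IP resource, order it to
      # start before haproxy-clone, and colocate it with haproxy-clone so the
      # VIP always runs on a node where haproxy is active. IPv6 VIPs get a /64
      # netmask, IPv4 VIPs a /32.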
227       $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
228       if is_ipv6_address($control_vip) {
229         $control_vip_netmask = '64'
230       } else {
231         $control_vip_netmask = '32'
232       }
233       pacemaker::resource::ip { 'control_vip':
234         ip_address   => $control_vip,
235         cidr_netmask => $control_vip_netmask,
236       }
237       pacemaker::constraint::base { 'control_vip-then-haproxy':
238         constraint_type   => 'order',
239         first_resource    => "ip-${control_vip}",
240         second_resource   => 'haproxy-clone',
241         first_action      => 'start',
242         second_action     => 'start',
243         constraint_params => 'kind=Optional',
244         require           => [Pacemaker::Resource::Service['haproxy'],
245                               Pacemaker::Resource::Ip['control_vip']],
246       }
247       pacemaker::constraint::colocation { 'control_vip-with-haproxy':
248         source  => "ip-${control_vip}",
249         target  => 'haproxy-clone',
250         score   => 'INFINITY',
251         require => [Pacemaker::Resource::Service['haproxy'],
252                     Pacemaker::Resource::Ip['control_vip']],
253       }
254
255       $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
256       if is_ipv6_address($public_vip) {
257         $public_vip_netmask = '64'
258       } else {
259         $public_vip_netmask = '32'
260       }
261       if $public_vip and $public_vip != $control_vip {
262         pacemaker::resource::ip { 'public_vip':
263           ip_address   => $public_vip,
264           cidr_netmask => $public_vip_netmask,
265         }
266         pacemaker::constraint::base { 'public_vip-then-haproxy':
267           constraint_type   => 'order',
268           first_resource    => "ip-${public_vip}",
269           second_resource   => 'haproxy-clone',
270           first_action      => 'start',
271           second_action     => 'start',
272           constraint_params => 'kind=Optional',
273           require           => [Pacemaker::Resource::Service['haproxy'],
274                                 Pacemaker::Resource::Ip['public_vip']],
275         }
276         pacemaker::constraint::colocation { 'public_vip-with-haproxy':
277           source  => "ip-${public_vip}",
278           target  => 'haproxy-clone',
279           score   => 'INFINITY',
280           require => [Pacemaker::Resource::Service['haproxy'],
281                       Pacemaker::Resource::Ip['public_vip']],
282         }
283       }
284
285       $redis_vip = hiera('redis_vip')
286       if is_ipv6_address($redis_vip) {
287         $redis_vip_netmask = '64'
288       } else {
289         $redis_vip_netmask = '32'
290       }
291       if $redis_vip and $redis_vip != $control_vip {
292         pacemaker::resource::ip { 'redis_vip':
293           ip_address   => $redis_vip,
294           cidr_netmask => $redis_vip_netmask,
295         }
296         pacemaker::constraint::base { 'redis_vip-then-haproxy':
297           constraint_type   => 'order',
298           first_resource    => "ip-${redis_vip}",
299           second_resource   => 'haproxy-clone',
300           first_action      => 'start',
301           second_action     => 'start',
302           constraint_params => 'kind=Optional',
303           require           => [Pacemaker::Resource::Service['haproxy'],
304                                 Pacemaker::Resource::Ip['redis_vip']],
305         }
306         pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
307           source  => "ip-${redis_vip}",
308           target  => 'haproxy-clone',
309           score   => 'INFINITY',
310           require => [Pacemaker::Resource::Service['haproxy'],
311                       Pacemaker::Resource::Ip['redis_vip']],
312         }
313       }
314
315       $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
316       if is_ipv6_address($internal_api_vip) {
317         $internal_api_vip_netmask = '64'
318       } else {
319         $internal_api_vip_netmask = '32'
320       }
321       if $internal_api_vip and $internal_api_vip != $control_vip {
322         pacemaker::resource::ip { 'internal_api_vip':
323           ip_address   => $internal_api_vip,
324           cidr_netmask => $internal_api_vip_netmask,
325         }
326         pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
327           constraint_type   => 'order',
328           first_resource    => "ip-${internal_api_vip}",
329           second_resource   => 'haproxy-clone',
330           first_action      => 'start',
331           second_action     => 'start',
332           constraint_params => 'kind=Optional',
333           require           => [Pacemaker::Resource::Service['haproxy'],
334                                 Pacemaker::Resource::Ip['internal_api_vip']],
335         }
336         pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
337           source  => "ip-${internal_api_vip}",
338           target  => 'haproxy-clone',
339           score   => 'INFINITY',
340           require => [Pacemaker::Resource::Service['haproxy'],
341                       Pacemaker::Resource::Ip['internal_api_vip']],
342         }
343       }
344
345       $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
346       if is_ipv6_address($storage_vip) {
347         $storage_vip_netmask = '64'
348       } else {
349         $storage_vip_netmask = '32'
350       }
351       if $storage_vip and $storage_vip != $control_vip {
352         pacemaker::resource::ip { 'storage_vip':
353           ip_address   => $storage_vip,
354           cidr_netmask => $storage_vip_netmask,
355         }
356         pacemaker::constraint::base { 'storage_vip-then-haproxy':
357           constraint_type   => 'order',
358           first_resource    => "ip-${storage_vip}",
359           second_resource   => 'haproxy-clone',
360           first_action      => 'start',
361           second_action     => 'start',
362           constraint_params => 'kind=Optional',
363           require           => [Pacemaker::Resource::Service['haproxy'],
364                                 Pacemaker::Resource::Ip['storage_vip']],
365         }
366         pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
367           source  => "ip-${storage_vip}",
368           target  => 'haproxy-clone',
369           score   => 'INFINITY',
370           require => [Pacemaker::Resource::Service['haproxy'],
371                       Pacemaker::Resource::Ip['storage_vip']],
372         }
373       }
374
375       $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
376       if is_ipv6_address($storage_mgmt_vip) {
377         $storage_mgmt_vip_netmask = '64'
378       } else {
379         $storage_mgmt_vip_netmask = '32'
380       }
381       if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
382         pacemaker::resource::ip { 'storage_mgmt_vip':
383           ip_address   => $storage_mgmt_vip,
384           cidr_netmask => $storage_mgmt_vip_netmask,
385         }
386         pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
387           constraint_type   => 'order',
388           first_resource    => "ip-${storage_mgmt_vip}",
389           second_resource   => 'haproxy-clone',
390           first_action      => 'start',
391           second_action     => 'start',
392           constraint_params => 'kind=Optional',
393           require           => [Pacemaker::Resource::Service['haproxy'],
394                                 Pacemaker::Resource::Ip['storage_mgmt_vip']],
395         }
396         pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
397           source  => "ip-${storage_mgmt_vip}",
398           target  => 'haproxy-clone',
399           score   => 'INFINITY',
400           require => [Pacemaker::Resource::Service['haproxy'],
401                       Pacemaker::Resource::Ip['storage_mgmt_vip']],
402         }
403       }
404
405     }
406
407     pacemaker::resource::service { $::memcached::params::service_name :
408       clone_params => 'interleave=true',
409       require      => Class['::memcached'],
410     }
411
412     pacemaker::resource::ocf { 'rabbitmq':
413       ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
414       resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
415       clone_params    => 'ordered=true interleave=true',
416       meta_params     => 'notify=true',
417       require         => Class['::rabbitmq'],
418     }
419
420     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
421       pacemaker::resource::service { $::mongodb::params::service_name :
422         op_params    => 'start timeout=370s stop timeout=200s',
423         clone_params => true,
424         require      => Class['::mongodb::server'],
425       }
426       # NOTE(spredzy): the replset can only be configured
427       # once all the nodes have joined the cluster.
428       mongodb_conn_validator { $mongo_node_ips_with_port :
429         timeout => '600',
430         require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
431         before  => Mongodb_replset[$mongodb_replset],
432       }
433       mongodb_replset { $mongodb_replset :
434         members => $mongo_node_ips_with_port,
435       }
436     }
437
438     pacemaker::resource::ocf { 'galera' :
439       ocf_agent_name  => 'heartbeat:galera',
440       op_params       => 'promote timeout=300s on-fail=block',
441       master_params   => '',
442       meta_params     => "master-max=${galera_nodes_count} ordered=true",
443       resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
444       require         => Class['::mysql::server'],
445       before          => Exec['galera-ready'],
446     }
447
448     pacemaker::resource::ocf { 'redis':
449       ocf_agent_name  => 'heartbeat:redis',
450       master_params   => '',
451       meta_params     => 'notify=true ordered=true interleave=true',
452       resource_params => 'wait_last_known_master=true',
453       require         => Class['::redis'],
454     }
455
456   }
457
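  # Wait (up to 180 tries x 10s) until the local node reports a synced Galera
  # state via clustercheck before any database schema is created.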
458   exec { 'galera-ready' :
459     command     => '/usr/bin/clustercheck >/dev/null',
460     timeout     => 30,
461     tries       => 180,
462     try_sleep   => 10,
463     environment => ['AVAILABLE_WHEN_READONLY=0'],
464     require     => File['/etc/sysconfig/clustercheck'],
465   }
466
467   file { '/etc/sysconfig/clustercheck' :
468     ensure  => file,
469     content => "MYSQL_USERNAME=root\n
470 MYSQL_PASSWORD=''\n
471 MYSQL_HOST=localhost\n",
472   }
473
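  # Expose clustercheck on port 9200 via xinetd so the load balancer can
  # health-check each Galera node (mysql_clustercheck is enabled on haproxy).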
474   xinetd::service { 'galera-monitor' :
475     port           => '9200',
476     server         => '/usr/bin/clustercheck',
477     per_source     => 'UNLIMITED',
478     log_on_success => '',
479     log_on_failure => 'HOST',
480     flags          => 'REUSE',
481     service_type   => 'UNLISTED',
482     user           => 'root',
483     group          => 'root',
484     require        => File['/etc/sysconfig/clustercheck'],
485   }
486
487   # Create all the database schemas
488   if $sync_db {
489     class { '::keystone::db::mysql':
490       require => Exec['galera-ready'],
491     }
492     class { '::glance::db::mysql':
493       require => Exec['galera-ready'],
494     }
495     class { '::nova::db::mysql':
496       require => Exec['galera-ready'],
497     }
498     class { '::nova::db::mysql_api':
499       require => Exec['galera-ready'],
500     }
501     class { '::neutron::db::mysql':
502       require => Exec['galera-ready'],
503     }
504     class { '::cinder::db::mysql':
505       require => Exec['galera-ready'],
506     }
507     class { '::heat::db::mysql':
508       require => Exec['galera-ready'],
509     }
510
511     if downcase(hiera('ceilometer_backend')) == 'mysql' {
512       class { '::ceilometer::db::mysql':
513         require => Exec['galera-ready'],
514       }
515     }
516
517     class { '::sahara::db::mysql':
518       require       => Exec['galera-ready'],
519     }
520   }
521
522   # pre-install swift here so we can build rings
523   include ::swift
524
525   # Ceph
526   $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
527
528   if $enable_ceph {
529     class { '::ceph::profile::params':
530       mon_initial_members => downcase(hiera('ceph_mon_initial_members')),
531     }
532     include ::ceph::conf
533     include ::ceph::profile::mon
534   }
535
536   if str2bool(hiera('enable_ceph_storage', false)) {
537     if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
538       exec { 'set selinux to permissive on boot':
539         command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
540         onlyif  => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
541         path    => ['/usr/bin', '/usr/sbin'],
542       }
543
544       exec { 'set selinux to permissive':
545         command => 'setenforce 0',
546         onlyif  => "which setenforce && getenforce | grep -i 'enforcing'",
547         path    => ['/usr/bin', '/usr/sbin'],
548       } -> Class['ceph::profile::osd']
549     }
550
551     include ::ceph::conf
552     include ::ceph::profile::osd
553   }
554
555   if str2bool(hiera('enable_external_ceph', false)) {
556     include ::ceph::conf
557     include ::ceph::profile::client
558   }
559
560
561 } #END STEP 2
562
563 if hiera('step') >= 3 {
564
565   class { '::keystone':
566     sync_db          => $sync_db,
567     manage_service   => false,
568     enabled          => false,
569     enable_bootstrap => $pacemaker_master,
570   }
571   include ::keystone::config
572
573   #TODO: need a cleanup-keystone-tokens.sh solution here
574
575   file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
576     ensure  => 'directory',
577     owner   => 'keystone',
578     group   => 'keystone',
579     require => Package['keystone'],
580   }
581   file { '/etc/keystone/ssl/certs/signing_cert.pem':
582     content => hiera('keystone_signing_certificate'),
583     owner   => 'keystone',
584     group   => 'keystone',
585     notify  => Service['keystone'],
586     require => File['/etc/keystone/ssl/certs'],
587   }
588   file { '/etc/keystone/ssl/private/signing_key.pem':
589     content => hiera('keystone_signing_key'),
590     owner   => 'keystone',
591     group   => 'keystone',
592     notify  => Service['keystone'],
593     require => File['/etc/keystone/ssl/private'],
594   }
595   file { '/etc/keystone/ssl/certs/ca.pem':
596     content => hiera('keystone_ca_certificate'),
597     owner   => 'keystone',
598     group   => 'keystone',
599     notify  => Service['keystone'],
600     require => File['/etc/keystone/ssl/certs'],
601   }
602
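  # Map the selected glance_backend to its store class; the HTTP store is
  # always added and the resulting list is passed to glance::api as known_stores.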
603   $glance_backend = downcase(hiera('glance_backend', 'swift'))
604   case $glance_backend {
605       'swift': { $backend_store = 'glance.store.swift.Store' }
606       'file': { $backend_store = 'glance.store.filesystem.Store' }
607       'rbd': { $backend_store = 'glance.store.rbd.Store' }
608       default: { fail('Unrecognized glance_backend parameter.') }
609   }
610   $http_store = ['glance.store.http.Store']
611   $glance_store = concat($http_store, $backend_store)
612
613   if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
614     $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
615     pacemaker::resource::filesystem { 'glance-fs':
616       device       => hiera('glance_file_pcmk_device'),
617       directory    => hiera('glance_file_pcmk_directory'),
618       fstype       => hiera('glance_file_pcmk_fstype'),
619       fsoptions    => join([$secontext, hiera('glance_file_pcmk_options', '')],','),
620       clone_params => '',
621     }
622   }
623
624   # TODO: notifications, scrubber, etc.
625   include ::glance
626   include ::glance::config
627   class { '::glance::api':
628     known_stores   => $glance_store,
629     manage_service => false,
630     enabled        => false,
631   }
632   class { '::glance::registry' :
633     sync_db        => $sync_db,
634     manage_service => false,
635     enabled        => false,
636   }
637   include ::glance::notify::rabbitmq
638   include join(['::glance::backend::', $glance_backend])
639
640   $nova_ipv6 = hiera('nova::use_ipv6', false)
641   if $nova_ipv6 {
642     $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
643   } else {
644     $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
645   }
646
647   class { '::nova' :
648     memcached_servers => $memcached_servers
649   }
650
651   include ::nova::config
652
653   class { '::nova::api' :
654     sync_db        => $sync_db,
655     sync_db_api    => $sync_db,
656     manage_service => false,
657     enabled        => false,
658   }
659   class { '::nova::cert' :
660     manage_service => false,
661     enabled        => false,
662   }
663   class { '::nova::conductor' :
664     manage_service => false,
665     enabled        => false,
666   }
667   class { '::nova::consoleauth' :
668     manage_service => false,
669     enabled        => false,
670   }
671   class { '::nova::vncproxy' :
672     manage_service => false,
673     enabled        => false,
674   }
675   include ::nova::scheduler::filter
676   class { '::nova::scheduler' :
677     manage_service => false,
678     enabled        => false,
679   }
680   include ::nova::network::neutron
681
682   if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
683
684     # TODO(devvesa) provide non-controller ips for these services
685     $zookeeper_node_ips = hiera('neutron_api_node_ips')
686     $cassandra_node_ips = hiera('neutron_api_node_ips')
687
688     # Run zookeeper in the controller if configured
689     if hiera('enable_zookeeper_on_controller') {
690       class {'::tripleo::cluster::zookeeper':
691         zookeeper_server_ips => $zookeeper_node_ips,
692         # TODO: create a 'bind' hiera key for zookeeper
693         zookeeper_client_ip  => hiera('neutron::bind_host'),
694         zookeeper_hostnames  => split(hiera('controller_node_names'), ',')
695       }
696     }
697
698     # Run cassandra in the controller if configured
699     if hiera('enable_cassandra_on_controller') {
700       class {'::tripleo::cluster::cassandra':
701         cassandra_servers => $cassandra_node_ips,
702         # TODO: create a 'bind' hiera key for cassandra
703         cassandra_ip      => hiera('neutron::bind_host'),
704       }
705     }
706
707     class {'::tripleo::network::midonet::agent':
708       zookeeper_servers => $zookeeper_node_ips,
709       cassandra_seeds   => $cassandra_node_ips
710     }
711
712     class {'::tripleo::network::midonet::api':
713       zookeeper_servers    => $zookeeper_node_ips,
714       vip                  => hiera('tripleo::loadbalancer::public_virtual_ip'),
715       keystone_ip          => hiera('tripleo::loadbalancer::public_virtual_ip'),
716       keystone_admin_token => hiera('keystone::admin_token'),
717       # TODO: create a 'bind' hiera key for api
718       bind_address         => hiera('neutron::bind_host'),
719       admin_password       => hiera('admin_password')
720     }
721
722     # Configure Neutron
723     class {'::neutron':
724       service_plugins => []
725     }
726
727   }
728   else {
729     # Neutron class definitions
730     include ::neutron
731   }
732
733   include ::neutron::config
734   class { '::neutron::server' :
735     sync_db        => $sync_db,
736     manage_service => false,
737     enabled        => false,
738   }
739   include ::neutron::server::notifications
740   if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
741     include ::neutron::plugins::nuage
742   }
743   if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
744     include ::neutron::plugins::opencontrail
745   }
746   if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
747     class {'::neutron::plugins::midonet':
748       midonet_api_ip    => hiera('tripleo::loadbalancer::public_virtual_ip'),
749       keystone_tenant   => hiera('neutron::server::auth_tenant'),
750       keystone_password => hiera('neutron::server::auth_password')
751     }
752   }
753   if hiera('neutron::enable_dhcp_agent',true) {
754     class { '::neutron::agents::dhcp' :
755       manage_service => false,
756       enabled        => false,
757     }
758     file { '/etc/neutron/dnsmasq-neutron.conf':
759       content => hiera('neutron_dnsmasq_options'),
760       owner   => 'neutron',
761       group   => 'neutron',
762       notify  => Service['neutron-dhcp-service'],
763       require => Package['neutron'],
764     }
765   }
766   if hiera('neutron::enable_l3_agent',true) {
767     class { '::neutron::agents::l3' :
768       manage_service => false,
769       enabled        => false,
770     }
771   }
772   if hiera('neutron::enable_metadata_agent',true) {
773     class { '::neutron::agents::metadata':
774       manage_service => false,
775       enabled        => false,
776     }
777   }
778   include ::neutron::plugins::ml2
779   class { '::neutron::agents::ml2::ovs':
780     manage_service => false,
781     enabled        => false,
782   }
783
784   if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
785     include ::neutron::plugins::ml2::cisco::ucsm
786   }
787   if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
788     include ::neutron::plugins::ml2::cisco::nexus
789     include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
790   }
791   if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
792     include ::neutron::plugins::ml2::cisco::nexus1000v
793
794     class { '::neutron::agents::n1kv_vem':
795       n1kv_source  => hiera('n1kv_vem_source', undef),
796       n1kv_version => hiera('n1kv_vem_version', undef),
797     }
798
799     class { '::n1k_vsm':
800       n1kv_source  => hiera('n1kv_vsm_source', undef),
801       n1kv_version => hiera('n1kv_vsm_version', undef),
802     }
803   }
804
805   if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
806     include ::neutron::plugins::ml2::bigswitch::restproxy
807     include ::neutron::agents::bigswitch
808   }
809   neutron_l3_agent_config {
810     'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
811   }
812   neutron_dhcp_agent_config {
813     'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
814   }
815   neutron_config {
816     'DEFAULT/notification_driver': value => 'messaging';
817   }
818
819   include ::cinder
820   include ::cinder::config
821   include ::tripleo::ssl::cinder_config
822   class { '::cinder::api':
823     sync_db        => $sync_db,
824     manage_service => false,
825     enabled        => false,
826   }
827   class { '::cinder::scheduler' :
828     manage_service => false,
829     enabled        => false,
830   }
831   class { '::cinder::volume' :
832     manage_service => false,
833     enabled        => false,
834   }
835   include ::cinder::glance
836   include ::cinder::ceilometer
837   class { '::cinder::setup_test_volume':
838     size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
839   }
840
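  # Each Cinder backend below is defined only when its enable flag is set; all
  # defined backends are then collected into cinder::backends at the end.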
841   $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
842   if $cinder_enable_iscsi {
843     $cinder_iscsi_backend = 'tripleo_iscsi'
844
845     cinder::backend::iscsi { $cinder_iscsi_backend :
846       iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
847       iscsi_helper     => hiera('cinder_iscsi_helper'),
848     }
849   }
850
851   if $enable_ceph {
852
853     $ceph_pools = hiera('ceph_pools')
854     ceph::pool { $ceph_pools :
855       pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
856       pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
857       size    => hiera('ceph::profile::params::osd_pool_default_size'),
858     }
859
860     $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]
861
862   } else {
863     $cinder_pool_requires = []
864   }
865
866   if hiera('cinder_enable_rbd_backend', false) {
867     $cinder_rbd_backend = 'tripleo_ceph'
868
869     cinder::backend::rbd { $cinder_rbd_backend :
870       rbd_pool        => hiera('cinder_rbd_pool_name'),
871       rbd_user        => hiera('ceph_client_user_name'),
872       rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
873       require         => $cinder_pool_requires,
874     }
875   }
876
877   if hiera('cinder_enable_eqlx_backend', false) {
878     $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
879
880     cinder::backend::eqlx { $cinder_eqlx_backend :
881       volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
882       san_ip              => hiera('cinder::backend::eqlx::san_ip', undef),
883       san_login           => hiera('cinder::backend::eqlx::san_login', undef),
884       san_password        => hiera('cinder::backend::eqlx::san_password', undef),
885       san_thin_provision  => hiera('cinder::backend::eqlx::san_thin_provision', undef),
886       eqlx_group_name     => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
887       eqlx_pool           => hiera('cinder::backend::eqlx::eqlx_pool', undef),
888       eqlx_use_chap       => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
889       eqlx_chap_login     => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
890       eqlx_chap_password  => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
891     }
892   }
893
894   if hiera('cinder_enable_dellsc_backend', false) {
895     $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
896
897     cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
898       volume_backend_name   => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
899       san_ip                => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
900       san_login             => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
901       san_password          => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
902       dell_sc_ssn           => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
903       iscsi_ip_address      => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
904       iscsi_port            => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
905       dell_sc_api_port      => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
906       dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
907       dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
908     }
909   }
910
911   if hiera('cinder_enable_netapp_backend', false) {
912     $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
913
914     if hiera('cinder::backend::netapp::nfs_shares', undef) {
915       $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
916     }
917
918     cinder::backend::netapp { $cinder_netapp_backend :
919       netapp_login                 => hiera('cinder::backend::netapp::netapp_login', undef),
920       netapp_password              => hiera('cinder::backend::netapp::netapp_password', undef),
921       netapp_server_hostname       => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
922       netapp_server_port           => hiera('cinder::backend::netapp::netapp_server_port', undef),
923       netapp_size_multiplier       => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
924       netapp_storage_family        => hiera('cinder::backend::netapp::netapp_storage_family', undef),
925       netapp_storage_protocol      => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
926       netapp_transport_type        => hiera('cinder::backend::netapp::netapp_transport_type', undef),
927       netapp_vfiler                => hiera('cinder::backend::netapp::netapp_vfiler', undef),
928       netapp_volume_list           => hiera('cinder::backend::netapp::netapp_volume_list', undef),
929       netapp_vserver               => hiera('cinder::backend::netapp::netapp_vserver', undef),
930       netapp_partner_backend_name  => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
931       nfs_shares                   => $cinder_netapp_nfs_shares,
932       nfs_shares_config            => hiera('cinder::backend::netapp::nfs_shares_config', undef),
933       netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
934       netapp_controller_ips        => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
935       netapp_sa_password           => hiera('cinder::backend::netapp::netapp_sa_password', undef),
936       netapp_storage_pools         => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
937       netapp_eseries_host_type     => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
938       netapp_webservice_path       => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
939     }
940   }
941
942   if hiera('cinder_enable_nfs_backend', false) {
943     $cinder_nfs_backend = 'tripleo_nfs'
944
945     if str2bool($::selinux) {
946       selboolean { 'virt_use_nfs':
947         value      => on,
948         persistent => true,
949       } -> Package['nfs-utils']
950     }
951
952     package { 'nfs-utils': } ->
953     cinder::backend::nfs { $cinder_nfs_backend:
954       nfs_servers       => hiera('cinder_nfs_servers'),
955       nfs_mount_options => hiera('cinder_nfs_mount_options',''),
956       nfs_shares_config => '/etc/cinder/shares-nfs.conf',
957     }
958   }
959
960   $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
961   class { '::cinder::backends' :
962     enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
963   }
964
965   class { '::sahara':
966     sync_db => $sync_db,
967   }
968   class { '::sahara::service::api':
969     manage_service => false,
970     enabled        => false,
971   }
972   class { '::sahara::service::engine':
973     manage_service => false,
974     enabled        => false,
975   }
976
977   # swift proxy
978   class { '::swift::proxy' :
979     manage_service => $non_pcmk_start,
980     enabled        => $non_pcmk_start,
981   }
982   include ::swift::proxy::proxy_logging
983   include ::swift::proxy::healthcheck
984   include ::swift::proxy::cache
985   include ::swift::proxy::keystone
986   include ::swift::proxy::authtoken
987   include ::swift::proxy::staticweb
988   include ::swift::proxy::ratelimit
989   include ::swift::proxy::catch_errors
990   include ::swift::proxy::tempurl
991   include ::swift::proxy::formpost
992
993   # swift storage
994   if str2bool(hiera('enable_swift_storage', true)) {
995     class {'::swift::storage::all':
996       mount_check => str2bool(hiera('swift_mount_check')),
997     }
998     class {'::swift::storage::account':
999       manage_service => $non_pcmk_start,
1000       enabled        => $non_pcmk_start,
1001     }
1002     class {'::swift::storage::container':
1003       manage_service => $non_pcmk_start,
1004       enabled        => $non_pcmk_start,
1005     }
1006     class {'::swift::storage::object':
1007       manage_service => $non_pcmk_start,
1008       enabled        => $non_pcmk_start,
1009     }
1010     if(!defined(File['/srv/node'])) {
1011       file { '/srv/node':
1012         ensure  => directory,
1013         owner   => 'swift',
1014         group   => 'swift',
1015         require => Package['openstack-swift'],
1016       }
1017     }
1018     $swift_components = ['account', 'container', 'object']
1019     swift::storage::filter::recon { $swift_components : }
1020     swift::storage::filter::healthcheck { $swift_components : }
1021   }
1022
1023   # Ceilometer
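  # MySQL is used for metering data when ceilometer_backend is mysql; any other
  # value falls back to a MongoDB connection string spanning all mongo nodes.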
1024   case downcase(hiera('ceilometer_backend')) {
1025     /mysql/: {
1026       $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
1027     }
1028     default: {
1029       $mongo_node_string = join($mongo_node_ips_with_port, ',')
1030       $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
1031     }
1032   }
1033   include ::ceilometer
1034   include ::ceilometer::config
1035   class { '::ceilometer::api' :
1036     manage_service => false,
1037     enabled        => false,
1038   }
1039   class { '::ceilometer::agent::notification' :
1040     manage_service => false,
1041     enabled        => false,
1042   }
1043   class { '::ceilometer::agent::central' :
1044     manage_service => false,
1045     enabled        => false,
1046   }
1047   class { '::ceilometer::collector' :
1048     manage_service => false,
1049     enabled        => false,
1050   }
1051   include ::ceilometer::expirer
1052   class { '::ceilometer::db' :
1053     database_connection => $ceilometer_database_connection,
1054     sync_db             => $sync_db,
1055   }
1056   include ::ceilometer::agent::auth
1057
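  # Randomize the ceilometer-expirer cron start time (0-86399s) so that the
  # controllers do not all run the expirer at the same moment.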
1058   Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
1059
1060   # Heat
1061   include ::heat::config
1062   class { '::heat' :
1063     sync_db             => $sync_db,
1064     notification_driver => 'messaging',
1065   }
1066   class { '::heat::api' :
1067     manage_service => false,
1068     enabled        => false,
1069   }
1070   class { '::heat::api_cfn' :
1071     manage_service => false,
1072     enabled        => false,
1073   }
1074   class { '::heat::api_cloudwatch' :
1075     manage_service => false,
1076     enabled        => false,
1077   }
1078   class { '::heat::engine' :
1079     manage_service => false,
1080     enabled        => false,
1081   }
1082
1083   # httpd/apache and horizon
1084   # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
1085   class { '::apache' :
1086     service_enable => false,
1087     # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
1088   }
1089   include ::keystone::wsgi::apache
1090   include ::apache::mod::status
1091   if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
1092     $_profile_support = 'cisco'
1093   } else {
1094     $_profile_support = 'None'
1095   }
1096   $neutron_options   = {'profile_support' => $_profile_support }
1097   class { '::horizon':
1098     cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
1099     neutron_options => $neutron_options,
1100   }
1101
1102   $snmpd_user = hiera('snmpd_readonly_user_name')
1103   snmp::snmpv3_user { $snmpd_user:
1104     authtype => 'MD5',
1105     authpass => hiera('snmpd_readonly_user_password'),
1106   }
1107   class { '::snmp':
1108     agentaddress => ['udp:161','udp6:[::1]:161'],
1109     snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
1110   }
1111
1112   hiera_include('controller_classes')
1113
1114 } #END STEP 3
1115
1116 if hiera('step') >= 4 {
1117   $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
1118   $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
1119   $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
1120   $heat_enable_db_purge = hiera('heat_enable_db_purge', true)
1121
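  # Optional periodic database maintenance (token flush, archiving/purging of
  # soft-deleted rows), each gated by its *_enable_db_purge hiera flag.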
1122   if $keystone_enable_db_purge {
1123     include ::keystone::cron::token_flush
1124   }
1125   if $nova_enable_db_purge {
1126     include ::nova::cron::archive_deleted_rows
1127   }
1128   if $cinder_enable_db_purge {
1129     include ::cinder::cron::db_purge
1130   }
1131   if $heat_enable_db_purge {
1132     include ::heat::cron::purge_deleted
1133   }
1134
1135   if $pacemaker_master {
1136
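    # The constraints below encode the start-up graph: haproxy, rabbitmq,
    # memcached and galera must be up (or promoted) before openstack-core-clone,
    # and the individual OpenStack services are then ordered behind it.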
1137     if $enable_load_balancer {
1138       pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
1139         constraint_type => 'order',
1140         first_resource  => 'haproxy-clone',
1141         second_resource => 'openstack-core-clone',
1142         first_action    => 'start',
1143         second_action   => 'start',
1144         require         => [Pacemaker::Resource::Service['haproxy'],
1145                             Pacemaker::Resource::Ocf['openstack-core']],
1146       }
1147     }
1148
1149     pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
1150       constraint_type => 'order',
1151       first_resource  => 'openstack-core-clone',
1152       second_resource => "${::apache::params::service_name}-clone",
1153       first_action    => 'start',
1154       second_action   => 'start',
1155       require         => [Pacemaker::Resource::Service[$::apache::params::service_name],
1156                           Pacemaker::Resource::Ocf['openstack-core']],
1157     }
1158     pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
1159       constraint_type => 'order',
1160       first_resource  => 'rabbitmq-clone',
1161       second_resource => 'openstack-core-clone',
1162       first_action    => 'start',
1163       second_action   => 'start',
1164       require         => [Pacemaker::Resource::Ocf['rabbitmq'],
1165                           Pacemaker::Resource::Ocf['openstack-core']],
1166     }
1167     pacemaker::constraint::base { 'memcached-then-openstack-core-constraint':
1168       constraint_type => 'order',
1169       first_resource  => 'memcached-clone',
1170       second_resource => 'openstack-core-clone',
1171       first_action    => 'start',
1172       second_action   => 'start',
1173       require         => [Pacemaker::Resource::Service['memcached'],
1174                           Pacemaker::Resource::Ocf['openstack-core']],
1175     }
1176     pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
1177       constraint_type => 'order',
1178       first_resource  => 'galera-master',
1179       second_resource => 'openstack-core-clone',
1180       first_action    => 'promote',
1181       second_action   => 'start',
1182       require         => [Pacemaker::Resource::Ocf['galera'],
1183                           Pacemaker::Resource::Ocf['openstack-core']],
1184     }
1185
1186     # Cinder
1187     pacemaker::resource::service { $::cinder::params::api_service :
1188       clone_params => 'interleave=true',
1189       require      => Pacemaker::Resource::Ocf['openstack-core'],
1190     }
1191     pacemaker::resource::service { $::cinder::params::scheduler_service :
1192       clone_params => 'interleave=true',
1193     }
1194     pacemaker::resource::service { $::cinder::params::volume_service : }
1195
1196     pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
1197       constraint_type => 'order',
1198       first_resource  => 'openstack-core-clone',
1199       second_resource => "${::cinder::params::api_service}-clone",
1200       first_action    => 'start',
1201       second_action   => 'start',
1202       require         => [Pacemaker::Resource::Ocf['openstack-core'],
1203                           Pacemaker::Resource::Service[$::cinder::params::api_service]],
1204     }
1205     pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
1206       constraint_type => 'order',
1207       first_resource  => "${::cinder::params::api_service}-clone",
1208       second_resource => "${::cinder::params::scheduler_service}-clone",
1209       first_action    => 'start',
1210       second_action   => 'start',
1211       require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
1212                           Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
1213     }
1214     pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
1215       source  => "${::cinder::params::scheduler_service}-clone",
1216       target  => "${::cinder::params::api_service}-clone",
1217       score   => 'INFINITY',
1218       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
1219                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
1220     }
1221     pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
1222       constraint_type => 'order',
1223       first_resource  => "${::cinder::params::scheduler_service}-clone",
1224       second_resource => $::cinder::params::volume_service,
1225       first_action    => 'start',
1226       second_action   => 'start',
1227       require         => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
1228                           Pacemaker::Resource::Service[$::cinder::params::volume_service]],
1229     }
1230     pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
1231       source  => $::cinder::params::volume_service,
1232       target  => "${::cinder::params::scheduler_service}-clone",
1233       score   => 'INFINITY',
1234       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
1235                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
1236     }
1237
1238     # Sahara
1239     pacemaker::resource::service { $::sahara::params::api_service_name :
1240       clone_params => 'interleave=true',
1241       require      => Pacemaker::Resource::Ocf['openstack-core'],
1242     }
1243     pacemaker::resource::service { $::sahara::params::engine_service_name :
1244       clone_params => 'interleave=true',
1245     }
1246     pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
1247       constraint_type => 'order',
1248       first_resource  => 'openstack-core-clone',
1249       second_resource => "${::sahara::params::api_service_name}-clone",
1250       first_action    => 'start',
1251       second_action   => 'start',
1252       require         => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
1253                           Pacemaker::Resource::Ocf['openstack-core']],
1254     }
1255
1256     # Glance
1257     pacemaker::resource::service { $::glance::params::registry_service_name :
1258       clone_params => 'interleave=true',
1259       require      => Pacemaker::Resource::Ocf['openstack-core'],
1260     }
1261     pacemaker::resource::service { $::glance::params::api_service_name :
1262       clone_params => 'interleave=true',
1263     }
1264
1265     pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
1266       constraint_type => 'order',
1267       first_resource  => 'openstack-core-clone',
1268       second_resource => "${::glance::params::registry_service_name}-clone",
1269       first_action    => 'start',
1270       second_action   => 'start',
1271       require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1272                           Pacemaker::Resource::Ocf['openstack-core']],
1273     }
1274     pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
1275       constraint_type => 'order',
1276       first_resource  => "${::glance::params::registry_service_name}-clone",
1277       second_resource => "${::glance::params::api_service_name}-clone",
1278       first_action    => 'start',
1279       second_action   => 'start',
1280       require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1281                           Pacemaker::Resource::Service[$::glance::params::api_service_name]],
1282     }
1283     pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
1284       source  => "${::glance::params::api_service_name}-clone",
1285       target  => "${::glance::params::registry_service_name}-clone",
1286       score   => 'INFINITY',
1287       require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1288                   Pacemaker::Resource::Service[$::glance::params::api_service_name]],
1289     }
1290
1291     if hiera('step') == 4 {
1292       # Neutron
1293       # NOTE(gfidente): Neutron will try to populate the database with some data
1294       # as soon as neutron-server is started; to avoid races we want to make this
1295       # happen only on one node, before normal Pacemaker initialization
1296       # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
1297       # NOTE(emilien): we need to run this Exec only at Step 4, otherwise this exec
1298       # would try to start the service while it is already started by Pacemaker.
1299       # That would result in a deployment failure since systemd would return 1 to Puppet
1300       # and the overcloud would fail to deploy (6 would be returned).
1301       # This conditional prevents a race condition during the deployment.
1302       # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
1303       exec { 'neutron-server-systemd-start-sleep' :
1304         command => 'systemctl start neutron-server && /usr/bin/sleep 5',
1305         path    => '/usr/bin',
1306         unless  => '/sbin/pcs resource show neutron-server',
1307       } ->
1308       pacemaker::resource::service { $::neutron::params::server_service:
1309         clone_params => 'interleave=true',
1310         require      => Pacemaker::Resource::Ocf['openstack-core']
1311       }
1312     } else {
1313       pacemaker::resource::service { $::neutron::params::server_service:
1314         clone_params => 'interleave=true',
1315         require      => Pacemaker::Resource::Ocf['openstack-core']
1316       }
1317     }
1318     if hiera('neutron::enable_l3_agent', true) {
1319       pacemaker::resource::service { $::neutron::params::l3_agent_service:
1320         clone_params => 'interleave=true',
1321       }
1322     }
1323     if hiera('neutron::enable_dhcp_agent', true) {
1324       pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
1325         clone_params => 'interleave=true',
1326       }
1327     }
1328     if hiera('neutron::enable_ovs_agent', true) {
1329       pacemaker::resource::service { $::neutron::params::ovs_agent_service:
1330         clone_params => 'interleave=true',
1331       }
1332     }
1333     if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
1334       pacemaker::resource::service {'tomcat':
1335         clone_params => 'interleave=true',
1336       }
1337     }
1338     if hiera('neutron::enable_metadata_agent', true) {
1339       pacemaker::resource::service { $::neutron::params::metadata_agent_service:
1340         clone_params => 'interleave=true',
1341       }
1342     }
1343     if hiera('neutron::enable_ovs_agent', true) {
1344       pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
1345         ocf_agent_name => 'neutron:OVSCleanup',
1346         clone_params   => 'interleave=true',
1347       }
1348       pacemaker::resource::ocf { 'neutron-netns-cleanup':
1349         ocf_agent_name => 'neutron:NetnsCleanup',
1350         clone_params   => 'interleave=true',
1351       }
1352
1353       # Neutron: one ordering chain, ovs-cleanup --> netns-cleanup --> ovs-agent
1354       pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
1355         constraint_type => 'order',
1356         first_resource  => "${::neutron::params::ovs_cleanup_service}-clone",
1357         second_resource => 'neutron-netns-cleanup-clone',
1358         first_action    => 'start',
1359         second_action   => 'start',
1360         require         => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1361                             Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1362       }
1363       pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
1364         source  => 'neutron-netns-cleanup-clone',
1365         target  => "${::neutron::params::ovs_cleanup_service}-clone",
1366         score   => 'INFINITY',
1367         require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1368                     Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1369       }
1370       pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
1371         constraint_type => 'order',
1372         first_resource  => 'neutron-netns-cleanup-clone',
1373         second_resource => "${::neutron::params::ovs_agent_service}-clone",
1374         first_action    => 'start',
1375         second_action   => 'start',
1376         require         => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1377                             Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1378       }
1379       pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
1380         source  => "${::neutron::params::ovs_agent_service}-clone",
1381         target  => 'neutron-netns-cleanup-clone',
1382         score   => 'INFINITY',
1383         require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1384                     Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1385       }
1386     }
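         # Rough sketch of the pcs commands the cleanup chain above corresponds to
         # (resource names assume the default RHEL service names; shown only for
         # illustration, the Puppet resources are what actually runs):
         #
         #   pcs constraint order start neutron-ovs-cleanup-clone then start neutron-netns-cleanup-clone
         #   pcs constraint colocation add neutron-netns-cleanup-clone with neutron-ovs-cleanup-clone INFINITY
         #   pcs constraint order start neutron-netns-cleanup-clone then start neutron-openvswitch-agent-clone
         #   pcs constraint colocation add neutron-openvswitch-agent-clone with neutron-netns-cleanup-clone INFINITY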
1387     pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
1388       constraint_type => 'order',
1389       first_resource  => 'openstack-core-clone',
1390       second_resource => "${::neutron::params::server_service}-clone",
1391       first_action    => 'start',
1392       second_action   => 'start',
1393       require         => [Pacemaker::Resource::Ocf['openstack-core'],
1394                           Pacemaker::Resource::Service[$::neutron::params::server_service]],
1395     }
1396     if hiera('neutron::enable_ovs_agent', true) {
1397       pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
1398         constraint_type => 'order',
1399         first_resource  => "${::neutron::params::ovs_agent_service}-clone",
1400         second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1401         first_action    => 'start',
1402         second_action   => 'start',
1403         require         => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1404                             Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1405       }
1406     }
1407     if hiera('neutron::enable_dhcp_agent', true) and hiera('neutron::enable_ovs_agent', true) {
1408       pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
1409         constraint_type => 'order',
1410         first_resource  => "${::neutron::params::server_service}-clone",
1411         second_resource => "${::neutron::params::ovs_agent_service}-clone",
1412         first_action    => 'start',
1413         second_action   => 'start',
1414         require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1415                             Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1416       }
1417
1418       pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
1419         source  => "${::neutron::params::dhcp_agent_service}-clone",
1420         target  => "${::neutron::params::ovs_agent_service}-clone",
1421         score   => 'INFINITY',
1422         require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1423                     Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1424       }
1425     }
1426     if hiera('neutron::enable_dhcp_agent', true) and hiera('neutron::enable_l3_agent', true) {
1427       pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
1428         constraint_type => 'order',
1429         first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
1430         second_resource => "${::neutron::params::l3_agent_service}-clone",
1431         first_action    => 'start',
1432         second_action   => 'start',
1433         require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1434                             Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
1435       }
1436       pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
1437         source  => "${::neutron::params::l3_agent_service}-clone",
1438         target  => "${::neutron::params::dhcp_agent_service}-clone",
1439         score   => 'INFINITY',
1440         require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1441                     Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
1442       }
1443     }
1444     if hiera('neutron::enable_l3_agent', true) and hiera('neutron::enable_metadata_agent', true) {
1445       pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
1446         constraint_type => 'order',
1447         first_resource  => "${::neutron::params::l3_agent_service}-clone",
1448         second_resource => "${::neutron::params::metadata_agent_service}-clone",
1449         first_action    => 'start',
1450         second_action   => 'start',
1451         require         => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
1452                             Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
1453       }
1454       pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
1455         source  => "${::neutron::params::metadata_agent_service}-clone",
1456         target  => "${::neutron::params::l3_agent_service}-clone",
1457         score   => 'INFINITY',
1458         require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
1459                     Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
1460       }
1461     }
1462     if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
1463       # MidoNet chain: keystone --> neutron-server --> dhcp --> metadata --> tomcat
1464       pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
1465         constraint_type => 'order',
1466         first_resource  => "${::neutron::params::server_service}-clone",
1467         second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1468         first_action    => 'start',
1469         second_action   => 'start',
1470         require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1471                             Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1472       }
1473       pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
1474         constraint_type => 'order',
1475         first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
1476         second_resource => "${::neutron::params::metadata_agent_service}-clone",
1477         first_action    => 'start',
1478         second_action   => 'start',
1479         require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1480                             Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1481       }
1482       pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
1483         constraint_type => 'order',
1484         first_resource  => "${::neutron::params::metadata_agent_service}-clone",
1485         second_resource => 'tomcat-clone',
1486         first_action    => 'start',
1487         second_action   => 'start',
1488         require         => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
1489                             Pacemaker::Resource::Service['tomcat']],
1490       }
1491       pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
1492         source  => "${::neutron::params::metadata_agent_service}-clone",
1493         target  => "${::neutron::params::dhcp_agent_service}-clone",
1494         score   => 'INFINITY',
1495         require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1496                     Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1497       }
1498     }
1499
1500     # Nova
1501     pacemaker::resource::service { $::nova::params::api_service_name :
1502       clone_params => 'interleave=true',
1503     }
1504     pacemaker::resource::service { $::nova::params::conductor_service_name :
1505       clone_params => 'interleave=true',
1506     }
1507     pacemaker::resource::service { $::nova::params::consoleauth_service_name :
1508       clone_params => 'interleave=true',
1509       require      => Pacemaker::Resource::Ocf['openstack-core'],
1510     }
1511     pacemaker::resource::service { $::nova::params::vncproxy_service_name :
1512       clone_params => 'interleave=true',
1513     }
1514     pacemaker::resource::service { $::nova::params::scheduler_service_name :
1515       clone_params => 'interleave=true',
1516     }
1517
1518     pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
1519       constraint_type => 'order',
1520       first_resource  => 'openstack-core-clone',
1521       second_resource => "${::nova::params::consoleauth_service_name}-clone",
1522       first_action    => 'start',
1523       second_action   => 'start',
1524       require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1525                           Pacemaker::Resource::Ocf['openstack-core']],
1526     }
1527     pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
1528       constraint_type => 'order',
1529       first_resource  => "${::nova::params::consoleauth_service_name}-clone",
1530       second_resource => "${::nova::params::vncproxy_service_name}-clone",
1531       first_action    => 'start',
1532       second_action   => 'start',
1533       require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1534                           Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1535     }
1536     pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
1537       source  => "${::nova::params::vncproxy_service_name}-clone",
1538       target  => "${::nova::params::consoleauth_service_name}-clone",
1539       score   => 'INFINITY',
1540       require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1541                   Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1542     }
1543     pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
1544       constraint_type => 'order',
1545       first_resource  => "${::nova::params::vncproxy_service_name}-clone",
1546       second_resource => "${::nova::params::api_service_name}-clone",
1547       first_action    => 'start',
1548       second_action   => 'start',
1549       require         => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1550                           Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1551     }
1552     pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
1553       source  => "${::nova::params::api_service_name}-clone",
1554       target  => "${::nova::params::vncproxy_service_name}-clone",
1555       score   => 'INFINITY',
1556       require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1557                   Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1558     }
1559     pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
1560       constraint_type => 'order',
1561       first_resource  => "${::nova::params::api_service_name}-clone",
1562       second_resource => "${::nova::params::scheduler_service_name}-clone",
1563       first_action    => 'start',
1564       second_action   => 'start',
1565       require         => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1566                           Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1567     }
1568     pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
1569       source  => "${::nova::params::scheduler_service_name}-clone",
1570       target  => "${::nova::params::api_service_name}-clone",
1571       score   => 'INFINITY',
1572       require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1573                   Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1574     }
1575     pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
1576       constraint_type => 'order',
1577       first_resource  => "${::nova::params::scheduler_service_name}-clone",
1578       second_resource => "${::nova::params::conductor_service_name}-clone",
1579       first_action    => 'start',
1580       second_action   => 'start',
1581       require         => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1582                           Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1583     }
1584     pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
1585       source  => "${::nova::params::conductor_service_name}-clone",
1586       target  => "${::nova::params::scheduler_service_name}-clone",
1587       score   => 'INFINITY',
1588       require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1589                   Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1590     }
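         # Net effect of the Nova constraints above (summary, not executed): one ordered
         # chain openstack-core --> nova-consoleauth --> nova-vncproxy --> nova-api -->
         # nova-scheduler --> nova-conductor, with each pair after openstack-core also
         # colocated.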
1591
1592     # Ceilometer
1593     case downcase(hiera('ceilometer_backend')) {
1594       /mysql/: {
1595         pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
1596           clone_params => 'interleave=true',
1597           require      => Pacemaker::Resource::Ocf['openstack-core'],
1598         }
1599       }
1600       default: {
1601         pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
1602           clone_params => 'interleave=true',
1603           require      => [Pacemaker::Resource::Ocf['openstack-core'],
1604                           Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1605         }
1606       }
1607     }
1608     pacemaker::resource::service { $::ceilometer::params::collector_service_name :
1609       clone_params => 'interleave=true',
1610     }
1611     pacemaker::resource::service { $::ceilometer::params::api_service_name :
1612       clone_params => 'interleave=true',
1613     }
1614     pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
1615       clone_params => 'interleave=true',
1616     }
1617     pacemaker::resource::ocf { 'delay' :
1618       ocf_agent_name  => 'heartbeat:Delay',
1619       clone_params    => 'interleave=true',
1620       resource_params => 'startdelay=10',
1621     }
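         # 'delay' wraps the heartbeat:Delay OCF agent; as we understand the agent,
         # startdelay=10 simply makes every start operation pause for about ten seconds,
         # and the constraints below chain that pause after the Ceilometer API.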
1622     # Fedora doesn't support the `require-all` parameter for constraints yet
1623     if $::operatingsystem == 'Fedora' {
1624       $redis_ceilometer_constraint_params = undef
1625     } else {
1626       $redis_ceilometer_constraint_params = 'require-all=false'
1627     }
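         # Our reading of the option: require-all=false relaxes the ordering below so
         # that ceilometer-central only needs at least one promoted redis instance
         # rather than waiting on every node; on Fedora the option is simply omitted.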
1628     pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
1629       constraint_type   => 'order',
1630       first_resource    => 'redis-master',
1631       second_resource   => "${::ceilometer::params::agent_central_service_name}-clone",
1632       first_action      => 'promote',
1633       second_action     => 'start',
1634       constraint_params => $redis_ceilometer_constraint_params,
1635       require           => [Pacemaker::Resource::Ocf['redis'],
1636                             Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
1637     }
1638     pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
1639       constraint_type => 'order',
1640       first_resource  => 'openstack-core-clone',
1641       second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1642       first_action    => 'start',
1643       second_action   => 'start',
1644       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1645                           Pacemaker::Resource::Ocf['openstack-core']],
1646     }
1647     pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
1648       constraint_type => 'order',
1649       first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
1650       second_resource => "${::ceilometer::params::collector_service_name}-clone",
1651       first_action    => 'start',
1652       second_action   => 'start',
1653       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1654                           Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1655     }
1656     pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
1657       constraint_type => 'order',
1658       first_resource  => "${::ceilometer::params::collector_service_name}-clone",
1659       second_resource => "${::ceilometer::params::api_service_name}-clone",
1660       first_action    => 'start',
1661       second_action   => 'start',
1662       require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
1663                           Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
1664     }
1665     pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
1666       source  => "${::ceilometer::params::api_service_name}-clone",
1667       target  => "${::ceilometer::params::collector_service_name}-clone",
1668       score   => 'INFINITY',
1669       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1670                   Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1671     }
1672     pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
1673       constraint_type => 'order',
1674       first_resource  => "${::ceilometer::params::api_service_name}-clone",
1675       second_resource => 'delay-clone',
1676       first_action    => 'start',
1677       second_action   => 'start',
1678       require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1679                           Pacemaker::Resource::Ocf['delay']],
1680     }
1681     pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
1682       source  => 'delay-clone',
1683       target  => "${::ceilometer::params::api_service_name}-clone",
1684       score   => 'INFINITY',
1685       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1686                   Pacemaker::Resource::Ocf['delay']],
1687     }
1688     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
1689       pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
1690         constraint_type => 'order',
1691         first_resource  => "${::mongodb::params::service_name}-clone",
1692         second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1693         first_action    => 'start',
1694         second_action   => 'start',
1695         require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1696                             Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1697       }
1698     }
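         # Summary of the Ceilometer ordering defined above (sketch): openstack-core,
         # redis and (when used) mongodb come first, then ceilometer-central -->
         # ceilometer-collector --> ceilometer-api --> delay, with api/collector and
         # delay/api also colocated.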
1699
1700     # Heat
1701     pacemaker::resource::service { $::heat::params::api_service_name :
1702       clone_params => 'interleave=true',
1703     }
1704     pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
1705       clone_params => 'interleave=true',
1706     }
1707     pacemaker::resource::service { $::heat::params::api_cfn_service_name :
1708       clone_params => 'interleave=true',
1709     }
1710     pacemaker::resource::service { $::heat::params::engine_service_name :
1711       clone_params => 'interleave=true',
1712     }
1713     pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
1714       constraint_type => 'order',
1715       first_resource  => 'openstack-core-clone',
1716       second_resource => "${::heat::params::api_service_name}-clone",
1717       first_action    => 'start',
1718       second_action   => 'start',
1719       require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1720                           Pacemaker::Resource::Ocf['openstack-core']],
1721     }
1722     pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
1723       constraint_type => 'order',
1724       first_resource  => "${::heat::params::api_service_name}-clone",
1725       second_resource => "${::heat::params::api_cfn_service_name}-clone",
1726       first_action    => 'start',
1727       second_action   => 'start',
1728       require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1729                           Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1730     }
1731     pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
1732       source  => "${::heat::params::api_cfn_service_name}-clone",
1733       target  => "${::heat::params::api_service_name}-clone",
1734       score   => 'INFINITY',
1735       require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
1736                   Pacemaker::Resource::Service[$::heat::params::api_service_name]],
1737     }
1738     pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
1739       constraint_type => 'order',
1740       first_resource  => "${::heat::params::api_cfn_service_name}-clone",
1741       second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
1742       first_action    => 'start',
1743       second_action   => 'start',
1744       require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1745                           Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1746     }
1747     pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
1748       source  => "${::heat::params::api_cloudwatch_service_name}-clone",
1749       target  => "${::heat::params::api_cfn_service_name}-clone",
1750       score   => 'INFINITY',
1751       require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
1752                   Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
1753     }
1754     pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
1755       constraint_type => 'order',
1756       first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
1757       second_resource => "${::heat::params::engine_service_name}-clone",
1758       first_action    => 'start',
1759       second_action   => 'start',
1760       require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1761                           Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1762     }
1763     pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
1764       source  => "${::heat::params::engine_service_name}-clone",
1765       target  => "${::heat::params::api_cloudwatch_service_name}-clone",
1766       score   => 'INFINITY',
1767       require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1768                   Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1769     }
1770     pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
1771       constraint_type => 'order',
1772       first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
1773       second_resource => "${::heat::params::api_service_name}-clone",
1774       first_action    => 'start',
1775       second_action   => 'start',
1776       require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1777                           Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
1778     }
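         # Net effect of the Heat constraints above (sketch): openstack-core and the
         # Ceilometer notification agent start first, then heat-api --> heat-api-cfn -->
         # heat-api-cloudwatch --> heat-engine, with each consecutive Heat pair also
         # colocated.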
1779
1780     # Horizon and Keystone
1781     pacemaker::resource::service { $::apache::params::service_name:
1782       clone_params     => 'interleave=true',
1783       verify_on_create => true,
1784       require          => [File['/etc/keystone/ssl/certs/ca.pem'],
1785                            File['/etc/keystone/ssl/private/signing_key.pem'],
1786                            File['/etc/keystone/ssl/certs/signing_cert.pem']],
1787     }
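         # httpd is managed as a single clone here presumably because, in this profile,
         # Keystone runs as a WSGI application under Apache alongside Horizon; the File
         # requirements ensure the Keystone PKI material is in place before the clone
         # is started.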
1788
1789     # VSM
1790     if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
1791       pacemaker::resource::ocf { 'vsm-p' :
1792         ocf_agent_name  => 'heartbeat:VirtualDomain',
1793         resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
1794         require         => Class['n1k_vsm'],
1795         meta_params     => 'resource-stickiness=INFINITY',
1796       }
1797       if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
1798         pacemaker::resource::ocf { 'vsm-s' :
1799           ocf_agent_name  => 'heartbeat:VirtualDomain',
1800           resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
1801           require         => Class['n1k_vsm'],
1802           meta_params     => 'resource-stickiness=INFINITY',
1803         }
1804         pacemaker::constraint::colocation { 'vsm-colocation-constraint':
1805           source  => 'vsm-p',
1806           target  => 'vsm-s',
1807           score   => '-INFINITY',
1808           require => [Pacemaker::Resource::Ocf['vsm-p'],
1809                       Pacemaker::Resource::Ocf['vsm-s']],
1810         }
1811       }
1812     }
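         # Our reading of the scores above: the -INFINITY colocation is an
         # anti-colocation that keeps vsm-p and vsm-s on different controllers, while
         # resource-stickiness=INFINITY discourages Pacemaker from migrating either VSM
         # once it has been placed.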
1813
1814   }
1815
1816 } #END STEP 4
1817
1818 if hiera('step') >= 5 {
1819
1820   if $pacemaker_master {
1821
1822     class {'::keystone::roles::admin' :
1823       require => Pacemaker::Resource::Service[$::apache::params::service_name],
1824     } ->
1825     class {'::keystone::endpoint' :
1826       require => Pacemaker::Resource::Service[$::apache::params::service_name],
1827     }
1828   }
1829
1830 } #END STEP 5
1831
1832 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
1833 package_manifest { $package_manifest_name: ensure => present }
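     # Assuming the package_manifest type from the TripleO modules: join() with no
     # delimiter concatenates the path and the current step, so each step writes a
     # marker such as .../overcloud_controller_pacemaker5 recording the packages
     # installed at that point (illustrative description, not authoritative).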