Merge "configure horizon with memcached ipv6 when needed"
[apex-tripleo-heat-templates.git] / puppet / manifests / overcloud_controller_pacemaker.pp
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

Pcmk_resource <| |> {
  tries     => 10,
  try_sleep => 3,
}

include ::tripleo::packages
include ::tripleo::firewall

if $::hostname == downcase(hiera('bootstrap_nodeid')) {
  $pacemaker_master = true
  $sync_db = true
} else {
  $pacemaker_master = false
  $sync_db = false
}

$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
$enable_load_balancer = hiera('enable_load_balancer', true)

# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 4

if hiera('step') >= 1 {

  create_resources(kmod::load, hiera('kernel_modules'), {})
  create_resources(sysctl::value, hiera('sysctl_settings'), {})
  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>

  include ::timezone

  if count(hiera('ntp::servers')) > 0 {
    include ::ntp
  }

  $controller_node_ips = split(hiera('controller_node_ips'), ',')
  $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
  if $enable_load_balancer {
    class { '::tripleo::loadbalancer' :
      controller_hosts       => $controller_node_ips,
      controller_hosts_names => $controller_node_names,
      manage_vip             => false,
      mysql_clustercheck     => true,
      haproxy_service_manage => false,
    }
  }

  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
  $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
  if $corosync_ipv6 {
    $cluster_setup_extras = { '--ipv6' => '' }
  } else {
    $cluster_setup_extras = {}
  }
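  # NOTE: $cluster_setup_extras is passed through to the `pcs cluster setup`
  # call made by ::pacemaker::corosync below; with corosync_ipv6 set, the
  # resulting command gains a bare --ipv6 flag, e.g. (illustrative):
  #   pcs cluster setup --name tripleo_cluster overcloud-controller-0 ... --ipv6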
  user { 'hacluster':
    ensure => present,
  } ->
  class { '::pacemaker':
    hacluster_pwd => hiera('hacluster_pwd'),
  } ->
  class { '::pacemaker::corosync':
    cluster_members      => $pacemaker_cluster_members,
    setup_cluster        => $pacemaker_master,
    cluster_setup_extras => $cluster_setup_extras,
  }
  class { '::pacemaker::stonith':
    disable => !$enable_fencing,
  }
  if $enable_fencing {
    include ::tripleo::fencing

    # enable stonith after all fencing devices have been created
    Class['tripleo::fencing'] -> Class['pacemaker::stonith']
  }

  # FIXME(gfidente): this sets a 200s default timeout op param for start and
  # stop; until we can use pcmk global defaults we'll still need to add it
  # to every resource which redefines op params
  Pacemaker::Resource::Service {
    op_params => 'start timeout=200s stop timeout=200s',
  }

  # Only configure RabbitMQ in this step; don't start it yet to
  # avoid races where non-master nodes attempt to start without
  # config (e.g. binding on 0.0.0.0)
  # The module ignores erlang_cookie if cluster_config is false
  $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
  if $rabbit_ipv6 {
    $rabbit_env = merge(hiera('rabbitmq_environment'), {
      'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
    })
  } else {
    $rabbit_env = hiera('rabbitmq_environment')
  }
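  # With rabbit_ipv6 set, the environment written out for RabbitMQ gains
  # (illustrative):
  #   RABBITMQ_SERVER_START_ARGS="-proto_dist inet6_tcp"
  # which makes the Erlang distribution layer use IPv6 TCP.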

  class { '::rabbitmq':
    service_manage          => false,
    tcp_keepalive           => false,
    config_kernel_variables => hiera('rabbitmq_kernel_variables'),
    config_variables        => hiera('rabbitmq_config_variables'),
    environment_variables   => $rabbit_env,
  } ->
  file { '/var/lib/rabbitmq/.erlang.cookie':
    ensure  => file,
    owner   => 'rabbitmq',
    group   => 'rabbitmq',
    mode    => '0400',
    content => hiera('rabbitmq::erlang_cookie'),
    replace => true,
  }

  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
    include ::mongodb::globals
    include ::mongodb::client
    class { '::mongodb::server' :
      service_manage => false,
    }
  }

  # Memcached
  class {'::memcached' :
    service_manage => false,
  }

  # Redis
  class { '::redis' :
    service_manage => false,
    notify_service => false,
  }

  # Galera
  if str2bool(hiera('enable_galera', true)) {
    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
  } else {
    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
  }
  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
  $galera_nodes_count = count(split($galera_nodes, ','))

  # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
  # set bind-address to a hostname instead of an IP address; to move MySQL
  # off the internal_api network we'll have to customize both
  # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
  $mysql_bind_host = hiera('mysql_bind_host')
  $mysqld_options = {
    'mysqld' => {
      'skip-name-resolve'             => '1',
      'binlog_format'                 => 'ROW',
      'default-storage-engine'        => 'innodb',
      'innodb_autoinc_lock_mode'      => '2',
      'innodb_locks_unsafe_for_binlog'=> '1',
      'query_cache_size'              => '0',
      'query_cache_type'              => '0',
      'bind-address'                  => $::hostname,
      'max_connections'               => hiera('mysql_max_connections'),
      'open_files_limit'              => '-1',
      'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
      'wsrep_cluster_name'            => 'galera_cluster',
      'wsrep_slave_threads'           => '1',
      'wsrep_certify_nonPK'           => '1',
      'wsrep_max_ws_rows'             => '131072',
      'wsrep_max_ws_size'             => '1073741824',
      'wsrep_debug'                   => '0',
      'wsrep_convert_LOCK_to_trx'     => '0',
      'wsrep_retry_autocommit'        => '1',
      'wsrep_auto_increment_control'  => '1',
      'wsrep_drupal_282555_workaround'=> '0',
      'wsrep_causal_reads'            => '0',
      'wsrep_sst_method'              => 'rsync',
      'wsrep_provider_options'        => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
    },
  }
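  # NOTE: the bracketed gmcast.listen_addr form keeps the option valid when
  # mysql_bind_host is an IPv6 literal, e.g. (illustrative):
  #   gmcast.listen_addr=tcp://[fd00:fd00:fd00:2000::10]:4567;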

  class { '::mysql::server':
    create_root_user        => false,
    create_root_my_cnf      => false,
    config_file             => $mysql_config_file,
    override_options        => $mysqld_options,
    remove_default_accounts => $pacemaker_master,
    service_manage          => false,
    service_enabled         => false,
  }

}

if hiera('step') >= 2 {

  # NOTE(gfidente): the following vars are needed on all nodes so they
  # need to stay out of the pacemaker_master conditional.
  # The address mangling will hopefully go away once we can configure the
  # connection string via hostnames; until then, we need to pass the list
  # of IPv6 addresses *with* port and without the brackets as the 'members'
  # argument for the 'mongodb_replset' resource.
  if str2bool(hiera('mongodb::server::ipv6', false)) {
    $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
    $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  } else {
    $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  }
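  # Illustrative result for a node at fd00::10:
  #   $mongo_node_ips_with_port      -> ['[fd00::10]:27017']  (connection strings)
  #   $mongo_node_ips_with_port_nobr -> ['fd00::10:27017']    (replset members)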
  $mongodb_replset = hiera('mongodb::server::replset')

  if $pacemaker_master {

    if $enable_load_balancer {

      include ::pacemaker::resource_defaults

      # Create an openstack-core dummy resource. See RHBZ 1290121
      pacemaker::resource::ocf { 'openstack-core':
        ocf_agent_name => 'heartbeat:Dummy',
        clone_params   => true,
      }
      # FIXME: we should not have to access tripleo::loadbalancer class
      # parameters here to configure pacemaker VIPs. The configuration
      # of pacemaker VIPs could move into puppet-tripleo or we should
      # make use of less specific hiera parameters here for the settings.
      pacemaker::resource::service { 'haproxy':
        clone_params => true,
      }

      $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
      if is_ipv6_address($control_vip) {
        $control_vip_netmask = '64'
      } else {
        $control_vip_netmask = '32'
      }
      pacemaker::resource::ip { 'control_vip':
        ip_address   => $control_vip,
        cidr_netmask => $control_vip_netmask,
      }
      pacemaker::constraint::base { 'control_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${control_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require           => [Pacemaker::Resource::Service['haproxy'],
                              Pacemaker::Resource::Ip['control_vip']],
      }
      pacemaker::constraint::colocation { 'control_vip-with-haproxy':
        source  => "ip-${control_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['control_vip']],
      }
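      # The same pattern repeats for each VIP below: an optional ordering
      # constraint (the VIP starts before haproxy) plus an INFINITY
      # colocation, so each address is only hosted where haproxy is running.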

      $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
      if is_ipv6_address($public_vip) {
        $public_vip_netmask = '64'
      } else {
        $public_vip_netmask = '32'
      }
      if $public_vip and $public_vip != $control_vip {
        pacemaker::resource::ip { 'public_vip':
          ip_address   => $public_vip,
          cidr_netmask => $public_vip_netmask,
        }
        pacemaker::constraint::base { 'public_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${public_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['public_vip']],
        }
        pacemaker::constraint::colocation { 'public_vip-with-haproxy':
          source  => "ip-${public_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['public_vip']],
        }
      }

      $redis_vip = hiera('redis_vip')
      if is_ipv6_address($redis_vip) {
        $redis_vip_netmask = '64'
      } else {
        $redis_vip_netmask = '32'
      }
      if $redis_vip and $redis_vip != $control_vip {
        pacemaker::resource::ip { 'redis_vip':
          ip_address   => $redis_vip,
          cidr_netmask => $redis_vip_netmask,
        }
        pacemaker::constraint::base { 'redis_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${redis_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['redis_vip']],
        }
        pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
          source  => "ip-${redis_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['redis_vip']],
        }
      }

      $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
      if is_ipv6_address($internal_api_vip) {
        $internal_api_vip_netmask = '64'
      } else {
        $internal_api_vip_netmask = '32'
      }
      if $internal_api_vip and $internal_api_vip != $control_vip {
        pacemaker::resource::ip { 'internal_api_vip':
          ip_address   => $internal_api_vip,
          cidr_netmask => $internal_api_vip_netmask,
        }
        pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${internal_api_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['internal_api_vip']],
        }
        pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
          source  => "ip-${internal_api_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['internal_api_vip']],
        }
      }

      $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
      if is_ipv6_address($storage_vip) {
        $storage_vip_netmask = '64'
      } else {
        $storage_vip_netmask = '32'
      }
      if $storage_vip and $storage_vip != $control_vip {
        pacemaker::resource::ip { 'storage_vip':
          ip_address   => $storage_vip,
          cidr_netmask => $storage_vip_netmask,
        }
        pacemaker::constraint::base { 'storage_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${storage_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['storage_vip']],
        }
        pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
          source  => "ip-${storage_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['storage_vip']],
        }
      }

      $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
      if is_ipv6_address($storage_mgmt_vip) {
        $storage_mgmt_vip_netmask = '64'
      } else {
        $storage_mgmt_vip_netmask = '32'
      }
      if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
        pacemaker::resource::ip { 'storage_mgmt_vip':
          ip_address   => $storage_mgmt_vip,
          cidr_netmask => $storage_mgmt_vip_netmask,
        }
        pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
          constraint_type   => 'order',
          first_resource    => "ip-${storage_mgmt_vip}",
          second_resource   => 'haproxy-clone',
          first_action      => 'start',
          second_action     => 'start',
          constraint_params => 'kind=Optional',
          require           => [Pacemaker::Resource::Service['haproxy'],
                                Pacemaker::Resource::Ip['storage_mgmt_vip']],
        }
        pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
          source  => "ip-${storage_mgmt_vip}",
          target  => 'haproxy-clone',
          score   => 'INFINITY',
          require => [Pacemaker::Resource::Service['haproxy'],
                      Pacemaker::Resource::Ip['storage_mgmt_vip']],
        }
      }

    }

    pacemaker::resource::service { $::memcached::params::service_name :
      clone_params => 'interleave=true',
      require      => Class['::memcached'],
    }

    pacemaker::resource::ocf { 'rabbitmq':
      ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
      resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
      clone_params    => 'ordered=true interleave=true',
      meta_params     => 'notify=true',
      require         => Class['::rabbitmq'],
    }

    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::resource::service { $::mongodb::params::service_name :
        op_params    => 'start timeout=370s stop timeout=200s',
        clone_params => true,
        require      => Class['::mongodb::server'],
      }
      # NOTE(spredzy): the replica set can only be configured
      # once all the nodes have joined the cluster.
      mongodb_conn_validator { $mongo_node_ips_with_port :
        timeout => '600',
        require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
        before  => Mongodb_replset[$mongodb_replset],
      }
      mongodb_replset { $mongodb_replset :
        members => $mongo_node_ips_with_port_nobr,
      }
    }

    pacemaker::resource::ocf { 'galera' :
      ocf_agent_name  => 'heartbeat:galera',
      op_params       => 'promote timeout=300s on-fail=block',
      master_params   => '',
      meta_params     => "master-max=${galera_nodes_count} ordered=true",
      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
      require         => Class['::mysql::server'],
      before          => Exec['galera-ready'],
    }

    pacemaker::resource::ocf { 'redis':
      ocf_agent_name  => 'heartbeat:redis',
      master_params   => '',
      meta_params     => 'notify=true ordered=true interleave=true',
      resource_params => 'wait_last_known_master=true',
      require         => Class['::redis'],
    }

  }

  exec { 'galera-ready' :
    command     => '/usr/bin/clustercheck >/dev/null',
    timeout     => 30,
    tries       => 180,
    try_sleep   => 10,
    environment => ['AVAILABLE_WHEN_READONLY=0'],
    require     => File['/etc/sysconfig/clustercheck'],
  }
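  # clustercheck exits non-zero until the local node reports wsrep synced;
  # 180 tries with a 10s sleep gives the cluster roughly 30 minutes to form.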

  file { '/etc/sysconfig/clustercheck' :
    ensure  => file,
    content => "MYSQL_USERNAME=root\nMYSQL_PASSWORD=''\nMYSQL_HOST=localhost\n",
  }

  xinetd::service { 'galera-monitor' :
    port           => '9200',
    server         => '/usr/bin/clustercheck',
    per_source     => 'UNLIMITED',
    log_on_success => '',
    log_on_failure => 'HOST',
    flags          => 'REUSE',
    service_type   => 'UNLISTED',
    user           => 'root',
    group          => 'root',
    require        => File['/etc/sysconfig/clustercheck'],
  }
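  # HAProxy (mysql_clustercheck above) health-checks Galera through the
  # xinetd service above: hitting port 9200 runs clustercheck, which answers
  # HTTP 200 only when the local node is synced.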

  # Create all the database schemas
  if $sync_db {
    class { '::keystone::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::glance::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::nova::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::nova::db::mysql_api':
      require => Exec['galera-ready'],
    }
    class { '::neutron::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::cinder::db::mysql':
      require => Exec['galera-ready'],
    }
    class { '::heat::db::mysql':
      require => Exec['galera-ready'],
    }

    if downcase(hiera('ceilometer_backend')) == 'mysql' {
      class { '::ceilometer::db::mysql':
        require => Exec['galera-ready'],
      }
    }

    class { '::sahara::db::mysql':
      require => Exec['galera-ready'],
    }
  }

  # pre-install swift here so we can build rings
  include ::swift

  # Ceph
  $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)

  if $enable_ceph {
    $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
    if str2bool(hiera('ceph_ipv6', false)) {
      $mon_host = hiera('ceph_mon_host_v6')
    } else {
      $mon_host = hiera('ceph_mon_host')
    }
    class { '::ceph::profile::params':
      mon_initial_members => $mon_initial_members,
      mon_host            => $mon_host,
    }
    include ::ceph::conf
    include ::ceph::profile::mon
  }

  if str2bool(hiera('enable_ceph_storage', false)) {
    if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
      exec { 'set selinux to permissive on boot':
        command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
        onlyif  => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
        path    => ['/usr/bin', '/usr/sbin'],
      }

      exec { 'set selinux to permissive':
        command => 'setenforce 0',
        onlyif  => "which setenforce && getenforce | grep -i 'enforcing'",
        path    => ['/usr/bin', '/usr/sbin'],
      } -> Class['ceph::profile::osd']
    }

    include ::ceph::conf
    include ::ceph::profile::osd
  }

  if str2bool(hiera('enable_external_ceph', false)) {
    if str2bool(hiera('ceph_ipv6', false)) {
      $mon_host = hiera('ceph_mon_host_v6')
    } else {
      $mon_host = hiera('ceph_mon_host')
    }
    class { '::ceph::profile::params':
      mon_host => $mon_host,
    }
    include ::ceph::conf
    include ::ceph::profile::client
  }

} #END STEP 2

if hiera('step') >= 3 {

  class { '::keystone':
    sync_db          => $sync_db,
    manage_service   => false,
    enabled          => false,
    enable_bootstrap => $pacemaker_master,
  }
  include ::keystone::config

  #TODO: need a cleanup-keystone-tokens.sh solution here

  file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
    ensure  => 'directory',
    owner   => 'keystone',
    group   => 'keystone',
    require => Package['keystone'],
  }
  file { '/etc/keystone/ssl/certs/signing_cert.pem':
    content => hiera('keystone_signing_certificate'),
    owner   => 'keystone',
    group   => 'keystone',
    notify  => Service['keystone'],
    require => File['/etc/keystone/ssl/certs'],
  }
  file { '/etc/keystone/ssl/private/signing_key.pem':
    content => hiera('keystone_signing_key'),
    owner   => 'keystone',
    group   => 'keystone',
    notify  => Service['keystone'],
    require => File['/etc/keystone/ssl/private'],
  }
  file { '/etc/keystone/ssl/certs/ca.pem':
    content => hiera('keystone_ca_certificate'),
    owner   => 'keystone',
    group   => 'keystone',
    notify  => Service['keystone'],
    require => File['/etc/keystone/ssl/certs'],
  }

  $glance_backend = downcase(hiera('glance_backend', 'swift'))
  case $glance_backend {
    'swift': { $backend_store = 'glance.store.swift.Store' }
    'file': { $backend_store = 'glance.store.filesystem.Store' }
    'rbd': { $backend_store = 'glance.store.rbd.Store' }
    default: { fail('Unrecognized glance_backend parameter.') }
  }
  $http_store = ['glance.store.http.Store']
  $glance_store = concat($http_store, $backend_store)
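  # e.g. with the default 'swift' backend this yields
  #   $glance_store = ['glance.store.http.Store', 'glance.store.swift.Store']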

  if $glance_backend == 'file' and hiera('glance_file_pcmk_manage', false) {
    $secontext = 'context="system_u:object_r:glance_var_lib_t:s0"'
    pacemaker::resource::filesystem { 'glance-fs':
      device       => hiera('glance_file_pcmk_device'),
      directory    => hiera('glance_file_pcmk_directory'),
      fstype       => hiera('glance_file_pcmk_fstype'),
      fsoptions    => join([$secontext, hiera('glance_file_pcmk_options', '')],','),
      clone_params => '',
    }
  }

  # TODO: notifications, scrubber, etc.
  include ::glance
  include ::glance::config
  class { '::glance::api':
    known_stores   => $glance_store,
    manage_service => false,
    enabled        => false,
  }
  class { '::glance::registry' :
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  include ::glance::notify::rabbitmq
  include join(['::glance::backend::', $glance_backend])

  $nova_ipv6 = hiera('nova::use_ipv6', false)
  if $nova_ipv6 {
    $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
  } else {
    $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
  }
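  # Illustrative results, assuming the v6 hiera entries ship pre-bracketed:
  #   IPv4: ['192.0.2.10:11211', ...]    IPv6: ['[fd00::10]:11211', ...]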

  class { '::nova' :
    memcached_servers => $memcached_servers
  }

  include ::nova::config

  class { '::nova::api' :
    sync_db        => $sync_db,
    sync_db_api    => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::cert' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::conductor' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::consoleauth' :
    manage_service => false,
    enabled        => false,
  }
  class { '::nova::vncproxy' :
    manage_service => false,
    enabled        => false,
  }
  include ::nova::scheduler::filter
  class { '::nova::scheduler' :
    manage_service => false,
    enabled        => false,
  }
  include ::nova::network::neutron

  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {

    # TODO(devvesa) provide non-controller IPs for these services
    $zookeeper_node_ips = hiera('neutron_api_node_ips')
    $cassandra_node_ips = hiera('neutron_api_node_ips')

    # Run zookeeper in the controller if configured
    if hiera('enable_zookeeper_on_controller') {
      class {'::tripleo::cluster::zookeeper':
        zookeeper_server_ips => $zookeeper_node_ips,
        # TODO: create a 'bind' hiera key for zookeeper
        zookeeper_client_ip  => hiera('neutron::bind_host'),
        zookeeper_hostnames  => split(hiera('controller_node_names'), ',')
      }
    }

    # Run cassandra in the controller if configured
    if hiera('enable_cassandra_on_controller') {
      class {'::tripleo::cluster::cassandra':
        cassandra_servers => $cassandra_node_ips,
        # TODO: create a 'bind' hiera key for cassandra
        cassandra_ip      => hiera('neutron::bind_host'),
      }
    }

    class {'::tripleo::network::midonet::agent':
      zookeeper_servers => $zookeeper_node_ips,
      cassandra_seeds   => $cassandra_node_ips
    }

    class {'::tripleo::network::midonet::api':
      zookeeper_servers    => $zookeeper_node_ips,
      vip                  => hiera('tripleo::loadbalancer::public_virtual_ip'),
      keystone_ip          => hiera('tripleo::loadbalancer::public_virtual_ip'),
      keystone_admin_token => hiera('keystone::admin_token'),
      # TODO: create a 'bind' hiera key for api
      bind_address         => hiera('neutron::bind_host'),
      admin_password       => hiera('admin_password')
    }

    # Configure Neutron
    class {'::neutron':
      service_plugins => []
    }

  } else {
    # Neutron class definitions
    include ::neutron
  }

  include ::neutron::config
  class { '::neutron::server' :
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  include ::neutron::server::notifications
  if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
    include ::neutron::plugins::nuage
  }
  if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
    include ::neutron::plugins::opencontrail
  }
  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
    class {'::neutron::plugins::midonet':
      midonet_api_ip    => hiera('tripleo::loadbalancer::public_virtual_ip'),
      keystone_tenant   => hiera('neutron::server::auth_tenant'),
      keystone_password => hiera('neutron::server::auth_password')
    }
  }
  if hiera('neutron::enable_dhcp_agent',true) {
    class { '::neutron::agents::dhcp' :
      manage_service => false,
      enabled        => false,
    }
    file { '/etc/neutron/dnsmasq-neutron.conf':
      content => hiera('neutron_dnsmasq_options'),
      owner   => 'neutron',
      group   => 'neutron',
      notify  => Service['neutron-dhcp-service'],
      require => Package['neutron'],
    }
  }
  if hiera('neutron::enable_l3_agent',true) {
    class { '::neutron::agents::l3' :
      manage_service => false,
      enabled        => false,
    }
  }
  if hiera('neutron::enable_metadata_agent',true) {
    class { '::neutron::agents::metadata':
      manage_service => false,
      enabled        => false,
    }
  }
  include ::neutron::plugins::ml2
  class { '::neutron::agents::ml2::ovs':
    manage_service => false,
    enabled        => false,
  }

  if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::ucsm
  }
  if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::nexus
    include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
  }
  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::cisco::nexus1000v

    class { '::neutron::agents::n1kv_vem':
      n1kv_source  => hiera('n1kv_vem_source', undef),
      n1kv_version => hiera('n1kv_vem_version', undef),
    }

    class { '::n1k_vsm':
      n1kv_source  => hiera('n1kv_vsm_source', undef),
      n1kv_version => hiera('n1kv_vsm_version', undef),
    }
  }

  if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    include ::neutron::plugins::ml2::bigswitch::restproxy
    include ::neutron::agents::bigswitch
  }
  neutron_l3_agent_config {
    'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
  }
  neutron_dhcp_agent_config {
    'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
  }
  neutron_config {
    'DEFAULT/notification_driver': value => 'messaging';
  }

  include ::cinder
  include ::cinder::config
  include ::tripleo::ssl::cinder_config
  class { '::cinder::api':
    sync_db        => $sync_db,
    manage_service => false,
    enabled        => false,
  }
  class { '::cinder::scheduler' :
    manage_service => false,
    enabled        => false,
  }
  class { '::cinder::volume' :
    manage_service => false,
    enabled        => false,
  }
  include ::cinder::glance
  include ::cinder::ceilometer
  class { '::cinder::setup_test_volume':
    size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
  }

  $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
  if $cinder_enable_iscsi {
    $cinder_iscsi_backend = 'tripleo_iscsi'

    cinder::backend::iscsi { $cinder_iscsi_backend :
      iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
      iscsi_helper     => hiera('cinder_iscsi_helper'),
    }
  }

  if $enable_ceph {

    $ceph_pools = hiera('ceph_pools')
    ceph::pool { $ceph_pools :
      pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
      pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
      size    => hiera('ceph::profile::params::osd_pool_default_size'),
    }

    $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]

  } else {
    $cinder_pool_requires = []
  }

  if hiera('cinder_enable_rbd_backend', false) {
    $cinder_rbd_backend = 'tripleo_ceph'

    cinder::backend::rbd { $cinder_rbd_backend :
      rbd_pool        => hiera('cinder_rbd_pool_name'),
      rbd_user        => hiera('ceph_client_user_name'),
      rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
      require         => $cinder_pool_requires,
    }
  }

  if hiera('cinder_enable_eqlx_backend', false) {
    $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')

    cinder::backend::eqlx { $cinder_eqlx_backend :
      volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
      san_ip              => hiera('cinder::backend::eqlx::san_ip', undef),
      san_login           => hiera('cinder::backend::eqlx::san_login', undef),
      san_password        => hiera('cinder::backend::eqlx::san_password', undef),
      san_thin_provision  => hiera('cinder::backend::eqlx::san_thin_provision', undef),
      eqlx_group_name     => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
      eqlx_pool           => hiera('cinder::backend::eqlx::eqlx_pool', undef),
      eqlx_use_chap       => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
      eqlx_chap_login     => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
      eqlx_chap_password  => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
    }
  }

  if hiera('cinder_enable_dellsc_backend', false) {
    $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')

    cinder::backend::dellsc_iscsi { $cinder_dellsc_backend :
      volume_backend_name   => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
      san_ip                => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
      san_login             => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
      san_password          => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
      dell_sc_ssn           => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
      iscsi_ip_address      => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
      iscsi_port            => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
      dell_sc_api_port      => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
      dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
      dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
    }
  }

  if hiera('cinder_enable_netapp_backend', false) {
    $cinder_netapp_backend = hiera('cinder::backend::netapp::title')

    if hiera('cinder::backend::netapp::nfs_shares', undef) {
      $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
    }

    cinder::backend::netapp { $cinder_netapp_backend :
      netapp_login                 => hiera('cinder::backend::netapp::netapp_login', undef),
      netapp_password              => hiera('cinder::backend::netapp::netapp_password', undef),
      netapp_server_hostname       => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
      netapp_server_port           => hiera('cinder::backend::netapp::netapp_server_port', undef),
      netapp_size_multiplier       => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
      netapp_storage_family        => hiera('cinder::backend::netapp::netapp_storage_family', undef),
      netapp_storage_protocol      => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
      netapp_transport_type        => hiera('cinder::backend::netapp::netapp_transport_type', undef),
      netapp_vfiler                => hiera('cinder::backend::netapp::netapp_vfiler', undef),
      netapp_volume_list           => hiera('cinder::backend::netapp::netapp_volume_list', undef),
      netapp_vserver               => hiera('cinder::backend::netapp::netapp_vserver', undef),
      netapp_partner_backend_name  => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
      nfs_shares                   => $cinder_netapp_nfs_shares,
      nfs_shares_config            => hiera('cinder::backend::netapp::nfs_shares_config', undef),
      netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
      netapp_controller_ips        => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
      netapp_sa_password           => hiera('cinder::backend::netapp::netapp_sa_password', undef),
      netapp_storage_pools         => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
      netapp_eseries_host_type     => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
      netapp_webservice_path       => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
    }
  }

  if hiera('cinder_enable_nfs_backend', false) {
    $cinder_nfs_backend = 'tripleo_nfs'

    if str2bool($::selinux) {
      selboolean { 'virt_use_nfs':
        value      => on,
        persistent => true,
      } -> Package['nfs-utils']
    }

    package { 'nfs-utils': } ->
    cinder::backend::nfs { $cinder_nfs_backend:
      nfs_servers       => hiera('cinder_nfs_servers'),
      nfs_mount_options => hiera('cinder_nfs_mount_options',''),
      nfs_shares_config => '/etc/cinder/shares-nfs.conf',
    }
  }

  $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
  class { '::cinder::backends' :
    enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
  }

  class { '::sahara':
    sync_db => $sync_db,
  }
  class { '::sahara::service::api':
    manage_service => false,
    enabled        => false,
  }
  class { '::sahara::service::engine':
    manage_service => false,
    enabled        => false,
  }

  # swift proxy
  class { '::swift::proxy' :
    manage_service => $non_pcmk_start,
    enabled        => $non_pcmk_start,
  }
  include ::swift::proxy::proxy_logging
  include ::swift::proxy::healthcheck
  include ::swift::proxy::cache
  include ::swift::proxy::keystone
  include ::swift::proxy::authtoken
  include ::swift::proxy::staticweb
  include ::swift::proxy::ratelimit
  include ::swift::proxy::catch_errors
  include ::swift::proxy::tempurl
  include ::swift::proxy::formpost

  # swift storage
  if str2bool(hiera('enable_swift_storage', true)) {
    class {'::swift::storage::all':
      mount_check => str2bool(hiera('swift_mount_check')),
    }
    class {'::swift::storage::account':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    class {'::swift::storage::container':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    class {'::swift::storage::object':
      manage_service => $non_pcmk_start,
      enabled        => $non_pcmk_start,
    }
    if(!defined(File['/srv/node'])) {
      file { '/srv/node':
        ensure  => directory,
        owner   => 'swift',
        group   => 'swift',
        require => Package['openstack-swift'],
      }
    }
    $swift_components = ['account', 'container', 'object']
    swift::storage::filter::recon { $swift_components : }
    swift::storage::filter::healthcheck { $swift_components : }
  }

  # Ceilometer
  case downcase(hiera('ceilometer_backend')) {
    /mysql/: {
      $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
    }
    default: {
      $mongo_node_string = join($mongo_node_ips_with_port, ',')
      $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
    }
  }
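  # With the mongodb backend the connection string comes out like (illustrative):
  #   mongodb://[fd00::10]:27017,[fd00::11]:27017/ceilometer?replicaSet=tripleo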
  include ::ceilometer
  include ::ceilometer::config
  class { '::ceilometer::api' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::notification' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::central' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::collector' :
    manage_service => false,
    enabled        => false,
  }
  include ::ceilometer::expirer
  class { '::ceilometer::db' :
    database_connection => $ceilometer_database_connection,
    sync_db             => $sync_db,
  }
  include ::ceilometer::agent::auth

  Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
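  # The od(1) prefix draws 3 random bytes and sleeps 0-86399 seconds before
  # running the expirer, splaying the cron job across the day so the
  # controllers don't all hit the database at once.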

  # Heat
  include ::heat::config
  class { '::heat' :
    sync_db             => $sync_db,
    notification_driver => 'messaging',
  }
  class { '::heat::api' :
    manage_service => false,
    enabled        => false,
  }
  class { '::heat::api_cfn' :
    manage_service => false,
    enabled        => false,
  }
  class { '::heat::api_cloudwatch' :
    manage_service => false,
    enabled        => false,
  }
  class { '::heat::engine' :
    manage_service => false,
    enabled        => false,
  }

  # httpd/apache and horizon
  # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
  class { '::apache' :
    service_enable => false,
    # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
  }
  include ::keystone::wsgi::apache
  include ::apache::mod::status
  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    $_profile_support = 'cisco'
  } else {
    $_profile_support = 'None'
  }
  $neutron_options = {'profile_support' => $_profile_support }

  $memcached_ipv6 = hiera('memcached_ipv6', false)
  if $memcached_ipv6 {
    $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
  } else {
    $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
  }
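  # e.g. ['[fd00::10]', '[fd00::11]'] with IPv6, or ['192.0.2.10', ...]
  # otherwise; horizon pairs these with its cache_server_port (11211 by
  # default) when building its cache configuration.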

  class { '::horizon':
    cache_server_ip => $horizon_memcached_servers,
    neutron_options => $neutron_options,
  }

  $snmpd_user = hiera('snmpd_readonly_user_name')
  snmp::snmpv3_user { $snmpd_user:
    authtype => 'MD5',
    authpass => hiera('snmpd_readonly_user_password'),
  }
  class { '::snmp':
    agentaddress => ['udp:161','udp6:[::1]:161'],
    snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
  }

  hiera_include('controller_classes')

} #END STEP 3

if hiera('step') >= 4 {
  $keystone_enable_db_purge = hiera('keystone_enable_db_purge', true)
  $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
  $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
  $heat_enable_db_purge = hiera('heat_enable_db_purge', true)

  if $keystone_enable_db_purge {
    include ::keystone::cron::token_flush
  }
  if $nova_enable_db_purge {
    include ::nova::cron::archive_deleted_rows
  }
  if $cinder_enable_db_purge {
    include ::cinder::cron::db_purge
  }
  if $heat_enable_db_purge {
    include ::heat::cron::purge_deleted
  }

  if $pacemaker_master {

    if $enable_load_balancer {
      pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
        constraint_type => 'order',
        first_resource  => 'haproxy-clone',
        second_resource => 'openstack-core-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service['haproxy'],
                            Pacemaker::Resource::Ocf['openstack-core']],
      }
    }

    pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::apache::params::service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::apache::params::service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
      constraint_type => 'order',
      first_resource  => 'rabbitmq-clone',
      second_resource => 'openstack-core-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['rabbitmq'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'memcached-then-openstack-core-constraint':
      constraint_type => 'order',
      first_resource  => 'memcached-clone',
      second_resource => 'openstack-core-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service['memcached'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
      constraint_type => 'order',
      first_resource  => 'galera-master',
      second_resource => 'openstack-core-clone',
      first_action    => 'promote',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['galera'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
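    # Net effect: the openstack-core dummy starts only once haproxy, rabbitmq
    # and memcached are up and galera has been promoted; the services below
    # then order themselves after openstack-core-clone.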

    # Cinder
    pacemaker::resource::service { $::cinder::params::api_service :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::cinder::params::scheduler_service :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::cinder::params::volume_service : }

    pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::cinder::params::api_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['openstack-core'],
                          Pacemaker::Resource::Service[$::cinder::params::api_service]],
    }
    pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::cinder::params::api_service}-clone",
      second_resource => "${::cinder::params::scheduler_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
                          Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
    }
    pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
      source  => "${::cinder::params::scheduler_service}-clone",
      target  => "${::cinder::params::api_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
                  Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
    }
    pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
      constraint_type => 'order',
      first_resource  => "${::cinder::params::scheduler_service}-clone",
      second_resource => $::cinder::params::volume_service,
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
                          Pacemaker::Resource::Service[$::cinder::params::volume_service]],
    }
    pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
      source  => $::cinder::params::volume_service,
      target  => "${::cinder::params::scheduler_service}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
                  Pacemaker::Resource::Service[$::cinder::params::volume_service]],
    }

    # Sahara
    pacemaker::resource::service { $::sahara::params::api_service_name :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::sahara::params::engine_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::sahara::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }

    # Glance
    pacemaker::resource::service { $::glance::params::registry_service_name :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::glance::params::api_service_name :
      clone_params => 'interleave=true',
    }

    pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::glance::params::registry_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::glance::params::registry_service_name}-clone",
      second_resource => "${::glance::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                          Pacemaker::Resource::Service[$::glance::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
      source  => "${::glance::params::api_service_name}-clone",
      target  => "${::glance::params::registry_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
                  Pacemaker::Resource::Service[$::glance::params::api_service_name]],
    }
1326     if hiera('step') == 4 {
1327       # Neutron
1328       # NOTE(gfidente): Neutron will try to populate the database with some data
1329       # as soon as neutron-server is started; to avoid races we want to make this
1330       # happen only on one node, before normal Pacemaker initialization
1331       # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
1332       # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
1333       # will try to start the service while it's already started by Pacemaker
1334       # It would result to a deployment failure since systemd would return 1 to Puppet
1335       # and the overcloud would fail to deploy (6 would be returned).
1336       # This conditional prevents from a race condition during the deployment.
1337       # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
1338       exec { 'neutron-server-systemd-start-sleep' :
1339         command => 'systemctl start neutron-server && /usr/bin/sleep 5',
1340         path    => '/usr/bin',
1341         unless  => '/sbin/pcs resource show neutron-server',
1342       } ->
1343       pacemaker::resource::service { $::neutron::params::server_service:
1344         clone_params => 'interleave=true',
1345         require      => Pacemaker::Resource::Ocf['openstack-core']
1346       }
1347     } else {
1348       pacemaker::resource::service { $::neutron::params::server_service:
1349         clone_params => 'interleave=true',
1350         require      => Pacemaker::Resource::Ocf['openstack-core']
1351       }
1352     }
    if hiera('neutron::enable_l3_agent', true) {
      pacemaker::resource::service { $::neutron::params::l3_agent_service:
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::enable_dhcp_agent', true) {
      pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::enable_ovs_agent', true) {
      pacemaker::resource::service { $::neutron::params::ovs_agent_service:
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
      pacemaker::resource::service { 'tomcat':
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::enable_metadata_agent', true) {
      pacemaker::resource::service { $::neutron::params::metadata_agent_service:
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::enable_ovs_agent', true) {
      pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
        ocf_agent_name => 'neutron:OVSCleanup',
        clone_params   => 'interleave=true',
      }
      pacemaker::resource::ocf { 'neutron-netns-cleanup':
        ocf_agent_name => 'neutron:NetnsCleanup',
        clone_params   => 'interleave=true',
      }

      # Neutron: one ordered chain, ovs-cleanup --> netns-cleanup --> ovs-agent
      pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::ovs_cleanup_service}-clone",
        second_resource => 'neutron-netns-cleanup-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
                            Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
      }
      pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
        source  => 'neutron-netns-cleanup-clone',
        target  => "${::neutron::params::ovs_cleanup_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
                    Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
      }
      pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
        constraint_type => 'order',
        first_resource  => 'neutron-netns-cleanup-clone',
        second_resource => "${::neutron::params::ovs_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
                            Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
      }
      pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
        source  => "${::neutron::params::ovs_agent_service}-clone",
        target  => 'neutron-netns-cleanup-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
                    Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
      }
    }
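
    # To inspect the constraints this manifest ends up creating on a deployed
    # controller, something like the following can be run (illustrative;
    # output format varies by pcs version):
    #
    #   pcs constraint order show
    #   pcs constraint colocation show
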
    pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::neutron::params::server_service}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['openstack-core'],
                          Pacemaker::Resource::Service[$::neutron::params::server_service]],
    }
    if hiera('neutron::enable_ovs_agent', true) {
      pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::ovs_agent_service}-clone",
        second_resource => "${::neutron::params::dhcp_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
                            Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
      }
    }
    if hiera('neutron::enable_dhcp_agent', true) and hiera('neutron::enable_ovs_agent', true) {
      pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::server_service}-clone",
        second_resource => "${::neutron::params::ovs_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
                            Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
      }

      pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
        source  => "${::neutron::params::dhcp_agent_service}-clone",
        target  => "${::neutron::params::ovs_agent_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
                    Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
      }
    }
    if hiera('neutron::enable_dhcp_agent', true) and hiera('neutron::enable_l3_agent', true) {
      pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
        second_resource => "${::neutron::params::l3_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                            Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
      }
      pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
        source  => "${::neutron::params::l3_agent_service}-clone",
        target  => "${::neutron::params::dhcp_agent_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                    Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]],
      }
    }
    if hiera('neutron::enable_l3_agent', true) and hiera('neutron::enable_metadata_agent', true) {
      pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::l3_agent_service}-clone",
        second_resource => "${::neutron::params::metadata_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
                            Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
      pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
        source  => "${::neutron::params::metadata_agent_service}-clone",
        target  => "${::neutron::params::l3_agent_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
                    Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
    }
    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
      # MidoNet chain: keystone --> neutron-server --> dhcp --> metadata --> tomcat
      pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::server_service}-clone",
        second_resource => "${::neutron::params::dhcp_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
                            Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
      }
      pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
        second_resource => "${::neutron::params::metadata_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                            Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
      pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::metadata_agent_service}-clone",
        second_resource => 'tomcat-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
                            Pacemaker::Resource::Service['tomcat']],
      }
      pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
        source  => "${::neutron::params::metadata_agent_service}-clone",
        target  => "${::neutron::params::dhcp_agent_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                    Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
    }
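
    # Note: with the MidoNet plugin the REST API typically runs as a Tomcat
    # webapp, which is presumably why 'tomcat' is cluster-managed here and
    # ordered last in the chain above.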

    # Nova
    pacemaker::resource::service { $::nova::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::nova::params::conductor_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::nova::params::consoleauth_service_name :
      clone_params => 'interleave=true',
      require      => Pacemaker::Resource::Ocf['openstack-core'],
    }
    pacemaker::resource::service { $::nova::params::vncproxy_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::nova::params::scheduler_service_name :
      clone_params => 'interleave=true',
    }

    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::nova::params::consoleauth_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
      second_resource => "${::nova::params::vncproxy_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
      source  => "${::nova::params::vncproxy_service_name}-clone",
      target  => "${::nova::params::consoleauth_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::vncproxy_service_name}-clone",
      second_resource => "${::nova::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                          Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
      source  => "${::nova::params::api_service_name}-clone",
      target  => "${::nova::params::vncproxy_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                  Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::api_service_name}-clone",
      second_resource => "${::nova::params::scheduler_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                          Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
      source  => "${::nova::params::scheduler_service_name}-clone",
      target  => "${::nova::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::scheduler_service_name}-clone",
      second_resource => "${::nova::params::conductor_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                          Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
      source  => "${::nova::params::conductor_service_name}-clone",
      target  => "${::nova::params::scheduler_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
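
    # The constraints above yield a single ordered Nova start chain:
    #   openstack-core --> consoleauth --> vncproxy --> api --> scheduler --> conductor
    # with each consecutive pair also colocated (score INFINITY).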

    # Ceilometer
    case downcase(hiera('ceilometer_backend')) {
      /mysql/: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => Pacemaker::Resource::Ocf['openstack-core'],
        }
      }
      default: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => [Pacemaker::Resource::Ocf['openstack-core'],
                           Pacemaker::Resource::Service[$::mongodb::params::service_name]],
        }
      }
    }
    pacemaker::resource::service { $::ceilometer::params::collector_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::ocf { 'delay' :
      ocf_agent_name  => 'heartbeat:Delay',
      clone_params    => 'interleave=true',
      resource_params => 'startdelay=10',
    }
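
    # The heartbeat:Delay agent does nothing but sleep for startdelay seconds
    # when started; ordered after ceilometer-api below, it in effect adds a
    # ten-second pause at the end of the Ceilometer start chain.
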
    # Fedora's pcs does not support the `require-all` constraint parameter yet
    if $::operatingsystem == 'Fedora' {
      $redis_ceilometer_constraint_params = undef
    } else {
      $redis_ceilometer_constraint_params = 'require-all=false'
    }
    pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
      constraint_type   => 'order',
      first_resource    => 'redis-master',
      second_resource   => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_ceilometer_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
    }
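
    # With require-all=false, ceilometer-central only waits for redis to be
    # promoted somewhere in the cluster, not on every node. Illustratively
    # (service names assumed), the constraint is close to:
    #
    #   pcs constraint order promote redis-master then start \
    #     openstack-ceilometer-central-clone require-all=false
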
    pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
      second_resource => "${::ceilometer::params::collector_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::collector_service_name}-clone",
      second_resource => "${::ceilometer::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
      source  => "${::ceilometer::params::api_service_name}-clone",
      target  => "${::ceilometer::params::collector_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::api_service_name}-clone",
      second_resource => 'delay-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                          Pacemaker::Resource::Ocf['delay']],
    }
    pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
      source  => 'delay-clone',
      target  => "${::ceilometer::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Ocf['delay']],
    }
    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
        constraint_type => 'order',
        first_resource  => "${::mongodb::params::service_name}-clone",
        second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                            Pacemaker::Resource::Service[$::mongodb::params::service_name]],
      }
    }

    # Heat
    pacemaker::resource::service { $::heat::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cfn_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::engine_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_service_name}-clone",
      second_resource => "${::heat::params::api_cfn_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
      source  => "${::heat::params::api_cfn_service_name}-clone",
      target  => "${::heat::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_service_name]],
    }
    pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cfn_service_name}-clone",
      second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                          Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
      source  => "${::heat::params::api_cloudwatch_service_name}-clone",
      target  => "${::heat::params::api_cfn_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
    }
    pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
      second_resource => "${::heat::params::engine_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                          Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
      source  => "${::heat::params::engine_service_name}-clone",
      target  => "${::heat::params::api_cloudwatch_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                  Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
    }
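
    # Net effect: one ordered Heat start chain,
    #   openstack-core --> heat-api --> heat-api-cfn --> heat-api-cloudwatch --> heat-engine
    # with consecutive services colocated, and heat-api additionally ordered
    # after the Ceilometer notification agent.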

    # Horizon and Keystone
    pacemaker::resource::service { $::apache::params::service_name:
      clone_params     => 'interleave=true',
      verify_on_create => true,
      require          => [File['/etc/keystone/ssl/certs/ca.pem'],
                           File['/etc/keystone/ssl/private/signing_key.pem'],
                           File['/etc/keystone/ssl/certs/signing_cert.pem']],
    }
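
    # As far as puppet-pacemaker goes, verify_on_create should make the module
    # check that the resource actually started after creating it, rather than
    # only submitting it to the CIB; the File requirements ensure Keystone's
    # PKI material is in place before httpd (which serves both Horizon and
    # Keystone here) comes up.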

    # VSM
    if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
      pacemaker::resource::ocf { 'vsm-p' :
        ocf_agent_name  => 'heartbeat:VirtualDomain',
        resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
        require         => Class['n1k_vsm'],
        meta_params     => 'resource-stickiness=INFINITY',
      }
      if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
        pacemaker::resource::ocf { 'vsm-s' :
          ocf_agent_name  => 'heartbeat:VirtualDomain',
          resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
          require         => Class['n1k_vsm'],
          meta_params     => 'resource-stickiness=INFINITY',
        }
        pacemaker::constraint::colocation { 'vsm-colocation-constraint':
          source  => 'vsm-p',
          target  => 'vsm-s',
          score   => '-INFINITY',
          require => [Pacemaker::Resource::Ocf['vsm-p'],
                      Pacemaker::Resource::Ocf['vsm-s']],
        }
      }
    }
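
    # The -INFINITY score is an anti-colocation: the primary and secondary
    # Cisco N1KV VSM virtual machines must never run on the same controller.
    # Roughly equivalent, for illustration:
    #
    #   pcs constraint colocation add vsm-p with vsm-s -INFINITY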

  }

} #END STEP 4

if hiera('step') >= 5 {

  if $pacemaker_master {

    class { '::keystone::roles::admin' :
      require => Pacemaker::Resource::Service[$::apache::params::service_name],
    } ->
    class { '::keystone::endpoint' :
      require => Pacemaker::Resource::Service[$::apache::params::service_name],
    }
    include ::heat::keystone::domain
    Class['::keystone::roles::admin'] -> Class['::heat::keystone::domain']

  } else {
    # On non-master controllers we don't need to create the Keystone resources again
    class { '::heat::keystone::domain':
      manage_domain => false,
      manage_user   => false,
      manage_role   => false,
    }
  }

} #END STEP 5

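# Record the step this run completed on the node; join() with no separator
# simply concatenates, so at step 5, for example, this creates:
#   /var/lib/tripleo/installed-packages/overcloud_controller_pacemaker5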
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
package_manifest { $package_manifest_name: ensure => present }