808c7a207f850c4acde14c4567cc031979b2a54a
[apex-tripleo-heat-templates.git] / puppet / manifests / overcloud_controller_pacemaker.pp
1 # Copyright 2015 Red Hat, Inc.
2 # All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
14 # under the License.
15
16 Pcmk_resource <| |> {
17   tries     => 10,
18   try_sleep => 3,
19 }
20
21 include tripleo::packages
22
# The bootstrap node (as chosen by hiera 'bootstrap_nodeid') is the single
# node that sets up the pacemaker cluster and runs the database syncs; all
# other controllers only join. Hostname comparison is case-insensitive.
23 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
24   $pacemaker_master = true
25   $sync_db = true
26 } else {
27   $pacemaker_master = false
28   $sync_db = false
29 }
30
# Fencing is only activated on the last deployment step (>= 5), even when
# enable_fencing is requested, so stonith devices are created before use.
31 $enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5
32
33 # When to start and enable services which haven't been Pacemakerized
34 # FIXME: remove when we start all OpenStack services using Pacemaker
35 # (occurrences of this variable will be gradually replaced with false)
36 $non_pcmk_start = hiera('step') >= 4
37
# Step 1: install/configure the clustering substrate (corosync/pacemaker)
# and lay down configuration for rabbitmq, mongodb, memcached, redis and
# galera WITHOUT starting them — pacemaker takes ownership in later steps.
38 if hiera('step') >= 1 {
39
# Apply kernel sysctl settings from hiera before any services start.
40   create_resources(sysctl::value, hiera('sysctl_settings'), {})
41
42   if count(hiera('ntp::servers')) > 0 {
43     include ::ntp
44   }
45
46   $controller_node_ips = split(hiera('controller_node_ips'), ',')
47   $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
# haproxy is configured here but not started (haproxy_service_manage false);
# the VIPs are pacemaker-managed, hence manage_vip false.
48   class { '::tripleo::loadbalancer' :
49     controller_hosts       => $controller_node_ips,
50     controller_hosts_names => $controller_node_names,
51     manage_vip             => false,
52     mysql_clustercheck     => true,
53     haproxy_service_manage => false,
54   }
55
# corosync expects a space-separated member list; hiera provides it
# comma-separated, so rewrite globally. The chaining arrows (->) enforce:
# hacluster user exists -> pacemaker installed -> corosync cluster formed.
# Only the bootstrap node performs cluster setup (setup_cluster).
56   $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
57   user { 'hacluster':
58    ensure => present,
59   } ->
60   class { '::pacemaker':
61     hacluster_pwd => hiera('hacluster_pwd'),
62   } ->
63   class { '::pacemaker::corosync':
64     cluster_members => $pacemaker_cluster_members,
65     setup_cluster   => $pacemaker_master,
66   }
67   class { '::pacemaker::stonith':
68     disable => !$enable_fencing,
69   }
70   if $enable_fencing {
71     include tripleo::fencing
72
73     # enable stonith after all fencing devices have been created
74     Class['tripleo::fencing'] -> Class['pacemaker::stonith']
75   }
76
77   # FIXME(gfidente): sets 90secs as default start timeout op
78   # param; until we can use pcmk global defaults we'll still
79   # need to add it to every resource which redefines op params
80   Pacemaker::Resource::Service {
81     op_params => 'start timeout=90s',
82   }
83
84   # Only configure RabbitMQ in this step, don't start it yet to
85   # avoid races where non-master nodes attempt to start without
86   # config (eg. binding on 0.0.0.0)
87   # The module ignores erlang_cookie if cluster_config is false
88   class { '::rabbitmq':
89     service_manage          => false,
90     tcp_keepalive           => false,
91     config_kernel_variables => hiera('rabbitmq_kernel_variables'),
92     config_variables        => hiera('rabbitmq_config_variables'),
93     environment_variables   => hiera('rabbitmq_environment'),
94   } ->
# All nodes must share the same erlang cookie for rabbitmq clustering;
# written after the rabbitmq class so the rabbitmq user/group exist.
95   file { '/var/lib/rabbitmq/.erlang.cookie':
96     ensure  => 'present',
97     owner   => 'rabbitmq',
98     group   => 'rabbitmq',
99     mode    => '0400',
100     content => hiera('rabbitmq::erlang_cookie'),
101     replace => true,
102   }
103
# mongodb is only needed when ceilometer uses it as its backend.
104   if downcase(hiera('ceilometer_backend')) == 'mongodb' {
105     include ::mongodb::globals
106     class { '::mongodb::server' :
107       service_manage => false,
108     }
109   }
110
111   # Memcached
112   class {'::memcached' :
113     service_manage => false,
114   }
115
116   # Redis
117   class { '::redis' :
118     service_manage => false,
119     notify_service => false,
120   }
121
122   # Galera
123   if str2bool(hiera('enable_galera', 'true')) {
124     $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
125   } else {
126     $mysql_config_file = '/etc/my.cnf.d/server.cnf'
127   }
128   $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
129   $galera_nodes_count = count(split($galera_nodes, ','))
130
# wsrep/galera tuning written into the mysql config; bind address and
# max_connections come from hiera so they can differ per deployment.
131   $mysqld_options = {
132     'mysqld' => {
133       'skip-name-resolve'             => '1',
134       'binlog_format'                 => 'ROW',
135       'default-storage-engine'        => 'innodb',
136       'innodb_autoinc_lock_mode'      => '2',
137       'innodb_locks_unsafe_for_binlog'=> '1',
138       'query_cache_size'              => '0',
139       'query_cache_type'              => '0',
140       'bind-address'                  => hiera('mysql_bind_host'),
141       'max_connections'               => hiera('mysql_max_connections'),
142       'open_files_limit'              => '-1',
143       'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
144       'wsrep_cluster_name'            => 'galera_cluster',
145       'wsrep_slave_threads'           => '1',
146       'wsrep_certify_nonPK'           => '1',
147       'wsrep_max_ws_rows'             => '131072',
148       'wsrep_max_ws_size'             => '1073741824',
149       'wsrep_debug'                   => '0',
150       'wsrep_convert_LOCK_to_trx'     => '0',
151       'wsrep_retry_autocommit'        => '1',
152       'wsrep_auto_increment_control'  => '1',
153       'wsrep_drupal_282555_workaround'=> '0',
154       'wsrep_causal_reads'            => '0',
155       'wsrep_notify_cmd'              => '',
156       'wsrep_sst_method'              => 'rsync',
157     }
158   }
159
# mysqld is deliberately neither managed nor enabled here: the pacemaker
# 'galera' OCF resource created in step 2 is responsible for starting it.
160   class { '::mysql::server':
161     create_root_user        => false,
162     create_root_my_cnf      => false,
163     config_file             => $mysql_config_file,
164     override_options        => $mysqld_options,
165     remove_default_accounts => $pacemaker_master,
166     service_manage          => false,
167     service_enabled         => false,
168   }
169
170 }
171
# Step 2: from the bootstrap node only, create the pacemaker resources
# (haproxy clone, VIPs with order/colocation constraints, rabbitmq, galera,
# redis, mongodb), then — on all nodes — wait for galera to become writable
# and create the OpenStack database schemas.
172 if hiera('step') >= 2 {
173
174   # NOTE(gfidente): the following vars are needed on all nodes so they
175   # need to stay out of pacemaker_master conditional
176   $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
177   $mongodb_replset = hiera('mongodb::server::replset')
178
# Pacemaker resources live in the cluster-wide CIB, so they are created
# exactly once, from the bootstrap node.
179   if $pacemaker_master {
180
181     include pacemaker::resource_defaults
182
183     # FIXME: we should not have to access tripleo::loadbalancer class
184     # parameters here to configure pacemaker VIPs. The configuration
185     # of pacemaker VIPs could move into puppet-tripleo or we should
186     # make use of less specific hiera parameters here for the settings.
187     pacemaker::resource::service { 'haproxy':
188       clone_params => true,
189     }
190
# For each VIP: an IPaddr2 resource, a start-ordering constraint (VIP
# before haproxy, kind=Optional so a VIP failure does not block haproxy)
# and a colocation constraint pinning the VIP to a node running haproxy.
# Non-control VIPs are only created when they differ from the control VIP
# (isolated networks may all collapse onto the one address).
191     $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
192     pacemaker::resource::ip { 'control_vip':
193       ip_address => $control_vip,
194     }
195     pacemaker::constraint::base { 'control_vip-then-haproxy':
196       constraint_type   => 'order',
197       first_resource    => "ip-${control_vip}",
198       second_resource   => 'haproxy-clone',
199       first_action      => 'start',
200       second_action     => 'start',
201       constraint_params => 'kind=Optional',
202       require => [Pacemaker::Resource::Service['haproxy'],
203                   Pacemaker::Resource::Ip['control_vip']],
204     }
205     pacemaker::constraint::colocation { 'control_vip-with-haproxy':
206       source  => "ip-${control_vip}",
207       target  => 'haproxy-clone',
208       score   => 'INFINITY',
209       require => [Pacemaker::Resource::Service['haproxy'],
210                   Pacemaker::Resource::Ip['control_vip']],
211     }
212
213     $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
214     if $public_vip and $public_vip != $control_vip {
215       pacemaker::resource::ip { 'public_vip':
216         ip_address => $public_vip,
217       }
218       pacemaker::constraint::base { 'public_vip-then-haproxy':
219         constraint_type   => 'order',
220         first_resource    => "ip-${public_vip}",
221         second_resource   => 'haproxy-clone',
222         first_action      => 'start',
223         second_action     => 'start',
224         constraint_params => 'kind=Optional',
225         require => [Pacemaker::Resource::Service['haproxy'],
226                     Pacemaker::Resource::Ip['public_vip']],
227       }
228       pacemaker::constraint::colocation { 'public_vip-with-haproxy':
229         source  => "ip-${public_vip}",
230         target  => 'haproxy-clone',
231         score   => 'INFINITY',
232         require => [Pacemaker::Resource::Service['haproxy'],
233                     Pacemaker::Resource::Ip['public_vip']],
234       }
235     }
236
237     $redis_vip = hiera('redis_vip')
238     if $redis_vip and $redis_vip != $control_vip {
239       pacemaker::resource::ip { 'redis_vip':
240         ip_address => $redis_vip,
241       }
242       pacemaker::constraint::base { 'redis_vip-then-haproxy':
243         constraint_type   => 'order',
244         first_resource    => "ip-${redis_vip}",
245         second_resource   => 'haproxy-clone',
246         first_action      => 'start',
247         second_action     => 'start',
248         constraint_params => 'kind=Optional',
249         require => [Pacemaker::Resource::Service['haproxy'],
250                     Pacemaker::Resource::Ip['redis_vip']],
251       }
252       pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
253         source  => "ip-${redis_vip}",
254         target  => 'haproxy-clone',
255         score   => 'INFINITY',
256         require => [Pacemaker::Resource::Service['haproxy'],
257                     Pacemaker::Resource::Ip['redis_vip']],
258       }
259     }
260
261     $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
262     if $internal_api_vip and $internal_api_vip != $control_vip {
263       pacemaker::resource::ip { 'internal_api_vip':
264         ip_address => $internal_api_vip,
265       }
266       pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
267         constraint_type   => 'order',
268         first_resource    => "ip-${internal_api_vip}",
269         second_resource   => 'haproxy-clone',
270         first_action      => 'start',
271         second_action     => 'start',
272         constraint_params => 'kind=Optional',
273         require => [Pacemaker::Resource::Service['haproxy'],
274                     Pacemaker::Resource::Ip['internal_api_vip']],
275       }
276       pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
277         source  => "ip-${internal_api_vip}",
278         target  => 'haproxy-clone',
279         score   => 'INFINITY',
280         require => [Pacemaker::Resource::Service['haproxy'],
281                     Pacemaker::Resource::Ip['internal_api_vip']],
282       }
283     }
284
285     $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
286     if $storage_vip and $storage_vip != $control_vip {
287       pacemaker::resource::ip { 'storage_vip':
288         ip_address => $storage_vip,
289       }
290       pacemaker::constraint::base { 'storage_vip-then-haproxy':
291         constraint_type   => 'order',
292         first_resource    => "ip-${storage_vip}",
293         second_resource   => 'haproxy-clone',
294         first_action      => 'start',
295         second_action     => 'start',
296         constraint_params => 'kind=Optional',
297         require => [Pacemaker::Resource::Service['haproxy'],
298                     Pacemaker::Resource::Ip['storage_vip']],
299       }
300       pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
301         source  => "ip-${storage_vip}",
302         target  => 'haproxy-clone',
303         score   => 'INFINITY',
304         require => [Pacemaker::Resource::Service['haproxy'],
305                     Pacemaker::Resource::Ip['storage_vip']],
306       }
307     }
308
309     $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
310     if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
311       pacemaker::resource::ip { 'storage_mgmt_vip':
312         ip_address => $storage_mgmt_vip,
313       }
314       pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
315         constraint_type   => 'order',
316         first_resource    => "ip-${storage_mgmt_vip}",
317         second_resource   => 'haproxy-clone',
318         first_action      => 'start',
319         second_action     => 'start',
320         constraint_params => 'kind=Optional',
321         require => [Pacemaker::Resource::Service['haproxy'],
322                     Pacemaker::Resource::Ip['storage_mgmt_vip']],
323       }
324       pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
325         source  => "ip-${storage_mgmt_vip}",
326         target  => 'haproxy-clone',
327         score   => 'INFINITY',
328         require => [Pacemaker::Resource::Service['haproxy'],
329                     Pacemaker::Resource::Ip['storage_mgmt_vip']],
330       }
331     }
332
333     pacemaker::resource::service { $::memcached::params::service_name :
334       clone_params => true,
335       require      => Class['::memcached'],
336     }
337
# rabbitmq runs as an OCF clone; the HA policy mirrors all non-amq queues.
338     pacemaker::resource::ocf { 'rabbitmq':
339       ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
340       resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
341       clone_params    => 'ordered=true interleave=true',
342       require         => Class['::rabbitmq'],
343     }
344
345     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
346       pacemaker::resource::service { $::mongodb::params::service_name :
347         op_params    => 'start timeout=120s',
348         clone_params => true,
349         require      => Class['::mongodb::server'],
350       }
351       # NOTE (spredzy) : The replset can only be run
352       # once all the nodes have joined the cluster.
353       mongodb_conn_validator { $mongo_node_ips_with_port :
354         timeout => '600',
355         require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
356         before  => Mongodb_replset[$mongodb_replset],
357       }
358       mongodb_replset { $mongodb_replset :
359         members => $mongo_node_ips_with_port,
360       }
361     }
362
# galera as a master/slave OCF resource; master-max equals the node count
# so every controller is writable. Gated 'before' the galera-ready exec.
363     pacemaker::resource::ocf { 'galera' :
364       ocf_agent_name  => 'heartbeat:galera',
365       op_params       => 'promote timeout=300s on-fail=block',
366       master_params   => '',
367       meta_params     => "master-max=${galera_nodes_count} ordered=true",
368       resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
369       require         => Class['::mysql::server'],
370       before          => Exec['galera-ready'],
371     }
372
373     pacemaker::resource::ocf { 'redis':
374       ocf_agent_name  => 'heartbeat:redis',
375       master_params   => '',
376       meta_params     => 'notify=true ordered=true interleave=true',
377       resource_params => 'wait_last_known_master=true',
378       require         => Class['::redis'],
379     }
380
381   }
382
# Poll (up to 180 tries, 10s apart) until clustercheck reports the local
# galera node synced; everything that touches the DB depends on this.
# AVAILABLE_WHEN_READONLY=0 makes a read-only node count as NOT ready.
383   exec { 'galera-ready' :
384     command     => '/usr/bin/clustercheck >/dev/null',
385     timeout     => 30,
386     tries       => 180,
387     try_sleep   => 10,
388     environment => ["AVAILABLE_WHEN_READONLY=0"],
389     require     => File['/etc/sysconfig/clustercheck'],
390   }
391
392   file { '/etc/sysconfig/clustercheck' :
393     ensure  => file,
394     content => "MYSQL_USERNAME=root\n
395 MYSQL_PASSWORD=''\n
396 MYSQL_HOST=localhost\n",
397   }
398
# Expose clustercheck on :9200 via xinetd so haproxy can health-check
# galera members (mysql_clustercheck => true in the loadbalancer class).
399   xinetd::service { 'galera-monitor' :
400     port           => '9200',
401     server         => '/usr/bin/clustercheck',
402     per_source     => 'UNLIMITED',
403     log_on_success => '',
404     log_on_failure => 'HOST',
405     flags          => 'REUSE',
406     service_type   => 'UNLISTED',
407     user           => 'root',
408     group          => 'root',
409     require        => File['/etc/sysconfig/clustercheck'],
410   }
411
412   # Create all the database schemas
413   if $sync_db {
414     class { 'keystone::db::mysql':
415       require       => Exec['galera-ready'],
416     }
417     class { 'glance::db::mysql':
418       require       => Exec['galera-ready'],
419     }
420     class { 'nova::db::mysql':
421       require       => Exec['galera-ready'],
422     }
423     class { 'neutron::db::mysql':
424       require       => Exec['galera-ready'],
425     }
426     class { 'cinder::db::mysql':
427       require       => Exec['galera-ready'],
428     }
429     class { 'heat::db::mysql':
430       require       => Exec['galera-ready'],
431     }
432
433     if downcase(hiera('ceilometer_backend')) == 'mysql' {
434       class { 'ceilometer::db::mysql':
435         require       => Exec['galera-ready'],
436       }
437     }
438   }
439
440   # pre-install swift here so we can build rings
441   include ::swift
442
443   # Ceph
444   $enable_ceph = hiera('ceph_storage_count', 0) > 0
445
446   if $enable_ceph {
447     class { 'ceph::profile::params':
448       mon_initial_members => downcase(hiera('ceph_mon_initial_members'))
449     }
450     include ::ceph::profile::mon
451   }
452
453   if str2bool(hiera('enable_ceph_storage', 'false')) {
# ceph-osd is known to need SELinux permissive here; flip it both
# persistently (config file) and for the running system before the OSDs.
454     if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
455       exec { 'set selinux to permissive on boot':
456         command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
457         onlyif  => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
458         path    => ["/usr/bin", "/usr/sbin"],
459       }
460
461       exec { 'set selinux to permissive':
462         command => "setenforce 0",
463         onlyif  => "which setenforce && getenforce | grep -i 'enforcing'",
464         path    => ["/usr/bin", "/usr/sbin"],
465       } -> Class['ceph::profile::osd']
466     }
467
468     include ::ceph::profile::osd
469   }
470
471   if str2bool(hiera('enable_external_ceph', 'false')) {
472     include ::ceph::profile::client
473   }
474
475
476 } #END STEP 2
477
# Step 3: configure every OpenStack service (keystone, glance, nova,
# neutron, cinder, swift, ceilometer, heat, horizon). Nearly all services
# are declared with manage_service => false / enabled => false because
# pacemaker takes over starting them from step 4; only the bootstrap node
# ($sync_db) runs database migrations.
478 if hiera('step') >= 3 {
479
480   class { '::keystone':
481     sync_db => $sync_db,
482     manage_service => false,
483     enabled => false,
484   }
485
486   #TODO: need a cleanup-keystone-tokens.sh solution here
487   keystone_config {
488     'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
489   }
# PKI signing material is distributed via hiera so every controller
# carries identical certs/keys for token validation.
490   file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
491     ensure  => 'directory',
492     owner   => 'keystone',
493     group   => 'keystone',
494     require => Package['keystone'],
495   }
496   file { '/etc/keystone/ssl/certs/signing_cert.pem':
497     content => hiera('keystone_signing_certificate'),
498     owner   => 'keystone',
499     group   => 'keystone',
500     notify  => Service['keystone'],
501     require => File['/etc/keystone/ssl/certs'],
502   }
503   file { '/etc/keystone/ssl/private/signing_key.pem':
504     content => hiera('keystone_signing_key'),
505     owner   => 'keystone',
506     group   => 'keystone',
507     notify  => Service['keystone'],
508     require => File['/etc/keystone/ssl/private'],
509   }
510   file { '/etc/keystone/ssl/certs/ca.pem':
511     content => hiera('keystone_ca_certificate'),
512     owner   => 'keystone',
513     group   => 'keystone',
514     notify  => Service['keystone'],
515     require => File['/etc/keystone/ssl/certs'],
516   }
517
# Map the configured glance backend onto its store class; the http store
# is always enabled in addition to the chosen backend store.
518   $glance_backend = downcase(hiera('glance_backend', 'swift'))
519   case $glance_backend {
520       swift: { $backend_store = 'glance.store.swift.Store' }
521       file: { $backend_store = 'glance.store.filesystem.Store' }
522       rbd: { $backend_store = 'glance.store.rbd.Store' }
523       default: { fail('Unrecognized glance_backend parameter.') }
524   }
525   $http_store = ['glance.store.http.Store']
526   $glance_store = concat($http_store, $backend_store)
527
528   # TODO: notifications, scrubber, etc.
529   include ::glance
530   class { 'glance::api':
531     known_stores => $glance_store,
532     manage_service => false,
533     enabled => false,
534   }
535   class { '::glance::registry' :
536     sync_db => $sync_db,
537     manage_service => false,
538     enabled => false,
539   }
540   include join(['::glance::backend::', $glance_backend])
541
542   class { '::nova' :
543     memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'),
544   }
545
546   include ::nova::config
547
548   class { '::nova::api' :
549     sync_db => $sync_db,
550     manage_service => false,
551     enabled => false,
552   }
553   class { '::nova::cert' :
554     manage_service => false,
555     enabled => false,
556   }
557   class { '::nova::conductor' :
558     manage_service => false,
559     enabled => false,
560   }
561   class { '::nova::consoleauth' :
562     manage_service => false,
563     enabled => false,
564   }
565   class { '::nova::vncproxy' :
566     manage_service => false,
567     enabled => false,
568   }
569   include ::nova::scheduler::filter
570   class { '::nova::scheduler' :
571     manage_service => false,
572     enabled => false,
573   }
574   include ::nova::network::neutron
575
576   # Neutron class definitions
577   include ::neutron
578   class { '::neutron::server' :
579     sync_db => $sync_db,
580     manage_service => false,
581     enabled => false,
582   }
583   class { '::neutron::agents::dhcp' :
584     manage_service => false,
585     enabled => false,
586   }
587   class { '::neutron::agents::l3' :
588     manage_service => false,
589     enabled => false,
590   }
591   class { 'neutron::agents::metadata':
592     manage_service => false,
593     enabled => false,
594   }
595   file { '/etc/neutron/dnsmasq-neutron.conf':
596     content => hiera('neutron_dnsmasq_options'),
597     owner   => 'neutron',
598     group   => 'neutron',
599     notify  => Service['neutron-dhcp-service'],
600     require => Package['neutron'],
601   }
602   class { 'neutron::plugins::ml2':
603     flat_networks   => split(hiera('neutron_flat_networks'), ','),
604     tenant_network_types => [hiera('neutron_tenant_network_type')],
605     mechanism_drivers   => [hiera('neutron_mechanism_drivers')],
606   }
607   class { 'neutron::agents::ml2::ovs':
608     manage_service   => false,
609     enabled          => false,
610     bridge_mappings  => split(hiera('neutron_bridge_mappings'), ','),
611     tunnel_types     => split(hiera('neutron_tunnel_types'), ','),
612   }
613
# Optional vendor ML2 mechanism drivers, enabled via hiera flags.
614   if 'cisco_ucsm' in hiera('neutron_mechanism_drivers') {
615     include ::neutron::plugins::ml2::cisco::ucsm
616   }
617   if 'cisco_nexus' in hiera('neutron_mechanism_drivers') {
618     include ::neutron::plugins::ml2::cisco::nexus
619     include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
620   }
621
622   if hiera('neutron_enable_bigswitch_ml2', false) {
623     include neutron::plugins::ml2::bigswitch::restproxy
624   }
625   neutron_l3_agent_config {
626     'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
627   }
628   neutron_dhcp_agent_config {
629     'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
630   }
631
632   include ::cinder
633   class { '::cinder::api':
634     sync_db => $sync_db,
635     manage_service => false,
636     enabled => false,
637   }
638   class { '::cinder::scheduler' :
639     manage_service => false,
640     enabled => false,
641   }
642   class { '::cinder::volume' :
643     manage_service => false,
644     enabled => false,
645   }
646   include ::cinder::glance
647   class {'cinder::setup_test_volume':
648     size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
649   }
650
# Each enabled cinder backend sets its $cinder_*_backend variable; these
# are collected (undef values dropped) into enabled_backends at the end.
651   $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
652   if $cinder_enable_iscsi {
653     $cinder_iscsi_backend = 'tripleo_iscsi'
654
655     cinder::backend::iscsi { $cinder_iscsi_backend :
656       iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
657       iscsi_helper     => hiera('cinder_iscsi_helper'),
658     }
659   }
660
661   if $enable_ceph {
662
# Resource defaults for every ceph::pool declared below.
663     Ceph_pool {
664       pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
665       pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
666       size    => hiera('ceph::profile::params::osd_pool_default_size'),
667     }
668
669     $ceph_pools = hiera('ceph_pools')
670     ceph::pool { $ceph_pools : }
671
672     $cinder_pool_requires = [Ceph::Pool['volumes']]
673
674   } else {
675     $cinder_pool_requires = []
676   }
677
678   if hiera('cinder_enable_rbd_backend', false) {
679     $cinder_rbd_backend = 'tripleo_ceph'
680
681     cinder::backend::rbd { $cinder_rbd_backend :
682       rbd_pool        => 'volumes',
683       rbd_user        => 'openstack',
684       rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
685       require         => $cinder_pool_requires,
686     }
687   }
688
689   if hiera('cinder_enable_netapp_backend', false) {
690     $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
691
692     cinder_config {
693       "${cinder_netapp_backend}/host": value => 'hostgroup';
694     }
695
696     if hiera('cinder::backend::netapp::nfs_shares', undef) {
697       $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
698     }
699
700     cinder::backend::netapp { $cinder_netapp_backend :
701       netapp_login                 => hiera('cinder::backend::netapp::netapp_login', undef),
702       netapp_password              => hiera('cinder::backend::netapp::netapp_password', undef),
703       netapp_server_hostname       => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
704       netapp_server_port           => hiera('cinder::backend::netapp::netapp_server_port', undef),
705       netapp_size_multiplier       => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
706       netapp_storage_family        => hiera('cinder::backend::netapp::netapp_storage_family', undef),
707       netapp_storage_protocol      => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
708       netapp_transport_type        => hiera('cinder::backend::netapp::netapp_transport_type', undef),
709       netapp_vfiler                => hiera('cinder::backend::netapp::netapp_vfiler', undef),
710       netapp_volume_list           => hiera('cinder::backend::netapp::netapp_volume_list', undef),
711       netapp_vserver               => hiera('cinder::backend::netapp::netapp_vserver', undef),
712       netapp_partner_backend_name  => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
713       nfs_shares                   => $cinder_netapp_nfs_shares,
714       nfs_shares_config            => hiera('cinder::backend::netapp::nfs_shares_config', undef),
715       netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
716       netapp_controller_ips        => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
717       netapp_sa_password           => hiera('cinder::backend::netapp::netapp_sa_password', undef),
718       netapp_storage_pools         => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
719       netapp_eseries_host_type     => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
720       netapp_webservice_path       => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
721     }
722   }
723
724   if hiera('cinder_enable_nfs_backend', false) {
725     $cinder_nfs_backend = 'tripleo_nfs'
726
# NFS-backed volumes need the virt_use_nfs SELinux boolean when SELinux
# is active; ordered before nfs-utils so the boolean is set first.
727     if ($::selinux != "false") {
728       selboolean { 'virt_use_nfs':
729           value => on,
730           persistent => true,
731       } -> Package['nfs-utils']
732     }
733
734     package {'nfs-utils': } ->
735     cinder::backend::nfs { $cinder_nfs_backend:
736       nfs_servers         => hiera('cinder_nfs_servers'),
737       nfs_mount_options   => hiera('cinder_nfs_mount_options'),
738       nfs_shares_config   => '/etc/cinder/shares-nfs.conf',
739     }
740   }
741
742   $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend])
743   class { '::cinder::backends' :
744     enabled_backends => $cinder_enabled_backends,
745   }
746
747   # swift proxy
748   class { '::swift::proxy' :
749     manage_service => $non_pcmk_start,
750     enabled => $non_pcmk_start,
751   }
752   include ::swift::proxy::proxy_logging
753   include ::swift::proxy::healthcheck
754   include ::swift::proxy::cache
755   include ::swift::proxy::keystone
756   include ::swift::proxy::authtoken
757   include ::swift::proxy::staticweb
758   include ::swift::proxy::ratelimit
759   include ::swift::proxy::catch_errors
760   include ::swift::proxy::tempurl
761   include ::swift::proxy::formpost
762
763   # swift storage
764   if str2bool(hiera('enable_swift_storage', 'true')) {
765     class {'::swift::storage::all':
766       mount_check => str2bool(hiera('swift_mount_check'))
767     }
768     class {'::swift::storage::account':
769       manage_service => $non_pcmk_start,
770       enabled => $non_pcmk_start,
771     }
772     class {'::swift::storage::container':
773       manage_service => $non_pcmk_start,
774       enabled => $non_pcmk_start,
775     }
776     class {'::swift::storage::object':
777       manage_service => $non_pcmk_start,
778       enabled => $non_pcmk_start,
779     }
780     if(!defined(File['/srv/node'])) {
781       file { '/srv/node':
782         ensure  => directory,
783         owner   => 'swift',
784         group   => 'swift',
785         require => Package['openstack-swift'],
786       }
787     }
788     $swift_components = ['account', 'container', 'object']
789     swift::storage::filter::recon { $swift_components : }
790     swift::storage::filter::healthcheck { $swift_components : }
791   }
792
793   # Ceilometer
# Backend selection: mysql uses the galera connection string, anything
# else falls back to the mongodb replica set built in step 2.
794   $ceilometer_backend = downcase(hiera('ceilometer_backend'))
795   case $ceilometer_backend {
796     /mysql/ : {
797       $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
798     }
799     default : {
800       $mongo_node_string = join($mongo_node_ips_with_port, ',')
801       $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
802     }
803   }
804   include ::ceilometer
805   class { '::ceilometer::api' :
806     manage_service => false,
807     enabled => false,
808   }
809   class { '::ceilometer::agent::notification' :
810     manage_service => false,
811     enabled => false,
812   }
813   class { '::ceilometer::agent::central' :
814     manage_service => false,
815     enabled => false,
816   }
817   class { '::ceilometer::alarm::notifier' :
818     manage_service => false,
819     enabled => false,
820   }
821   class { '::ceilometer::alarm::evaluator' :
822     manage_service => false,
823     enabled => false,
824   }
825   class { '::ceilometer::collector' :
826     manage_service => false,
827     enabled => false,
828   }
829   include ::ceilometer::expirer
830   class { '::ceilometer::db' :
831     database_connection => $ceilometer_database_connection,
832     sync_db             => $sync_db,
833   }
834   include ceilometer::agent::auth
835
# Spread the expirer cron across the day with a random sleep (0-86399s)
# so all controllers do not hit the database simultaneously.
836   Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
837
838   # Heat
839   class { '::heat' :
840     sync_db => $sync_db,
841   }
842   class { '::heat::api' :
843     manage_service => false,
844     enabled => false,
845   }
846   class { '::heat::api_cfn' :
847     manage_service => false,
848     enabled => false,
849   }
850   class { '::heat::api_cloudwatch' :
851     manage_service => false,
852     enabled => false,
853   }
854   class { '::heat::engine' :
855     manage_service => false,
856     enabled => false,
857   }
858
859   # httpd/apache and horizon
860   # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
861   include ::apache
862   include ::apache::mod::status
862   $vhost_params = {
864     add_listen => false,
865     priority   => 10,
866   }
867   class { 'horizon':
868     cache_server_ip    => hiera('memcache_node_ips', '127.0.0.1'),
869     vhost_extra_params => $vhost_params,
870     server_aliases     => $::hostname,
871   }
872
873   $snmpd_user = hiera('snmpd_readonly_user_name')
874   snmp::snmpv3_user { $snmpd_user:
875     authtype => 'MD5',
876     authpass => hiera('snmpd_readonly_user_password'),
877   }
878   class { 'snmp':
879     agentaddress => ['udp:161','udp6:[::1]:161'],
880     snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
881   }
882
# Hook point: deployments can append arbitrary extra classes via hiera.
883   hiera_include('controller_classes')
884
885 } #END STEP 3
886
887 if hiera('step') >= 4 {
  # Install the cron job that periodically purges expired Keystone tokens.
  include ::keystone::cron::token_flush
889
890   if $pacemaker_master {
891
892     # Keystone
893     pacemaker::resource::service { $::keystone::params::service_name :
894       clone_params => "interleave=true",
895     }
896
897     pacemaker::constraint::base { 'haproxy-then-keystone-constraint':
898       constraint_type => 'order',
899       first_resource  => "haproxy-clone",
900       second_resource => "${::keystone::params::service_name}-clone",
901       first_action    => 'start',
902       second_action   => 'start',
903       require         => [Pacemaker::Resource::Service['haproxy'],
904                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
905     }
906     pacemaker::constraint::base { 'rabbitmq-then-keystone-constraint':
907       constraint_type => 'order',
908       first_resource  => "rabbitmq-clone",
909       second_resource => "${::keystone::params::service_name}-clone",
910       first_action    => 'start',
911       second_action   => 'start',
912       require         => [Pacemaker::Resource::Ocf['rabbitmq'],
913                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
914     }
915     pacemaker::constraint::base { 'memcached-then-keystone-constraint':
916       constraint_type => 'order',
917       first_resource  => "memcached-clone",
918       second_resource => "${::keystone::params::service_name}-clone",
919       first_action    => 'start',
920       second_action   => 'start',
921       require         => [Pacemaker::Resource::Service['memcached'],
922                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
923     }
924     pacemaker::constraint::base { 'galera-then-keystone-constraint':
925       constraint_type => 'order',
926       first_resource  => "galera-master",
927       second_resource => "${::keystone::params::service_name}-clone",
928       first_action    => 'promote',
929       second_action   => 'start',
930       require         => [Pacemaker::Resource::Ocf['galera'],
931                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
932     }
933
934     # Cinder
935     pacemaker::resource::service { $::cinder::params::api_service :
936       clone_params => "interleave=true",
937       require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
938     }
939     pacemaker::resource::service { $::cinder::params::scheduler_service :
940       clone_params => "interleave=true",
941     }
942     pacemaker::resource::service { $::cinder::params::volume_service : }
943
944     pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
945       constraint_type => 'order',
946       first_resource  => "${::keystone::params::service_name}-clone",
947       second_resource => "${::cinder::params::api_service}-clone",
948       first_action    => 'start',
949       second_action   => 'start',
950       require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
951                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
952     }
953     pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
954       constraint_type => "order",
955       first_resource => "${::cinder::params::api_service}-clone",
956       second_resource => "${::cinder::params::scheduler_service}-clone",
957       first_action => "start",
958       second_action => "start",
959       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
960                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
961     }
962     pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
963       source => "${::cinder::params::scheduler_service}-clone",
964       target => "${::cinder::params::api_service}-clone",
965       score => "INFINITY",
966       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
967                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
968     }
969     pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
970       constraint_type => "order",
971       first_resource => "${::cinder::params::scheduler_service}-clone",
972       second_resource => "${::cinder::params::volume_service}",
973       first_action => "start",
974       second_action => "start",
975       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
976                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
977     }
978     pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
979       source => "${::cinder::params::volume_service}",
980       target => "${::cinder::params::scheduler_service}-clone",
981       score => "INFINITY",
982       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
983                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
984     }
985
986     # Glance
987     pacemaker::resource::service { $::glance::params::registry_service_name :
988       clone_params => "interleave=true",
989       require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
990     }
991     pacemaker::resource::service { $::glance::params::api_service_name :
992       clone_params => "interleave=true",
993     }
994
995     pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
996       constraint_type => 'order',
997       first_resource  => "${::keystone::params::service_name}-clone",
998       second_resource => "${::glance::params::registry_service_name}-clone",
999       first_action    => 'start',
1000       second_action   => 'start',
1001       require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1002                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
1003     }
1004     pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
1005       constraint_type => "order",
1006       first_resource  => "${::glance::params::registry_service_name}-clone",
1007       second_resource => "${::glance::params::api_service_name}-clone",
1008       first_action    => "start",
1009       second_action   => "start",
1010       require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1011                   Pacemaker::Resource::Service[$::glance::params::api_service_name]],
1012     }
1013     pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
1014       source  => "${::glance::params::api_service_name}-clone",
1015       target  => "${::glance::params::registry_service_name}-clone",
1016       score   => "INFINITY",
1017       require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
1018                   Pacemaker::Resource::Service[$::glance::params::api_service_name]],
1019     }
1020
    # Neutron
    # NOTE(gfidente): Neutron will try to populate the database with some data
    # as soon as neutron-server is started; to avoid races we want to make this
    # happen only on one node, before normal Pacemaker initialization
    # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
    # NOTE(review): this exec has no unless/onlyif guard, so it re-runs (and
    # restarts neutron-server outside Pacemaker's control) on every catalog
    # application -- confirm that is intentional before relying on idempotence.
    exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
    pacemaker::resource::service { $::neutron::params::server_service:
      op_params => "start timeout=90",
      clone_params   => "interleave=true",
      require => Pacemaker::Resource::Service[$::keystone::params::service_name]
    }
    # The neutron agents, each cloned across all controllers.
    pacemaker::resource::service { $::neutron::params::l3_agent_service:
      clone_params   => "interleave=true",
    }
    pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
      clone_params   => "interleave=true",
    }
    pacemaker::resource::service { $::neutron::params::ovs_agent_service:
      clone_params => "interleave=true",
    }
    pacemaker::resource::service { $::neutron::params::metadata_agent_service:
      clone_params => "interleave=true",
    }
    # OCF cleanup agents: clear stale OVS ports and network namespaces before
    # the openvswitch agent starts (ordering enforced by constraints below).
    pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
      ocf_agent_name => "neutron:OVSCleanup",
      clone_params => "interleave=true",
    }
    pacemaker::resource::ocf { 'neutron-netns-cleanup':
      ocf_agent_name => "neutron:NetnsCleanup",
      clone_params => "interleave=true",
    }
1052     pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
1053       constraint_type => "order",
1054       first_resource => "${::keystone::params::service_name}-clone",
1055       second_resource => "${::neutron::params::server_service}-clone",
1056       first_action => "start",
1057       second_action => "start",
1058       require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
1059                   Pacemaker::Resource::Service[$::neutron::params::server_service]],
1060     }
1061     pacemaker::constraint::base { 'neutron-server-to-neutron-ovs-cleanup-constraint':
1062       constraint_type => "order",
1063       first_resource => "${::neutron::params::server_service}-clone",
1064       second_resource => "${::neutron::params::ovs_cleanup_service}-clone",
1065       first_action => "start",
1066       second_action => "start",
1067       require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1068                   Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]],
1069     }
1070     pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
1071       constraint_type => "order",
1072       first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
1073       second_resource => "neutron-netns-cleanup-clone",
1074       first_action => "start",
1075       second_action => "start",
1076       require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
1077                   Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1078     }
1079     pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
1080       source => "neutron-netns-cleanup-clone",
1081       target => "${::neutron::params::ovs_cleanup_service}-clone",
1082       score => "INFINITY",
1083       require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
1084                   Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1085     }
1086     pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
1087       constraint_type => "order",
1088       first_resource => "neutron-netns-cleanup-clone",
1089       second_resource => "${::neutron::params::ovs_agent_service}-clone",
1090       first_action => "start",
1091       second_action => "start",
1092       require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
1093                   Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
1094     }
1095     pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
1096       source => "${::neutron::params::ovs_agent_service}-clone",
1097       target => "neutron-netns-cleanup-clone",
1098       score => "INFINITY",
1099       require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
1100                   Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
1101     }
1102     pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
1103       constraint_type => "order",
1104       first_resource => "${::neutron::params::ovs_agent_service}-clone",
1105       second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1106       first_action => "start",
1107       second_action => "start",
1108       require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
1109                   Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
1110
1111     }
1112     pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
1113       source => "${::neutron::params::dhcp_agent_service}-clone",
1114       target => "${::neutron::params::ovs_agent_service}-clone",
1115       score => "INFINITY",
1116       require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
1117                   Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
1118     }
1119     pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
1120       constraint_type => "order",
1121       first_resource => "${::neutron::params::dhcp_agent_service}-clone",
1122       second_resource => "${::neutron::params::l3_agent_service}-clone",
1123       first_action => "start",
1124       second_action => "start",
1125       require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
1126                   Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
1127     }
1128     pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
1129       source => "${::neutron::params::l3_agent_service}-clone",
1130       target => "${::neutron::params::dhcp_agent_service}-clone",
1131       score => "INFINITY",
1132       require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
1133                   Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
1134     }
1135     pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
1136       constraint_type => "order",
1137       first_resource => "${::neutron::params::l3_agent_service}-clone",
1138       second_resource => "${::neutron::params::metadata_agent_service}-clone",
1139       first_action => "start",
1140       second_action => "start",
1141       require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
1142                   Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
1143     }
1144     pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
1145       source => "${::neutron::params::metadata_agent_service}-clone",
1146       target => "${::neutron::params::l3_agent_service}-clone",
1147       score => "INFINITY",
1148       require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
1149                   Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
1150     }
1151
1152     # Nova
1153     pacemaker::resource::service { $::nova::params::api_service_name :
1154       clone_params    => "interleave=true",
1155       op_params       => "start timeout=90s monitor start-delay=10s",
1156     }
1157     pacemaker::resource::service { $::nova::params::conductor_service_name :
1158       clone_params    => "interleave=true",
1159       op_params       => "start timeout=90s monitor start-delay=10s",
1160     }
1161     pacemaker::resource::service { $::nova::params::consoleauth_service_name :
1162       clone_params    => "interleave=true",
1163       op_params       => "start timeout=90s monitor start-delay=10s",
1164       require         => Pacemaker::Resource::Service[$::keystone::params::service_name],
1165     }
1166     pacemaker::resource::service { $::nova::params::vncproxy_service_name :
1167       clone_params    => "interleave=true",
1168       op_params       => "start timeout=90s monitor start-delay=10s",
1169     }
1170     pacemaker::resource::service { $::nova::params::scheduler_service_name :
1171       clone_params    => "interleave=true",
1172       op_params       => "start timeout=90s monitor start-delay=10s",
1173     }
1174
1175     pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
1176       constraint_type => 'order',
1177       first_resource  => "${::keystone::params::service_name}-clone",
1178       second_resource => "${::nova::params::consoleauth_service_name}-clone",
1179       first_action    => 'start',
1180       second_action   => 'start',
1181       require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1182                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
1183     }
1184     pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
1185       constraint_type => "order",
1186       first_resource  => "${::nova::params::consoleauth_service_name}-clone",
1187       second_resource => "${::nova::params::vncproxy_service_name}-clone",
1188       first_action    => "start",
1189       second_action   => "start",
1190       require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1191                   Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1192     }
1193     pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
1194       source => "${::nova::params::vncproxy_service_name}-clone",
1195       target => "${::nova::params::consoleauth_service_name}-clone",
1196       score => "INFINITY",
1197       require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1198                   Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1199     }
1200     pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
1201       constraint_type => "order",
1202       first_resource  => "${::nova::params::vncproxy_service_name}-clone",
1203       second_resource => "${::nova::params::api_service_name}-clone",
1204       first_action    => "start",
1205       second_action   => "start",
1206       require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1207                   Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1208     }
1209     pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
1210       source => "${::nova::params::api_service_name}-clone",
1211       target => "${::nova::params::vncproxy_service_name}-clone",
1212       score => "INFINITY",
1213       require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1214                   Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1215     }
1216     pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
1217       constraint_type => "order",
1218       first_resource  => "${::nova::params::api_service_name}-clone",
1219       second_resource => "${::nova::params::scheduler_service_name}-clone",
1220       first_action    => "start",
1221       second_action   => "start",
1222       require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1223                   Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1224     }
1225     pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
1226       source => "${::nova::params::scheduler_service_name}-clone",
1227       target => "${::nova::params::api_service_name}-clone",
1228       score => "INFINITY",
1229       require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1230                   Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1231     }
1232     pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
1233       constraint_type => "order",
1234       first_resource  => "${::nova::params::scheduler_service_name}-clone",
1235       second_resource => "${::nova::params::conductor_service_name}-clone",
1236       first_action    => "start",
1237       second_action   => "start",
1238       require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1239                   Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1240     }
1241     pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
1242       source => "${::nova::params::conductor_service_name}-clone",
1243       target => "${::nova::params::scheduler_service_name}-clone",
1244       score => "INFINITY",
1245       require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1246                   Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1247     }
1248
    # Ceilometer
    # The central agent waits for Keystone (auth) and MongoDB (default
    # metering store); the other services are plain clones.
    pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
      clone_params => 'interleave=true',
      require      => [Pacemaker::Resource::Service[$::keystone::params::service_name],
                       Pacemaker::Resource::Service[$::mongodb::params::service_name]],
    }
    pacemaker::resource::service { $::ceilometer::params::collector_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
      clone_params => 'interleave=true',
    }
    # 10-second Delay OCF resource, inserted between ceilometer-api and the
    # alarm services by the constraints below.
    pacemaker::resource::ocf { 'delay' :
      ocf_agent_name  => 'heartbeat:Delay',
      clone_params    => 'interleave=true',
      resource_params => 'startdelay=10',
    }
    # Fedora doesn't know `require-all` parameter for constraints yet
    if $::operatingsystem == 'Fedora' {
      $redis_ceilometer_constraint_params = undef
    } else {
      $redis_ceilometer_constraint_params = 'require-all=false'
    }
1281     pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
1282       constraint_type   => 'order',
1283       first_resource    => "redis-master",
1284       second_resource   => "${::ceilometer::params::agent_central_service_name}-clone",
1285       first_action      => 'promote',
1286       second_action     => 'start',
1287       constraint_params => $redis_ceilometer_constraint_params,
1288       require           => [Pacemaker::Resource::Ocf['redis'],
1289                             Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
1290     }
1291     pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
1292       constraint_type => 'order',
1293       first_resource  => "${::keystone::params::service_name}-clone",
1294       second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1295       first_action    => 'start',
1296       second_action   => 'start',
1297       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1298                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
1299     }
1300     pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
1301       constraint_type => 'order',
1302       first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
1303       second_resource => "${::ceilometer::params::collector_service_name}-clone",
1304       first_action    => 'start',
1305       second_action   => 'start',
1306       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1307                           Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1308     }
1309     pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
1310       constraint_type => 'order',
1311       first_resource  => "${::ceilometer::params::collector_service_name}-clone",
1312       second_resource => "${::ceilometer::params::api_service_name}-clone",
1313       first_action    => 'start',
1314       second_action   => 'start',
1315       require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
1316                           Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
1317     }
1318     pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
1319       source  => "${::ceilometer::params::api_service_name}-clone",
1320       target  => "${::ceilometer::params::collector_service_name}-clone",
1321       score   => 'INFINITY',
1322       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1323                   Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1324     }
1325     pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
1326       constraint_type => 'order',
1327       first_resource  => "${::ceilometer::params::api_service_name}-clone",
1328       second_resource => 'delay-clone',
1329       first_action    => 'start',
1330       second_action   => 'start',
1331       require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1332                           Pacemaker::Resource::Ocf['delay']],
1333     }
1334     pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
1335       source  => 'delay-clone',
1336       target  => "${::ceilometer::params::api_service_name}-clone",
1337       score   => 'INFINITY',
1338       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1339                   Pacemaker::Resource::Ocf['delay']],
1340     }
1341     pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint':
1342       constraint_type => 'order',
1343       first_resource  => 'delay-clone',
1344       second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1345       first_action    => 'start',
1346       second_action   => 'start',
1347       require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
1348                           Pacemaker::Resource::Ocf['delay']],
1349     }
1350     pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation':
1351       source  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1352       target  => 'delay-clone',
1353       score   => 'INFINITY',
1354       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1355                   Pacemaker::Resource::Ocf['delay']],
1356     }
1357     pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint':
1358       constraint_type => 'order',
1359       first_resource  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1360       second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone",
1361       first_action    => 'start',
1362       second_action   => 'start',
1363       require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
1364                           Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
1365     }
1366     pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation':
1367       source  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
1368       target  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1369       score   => 'INFINITY',
1370       require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
1371                   Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
1372     }
1373     pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint':
1374       constraint_type => 'order',
1375       first_resource  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
1376       second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
1377       first_action    => 'start',
1378       second_action   => 'start',
1379       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
1380                           Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
1381     }
1382     pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation':
1383       source  => "${::ceilometer::params::agent_notification_service_name}-clone",
1384       target  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
1385       score   => 'INFINITY',
1386       require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
1387                   Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
1388     }
1389     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
1390       pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
1391         constraint_type => 'order',
1392         first_resource  => "${::mongodb::params::service_name}-clone",
1393         second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1394         first_action    => 'start',
1395         second_action   => 'start',
1396         require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1397                             Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1398       }
1399     }
1400
1401     # Heat
1402     pacemaker::resource::service { $::heat::params::api_service_name :
1403       clone_params => 'interleave=true',
1404     }
1405     pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
1406       clone_params => 'interleave=true',
1407     }
1408     pacemaker::resource::service { $::heat::params::api_cfn_service_name :
1409       clone_params => 'interleave=true',
1410     }
1411     pacemaker::resource::service { $::heat::params::engine_service_name :
1412       clone_params => 'interleave=true',
1413     }
    # Order constraint: Keystone must be started before heat-api, which
    # needs the identity service to authenticate requests.
    pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
1423     pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
1424       constraint_type => 'order',
1425       first_resource  => "${::heat::params::api_service_name}-clone",
1426       second_resource => "${::heat::params::api_cfn_service_name}-clone",
1427       first_action    => 'start',
1428       second_action   => 'start',
1429       require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1430                   Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1431     }
    # Mandatory colocation (score INFINITY): heat-api-cfn runs on the same
    # node(s) as heat-api.
    pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
      source  => "${::heat::params::api_cfn_service_name}-clone",
      target  => "${::heat::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_service_name]],
    }
1439     pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
1440       constraint_type => 'order',
1441       first_resource  => "${::heat::params::api_cfn_service_name}-clone",
1442       second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
1443       first_action    => 'start',
1444       second_action   => 'start',
1445       require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1446                   Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1447     }
    # Mandatory colocation (score INFINITY): heat-api-cloudwatch runs on the
    # same node(s) as heat-api-cfn.
    pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
      source  => "${::heat::params::api_cloudwatch_service_name}-clone",
      target  => "${::heat::params::api_cfn_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
    }
1455     pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
1456       constraint_type => 'order',
1457       first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
1458       second_resource => "${::heat::params::engine_service_name}-clone",
1459       first_action    => 'start',
1460       second_action   => 'start',
1461       require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1462                   Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1463     }
    # Mandatory colocation (score INFINITY): heat-engine runs on the same
    # node(s) as heat-api-cloudwatch, keeping the whole heat stack together.
    pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
      source  => "${::heat::params::engine_service_name}-clone",
      target  => "${::heat::params::api_cloudwatch_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                  Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
    # Order constraint: the ceilometer notification agent is started before
    # heat-api.
    pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
    }
1480
1481     # Horizon
1482     pacemaker::resource::service { $::horizon::params::http_service:
1483         clone_params => "interleave=true",
1484     }
1485
1486
1487   }
1488
1489 } #END STEP 4
1490
# Record a per-step package manifest under /var/lib/tripleo so the set of
# packages installed at this deployment step is tracked on disk.
$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
package_manifest { $package_manifest_name:
  ensure => present,
}