24a2f734af8a54fa89dd1196f7ce026bca41bff3
[apex-tripleo-heat-templates.git] / puppet / manifests / overcloud_controller_pacemaker.pp
1 # Copyright 2015 Red Hat, Inc.
2 # All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
14 # under the License.
15
16 Pcmk_resource <| |> {
17   tries     => 10,
18   try_sleep => 3,
19 }
20
21 include tripleo::packages
22
23 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
24   $pacemaker_master = true
25   $sync_db = true
26 } else {
27   $pacemaker_master = false
28   $sync_db = false
29 }
30
31 $enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5
32
33 # When to start and enable services which haven't been Pacemakerized
34 # FIXME: remove when we start all OpenStack services using Pacemaker
35 # (occurences of this variable will be gradually replaced with false)
36 $non_pcmk_start = hiera('step') >= 4
37
38 if hiera('step') >= 1 {
39
40   create_resources(sysctl::value, hiera('sysctl_settings'), {})
41
42   if count(hiera('ntp::servers')) > 0 {
43     include ::ntp
44   }
45
46   $controller_node_ips = split(hiera('controller_node_ips'), ',')
47   $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
48   class { '::tripleo::loadbalancer' :
49     controller_hosts       => $controller_node_ips,
50     controller_hosts_names => $controller_node_names,
51     manage_vip             => false,
52     mysql_clustercheck     => true,
53     haproxy_service_manage => false,
54   }
55
56   $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
57   user { 'hacluster':
58    ensure => present,
59   } ->
60   class { '::pacemaker':
61     hacluster_pwd => hiera('hacluster_pwd'),
62   } ->
63   class { '::pacemaker::corosync':
64     cluster_members => $pacemaker_cluster_members,
65     setup_cluster   => $pacemaker_master,
66   }
67   class { '::pacemaker::stonith':
68     disable => !$enable_fencing,
69   }
70   if $enable_fencing {
71     include tripleo::fencing
72
73     # enable stonith after all fencing devices have been created
74     Class['tripleo::fencing'] -> Class['pacemaker::stonith']
75   }
76
77   # Only configure RabbitMQ in this step, don't start it yet to
78   # avoid races where non-master nodes attempt to start without
79   # config (eg. binding on 0.0.0.0)
80   # The module ignores erlang_cookie if cluster_config is false
81   class { '::rabbitmq':
82     service_manage          => false,
83     tcp_keepalive           => false,
84     config_kernel_variables => hiera('rabbitmq_kernel_variables'),
85     config_variables        => hiera('rabbitmq_config_variables'),
86     environment_variables   => hiera('rabbitmq_environment'),
87   } ->
88   file { '/var/lib/rabbitmq/.erlang.cookie':
89     ensure  => 'present',
90     owner   => 'rabbitmq',
91     group   => 'rabbitmq',
92     mode    => '0400',
93     content => hiera('rabbitmq::erlang_cookie'),
94     replace => true,
95   }
96
97   if downcase(hiera('ceilometer_backend')) == 'mongodb' {
98     include ::mongodb::globals
99     class { '::mongodb::server' :
100       service_manage => false,
101     }
102   }
103
104   # Memcached
105   class {'::memcached' :
106     service_manage => false,
107   }
108
109   # Redis
110   class { '::redis' :
111     service_manage => false,
112     notify_service => false,
113   }
114
115   # Galera
116   if str2bool(hiera('enable_galera', 'true')) {
117     $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
118   } else {
119     $mysql_config_file = '/etc/my.cnf.d/server.cnf'
120   }
121   $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
122   $galera_nodes_count = count(split($galera_nodes, ','))
123
124   $mysqld_options = {
125     'mysqld' => {
126       'skip-name-resolve'             => '1',
127       'binlog_format'                 => 'ROW',
128       'default-storage-engine'        => 'innodb',
129       'innodb_autoinc_lock_mode'      => '2',
130       'innodb_locks_unsafe_for_binlog'=> '1',
131       'query_cache_size'              => '0',
132       'query_cache_type'              => '0',
133       'bind-address'                  => hiera('mysql_bind_host'),
134       'max_connections'               => hiera('mysql_max_connections'),
135       'open_files_limit'              => '-1',
136       'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
137       'wsrep_cluster_name'            => 'galera_cluster',
138       'wsrep_slave_threads'           => '1',
139       'wsrep_certify_nonPK'           => '1',
140       'wsrep_max_ws_rows'             => '131072',
141       'wsrep_max_ws_size'             => '1073741824',
142       'wsrep_debug'                   => '0',
143       'wsrep_convert_LOCK_to_trx'     => '0',
144       'wsrep_retry_autocommit'        => '1',
145       'wsrep_auto_increment_control'  => '1',
146       'wsrep_drupal_282555_workaround'=> '0',
147       'wsrep_causal_reads'            => '0',
148       'wsrep_notify_cmd'              => '',
149       'wsrep_sst_method'              => 'rsync',
150     }
151   }
152
153   class { '::mysql::server':
154     create_root_user   => false,
155     create_root_my_cnf => false,
156     config_file        => $mysql_config_file,
157     override_options   => $mysqld_options,
158     service_manage     => false,
159     service_enabled    => false,
160   }
161
162 }
163
164 if hiera('step') >= 2 {
165
166   # NOTE(gfidente): the following vars are needed on all nodes so they
167   # need to stay out of pacemaker_master conditional
168   $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
169   $mongodb_replset = hiera('mongodb::server::replset')
170
171   if $pacemaker_master {
172
173     # FIXME: we should not have to access tripleo::loadbalancer class
174     # parameters here to configure pacemaker VIPs. The configuration
175     # of pacemaker VIPs could move into puppet-tripleo or we should
176     # make use of less specific hiera parameters here for the settings.
177     pacemaker::resource::service { 'haproxy':
178       clone_params => true,
179     }
180
181     $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
182     pacemaker::resource::ip { 'control_vip':
183       ip_address => $control_vip,
184     }
185     pacemaker::constraint::base { 'control_vip-then-haproxy':
186       constraint_type   => 'order',
187       first_resource    => "ip-${control_vip}",
188       second_resource   => 'haproxy-clone',
189       first_action      => 'start',
190       second_action     => 'start',
191       constraint_params => 'kind=Optional',
192       require => [Pacemaker::Resource::Service['haproxy'],
193                   Pacemaker::Resource::Ip['control_vip']],
194     }
195     pacemaker::constraint::colocation { 'control_vip-with-haproxy':
196       source  => "ip-${control_vip}",
197       target  => 'haproxy-clone',
198       score   => 'INFINITY',
199       require => [Pacemaker::Resource::Service['haproxy'],
200                   Pacemaker::Resource::Ip['control_vip']],
201     }
202
203     $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
204     if $public_vip and $public_vip != $control_vip {
205       pacemaker::resource::ip { 'public_vip':
206         ip_address => $public_vip,
207       }
208       pacemaker::constraint::base { 'public_vip-then-haproxy':
209         constraint_type   => 'order',
210         first_resource    => "ip-${public_vip}",
211         second_resource   => 'haproxy-clone',
212         first_action      => 'start',
213         second_action     => 'start',
214         constraint_params => 'kind=Optional',
215         require => [Pacemaker::Resource::Service['haproxy'],
216                     Pacemaker::Resource::Ip['public_vip']],
217       }
218       pacemaker::constraint::colocation { 'public_vip-with-haproxy':
219         source  => "ip-${public_vip}",
220         target  => 'haproxy-clone',
221         score   => 'INFINITY',
222         require => [Pacemaker::Resource::Service['haproxy'],
223                     Pacemaker::Resource::Ip['public_vip']],
224       }
225     }
226
227     $redis_vip = hiera('redis_vip')
228     if $redis_vip and $redis_vip != $control_vip {
229       pacemaker::resource::ip { 'redis_vip':
230         ip_address => $redis_vip,
231       }
232       pacemaker::constraint::base { 'redis_vip-then-haproxy':
233         constraint_type   => 'order',
234         first_resource    => "ip-${redis_vip}",
235         second_resource   => 'haproxy-clone',
236         first_action      => 'start',
237         second_action     => 'start',
238         constraint_params => 'kind=Optional',
239         require => [Pacemaker::Resource::Service['haproxy'],
240                     Pacemaker::Resource::Ip['redis_vip']],
241       }
242       pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
243         source  => "ip-${redis_vip}",
244         target  => 'haproxy-clone',
245         score   => 'INFINITY',
246         require => [Pacemaker::Resource::Service['haproxy'],
247                     Pacemaker::Resource::Ip['redis_vip']],
248       }
249     }
250
251     $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
252     if $internal_api_vip and $internal_api_vip != $control_vip {
253       pacemaker::resource::ip { 'internal_api_vip':
254         ip_address => $internal_api_vip,
255       }
256       pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
257         constraint_type   => 'order',
258         first_resource    => "ip-${internal_api_vip}",
259         second_resource   => 'haproxy-clone',
260         first_action      => 'start',
261         second_action     => 'start',
262         constraint_params => 'kind=Optional',
263         require => [Pacemaker::Resource::Service['haproxy'],
264                     Pacemaker::Resource::Ip['internal_api_vip']],
265       }
266       pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
267         source  => "ip-${internal_api_vip}",
268         target  => 'haproxy-clone',
269         score   => 'INFINITY',
270         require => [Pacemaker::Resource::Service['haproxy'],
271                     Pacemaker::Resource::Ip['internal_api_vip']],
272       }
273     }
274
275     $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
276     if $storage_vip and $storage_vip != $control_vip {
277       pacemaker::resource::ip { 'storage_vip':
278         ip_address => $storage_vip,
279       }
280       pacemaker::constraint::base { 'storage_vip-then-haproxy':
281         constraint_type   => 'order',
282         first_resource    => "ip-${storage_vip}",
283         second_resource   => 'haproxy-clone',
284         first_action      => 'start',
285         second_action     => 'start',
286         constraint_params => 'kind=Optional',
287         require => [Pacemaker::Resource::Service['haproxy'],
288                     Pacemaker::Resource::Ip['storage_vip']],
289       }
290       pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
291         source  => "ip-${storage_vip}",
292         target  => 'haproxy-clone',
293         score   => 'INFINITY',
294         require => [Pacemaker::Resource::Service['haproxy'],
295                     Pacemaker::Resource::Ip['storage_vip']],
296       }
297     }
298
299     $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
300     if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
301       pacemaker::resource::ip { 'storage_mgmt_vip':
302         ip_address => $storage_mgmt_vip,
303       }
304       pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
305         constraint_type   => 'order',
306         first_resource    => "ip-${storage_mgmt_vip}",
307         second_resource   => 'haproxy-clone',
308         first_action      => 'start',
309         second_action     => 'start',
310         constraint_params => 'kind=Optional',
311         require => [Pacemaker::Resource::Service['haproxy'],
312                     Pacemaker::Resource::Ip['storage_mgmt_vip']],
313       }
314       pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
315         source  => "ip-${storage_mgmt_vip}",
316         target  => 'haproxy-clone',
317         score   => 'INFINITY',
318         require => [Pacemaker::Resource::Service['haproxy'],
319                     Pacemaker::Resource::Ip['storage_mgmt_vip']],
320       }
321     }
322
323     pacemaker::resource::service { $::memcached::params::service_name :
324       clone_params => true,
325       require      => Class['::memcached'],
326     }
327
328     pacemaker::resource::ocf { 'rabbitmq':
329       ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
330       resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
331       clone_params    => 'ordered=true interleave=true',
332       require         => Class['::rabbitmq'],
333     }
334
335     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
336       pacemaker::resource::service { $::mongodb::params::service_name :
337         op_params    => 'start timeout=120s',
338         clone_params => true,
339         require      => Class['::mongodb::server'],
340       }
341       # NOTE (spredzy) : The replset can only be run
342       # once all the nodes have joined the cluster.
343       mongodb_conn_validator { $mongo_node_ips_with_port :
344         timeout => '600',
345         require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
346         before  => Mongodb_replset[$mongodb_replset],
347       }
348       mongodb_replset { $mongodb_replset :
349         members => $mongo_node_ips_with_port,
350       }
351     }
352
353     pacemaker::resource::ocf { 'galera' :
354       ocf_agent_name  => 'heartbeat:galera',
355       op_params       => 'promote timeout=300s on-fail=block',
356       master_params   => '',
357       meta_params     => "master-max=${galera_nodes_count} ordered=true",
358       resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
359       require         => Class['::mysql::server'],
360       before          => Exec['galera-ready'],
361     }
362
363     pacemaker::resource::ocf { 'redis':
364       ocf_agent_name  => 'heartbeat:redis',
365       master_params   => '',
366       meta_params     => 'notify=true ordered=true interleave=true',
367       resource_params => 'wait_last_known_master=true',
368       require         => Class['::redis'],
369     }
370
371   }
372
373   exec { 'galera-ready' :
374     command     => '/usr/bin/clustercheck >/dev/null',
375     timeout     => 30,
376     tries       => 180,
377     try_sleep   => 10,
378     environment => ["AVAILABLE_WHEN_READONLY=0"],
379     require     => File['/etc/sysconfig/clustercheck'],
380   }
381
382   file { '/etc/sysconfig/clustercheck' :
383     ensure  => file,
384     content => "MYSQL_USERNAME=root\n
385 MYSQL_PASSWORD=''\n
386 MYSQL_HOST=localhost\n",
387   }
388
389   xinetd::service { 'galera-monitor' :
390     port           => '9200',
391     server         => '/usr/bin/clustercheck',
392     per_source     => 'UNLIMITED',
393     log_on_success => '',
394     log_on_failure => 'HOST',
395     flags          => 'REUSE',
396     service_type   => 'UNLISTED',
397     user           => 'root',
398     group          => 'root',
399     require        => File['/etc/sysconfig/clustercheck'],
400   }
401
402   # Create all the database schemas
403   # Example DSN format: mysql://user:password@host/dbname
404   if $sync_db {
405     $allowed_hosts = ['%',hiera('mysql_bind_host')]
406     $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]')
407     class { 'keystone::db::mysql':
408       user          => $keystone_dsn[3],
409       password      => $keystone_dsn[4],
410       host          => $keystone_dsn[5],
411       dbname        => $keystone_dsn[6],
412       allowed_hosts => $allowed_hosts,
413       require       => Exec['galera-ready'],
414     }
415     $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]')
416     class { 'glance::db::mysql':
417       user          => $glance_dsn[3],
418       password      => $glance_dsn[4],
419       host          => $glance_dsn[5],
420       dbname        => $glance_dsn[6],
421       allowed_hosts => $allowed_hosts,
422       require       => Exec['galera-ready'],
423     }
424     $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]')
425     class { 'nova::db::mysql':
426       user          => $nova_dsn[3],
427       password      => $nova_dsn[4],
428       host          => $nova_dsn[5],
429       dbname        => $nova_dsn[6],
430       allowed_hosts => $allowed_hosts,
431       require       => Exec['galera-ready'],
432     }
433     $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]')
434     class { 'neutron::db::mysql':
435       user          => $neutron_dsn[3],
436       password      => $neutron_dsn[4],
437       host          => $neutron_dsn[5],
438       dbname        => $neutron_dsn[6],
439       allowed_hosts => $allowed_hosts,
440       require       => Exec['galera-ready'],
441     }
442     $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]')
443     class { 'cinder::db::mysql':
444       user          => $cinder_dsn[3],
445       password      => $cinder_dsn[4],
446       host          => $cinder_dsn[5],
447       dbname        => $cinder_dsn[6],
448       allowed_hosts => $allowed_hosts,
449       require       => Exec['galera-ready'],
450     }
451     $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]')
452     class { 'heat::db::mysql':
453       user          => $heat_dsn[3],
454       password      => $heat_dsn[4],
455       host          => $heat_dsn[5],
456       dbname        => $heat_dsn[6],
457       allowed_hosts => $allowed_hosts,
458       require       => Exec['galera-ready'],
459     }
460     if downcase(hiera('ceilometer_backend')) == 'mysql' {
461       $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]')
462       class { 'ceilometer::db::mysql':
463         user          => $ceilometer_dsn[3],
464         password      => $ceilometer_dsn[4],
465         host          => $ceilometer_dsn[5],
466         dbname        => $ceilometer_dsn[6],
467         allowed_hosts => $allowed_hosts,
468         require       => Exec['galera-ready'],
469       }
470     }
471   }
472
473   # pre-install swift here so we can build rings
474   include ::swift
475
476   # Ceph
477   $cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false)
478   $enable_ceph = $cinder_enable_rbd_backend
479
480   if $enable_ceph {
481     class { 'ceph::profile::params':
482       mon_initial_members => downcase(hiera('ceph_mon_initial_members'))
483     }
484     include ::ceph::profile::mon
485   }
486
487   if str2bool(hiera('enable_ceph_storage', 'false')) {
488     include ::ceph::profile::client
489     include ::ceph::profile::osd
490   }
491
492
493 } #END STEP 2
494
495 if hiera('step') >= 3 {
496
497   class { '::keystone':
498     sync_db => $sync_db,
499     manage_service => false,
500     enabled => false,
501   }
502
503   #TODO: need a cleanup-keystone-tokens.sh solution here
504   keystone_config {
505     'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
506   }
507   file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
508     ensure  => 'directory',
509     owner   => 'keystone',
510     group   => 'keystone',
511     require => Package['keystone'],
512   }
513   file { '/etc/keystone/ssl/certs/signing_cert.pem':
514     content => hiera('keystone_signing_certificate'),
515     owner   => 'keystone',
516     group   => 'keystone',
517     notify  => Service['keystone'],
518     require => File['/etc/keystone/ssl/certs'],
519   }
520   file { '/etc/keystone/ssl/private/signing_key.pem':
521     content => hiera('keystone_signing_key'),
522     owner   => 'keystone',
523     group   => 'keystone',
524     notify  => Service['keystone'],
525     require => File['/etc/keystone/ssl/private'],
526   }
527   file { '/etc/keystone/ssl/certs/ca.pem':
528     content => hiera('keystone_ca_certificate'),
529     owner   => 'keystone',
530     group   => 'keystone',
531     notify  => Service['keystone'],
532     require => File['/etc/keystone/ssl/certs'],
533   }
534
535   $glance_backend = downcase(hiera('glance_backend', 'swift'))
536   case $glance_backend {
537       swift: { $backend_store = 'glance.store.swift.Store' }
538       file: { $backend_store = 'glance.store.filesystem.Store' }
539       rbd: { $backend_store = 'glance.store.rbd.Store' }
540       default: { fail('Unrecognized glance_backend parameter.') }
541   }
542   $http_store = ['glance.store.http.Store']
543   $glance_store = concat($http_store, $backend_store)
544
545   # TODO: notifications, scrubber, etc.
546   include ::glance
547   class { 'glance::api':
548     known_stores => $glance_store,
549     manage_service => false,
550     enabled => false,
551   }
552   class { '::glance::registry' :
553     sync_db => $sync_db,
554     manage_service => false,
555     enabled => false,
556   }
557   include join(['::glance::backend::', $glance_backend])
558
559   include ::nova
560
561   class { '::nova::api' :
562     sync_db => $sync_db,
563     manage_service => false,
564     enabled => false,
565   }
566   class { '::nova::cert' :
567     manage_service => false,
568     enabled => false,
569   }
570   class { '::nova::conductor' :
571     manage_service => false,
572     enabled => false,
573   }
574   class { '::nova::consoleauth' :
575     manage_service => false,
576     enabled => false,
577   }
578   class { '::nova::vncproxy' :
579     manage_service => false,
580     enabled => false,
581   }
582   class { '::nova::scheduler' :
583     manage_service => false,
584     enabled => false,
585   }
586   include ::nova::network::neutron
587
588   # Neutron class definitions
589   include ::neutron
590   class { '::neutron::server' :
591     sync_db => $sync_db,
592     manage_service => false,
593     enabled => false,
594   }
595   class { '::neutron::agents::dhcp' :
596     manage_service => false,
597     enabled => false,
598   }
599   class { '::neutron::agents::l3' :
600     manage_service => false,
601     enabled => false,
602   }
603   class { 'neutron::agents::metadata':
604     manage_service => false,
605     enabled => false,
606   }
607   file { '/etc/neutron/dnsmasq-neutron.conf':
608     content => hiera('neutron_dnsmasq_options'),
609     owner   => 'neutron',
610     group   => 'neutron',
611     notify  => Service['neutron-dhcp-service'],
612     require => Package['neutron'],
613   }
614   class { 'neutron::plugins::ml2':
615     flat_networks   => split(hiera('neutron_flat_networks'), ','),
616     tenant_network_types => [hiera('neutron_tenant_network_type')],
617   }
618   class { 'neutron::agents::ml2::ovs':
619     manage_service   => false,
620     enabled          => false,
621     bridge_mappings  => split(hiera('neutron_bridge_mappings'), ','),
622     tunnel_types     => split(hiera('neutron_tunnel_types'), ','),
623   }
624
625   include ::cinder
626   class { '::cinder::api':
627     sync_db => $sync_db,
628     manage_service => false,
629     enabled => false,
630   }
631   class { '::cinder::scheduler' :
632     manage_service => false,
633     enabled => false,
634   }
635   class { '::cinder::volume' :
636     manage_service => false,
637     enabled => false,
638   }
639   include ::cinder::glance
640   class {'cinder::setup_test_volume':
641     size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
642   }
643
644   $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
645   if $cinder_enable_iscsi {
646     $cinder_iscsi_backend = 'tripleo_iscsi'
647
648     cinder::backend::iscsi { $cinder_iscsi_backend :
649       iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
650       iscsi_helper     => hiera('cinder_iscsi_helper'),
651     }
652   }
653
654   if $enable_ceph {
655
656     Ceph_pool {
657       pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
658       pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
659       size    => hiera('ceph::profile::params::osd_pool_default_size'),
660     }
661
662     $ceph_pools = hiera('ceph_pools')
663     ceph::pool { $ceph_pools : }
664   }
665
666   if $cinder_enable_rbd_backend {
667     $cinder_rbd_backend = 'tripleo_ceph'
668
669     cinder_config {
670       "${cinder_rbd_backend}/host": value => 'hostgroup';
671     }
672
673     cinder::backend::rbd { $cinder_rbd_backend :
674       rbd_pool        => 'volumes',
675       rbd_user        => 'openstack',
676       rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
677       require         => Ceph::Pool['volumes'],
678     }
679   }
680
681   if hiera('cinder_enable_netapp_backend', false) {
682     $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
683
684     cinder_config {
685       "${cinder_netapp_backend}/host": value => 'hostgroup';
686     }
687
688     if hiera('cinder::backend::netapp::nfs_shares', undef) {
689       $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
690     }
691
692     cinder::backend::netapp { $cinder_netapp_backend :
693       netapp_login                 => hiera('cinder::backend::netapp::netapp_login', undef),
694       netapp_password              => hiera('cinder::backend::netapp::netapp_password', undef),
695       netapp_server_hostname       => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
696       netapp_server_port           => hiera('cinder::backend::netapp::netapp_server_port', undef),
697       netapp_size_multiplier       => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
698       netapp_storage_family        => hiera('cinder::backend::netapp::netapp_storage_family', undef),
699       netapp_storage_protocol      => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
700       netapp_transport_type        => hiera('cinder::backend::netapp::netapp_transport_type', undef),
701       netapp_vfiler                => hiera('cinder::backend::netapp::netapp_vfiler', undef),
702       netapp_volume_list           => hiera('cinder::backend::netapp::netapp_volume_list', undef),
703       netapp_vserver               => hiera('cinder::backend::netapp::netapp_vserver', undef),
704       netapp_partner_backend_name  => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
705       nfs_shares                   => $cinder_netapp_nfs_shares,
706       nfs_shares_config            => hiera('cinder::backend::netapp::nfs_shares_config', undef),
707       netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
708       netapp_controller_ips        => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
709       netapp_sa_password           => hiera('cinder::backend::netapp::netapp_sa_password', undef),
710       netapp_storage_pools         => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
711       netapp_eseries_host_type     => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
712       netapp_webservice_path       => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
713     }
714   }
715
716   $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend])
717   class { '::cinder::backends' :
718     enabled_backends => $cinder_enabled_backends,
719   }
720
721   # swift proxy
722   class { '::swift::proxy' :
723     manage_service => $non_pcmk_start,
724     enabled => $non_pcmk_start,
725   }
726   include ::swift::proxy::proxy_logging
727   include ::swift::proxy::healthcheck
728   include ::swift::proxy::cache
729   include ::swift::proxy::keystone
730   include ::swift::proxy::authtoken
731   include ::swift::proxy::staticweb
732   include ::swift::proxy::ratelimit
733   include ::swift::proxy::catch_errors
734   include ::swift::proxy::tempurl
735   include ::swift::proxy::formpost
736
737   # swift storage
738   if str2bool(hiera('enable_swift_storage', 'true')) {
739     class {'::swift::storage::all':
740       mount_check => str2bool(hiera('swift_mount_check'))
741     }
742     class {'::swift::storage::account':
743       manage_service => $non_pcmk_start,
744       enabled => $non_pcmk_start,
745     }
746     class {'::swift::storage::container':
747       manage_service => $non_pcmk_start,
748       enabled => $non_pcmk_start,
749     }
750     class {'::swift::storage::object':
751       manage_service => $non_pcmk_start,
752       enabled => $non_pcmk_start,
753     }
754     if(!defined(File['/srv/node'])) {
755       file { '/srv/node':
756         ensure  => directory,
757         owner   => 'swift',
758         group   => 'swift',
759         require => Package['openstack-swift'],
760       }
761     }
762     $swift_components = ['account', 'container', 'object']
763     swift::storage::filter::recon { $swift_components : }
764     swift::storage::filter::healthcheck { $swift_components : }
765   }
766
767   # Ceilometer
768   $ceilometer_backend = downcase(hiera('ceilometer_backend'))
769   case $ceilometer_backend {
770     /mysql/ : {
771       $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
772     }
773     default : {
774       $mongo_node_string = join($mongo_node_ips_with_port, ',')
775       $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
776     }
777   }
778   include ::ceilometer
779   class { '::ceilometer::api' :
780     manage_service => false,
781     enabled => false,
782   }
783   class { '::ceilometer::agent::notification' :
784     manage_service => false,
785     enabled => false,
786   }
787   class { '::ceilometer::agent::central' :
788     manage_service => false,
789     enabled => false,
790   }
791   class { '::ceilometer::alarm::notifier' :
792     manage_service => false,
793     enabled => false,
794   }
795   class { '::ceilometer::alarm::evaluator' :
796     manage_service => false,
797     enabled => false,
798   }
799   class { '::ceilometer::collector' :
800     manage_service => false,
801     enabled => false,
802   }
803   include ::ceilometer::expirer
804   class { '::ceilometer::db' :
805     database_connection => $ceilometer_database_connection,
806     sync_db             => $sync_db,
807   }
808   include ceilometer::agent::auth
809
810   Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
811
812   # Heat
813   class { '::heat' :
814     sync_db => $sync_db,
815   }
816   class { '::heat::api' :
817     manage_service => false,
818     enabled => false,
819   }
820   class { '::heat::api_cfn' :
821     manage_service => false,
822     enabled => false,
823   }
824   class { '::heat::api_cloudwatch' :
825     manage_service => false,
826     enabled => false,
827   }
828   class { '::heat::engine' :
829     manage_service => false,
830     enabled => false,
831   }
832
833   # httpd/apache and horizon
834   # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
835   include ::apache
836   include ::apache::mod::status
837   $vhost_params = {
838     add_listen => false,
839     priority   => 10,
840   }
841   class { 'horizon':
842     cache_server_ip    => hiera('memcache_node_ips', '127.0.0.1'),
843     vhost_extra_params => $vhost_params,
844     server_aliases     => $::hostname,
845   }
846
847   $snmpd_user = hiera('snmpd_readonly_user_name')
848   snmp::snmpv3_user { $snmpd_user:
849     authtype => 'MD5',
850     authpass => hiera('snmpd_readonly_user_password'),
851   }
852   class { 'snmp':
853     agentaddress => ['udp:161','udp6:[::1]:161'],
854     snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
855   }
856
857   hiera_include('controller_classes')
858
859 } #END STEP 3
860
861 if hiera('step') >= 4 {
862   if $pacemaker_master {
863
864     # Keystone
865     pacemaker::resource::service { $::keystone::params::service_name :
866       clone_params => "interleave=true",
867     }
868
869     # Cinder
870     pacemaker::resource::service { $::cinder::params::api_service :
871       clone_params => "interleave=true",
872       require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
873     }
874     pacemaker::resource::service { $::cinder::params::scheduler_service :
875       clone_params => "interleave=true",
876     }
877     pacemaker::resource::service { $::cinder::params::volume_service : }
878
879     pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
880       constraint_type => 'order',
881       first_resource  => "${::keystone::params::service_name}-clone",
882       second_resource => "${::cinder::params::api_service}-clone",
883       first_action    => 'start',
884       second_action   => 'start',
885       require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
886                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
887     }
888     pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
889       constraint_type => "order",
890       first_resource => "${::cinder::params::api_service}-clone",
891       second_resource => "${::cinder::params::scheduler_service}-clone",
892       first_action => "start",
893       second_action => "start",
894       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
895                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
896     }
897     pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
898       source => "${::cinder::params::scheduler_service}-clone",
899       target => "${::cinder::params::api_service}-clone",
900       score => "INFINITY",
901       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
902                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
903     }
904     pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
905       constraint_type => "order",
906       first_resource => "${::cinder::params::scheduler_service}-clone",
907       second_resource => "${::cinder::params::volume_service}",
908       first_action => "start",
909       second_action => "start",
910       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
911                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
912     }
913     pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
914       source => "${::cinder::params::volume_service}",
915       target => "${::cinder::params::scheduler_service}-clone",
916       score => "INFINITY",
917       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
918                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
919     }
920
921     # Glance
922     pacemaker::resource::service { $::glance::params::registry_service_name :
923       clone_params => "interleave=true",
924       require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
925     }
926     pacemaker::resource::service { $::glance::params::api_service_name :
927       clone_params => "interleave=true",
928     }
929
930     pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
931       constraint_type => 'order',
932       first_resource  => "${::keystone::params::service_name}-clone",
933       second_resource => "${::glance::params::registry_service_name}-clone",
934       first_action    => 'start',
935       second_action   => 'start',
936       require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
937                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
938     }
939     pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
940       constraint_type => "order",
941       first_resource  => "${::glance::params::registry_service_name}-clone",
942       second_resource => "${::glance::params::api_service_name}-clone",
943       first_action    => "start",
944       second_action   => "start",
945       require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
946                   Pacemaker::Resource::Service[$::glance::params::api_service_name]],
947     }
948     pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
949       source  => "${::glance::params::api_service_name}-clone",
950       target  => "${::glance::params::registry_service_name}-clone",
951       score   => "INFINITY",
952       require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
953                   Pacemaker::Resource::Service[$::glance::params::api_service_name]],
954     }
955
956     # Neutron
957     # NOTE(gfidente): Neutron will try to populate the database with some data
958     # as soon as neutron-server is started; to avoid races we want to make this
959     # happen only on one node, before normal Pacemaker initialization
960     # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
961     exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
962     pacemaker::resource::service { $::neutron::params::server_service:
963       op_params => "start timeout=90",
964       clone_params   => "interleave=true",
965       require => Pacemaker::Resource::Service[$::keystone::params::service_name]
966     }
967     pacemaker::resource::service { $::neutron::params::l3_agent_service:
968       clone_params   => "interleave=true",
969     }
970     pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
971       clone_params   => "interleave=true",
972     }
973     pacemaker::resource::service { $::neutron::params::ovs_agent_service:
974       clone_params => "interleave=true",
975     }
976     pacemaker::resource::service { $::neutron::params::metadata_agent_service:
977       clone_params => "interleave=true",
978     }
979     pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
980       ocf_agent_name => "neutron:OVSCleanup",
981       clone_params => "interleave=true",
982     }
983     pacemaker::resource::ocf { 'neutron-netns-cleanup':
984       ocf_agent_name => "neutron:NetnsCleanup",
985       clone_params => "interleave=true",
986     }
987     pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
988       constraint_type => "order",
989       first_resource => "${::keystone::params::service_name}-clone",
990       second_resource => "${::neutron::params::server_service}-clone",
991       first_action => "start",
992       second_action => "start",
993       require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
994                   Pacemaker::Resource::Service[$::neutron::params::server_service]],
995     }
996     pacemaker::constraint::base { 'neutron-server-to-neutron-ovs-cleanup-constraint':
997       constraint_type => "order",
998       first_resource => "${::neutron::params::server_service}-clone",
999       second_resource => "${::neutron::params::ovs_cleanup_service}-clone",
1000       first_action => "start",
1001       second_action => "start",
1002       require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1003                   Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]],
1004     }
1005     pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
1006       constraint_type => "order",
1007       first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
1008       second_resource => "neutron-netns-cleanup-clone",
1009       first_action => "start",
1010       second_action => "start",
1011       require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
1012                   Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1013     }
1014     pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
1015       source => "neutron-netns-cleanup-clone",
1016       target => "${::neutron::params::ovs_cleanup_service}-clone",
1017       score => "INFINITY",
1018       require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
1019                   Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1020     }
1021     pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
1022       constraint_type => "order",
1023       first_resource => "neutron-netns-cleanup-clone",
1024       second_resource => "${::neutron::params::ovs_agent_service}-clone",
1025       first_action => "start",
1026       second_action => "start",
1027       require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
1028                   Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
1029     }
1030     pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
1031       source => "${::neutron::params::ovs_agent_service}-clone",
1032       target => "neutron-netns-cleanup-clone",
1033       score => "INFINITY",
1034       require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
1035                   Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
1036     }
1037     pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
1038       constraint_type => "order",
1039       first_resource => "${::neutron::params::ovs_agent_service}-clone",
1040       second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1041       first_action => "start",
1042       second_action => "start",
1043       require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
1044                   Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
1045
1046     }
1047     pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
1048       source => "${::neutron::params::dhcp_agent_service}-clone",
1049       target => "${::neutron::params::ovs_agent_service}-clone",
1050       score => "INFINITY",
1051       require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
1052                   Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
1053     }
1054     pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
1055       constraint_type => "order",
1056       first_resource => "${::neutron::params::dhcp_agent_service}-clone",
1057       second_resource => "${::neutron::params::l3_agent_service}-clone",
1058       first_action => "start",
1059       second_action => "start",
1060       require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
1061                   Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
1062     }
1063     pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
1064       source => "${::neutron::params::l3_agent_service}-clone",
1065       target => "${::neutron::params::dhcp_agent_service}-clone",
1066       score => "INFINITY",
1067       require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
1068                   Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
1069     }
1070     pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
1071       constraint_type => "order",
1072       first_resource => "${::neutron::params::l3_agent_service}-clone",
1073       second_resource => "${::neutron::params::metadata_agent_service}-clone",
1074       first_action => "start",
1075       second_action => "start",
1076       require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
1077                   Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
1078     }
1079     pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
1080       source => "${::neutron::params::metadata_agent_service}-clone",
1081       target => "${::neutron::params::l3_agent_service}-clone",
1082       score => "INFINITY",
1083       require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
1084                   Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
1085     }
1086
1087     # Nova
1088     pacemaker::resource::service { $::nova::params::api_service_name :
1089       clone_params    => "interleave=true",
1090       op_params       => "monitor start-delay=10s",
1091     }
1092     pacemaker::resource::service { $::nova::params::conductor_service_name :
1093       clone_params    => "interleave=true",
1094       op_params       => "monitor start-delay=10s",
1095     }
1096     pacemaker::resource::service { $::nova::params::consoleauth_service_name :
1097       clone_params    => "interleave=true",
1098       op_params       => "monitor start-delay=10s",
1099       require         => Pacemaker::Resource::Service[$::keystone::params::service_name],
1100     }
1101     pacemaker::resource::service { $::nova::params::vncproxy_service_name :
1102       clone_params    => "interleave=true",
1103       op_params       => "monitor start-delay=10s",
1104     }
1105     pacemaker::resource::service { $::nova::params::scheduler_service_name :
1106       clone_params    => "interleave=true",
1107       op_params       => "monitor start-delay=10s",
1108     }
1109
1110     pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
1111       constraint_type => 'order',
1112       first_resource  => "${::keystone::params::service_name}-clone",
1113       second_resource => "${::nova::params::consoleauth_service_name}-clone",
1114       first_action    => 'start',
1115       second_action   => 'start',
1116       require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1117                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
1118     }
1119     pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
1120       constraint_type => "order",
1121       first_resource  => "${::nova::params::consoleauth_service_name}-clone",
1122       second_resource => "${::nova::params::vncproxy_service_name}-clone",
1123       first_action    => "start",
1124       second_action   => "start",
1125       require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1126                   Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1127     }
1128     pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
1129       source => "${::nova::params::vncproxy_service_name}-clone",
1130       target => "${::nova::params::consoleauth_service_name}-clone",
1131       score => "INFINITY",
1132       require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1133                   Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1134     }
1135     # FIXME(gfidente): novncproxy will not start unless websockify is updated to 0.6
1136     # which is not the case for f20 nor f21; ucomment when it becomes available
1137     #pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
1138     #  constraint_type => "order",
1139     #  first_resource  => "${::nova::params::vncproxy_service_name}-clone",
1140     #  second_resource => "${::nova::params::api_service_name}-clone",
1141     #  first_action    => "start",
1142     #  second_action   => "start",
1143     #  require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1144     #              Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1145     #}
1146     #pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
1147     #  source => "${::nova::params::api_service_name}-clone",
1148     #  target => "${::nova::params::vncproxy_service_name}-clone",
1149     #  score => "INFINITY",
1150     #  require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1151     #              Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1152     #}
1153     pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
1154       constraint_type => "order",
1155       first_resource  => "${::nova::params::api_service_name}-clone",
1156       second_resource => "${::nova::params::scheduler_service_name}-clone",
1157       first_action    => "start",
1158       second_action   => "start",
1159       require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1160                   Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1161     }
1162     pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
1163       source => "${::nova::params::scheduler_service_name}-clone",
1164       target => "${::nova::params::api_service_name}-clone",
1165       score => "INFINITY",
1166       require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1167                   Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1168     }
1169     pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
1170       constraint_type => "order",
1171       first_resource  => "${::nova::params::scheduler_service_name}-clone",
1172       second_resource => "${::nova::params::conductor_service_name}-clone",
1173       first_action    => "start",
1174       second_action   => "start",
1175       require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1176                   Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1177     }
1178     pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
1179       source => "${::nova::params::conductor_service_name}-clone",
1180       target => "${::nova::params::scheduler_service_name}-clone",
1181       score => "INFINITY",
1182       require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1183                   Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1184     }
1185
1186     # Ceilometer
1187     pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
1188       clone_params => 'interleave=true',
1189       require      => [Pacemaker::Resource::Service[$::keystone::params::service_name],
1190                        Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1191     }
1192     pacemaker::resource::service { $::ceilometer::params::collector_service_name :
1193       clone_params => 'interleave=true',
1194     }
1195     pacemaker::resource::service { $::ceilometer::params::api_service_name :
1196       clone_params => 'interleave=true',
1197     }
1198     pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name :
1199       clone_params => 'interleave=true',
1200     }
1201     pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name :
1202       clone_params => 'interleave=true',
1203     }
1204     pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
1205       clone_params => 'interleave=true',
1206     }
1207     pacemaker::resource::ocf { 'delay' :
1208       ocf_agent_name  => 'heartbeat:Delay',
1209       clone_params    => 'interleave=true',
1210       resource_params => 'startdelay=10',
1211     }
1212     pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
1213       constraint_type => 'order',
1214       first_resource  => "${::keystone::params::service_name}-clone",
1215       second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1216       first_action    => 'start',
1217       second_action   => 'start',
1218       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1219                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
1220     }
1221     pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
1222       constraint_type => 'order',
1223       first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
1224       second_resource => "${::ceilometer::params::collector_service_name}-clone",
1225       first_action    => 'start',
1226       second_action   => 'start',
1227       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1228                           Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1229     }
1230     pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
1231       constraint_type => 'order',
1232       first_resource  => "${::ceilometer::params::collector_service_name}-clone",
1233       second_resource => "${::ceilometer::params::api_service_name}-clone",
1234       first_action    => 'start',
1235       second_action   => 'start',
1236       require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
1237                           Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
1238     }
1239     pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
1240       source  => "${::ceilometer::params::api_service_name}-clone",
1241       target  => "${::ceilometer::params::collector_service_name}-clone",
1242       score   => 'INFINITY',
1243       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1244                   Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1245     }
    pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::api_service_name}-clone",
      second_resource => 'delay-clone',
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                          Pacemaker::Resource::Ocf['delay']],
    }
    pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
      source  => 'delay-clone',
      target  => "${::ceilometer::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Ocf['delay']],
    }
    pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint':
      constraint_type => 'order',
      first_resource  => 'delay-clone',
      second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
                          Pacemaker::Resource::Ocf['delay']],
    }
    pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation':
      source  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
      target  => 'delay-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
                  Pacemaker::Resource::Ocf['delay']],
    }
    pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
      second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
    }
    pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation':
      source  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
      target  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
      second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
    }
    pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation':
      source  => "${::ceilometer::params::agent_notification_service_name}-clone",
      target  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
    }
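    # When MongoDB is the metering backend, make sure mongod has started
    # before the ceilometer central agent tries to reach it.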
    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
        constraint_type => 'order',
        first_resource  => "${::mongodb::params::service_name}-clone",
        second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                            Pacemaker::Resource::Service[$::mongodb::params::service_name]],
      }
    }
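    # For reference, each order/colocation pair declared above maps roughly to
    # a pair of pcs commands such as (clone names shown are illustrative and
    # depend on the distribution's service names):
    #   pcs constraint order start openstack-ceilometer-api-clone then start delay-clone
    #   pcs constraint colocation add delay-clone with openstack-ceilometer-api-clone INFINITY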

    # Heat
    pacemaker::resource::service { $::heat::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cfn_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::engine_service_name :
      clone_params => 'interleave=true',
    }
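    # The Heat services are started strictly in sequence behind Keystone:
    # keystone -> heat-api -> heat-api-cfn -> heat-api-cloudwatch -> heat-engine,
    # with each link in the chain also colocated so the services run together.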
    pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
    pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_service_name}-clone",
      second_resource => "${::heat::params::api_cfn_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
      source  => "${::heat::params::api_cfn_service_name}-clone",
      target  => "${::heat::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_service_name]],
    }
    pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cfn_service_name}-clone",
      second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                          Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
      source  => "${::heat::params::api_cloudwatch_service_name}-clone",
      target  => "${::heat::params::api_cfn_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
    }
    pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
      second_resource => "${::heat::params::engine_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                          Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
      source  => "${::heat::params::engine_service_name}-clone",
      target  => "${::heat::params::api_cloudwatch_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                  Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
    }

    # Horizon
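    # Horizon is served by httpd, so the web server is simply cloned across
    # the controllers; no start-order constraints against other services are
    # declared for it here.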
    pacemaker::resource::service { $::horizon::params::http_service:
      clone_params => 'interleave=true',
    }

  }

} #END STEP 4