8303245f9c77a9abdc1d519714196d2f712d0f2f
[apex-tripleo-heat-templates.git] / puppet / manifests / overcloud_controller_pacemaker.pp
1 # Copyright 2015 Red Hat, Inc.
2 # All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
14 # under the License.
15
# Resource-collector override: apply these retry defaults to every
# Pcmk_resource declared anywhere in this catalog, so transient pcs/CIB
# failures are retried instead of failing the puppet run outright.
Pcmk_resource <| |> {
  tries     => 10,
  try_sleep => 3,
}
20
include tripleo::packages

# Elect a single "master" controller: the node whose hostname matches the
# deployment-selected bootstrap node performs one-time cluster setup and
# database schema creation; all other controllers skip those steps.
if $::hostname == downcase(hiera('bootstrap_nodeid')) {
  $pacemaker_master = true
  $sync_db = true
} else {
  $pacemaker_master = false
  $sync_db = false
}

# Fencing is only enabled once the deployment reaches step 5, i.e. after
# the fencing devices themselves have been configured.
# NOTE(review): puppet's low-precedence 'and' can make this parse as
# ($enable_fencing = str2bool(...)) and (hiera('step') >= 5) on some
# parser versions — confirm against the target puppet release.
$enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5

# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 4
37
# Step 1: base OS and cluster plumbing. Configure — but deliberately do
# not start — the services that Pacemaker will manage in step 2, and
# bootstrap the corosync/pacemaker cluster itself.
if hiera('step') >= 1 {

  # Apply kernel sysctl settings supplied via hiera.
  create_resources(sysctl::value, hiera('sysctl_settings'), {})

  # Only manage NTP when servers were actually provided.
  if count(hiera('ntp::servers')) > 0 {
    include ::ntp
  }

  # HAProxy is configured here, but its service and the VIPs are owned by
  # Pacemaker (step 2) — hence manage_vip/haproxy_service_manage => false.
  $controller_node_ips = split(hiera('controller_node_ips'), ',')
  $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
  class { '::tripleo::loadbalancer' :
    controller_hosts       => $controller_node_ips,
    controller_hosts_names => $controller_node_names,
    manage_vip             => false,
    mysql_clustercheck     => true,
    haproxy_service_manage => false,
  }

  # Bootstrap corosync/pacemaker. The chained arrows enforce ordering:
  # hacluster user -> pacemaker install/config -> corosync cluster.
  # Only the elected master actually sets up the cluster.
  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
  user { 'hacluster':
   ensure => present,
  } ->
  class { '::pacemaker':
    hacluster_pwd => hiera('hacluster_pwd'),
  } ->
  class { '::pacemaker::corosync':
    cluster_members => $pacemaker_cluster_members,
    setup_cluster   => $pacemaker_master,
  }
  # Stonith stays disabled until fencing is enabled (step >= 5).
  class { '::pacemaker::stonith':
    disable => !$enable_fencing,
  }
  if $enable_fencing {
    include tripleo::fencing

    # enable stonith after all fencing devices have been created
    Class['tripleo::fencing'] -> Class['pacemaker::stonith']
  }

  # FIXME(gfidente): sets 90secs as default start timeout op
  # param; until we can use pcmk global defaults we'll still
  # need to add it to every resource which redefines op params
  Pacemaker::Resource::Service {
    op_params => 'start timeout=90s',
  }

  # Only configure RabbitMQ in this step, don't start it yet to
  # avoid races where non-master nodes attempt to start without
  # config (eg. binding on 0.0.0.0)
  # The module ignores erlang_cookie if cluster_config is false
  class { '::rabbitmq':
    service_manage          => false,
    tcp_keepalive           => false,
    config_kernel_variables => hiera('rabbitmq_kernel_variables'),
    config_variables        => hiera('rabbitmq_config_variables'),
    environment_variables   => hiera('rabbitmq_environment'),
  } ->
  # All cluster members share the same erlang cookie so they can join one
  # RabbitMQ cluster; written after the class above so the rabbitmq
  # user/group exist.
  file { '/var/lib/rabbitmq/.erlang.cookie':
    ensure  => 'present',
    owner   => 'rabbitmq',
    group   => 'rabbitmq',
    mode    => '0400',
    content => hiera('rabbitmq::erlang_cookie'),
    replace => true,
  }

  # MongoDB is only needed when it backs Ceilometer.
  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
    include ::mongodb::globals
    class { '::mongodb::server' :
      service_manage => false,
    }
  }

  # Memcached
  class {'::memcached' :
    service_manage => false,
  }

  # Redis
  class { '::redis' :
    service_manage => false,
    notify_service => false,
  }

  # Galera
  if str2bool(hiera('enable_galera', 'true')) {
    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
  } else {
    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
  }
  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
  $galera_nodes_count = count(split($galera_nodes, ','))

  # wsrep/Galera tuning written into the MySQL config file. The cluster
  # address itself is passed later via the pacemaker galera OCF resource
  # (step 2), not here.
  $mysqld_options = {
    'mysqld' => {
      'skip-name-resolve'             => '1',
      'binlog_format'                 => 'ROW',
      'default-storage-engine'        => 'innodb',
      'innodb_autoinc_lock_mode'      => '2',
      'innodb_locks_unsafe_for_binlog'=> '1',
      'query_cache_size'              => '0',
      'query_cache_type'              => '0',
      'bind-address'                  => hiera('mysql_bind_host'),
      'max_connections'               => hiera('mysql_max_connections'),
      'open_files_limit'              => '-1',
      'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
      'wsrep_cluster_name'            => 'galera_cluster',
      'wsrep_slave_threads'           => '1',
      'wsrep_certify_nonPK'           => '1',
      'wsrep_max_ws_rows'             => '131072',
      'wsrep_max_ws_size'             => '1073741824',
      'wsrep_debug'                   => '0',
      'wsrep_convert_LOCK_to_trx'     => '0',
      'wsrep_retry_autocommit'        => '1',
      'wsrep_auto_increment_control'  => '1',
      'wsrep_drupal_282555_workaround'=> '0',
      'wsrep_causal_reads'            => '0',
      'wsrep_notify_cmd'              => '',
      'wsrep_sst_method'              => 'rsync',
    }
  }

  # MySQL is configured but left stopped/disabled: the pacemaker galera
  # resource (declared in step 2) owns the service lifecycle.
  class { '::mysql::server':
    create_root_user   => false,
    create_root_my_cnf => false,
    config_file        => $mysql_config_file,
    override_options   => $mysqld_options,
    service_manage     => false,
    service_enabled    => false,
  }

}
170
171 if hiera('step') >= 2 {
172
  # NOTE(gfidente): the following vars are needed on all nodes so they
  # need to stay out of pacemaker_master conditional
  # (27017 is the standard mongod port appended to every node IP).
  $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
  $mongodb_replset = hiera('mongodb::server::replset')
177
  # Everything in here is cluster-wide state and is declared only on the
  # elected master node; pcs propagates it to the other members.
  if $pacemaker_master {

    # FIXME: we should not have to access tripleo::loadbalancer class
    # parameters here to configure pacemaker VIPs. The configuration
    # of pacemaker VIPs could move into puppet-tripleo or we should
    # make use of less specific hiera parameters here for the settings.
    pacemaker::resource::service { 'haproxy':
      clone_params => true,
    }

    # Every VIP below follows the same three-resource pattern:
    #   1) an IP resource for the address,
    #   2) an order constraint so the VIP starts before haproxy-clone
    #      (kind=Optional makes the ordering advisory only),
    #   3) an INFINITY colocation pinning the VIP to wherever haproxy runs.
    $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
    pacemaker::resource::ip { 'control_vip':
      ip_address => $control_vip,
    }
    pacemaker::constraint::base { 'control_vip-then-haproxy':
      constraint_type   => 'order',
      first_resource    => "ip-${control_vip}",
      second_resource   => 'haproxy-clone',
      first_action      => 'start',
      second_action     => 'start',
      constraint_params => 'kind=Optional',
      require => [Pacemaker::Resource::Service['haproxy'],
                  Pacemaker::Resource::Ip['control_vip']],
    }
    pacemaker::constraint::colocation { 'control_vip-with-haproxy':
      source  => "ip-${control_vip}",
      target  => 'haproxy-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service['haproxy'],
                  Pacemaker::Resource::Ip['control_vip']],
    }

    # The remaining VIPs are only managed when set and distinct from the
    # control VIP (otherwise the control VIP resource already covers them).
    $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
    if $public_vip and $public_vip != $control_vip {
      pacemaker::resource::ip { 'public_vip':
        ip_address => $public_vip,
      }
      pacemaker::constraint::base { 'public_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${public_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['public_vip']],
      }
      pacemaker::constraint::colocation { 'public_vip-with-haproxy':
        source  => "ip-${public_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['public_vip']],
      }
    }

    $redis_vip = hiera('redis_vip')
    if $redis_vip and $redis_vip != $control_vip {
      pacemaker::resource::ip { 'redis_vip':
        ip_address => $redis_vip,
      }
      pacemaker::constraint::base { 'redis_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${redis_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['redis_vip']],
      }
      pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
        source  => "ip-${redis_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['redis_vip']],
      }
    }

    $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
    if $internal_api_vip and $internal_api_vip != $control_vip {
      pacemaker::resource::ip { 'internal_api_vip':
        ip_address => $internal_api_vip,
      }
      pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${internal_api_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['internal_api_vip']],
      }
      pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
        source  => "ip-${internal_api_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['internal_api_vip']],
      }
    }

    $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
    if $storage_vip and $storage_vip != $control_vip {
      pacemaker::resource::ip { 'storage_vip':
        ip_address => $storage_vip,
      }
      pacemaker::constraint::base { 'storage_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${storage_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['storage_vip']],
      }
      pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
        source  => "ip-${storage_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['storage_vip']],
      }
    }

    $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
    if $storage_mgmt_vip and $storage_mgmt_vip != $control_vip {
      pacemaker::resource::ip { 'storage_mgmt_vip':
        ip_address => $storage_mgmt_vip,
      }
      pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
        constraint_type   => 'order',
        first_resource    => "ip-${storage_mgmt_vip}",
        second_resource   => 'haproxy-clone',
        first_action      => 'start',
        second_action     => 'start',
        constraint_params => 'kind=Optional',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['storage_mgmt_vip']],
      }
      pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
        source  => "ip-${storage_mgmt_vip}",
        target  => 'haproxy-clone',
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service['haproxy'],
                    Pacemaker::Resource::Ip['storage_mgmt_vip']],
      }
    }

    # Memcached as a pacemaker-managed clone (configured in step 1).
    pacemaker::resource::service { $::memcached::params::service_name :
      clone_params => true,
      require      => Class['::memcached'],
    }

    # RabbitMQ as an OCF clone; set_policy mirrors all non-amq.* queues
    # ("ha-mode":"all").
    pacemaker::resource::ocf { 'rabbitmq':
      ocf_agent_name  => 'heartbeat:rabbitmq-cluster',
      resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
      clone_params    => 'ordered=true interleave=true',
      require         => Class['::rabbitmq'],
    }

    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::resource::service { $::mongodb::params::service_name :
        op_params    => 'start timeout=120s',
        clone_params => true,
        require      => Class['::mongodb::server'],
      }
      # NOTE (spredzy) : The replset can only be run
      # once all the nodes have joined the cluster.
      mongodb_conn_validator { $mongo_node_ips_with_port :
        timeout => '600',
        require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
        before  => Mongodb_replset[$mongodb_replset],
      }
      mongodb_replset { $mongodb_replset :
        members => $mongo_node_ips_with_port,
      }
    }

    # Galera as a master/slave OCF resource; master-max is set to the
    # galera node count. Gating Exec['galera-ready'] on this resource
    # makes later database work wait for the cluster.
    pacemaker::resource::ocf { 'galera' :
      ocf_agent_name  => 'heartbeat:galera',
      op_params       => 'promote timeout=300s on-fail=block',
      master_params   => '',
      meta_params     => "master-max=${galera_nodes_count} ordered=true",
      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
      require         => Class['::mysql::server'],
      before          => Exec['galera-ready'],
    }

    # Redis as a master/slave OCF resource (configured in step 1).
    pacemaker::resource::ocf { 'redis':
      ocf_agent_name  => 'heartbeat:redis',
      master_params   => '',
      meta_params     => 'notify=true ordered=true interleave=true',
      resource_params => 'wait_last_known_master=true',
      require         => Class['::redis'],
    }

  }
379
380   exec { 'galera-ready' :
381     command     => '/usr/bin/clustercheck >/dev/null',
382     timeout     => 30,
383     tries       => 180,
384     try_sleep   => 10,
385     environment => ["AVAILABLE_WHEN_READONLY=0"],
386     require     => File['/etc/sysconfig/clustercheck'],
387   }
388
389   file { '/etc/sysconfig/clustercheck' :
390     ensure  => file,
391     content => "MYSQL_USERNAME=root\n
392 MYSQL_PASSWORD=''\n
393 MYSQL_HOST=localhost\n",
394   }
395
  # Expose clustercheck on TCP 9200 via xinetd; the loadbalancer class in
  # step 1 is declared with mysql_clustercheck enabled, which consumes
  # this endpoint for galera health checks.
  xinetd::service { 'galera-monitor' :
    port           => '9200',
    server         => '/usr/bin/clustercheck',
    per_source     => 'UNLIMITED',
    log_on_success => '',
    log_on_failure => 'HOST',
    flags          => 'REUSE',
    service_type   => 'UNLISTED',
    user           => 'root',
    group          => 'root',
    require        => File['/etc/sysconfig/clustercheck'],
  }
408
  # Create all the database schemas
  # Example DSN format: mysql://user:password@host/dbname
  # Splitting that DSN on the character class [@:/?] yields
  # [ 'mysql', '', '', user, password, host, dbname, ... ], hence the
  # fixed indices 3..6 used below. Schemas are created only on the
  # elected master, and only after the galera cluster reports ready.
  if $sync_db {
    # '%' plus the local bind address: grant access from any host as well
    # as via the node's own MySQL bind address.
    $allowed_hosts = ['%',hiera('mysql_bind_host')]
    $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]')
    class { 'keystone::db::mysql':
      user          => $keystone_dsn[3],
      password      => $keystone_dsn[4],
      host          => $keystone_dsn[5],
      dbname        => $keystone_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]')
    class { 'glance::db::mysql':
      user          => $glance_dsn[3],
      password      => $glance_dsn[4],
      host          => $glance_dsn[5],
      dbname        => $glance_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]')
    class { 'nova::db::mysql':
      user          => $nova_dsn[3],
      password      => $nova_dsn[4],
      host          => $nova_dsn[5],
      dbname        => $nova_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]')
    class { 'neutron::db::mysql':
      user          => $neutron_dsn[3],
      password      => $neutron_dsn[4],
      host          => $neutron_dsn[5],
      dbname        => $neutron_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]')
    class { 'cinder::db::mysql':
      user          => $cinder_dsn[3],
      password      => $cinder_dsn[4],
      host          => $cinder_dsn[5],
      dbname        => $cinder_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]')
    class { 'heat::db::mysql':
      user          => $heat_dsn[3],
      password      => $heat_dsn[4],
      host          => $heat_dsn[5],
      dbname        => $heat_dsn[6],
      allowed_hosts => $allowed_hosts,
      require       => Exec['galera-ready'],
    }
    # Ceilometer only needs a MySQL schema when not backed by MongoDB.
    if downcase(hiera('ceilometer_backend')) == 'mysql' {
      $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]')
      class { 'ceilometer::db::mysql':
        user          => $ceilometer_dsn[3],
        password      => $ceilometer_dsn[4],
        host          => $ceilometer_dsn[5],
        dbname        => $ceilometer_dsn[6],
        allowed_hosts => $allowed_hosts,
        require       => Exec['galera-ready'],
      }
    }
  }
479
  # pre-install swift here so we can build rings
  include ::swift

  # Ceph
  # A ceph monitor is deployed on the controller only when the cinder RBD
  # backend is enabled.
  $cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false)
  $enable_ceph = $cinder_enable_rbd_backend

  if $enable_ceph {
    class { 'ceph::profile::params':
      mon_initial_members => downcase(hiera('ceph_mon_initial_members'))
    }
    include ::ceph::profile::mon
  }
493
494   if str2bool(hiera('enable_ceph_storage', 'false')) {
495     if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
496       exec { 'set selinux to permissive on boot':
497         command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
498         onlyif  => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
499         path    => ["/usr/bin", "/usr/sbin"],
500       }
501
502       exec { 'set selinux to permissive':
503         command => "setenforce 0",
504         onlyif  => "which setenforce && getenforce | grep -i 'enforcing'",
505         path    => ["/usr/bin", "/usr/sbin"],
506       } -> Class['ceph::profile::osd']
507     }
508
509     include ::ceph::profile::client
510     include ::ceph::profile::osd
511   }
512
513
514 } #END STEP 2
515
516 if hiera('step') >= 3 {
517
518   class { '::keystone':
519     sync_db => $sync_db,
520     manage_service => false,
521     enabled => false,
522   }
523
524   #TODO: need a cleanup-keystone-tokens.sh solution here
525   keystone_config {
526     'ec2/driver': value => 'keystone.contrib.ec2.backends.sql.Ec2';
527   }
528   file { [ '/etc/keystone/ssl', '/etc/keystone/ssl/certs', '/etc/keystone/ssl/private' ]:
529     ensure  => 'directory',
530     owner   => 'keystone',
531     group   => 'keystone',
532     require => Package['keystone'],
533   }
534   file { '/etc/keystone/ssl/certs/signing_cert.pem':
535     content => hiera('keystone_signing_certificate'),
536     owner   => 'keystone',
537     group   => 'keystone',
538     notify  => Service['keystone'],
539     require => File['/etc/keystone/ssl/certs'],
540   }
541   file { '/etc/keystone/ssl/private/signing_key.pem':
542     content => hiera('keystone_signing_key'),
543     owner   => 'keystone',
544     group   => 'keystone',
545     notify  => Service['keystone'],
546     require => File['/etc/keystone/ssl/private'],
547   }
548   file { '/etc/keystone/ssl/certs/ca.pem':
549     content => hiera('keystone_ca_certificate'),
550     owner   => 'keystone',
551     group   => 'keystone',
552     notify  => Service['keystone'],
553     require => File['/etc/keystone/ssl/certs'],
554   }
555
556   $glance_backend = downcase(hiera('glance_backend', 'swift'))
557   case $glance_backend {
558       swift: { $backend_store = 'glance.store.swift.Store' }
559       file: { $backend_store = 'glance.store.filesystem.Store' }
560       rbd: { $backend_store = 'glance.store.rbd.Store' }
561       default: { fail('Unrecognized glance_backend parameter.') }
562   }
563   $http_store = ['glance.store.http.Store']
564   $glance_store = concat($http_store, $backend_store)
565
566   # TODO: notifications, scrubber, etc.
567   include ::glance
568   class { 'glance::api':
569     known_stores => $glance_store,
570     manage_service => false,
571     enabled => false,
572   }
573   class { '::glance::registry' :
574     sync_db => $sync_db,
575     manage_service => false,
576     enabled => false,
577   }
578   include join(['::glance::backend::', $glance_backend])
579
580   class { '::nova' :
581     memcached_servers => suffix(hiera('memcache_node_ips'), ':11211'),
582   }
583
584   include ::nova::config
585
586   class { '::nova::api' :
587     sync_db => $sync_db,
588     manage_service => false,
589     enabled => false,
590   }
591   class { '::nova::cert' :
592     manage_service => false,
593     enabled => false,
594   }
595   class { '::nova::conductor' :
596     manage_service => false,
597     enabled => false,
598   }
599   class { '::nova::consoleauth' :
600     manage_service => false,
601     enabled => false,
602   }
603   class { '::nova::vncproxy' :
604     manage_service => false,
605     enabled => false,
606   }
607   class { '::nova::scheduler' :
608     manage_service => false,
609     enabled => false,
610   }
611   include ::nova::network::neutron
612
613   # Neutron class definitions
614   include ::neutron
615   class { '::neutron::server' :
616     sync_db => $sync_db,
617     manage_service => false,
618     enabled => false,
619   }
620   class { '::neutron::agents::dhcp' :
621     manage_service => false,
622     enabled => false,
623   }
624   class { '::neutron::agents::l3' :
625     manage_service => false,
626     enabled => false,
627   }
628   class { 'neutron::agents::metadata':
629     manage_service => false,
630     enabled => false,
631   }
632   file { '/etc/neutron/dnsmasq-neutron.conf':
633     content => hiera('neutron_dnsmasq_options'),
634     owner   => 'neutron',
635     group   => 'neutron',
636     notify  => Service['neutron-dhcp-service'],
637     require => Package['neutron'],
638   }
639   class { 'neutron::plugins::ml2':
640     flat_networks   => split(hiera('neutron_flat_networks'), ','),
641     tenant_network_types => [hiera('neutron_tenant_network_type')],
642   }
643   class { 'neutron::agents::ml2::ovs':
644     manage_service   => false,
645     enabled          => false,
646     bridge_mappings  => split(hiera('neutron_bridge_mappings'), ','),
647     tunnel_types     => split(hiera('neutron_tunnel_types'), ','),
648   }
649
650   include ::cinder
651   class { '::cinder::api':
652     sync_db => $sync_db,
653     manage_service => false,
654     enabled => false,
655   }
656   class { '::cinder::scheduler' :
657     manage_service => false,
658     enabled => false,
659   }
660   class { '::cinder::volume' :
661     manage_service => false,
662     enabled => false,
663   }
664   include ::cinder::glance
665   class {'cinder::setup_test_volume':
666     size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
667   }
668
669   $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
670   if $cinder_enable_iscsi {
671     $cinder_iscsi_backend = 'tripleo_iscsi'
672
673     cinder::backend::iscsi { $cinder_iscsi_backend :
674       iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
675       iscsi_helper     => hiera('cinder_iscsi_helper'),
676     }
677   }
678
679   if $enable_ceph {
680
681     Ceph_pool {
682       pg_num  => hiera('ceph::profile::params::osd_pool_default_pg_num'),
683       pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
684       size    => hiera('ceph::profile::params::osd_pool_default_size'),
685     }
686
687     $ceph_pools = hiera('ceph_pools')
688     ceph::pool { $ceph_pools : }
689   }
690
691   if $cinder_enable_rbd_backend {
692     $cinder_rbd_backend = 'tripleo_ceph'
693
694     cinder_config {
695       "${cinder_rbd_backend}/host": value => 'hostgroup';
696     }
697
698     cinder::backend::rbd { $cinder_rbd_backend :
699       rbd_pool        => 'volumes',
700       rbd_user        => 'openstack',
701       rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
702       require         => Ceph::Pool['volumes'],
703     }
704   }
705
706   if hiera('cinder_enable_netapp_backend', false) {
707     $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
708
709     cinder_config {
710       "${cinder_netapp_backend}/host": value => 'hostgroup';
711     }
712
713     if hiera('cinder::backend::netapp::nfs_shares', undef) {
714       $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
715     }
716
717     cinder::backend::netapp { $cinder_netapp_backend :
718       netapp_login                 => hiera('cinder::backend::netapp::netapp_login', undef),
719       netapp_password              => hiera('cinder::backend::netapp::netapp_password', undef),
720       netapp_server_hostname       => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
721       netapp_server_port           => hiera('cinder::backend::netapp::netapp_server_port', undef),
722       netapp_size_multiplier       => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
723       netapp_storage_family        => hiera('cinder::backend::netapp::netapp_storage_family', undef),
724       netapp_storage_protocol      => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
725       netapp_transport_type        => hiera('cinder::backend::netapp::netapp_transport_type', undef),
726       netapp_vfiler                => hiera('cinder::backend::netapp::netapp_vfiler', undef),
727       netapp_volume_list           => hiera('cinder::backend::netapp::netapp_volume_list', undef),
728       netapp_vserver               => hiera('cinder::backend::netapp::netapp_vserver', undef),
729       netapp_partner_backend_name  => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
730       nfs_shares                   => $cinder_netapp_nfs_shares,
731       nfs_shares_config            => hiera('cinder::backend::netapp::nfs_shares_config', undef),
732       netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
733       netapp_controller_ips        => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
734       netapp_sa_password           => hiera('cinder::backend::netapp::netapp_sa_password', undef),
735       netapp_storage_pools         => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
736       netapp_eseries_host_type     => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
737       netapp_webservice_path       => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
738     }
739   }
740
741   if hiera('cinder_enable_nfs_backend', false) {
742     $cinder_nfs_backend = 'tripleo_nfs'
743
744     if ($::selinux != "false") {
745       selboolean { 'virt_use_nfs':
746           value => on,
747           persistent => true,
748       } -> Package['nfs-utils']
749     }
750
751     package {'nfs-utils': } ->
752     cinder::backend::nfs { $cinder_nfs_backend:
753       nfs_servers         => hiera('cinder_nfs_servers'),
754       nfs_mount_options   => hiera('cinder_nfs_mount_options'),
755       nfs_shares_config   => '/etc/cinder/shares-nfs.conf',
756     }
757   }
758
759   $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend, $cinder_nfs_backend])
760   class { '::cinder::backends' :
761     enabled_backends => $cinder_enabled_backends,
762   }
763
764   # swift proxy
765   class { '::swift::proxy' :
766     manage_service => $non_pcmk_start,
767     enabled => $non_pcmk_start,
768   }
769   include ::swift::proxy::proxy_logging
770   include ::swift::proxy::healthcheck
771   include ::swift::proxy::cache
772   include ::swift::proxy::keystone
773   include ::swift::proxy::authtoken
774   include ::swift::proxy::staticweb
775   include ::swift::proxy::ratelimit
776   include ::swift::proxy::catch_errors
777   include ::swift::proxy::tempurl
778   include ::swift::proxy::formpost
779
780   # swift storage
781   if str2bool(hiera('enable_swift_storage', 'true')) {
782     class {'::swift::storage::all':
783       mount_check => str2bool(hiera('swift_mount_check'))
784     }
785     class {'::swift::storage::account':
786       manage_service => $non_pcmk_start,
787       enabled => $non_pcmk_start,
788     }
789     class {'::swift::storage::container':
790       manage_service => $non_pcmk_start,
791       enabled => $non_pcmk_start,
792     }
793     class {'::swift::storage::object':
794       manage_service => $non_pcmk_start,
795       enabled => $non_pcmk_start,
796     }
797     if(!defined(File['/srv/node'])) {
798       file { '/srv/node':
799         ensure  => directory,
800         owner   => 'swift',
801         group   => 'swift',
802         require => Package['openstack-swift'],
803       }
804     }
805     $swift_components = ['account', 'container', 'object']
806     swift::storage::filter::recon { $swift_components : }
807     swift::storage::filter::healthcheck { $swift_components : }
808   }
809
810   # Ceilometer
811   $ceilometer_backend = downcase(hiera('ceilometer_backend'))
812   case $ceilometer_backend {
813     /mysql/ : {
814       $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
815     }
816     default : {
817       $mongo_node_string = join($mongo_node_ips_with_port, ',')
818       $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
819     }
820   }
821   include ::ceilometer
822   class { '::ceilometer::api' :
823     manage_service => false,
824     enabled => false,
825   }
826   class { '::ceilometer::agent::notification' :
827     manage_service => false,
828     enabled => false,
829   }
830   class { '::ceilometer::agent::central' :
831     manage_service => false,
832     enabled => false,
833   }
834   class { '::ceilometer::alarm::notifier' :
835     manage_service => false,
836     enabled => false,
837   }
838   class { '::ceilometer::alarm::evaluator' :
839     manage_service => false,
840     enabled => false,
841   }
842   class { '::ceilometer::collector' :
843     manage_service => false,
844     enabled => false,
845   }
846   include ::ceilometer::expirer
847   class { '::ceilometer::db' :
848     database_connection => $ceilometer_database_connection,
849     sync_db             => $sync_db,
850   }
851   include ceilometer::agent::auth
852
853   Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
854
855   # Heat
856   class { '::heat' :
857     sync_db => $sync_db,
858   }
859   class { '::heat::api' :
860     manage_service => false,
861     enabled => false,
862   }
863   class { '::heat::api_cfn' :
864     manage_service => false,
865     enabled => false,
866   }
867   class { '::heat::api_cloudwatch' :
868     manage_service => false,
869     enabled => false,
870   }
871   class { '::heat::engine' :
872     manage_service => false,
873     enabled => false,
874   }
875
876   # httpd/apache and horizon
877   # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
878   include ::apache
879   include ::apache::mod::status
880   $vhost_params = {
881     add_listen => false,
882     priority   => 10,
883   }
884   class { 'horizon':
885     cache_server_ip    => hiera('memcache_node_ips', '127.0.0.1'),
886     vhost_extra_params => $vhost_params,
887     server_aliases     => $::hostname,
888   }
889
890   $snmpd_user = hiera('snmpd_readonly_user_name')
891   snmp::snmpv3_user { $snmpd_user:
892     authtype => 'MD5',
893     authpass => hiera('snmpd_readonly_user_password'),
894   }
895   class { 'snmp':
896     agentaddress => ['udp:161','udp6:[::1]:161'],
897     snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc  cron', 'includeAllDisks  10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
898   }
899
  # Allow deployment-specific extra classes to be layered onto controllers
  # via the 'controller_classes' hiera key.
  hiera_include('controller_classes')
901
902 } #END STEP 3
903
904 if hiera('step') >= 4 {
  # Cron job that periodically purges expired Keystone tokens from the DB.
  include ::keystone::cron::token_flush
906
907   if $pacemaker_master {
908
909     # Keystone
910     pacemaker::resource::service { $::keystone::params::service_name :
911       clone_params => "interleave=true",
912     }
913
914     # Cinder
915     pacemaker::resource::service { $::cinder::params::api_service :
916       clone_params => "interleave=true",
917       require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
918     }
919     pacemaker::resource::service { $::cinder::params::scheduler_service :
920       clone_params => "interleave=true",
921     }
922     pacemaker::resource::service { $::cinder::params::volume_service : }
923
924     pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
925       constraint_type => 'order',
926       first_resource  => "${::keystone::params::service_name}-clone",
927       second_resource => "${::cinder::params::api_service}-clone",
928       first_action    => 'start',
929       second_action   => 'start',
930       require         => [Pacemaker::Resource::Service[$::cinder::params::api_service],
931                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
932     }
933     pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
934       constraint_type => "order",
935       first_resource => "${::cinder::params::api_service}-clone",
936       second_resource => "${::cinder::params::scheduler_service}-clone",
937       first_action => "start",
938       second_action => "start",
939       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
940                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
941     }
942     pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
943       source => "${::cinder::params::scheduler_service}-clone",
944       target => "${::cinder::params::api_service}-clone",
945       score => "INFINITY",
946       require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
947                   Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
948     }
949     pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
950       constraint_type => "order",
951       first_resource => "${::cinder::params::scheduler_service}-clone",
952       second_resource => "${::cinder::params::volume_service}",
953       first_action => "start",
954       second_action => "start",
955       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
956                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
957     }
958     pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
959       source => "${::cinder::params::volume_service}",
960       target => "${::cinder::params::scheduler_service}-clone",
961       score => "INFINITY",
962       require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
963                   Pacemaker::Resource::Service[$::cinder::params::volume_service]],
964     }
965
966     # Glance
967     pacemaker::resource::service { $::glance::params::registry_service_name :
968       clone_params => "interleave=true",
969       require      => Pacemaker::Resource::Service[$::keystone::params::service_name],
970     }
971     pacemaker::resource::service { $::glance::params::api_service_name :
972       clone_params => "interleave=true",
973     }
974
975     pacemaker::constraint::base { 'keystone-then-glance-registry-constraint':
976       constraint_type => 'order',
977       first_resource  => "${::keystone::params::service_name}-clone",
978       second_resource => "${::glance::params::registry_service_name}-clone",
979       first_action    => 'start',
980       second_action   => 'start',
981       require         => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
982                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
983     }
984     pacemaker::constraint::base { 'glance-registry-then-glance-api-constraint':
985       constraint_type => "order",
986       first_resource  => "${::glance::params::registry_service_name}-clone",
987       second_resource => "${::glance::params::api_service_name}-clone",
988       first_action    => "start",
989       second_action   => "start",
990       require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
991                   Pacemaker::Resource::Service[$::glance::params::api_service_name]],
992     }
993     pacemaker::constraint::colocation { 'glance-api-with-glance-registry-colocation':
994       source  => "${::glance::params::api_service_name}-clone",
995       target  => "${::glance::params::registry_service_name}-clone",
996       score   => "INFINITY",
997       require => [Pacemaker::Resource::Service[$::glance::params::registry_service_name],
998                   Pacemaker::Resource::Service[$::glance::params::api_service_name]],
999     }
1000
    # Neutron
    # NOTE(gfidente): Neutron will try to populate the database with some data
    # as soon as neutron-server is started; to avoid races we want to make this
    # happen only on one node, before normal Pacemaker initialization
    # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
    # NOTE(review): this exec has no 'unless'/'onlyif' guard, so it re-runs on
    # every puppet apply on the bootstrap node — confirm that is intentional.
    exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
    pacemaker::resource::service { $::neutron::params::server_service:
      op_params => "start timeout=90",
      clone_params   => "interleave=true",
      require => Pacemaker::Resource::Service[$::keystone::params::service_name]
    }
    pacemaker::resource::service { $::neutron::params::l3_agent_service:
      clone_params   => "interleave=true",
    }
    pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
      clone_params   => "interleave=true",
    }
    pacemaker::resource::service { $::neutron::params::ovs_agent_service:
      clone_params => "interleave=true",
    }
    pacemaker::resource::service { $::neutron::params::metadata_agent_service:
      clone_params => "interleave=true",
    }
    # One-shot OCF cleanup agents that must run before the OVS/L3 agents.
    pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
      ocf_agent_name => "neutron:OVSCleanup",
      clone_params => "interleave=true",
    }
    pacemaker::resource::ocf { 'neutron-netns-cleanup':
      ocf_agent_name => "neutron:NetnsCleanup",
      clone_params => "interleave=true",
    }
    # Start-order chain enforced by the constraints below:
    #   keystone -> neutron-server -> ovs-cleanup -> netns-cleanup
    #     -> openvswitch-agent -> dhcp-agent -> l3-agent -> metadata-agent
    # Each consecutive agent pair is also colocated so they land together.
    pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
      constraint_type => "order",
      first_resource => "${::keystone::params::service_name}-clone",
      second_resource => "${::neutron::params::server_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
                  Pacemaker::Resource::Service[$::neutron::params::server_service]],
    }
    pacemaker::constraint::base { 'neutron-server-to-neutron-ovs-cleanup-constraint':
      constraint_type => "order",
      first_resource => "${::neutron::params::server_service}-clone",
      second_resource => "${::neutron::params::ovs_cleanup_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
                  Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]],
    }
    pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
      constraint_type => "order",
      first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
      second_resource => "neutron-netns-cleanup-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
                  Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
    }
    pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
      source => "neutron-netns-cleanup-clone",
      target => "${::neutron::params::ovs_cleanup_service}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"],
                  Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
    }
    pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
      constraint_type => "order",
      first_resource => "neutron-netns-cleanup-clone",
      second_resource => "${::neutron::params::ovs_agent_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
                  Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
    }
    pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
      source => "${::neutron::params::ovs_agent_service}-clone",
      target => "neutron-netns-cleanup-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Ocf["neutron-netns-cleanup"],
                  Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"]],
    }
    pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
      constraint_type => "order",
      first_resource => "${::neutron::params::ovs_agent_service}-clone",
      second_resource => "${::neutron::params::dhcp_agent_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],

    }
    pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
      source => "${::neutron::params::dhcp_agent_service}-clone",
      target => "${::neutron::params::ovs_agent_service}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service["${::neutron::params::ovs_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"]],
    }
    pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
      constraint_type => "order",
      first_resource => "${::neutron::params::dhcp_agent_service}-clone",
      second_resource => "${::neutron::params::l3_agent_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
    }
    pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
      source => "${::neutron::params::l3_agent_service}-clone",
      target => "${::neutron::params::dhcp_agent_service}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service["${::neutron::params::dhcp_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"]]
    }
    pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
      constraint_type => "order",
      first_resource => "${::neutron::params::l3_agent_service}-clone",
      second_resource => "${::neutron::params::metadata_agent_service}-clone",
      first_action => "start",
      second_action => "start",
      require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
    }
    pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
      source => "${::neutron::params::metadata_agent_service}-clone",
      target => "${::neutron::params::l3_agent_service}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service["${::neutron::params::l3_agent_service}"],
                  Pacemaker::Resource::Service["${::neutron::params::metadata_agent_service}"]]
    }
1131
    # Nova
    # All Nova control services run as interleaved Pacemaker clones with a
    # generous start timeout and a short monitor start-delay.
    pacemaker::resource::service { $::nova::params::api_service_name :
      clone_params    => "interleave=true",
      op_params       => "start timeout=90s monitor start-delay=10s",
    }
    pacemaker::resource::service { $::nova::params::conductor_service_name :
      clone_params    => "interleave=true",
      op_params       => "start timeout=90s monitor start-delay=10s",
    }
    pacemaker::resource::service { $::nova::params::consoleauth_service_name :
      clone_params    => "interleave=true",
      op_params       => "start timeout=90s monitor start-delay=10s",
      require         => Pacemaker::Resource::Service[$::keystone::params::service_name],
    }
    pacemaker::resource::service { $::nova::params::vncproxy_service_name :
      clone_params    => "interleave=true",
      op_params       => "start timeout=90s monitor start-delay=10s",
    }
    pacemaker::resource::service { $::nova::params::scheduler_service_name :
      clone_params    => "interleave=true",
      op_params       => "start timeout=90s monitor start-delay=10s",
    }

    # Start order: keystone -> consoleauth -> vncproxy, and separately
    # api -> scheduler -> conductor. The vncproxy -> api link is disabled
    # below until websockify 0.6 is available.
    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::nova::params::consoleauth_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => "order",
      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
      second_resource => "${::nova::params::vncproxy_service_name}-clone",
      first_action    => "start",
      second_action   => "start",
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
      source => "${::nova::params::vncproxy_service_name}-clone",
      target => "${::nova::params::consoleauth_service_name}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    # FIXME(gfidente): novncproxy will not start unless websockify is updated to 0.6
    # which is not the case for f20 nor f21; uncomment when it becomes available
    #pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
    #  constraint_type => "order",
    #  first_resource  => "${::nova::params::vncproxy_service_name}-clone",
    #  second_resource => "${::nova::params::api_service_name}-clone",
    #  first_action    => "start",
    #  second_action   => "start",
    #  require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
    #              Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    #}
    #pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
    #  source => "${::nova::params::api_service_name}-clone",
    #  target => "${::nova::params::vncproxy_service_name}-clone",
    #  score => "INFINITY",
    #  require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
    #              Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    #}
    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
      constraint_type => "order",
      first_resource  => "${::nova::params::api_service_name}-clone",
      second_resource => "${::nova::params::scheduler_service_name}-clone",
      first_action    => "start",
      second_action   => "start",
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
      source => "${::nova::params::scheduler_service_name}-clone",
      target => "${::nova::params::api_service_name}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
      constraint_type => "order",
      first_resource  => "${::nova::params::scheduler_service_name}-clone",
      second_resource => "${::nova::params::conductor_service_name}-clone",
      first_action    => "start",
      second_action   => "start",
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
      source => "${::nova::params::conductor_service_name}-clone",
      target => "${::nova::params::scheduler_service_name}-clone",
      score => "INFINITY",
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
1230
1231     # Ceilometer
1232     pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
1233       clone_params => 'interleave=true',
1234       require      => [Pacemaker::Resource::Service[$::keystone::params::service_name],
1235                        Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1236     }
1237     pacemaker::resource::service { $::ceilometer::params::collector_service_name :
1238       clone_params => 'interleave=true',
1239     }
1240     pacemaker::resource::service { $::ceilometer::params::api_service_name :
1241       clone_params => 'interleave=true',
1242     }
1243     pacemaker::resource::service { $::ceilometer::params::alarm_evaluator_service_name :
1244       clone_params => 'interleave=true',
1245     }
1246     pacemaker::resource::service { $::ceilometer::params::alarm_notifier_service_name :
1247       clone_params => 'interleave=true',
1248     }
1249     pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
1250       clone_params => 'interleave=true',
1251     }
1252     pacemaker::resource::ocf { 'delay' :
1253       ocf_agent_name  => 'heartbeat:Delay',
1254       clone_params    => 'interleave=true',
1255       resource_params => 'startdelay=10',
1256     }
1257     pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
1258       constraint_type => 'order',
1259       first_resource  => "${::keystone::params::service_name}-clone",
1260       second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1261       first_action    => 'start',
1262       second_action   => 'start',
1263       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1264                           Pacemaker::Resource::Service[$::keystone::params::service_name]],
1265     }
1266     pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
1267       constraint_type => 'order',
1268       first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
1269       second_resource => "${::ceilometer::params::collector_service_name}-clone",
1270       first_action    => 'start',
1271       second_action   => 'start',
1272       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1273                           Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1274     }
1275     pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
1276       constraint_type => 'order',
1277       first_resource  => "${::ceilometer::params::collector_service_name}-clone",
1278       second_resource => "${::ceilometer::params::api_service_name}-clone",
1279       first_action    => 'start',
1280       second_action   => 'start',
1281       require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
1282                           Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
1283     }
1284     pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
1285       source  => "${::ceilometer::params::api_service_name}-clone",
1286       target  => "${::ceilometer::params::collector_service_name}-clone",
1287       score   => 'INFINITY',
1288       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1289                   Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1290     }
1291     pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
1292       constraint_type => 'order',
1293       first_resource  => "${::ceilometer::params::api_service_name}-clone",
1294       second_resource => 'delay-clone',
1295       first_action    => 'start',
1296       second_action   => 'start',
1297       require         => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1298                           Pacemaker::Resource::Ocf['delay']],
1299     }
1300     pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
1301       source  => 'delay-clone',
1302       target  => "${::ceilometer::params::api_service_name}-clone",
1303       score   => 'INFINITY',
1304       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1305                   Pacemaker::Resource::Ocf['delay']],
1306     }
1307     pacemaker::constraint::base { 'ceilometer-delay-then-ceilometer-alarm-evaluator-constraint':
1308       constraint_type => 'order',
1309       first_resource  => 'delay-clone',
1310       second_resource => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1311       first_action    => 'start',
1312       second_action   => 'start',
1313       require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
1314                           Pacemaker::Resource::Ocf['delay']],
1315     }
1316     pacemaker::constraint::colocation { 'ceilometer-alarm-evaluator-with-ceilometer-delay-colocation':
1317       source  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1318       target  => 'delay-clone',
1319       score   => 'INFINITY',
1320       require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1321                   Pacemaker::Resource::Ocf['delay']],
1322     }
1323     pacemaker::constraint::base { 'ceilometer-alarm-evaluator-then-ceilometer-alarm-notifier-constraint':
1324       constraint_type => 'order',
1325       first_resource  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1326       second_resource => "${::ceilometer::params::alarm_notifier_service_name}-clone",
1327       first_action    => 'start',
1328       second_action   => 'start',
1329       require         => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
1330                           Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
1331     }
1332     pacemaker::constraint::colocation { 'ceilometer-alarm-notifier-with-ceilometer-alarm-evaluator-colocation':
1333       source  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
1334       target  => "${::ceilometer::params::alarm_evaluator_service_name}-clone",
1335       score   => 'INFINITY',
1336       require => [Pacemaker::Resource::Service[$::ceilometer::params::alarm_evaluator_service_name],
1337                   Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
1338     }
1339     pacemaker::constraint::base { 'ceilometer-alarm-notifier-then-ceilometer-notification-constraint':
1340       constraint_type => 'order',
1341       first_resource  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
1342       second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
1343       first_action    => 'start',
1344       second_action   => 'start',
1345       require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
1346                           Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
1347     }
1348     pacemaker::constraint::colocation { 'ceilometer-notification-with-ceilometer-alarm-notifier-colocation':
1349       source  => "${::ceilometer::params::agent_notification_service_name}-clone",
1350       target  => "${::ceilometer::params::alarm_notifier_service_name}-clone",
1351       score   => 'INFINITY',
1352       require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
1353                   Pacemaker::Resource::Service[$::ceilometer::params::alarm_notifier_service_name]],
1354     }
1355     if downcase(hiera('ceilometer_backend')) == 'mongodb' {
1356       pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
1357         constraint_type => 'order',
1358         first_resource  => "${::mongodb::params::service_name}-clone",
1359         second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1360         first_action    => 'start',
1361         second_action   => 'start',
1362         require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1363                             Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1364       }
1365     }
1366
    # Heat
    # Heat services as interleaved Pacemaker clones; start order:
    #   keystone -> heat-api -> heat-api-cfn -> heat-api-cloudwatch -> heat-engine
    # with each consecutive pair colocated.
    pacemaker::resource::service { $::heat::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::api_cfn_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::heat::params::engine_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::keystone::params::service_name}-clone",
      second_resource => "${::heat::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                          Pacemaker::Resource::Service[$::keystone::params::service_name]],
    }
    pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_service_name}-clone",
      second_resource => "${::heat::params::api_cfn_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
      source  => "${::heat::params::api_cfn_service_name}-clone",
      target  => "${::heat::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_service_name]],
    }
    pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cfn_service_name}-clone",
      second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
    }
    pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
      source  => "${::heat::params::api_cloudwatch_service_name}-clone",
      target  => "${::heat::params::api_cfn_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
                  Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
    }
    pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
      constraint_type => 'order',
      first_resource  => "${::heat::params::api_cloudwatch_service_name}-clone",
      second_resource => "${::heat::params::engine_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
                  Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
    }
1430     pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
1431       source  => "${::heat::params::engine_service_name}-clone",
1432       target  => "${::heat::params::api_cloudwatch_service_name}-clone",
1433       score   => 'INFINITY',
1434       require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1435                   Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1436     }
1437     pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
1438       constraint_type => 'order',
1439       first_resource  => "${::ceilometer::params::agent_notification_service_name}-clone",
1440       second_resource => "${::heat::params::api_service_name}-clone",
1441       first_action    => 'start',
1442       second_action   => 'start',
1443       require         => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1444                           Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
1445     }
1446
1447     # Horizon
1448     pacemaker::resource::service { $::horizon::params::http_service:
1449         clone_params => "interleave=true",
1450     }
1451
1452
1453   }
1454
1455 } #END STEP 4