Merge "Restore the NtpServer parameter name"
puppet/manifests/overcloud_controller_pacemaker.pp (apex-tripleo-heat-templates.git)
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

Pcmk_resource <| |> {
  tries     => 10,
  try_sleep => 3,
}

# TODO(jistr): use pcs resource provider instead of just no-ops
Service <|
  tag == 'aodh-service' or
  tag == 'ceilometer-service' or
  tag == 'gnocchi-service'
|> {
  hasrestart => true,
  restart    => '/bin/true',
  start      => '/bin/true',
  stop       => '/bin/true',
}
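# The collector override above neutralizes the systemd units so that
# Pacemaker, not Puppet, drives service lifecycle. A sketch of what replaces
# the no-ops once a service is pacemakerized (the same pattern step 5 below
# already uses; illustrative only, not applied at this point):
#
#   pacemaker::resource::service { $::ceilometer::params::api_service_name :
#     clone_params => 'interleave=true',
#   }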

include ::tripleo::packages
include ::tripleo::firewall

if $::hostname == downcase(hiera('bootstrap_nodeid')) {
  $pacemaker_master = true
  $sync_db = true
} else {
  $pacemaker_master = false
  $sync_db = false
}

$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
$enable_load_balancer = hiera('enable_load_balancer', true)

# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 5

if hiera('step') >= 1 {

  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
  $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
  if $corosync_ipv6 {
    $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' }
  } else {
    $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) }
  }
  class { '::pacemaker':
    hacluster_pwd => hiera('hacluster_pwd'),
  } ->
  class { '::pacemaker::corosync':
    cluster_members      => $pacemaker_cluster_members,
    setup_cluster        => $pacemaker_master,
    cluster_setup_extras => $cluster_setup_extras,
  }
  class { '::pacemaker::stonith':
    disable => !$enable_fencing,
  }
  if $enable_fencing {
    include ::tripleo::fencing

    # enable stonith after all Pacemaker resources have been created
    Pcmk_resource<||> -> Class['tripleo::fencing']
    Pcmk_constraint<||> -> Class['tripleo::fencing']
    Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing']
    # enable stonith after all fencing devices have been created
    Class['tripleo::fencing'] -> Class['pacemaker::stonith']
  }

  # FIXME(gfidente): sets 200s as the default start/stop timeout op
  # param; until we can use pcmk global defaults we'll still need to
  # add it to every resource which redefines op params
  Pacemaker::Resource::Service {
    op_params => 'start timeout=200s stop timeout=200s',
  }
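  # With the resource default above, any later Pacemaker::Resource::Service
  # declaration in this manifest, e.g.
  #   pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
  #     clone_params => 'interleave=true',
  #   }
  # inherits the 200s start/stop timeouts automatically; resources of other
  # types (such as the 'galera' OCF resource below) must set op_params
  # themselves.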

  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
    include ::mongodb::params
  }

  # Galera
  if str2bool(hiera('enable_galera', true)) {
    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
  } else {
    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
  }
  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
  $galera_nodes_count = count(split($galera_nodes, ','))

  # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
  # set bind-address to a hostname instead of an ip address; to move MySQL
  # from internal_api to another network we'll have to customize both
  # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
  $mysql_bind_host = hiera('mysql_bind_host')
  $mysqld_options = {
    'mysqld' => {
      'skip-name-resolve'              => '1',
      'binlog_format'                  => 'ROW',
      'default-storage-engine'         => 'innodb',
      'innodb_autoinc_lock_mode'       => '2',
      'innodb_locks_unsafe_for_binlog' => '1',
      'query_cache_size'               => '0',
      'query_cache_type'               => '0',
      'bind-address'                   => $::hostname,
      'max_connections'                => hiera('mysql_max_connections'),
      'open_files_limit'               => '-1',
      'wsrep_on'                       => 'ON',
      'wsrep_provider'                 => '/usr/lib64/galera/libgalera_smm.so',
      'wsrep_cluster_name'             => 'galera_cluster',
      'wsrep_cluster_address'          => "gcomm://${galera_nodes}",
      'wsrep_slave_threads'            => '1',
      'wsrep_certify_nonPK'            => '1',
      'wsrep_max_ws_rows'              => '131072',
      'wsrep_max_ws_size'              => '1073741824',
      'wsrep_debug'                    => '0',
      'wsrep_convert_LOCK_to_trx'      => '0',
      'wsrep_retry_autocommit'         => '1',
      'wsrep_auto_increment_control'   => '1',
      'wsrep_drupal_282555_workaround' => '0',
      'wsrep_causal_reads'             => '0',
      'wsrep_sst_method'               => 'rsync',
      'wsrep_provider_options'         => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
    },
  }
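  # Illustration (assumed hiera data): with galera_node_names set to
  # 'overcloud-controller-0,overcloud-controller-1,overcloud-controller-2',
  # $galera_nodes_count evaluates to 3 and wsrep_cluster_address renders as
  # 'gcomm://overcloud-controller-0,overcloud-controller-1,overcloud-controller-2'.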

  class { '::mysql::server':
    create_root_user        => false,
    create_root_my_cnf      => false,
    config_file             => $mysql_config_file,
    override_options        => $mysqld_options,
    remove_default_accounts => $pacemaker_master,
    service_manage          => false,
    service_enabled         => false,
  }

}

if hiera('step') >= 2 {

  # NOTE(gfidente): the following vars are needed on all nodes so they
  # need to stay out of the pacemaker_master conditional.
  # The address mangling will hopefully go away once we can configure the
  # connection string via hostnames; until then, we need to pass the list
  # of IPv6 addresses *with* port and without the brackets as the 'members'
  # argument of the 'mongodb_replset' resource.
  if str2bool(hiera('mongodb::server::ipv6', false)) {
    $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
    $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  } else {
    $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  }
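  # Worked example (assumed addresses): mongo_node_ips = ['fd00::a', 'fd00::b']
  # in the IPv6 branch yields
  #   $mongo_node_ips_with_port      = ['[fd00::a]:27017', '[fd00::b]:27017']
  #   $mongo_node_ips_with_port_nobr = ['fd00::a:27017', 'fd00::b:27017']
  # The bracketed form suits connection URIs; the bracket-less form is what
  # mongodb_replset expects.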
  $mongodb_replset = hiera('mongodb::server::replset')

  if $pacemaker_master {

    include ::pacemaker::resource_defaults

    # Create an openstack-core dummy resource. See RHBZ 1290121
    pacemaker::resource::ocf { 'openstack-core':
      ocf_agent_name => 'heartbeat:Dummy',
      clone_params   => true,
    }

    pacemaker::resource::ocf { 'galera' :
      ocf_agent_name  => 'heartbeat:galera',
      op_params       => 'promote timeout=300s on-fail=block',
      master_params   => '',
      meta_params     => "master-max=${galera_nodes_count} ordered=true",
      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
      require         => Class['::mysql::server'],
      before          => Exec['galera-ready'],
    }
  }
  $mysql_root_password = hiera('mysql::server::root_password')
  $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
  # This step creates a sysconfig clustercheck file with the root user and an
  # empty password, on the first install only (later on the clustercheck db
  # user is used instead). We use exec rather than file so as not to trigger
  # duplicate definition errors in puppet when we later set the file to
  # contain the clustercheck data.
  exec { 'create-root-sysconfig-clustercheck':
    command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
    unless  => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
  }

  exec { 'galera-ready' :
    command     => '/usr/bin/clustercheck >/dev/null',
    timeout     => 30,
    tries       => 180,
    try_sleep   => 10,
    environment => ['AVAILABLE_WHEN_READONLY=0'],
    require     => Exec['create-root-sysconfig-clustercheck'],
  }
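  # With tries=180 and try_sleep=10, this polls clustercheck for up to about
  # 30 minutes (180 attempts, 10s apart, each capped by the 30s timeout)
  # while Galera syncs.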

  xinetd::service { 'galera-monitor' :
    port           => '9200',
    server         => '/usr/bin/clustercheck',
    per_source     => 'UNLIMITED',
    log_on_success => '',
    log_on_failure => 'HOST',
    flags          => 'REUSE',
    service_type   => 'UNLISTED',
    user           => 'root',
    group          => 'root',
    require        => Exec['create-root-sysconfig-clustercheck'],
  }
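  # NOTE: exposing clustercheck on port 9200 through xinetd is what lets the
  # load balancer (HAProxy in TripleO) health-check Galera with a plain HTTP
  # probe instead of a MySQL-protocol check.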
  # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
  # to it in a later step. We do this only on one node as it will replicate to
  # the other members. We also make sure that the permissions are the minimum
  # necessary.
  if $pacemaker_master {
    mysql_user { 'clustercheck@localhost':
      ensure        => 'present',
      password_hash => mysql_password($mysql_clustercheck_password),
      require       => Exec['galera-ready'],
    }
    mysql_grant { 'clustercheck@localhost/*.*':
      ensure     => 'present',
      options    => ['GRANT'],
      privileges => ['PROCESS'],
      table      => '*.*',
      user       => 'clustercheck@localhost',
    }
  }

  # Create all the database schemas
  if $sync_db {
    if downcase(hiera('ceilometer_backend')) == 'mysql' {
      class { '::ceilometer::db::mysql':
        require => Exec['galera-ready'],
      }
    }

    if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
      class { '::gnocchi::db::mysql':
        require => Exec['galera-ready'],
      }
    }

    class { '::aodh::db::mysql':
      require => Exec['galera-ready'],
    }
  }

} #END STEP 2

if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
  # At this stage we are guaranteed that the clustercheck db user exists
  # so we switch the resource agent to use it.
  file { '/etc/sysconfig/clustercheck' :
    ensure  => file,
    mode    => '0600',
    owner   => 'root',
    group   => 'root',
    content => "MYSQL_USERNAME=clustercheck\n
MYSQL_PASSWORD='${mysql_clustercheck_password}'\n
MYSQL_HOST=localhost\n",
  }

  $nova_ipv6 = hiera('nova::use_ipv6', false)
  if $nova_ipv6 {
    $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
  } else {
    $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
  }
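  # e.g. (assumed data): memcache_node_ips = ['10.0.0.5', '10.0.0.6'] gives
  # $memcached_servers = ['10.0.0.5:11211', '10.0.0.6:11211'].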

  class { '::nova' :
    memcached_servers => $memcached_servers
  }

  include ::nova::config

  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {

    # TODO(devvesa) provide non-controller ips for these services
    $zookeeper_node_ips = hiera('neutron_api_node_ips')
    $cassandra_node_ips = hiera('neutron_api_node_ips')

    # Run zookeeper in the controller if configured
    if hiera('enable_zookeeper_on_controller') {
      class {'::tripleo::cluster::zookeeper':
        zookeeper_server_ips => $zookeeper_node_ips,
        # TODO: create a 'bind' hiera key for zookeeper
        zookeeper_client_ip  => hiera('neutron::bind_host'),
        zookeeper_hostnames  => split(hiera('controller_node_names'), ',')
      }
    }

    # Run cassandra in the controller if configured
    if hiera('enable_cassandra_on_controller') {
      class {'::tripleo::cluster::cassandra':
        cassandra_servers => $cassandra_node_ips,
        # TODO: create a 'bind' hiera key for cassandra
        cassandra_ip      => hiera('neutron::bind_host'),
      }
    }

    class {'::tripleo::network::midonet::agent':
      zookeeper_servers => $zookeeper_node_ips,
      cassandra_seeds   => $cassandra_node_ips
    }

    class {'::tripleo::network::midonet::api':
      zookeeper_servers    => $zookeeper_node_ips,
      vip                  => hiera('public_virtual_ip'),
      keystone_ip          => hiera('public_virtual_ip'),
      keystone_admin_token => hiera('keystone::admin_token'),
      # TODO: create a 'bind' hiera key for api
      bind_address         => hiera('neutron::bind_host'),
      admin_password       => hiera('admin_password')
    }

    # Configure Neutron
    # TODO: when doing the composable midonet plugin, don't forget to
    # set service_plugins to an empty array in Hiera.
    class {'::neutron':
      service_plugins => []
    }

    class {'::neutron::plugins::midonet':
      midonet_api_ip    => hiera('public_virtual_ip'),
      keystone_tenant   => hiera('neutron::server::auth_tenant'),
      keystone_password => hiera('neutron::server::password')
    }
  }

  # Ceilometer
  case downcase(hiera('ceilometer_backend')) {
    /mysql/: {
      $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
    }
    default: {
      $mongo_node_string = join($mongo_node_ips_with_port, ',')
      $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
    }
  }
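  # Illustration (assumed values): two mongo nodes and a replset named
  # 'tripleo' produce
  # 'mongodb://192.0.2.10:27017,192.0.2.11:27017/ceilometer?replicaSet=tripleo'.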
  include ::ceilometer
  include ::ceilometer::config
  class { '::ceilometer::api' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::notification' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::central' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::collector' :
    manage_service => false,
    enabled        => false,
  }
  include ::ceilometer::expirer
  class { '::ceilometer::db' :
    database_connection => $ceilometer_database_connection,
    sync_db             => $sync_db,
  }
  include ::ceilometer::agent::auth
  include ::ceilometer::dispatcher::gnocchi

  Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }

  # httpd/apache and horizon
  # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
  class { '::apache' :
    service_enable => false,
    # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
  }
  include ::apache::mod::remoteip
  include ::apache::mod::status
  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    $_profile_support = 'cisco'
  } else {
    $_profile_support = 'None'
  }
  $neutron_options = merge({ 'profile_support' => $_profile_support }, hiera('horizon::neutron_options', undef))
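  # merge() lets the hiera-supplied options win on key conflicts, e.g.
  # (assumed data) hiera('horizon::neutron_options') = { 'enable_lb' => true }
  # yields { 'profile_support' => 'cisco', 'enable_lb' => true }.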

  $memcached_ipv6 = hiera('memcached_ipv6', false)
  if $memcached_ipv6 {
    $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
  } else {
    $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
  }

  class { '::horizon':
    cache_server_ip => $horizon_memcached_servers,
    neutron_options => $neutron_options,
  }

  # Aodh
  class { '::aodh' :
    database_connection => hiera('aodh_mysql_conn_string'),
  }
  include ::aodh::config
  include ::aodh::auth
  include ::aodh::client
  include ::aodh::wsgi::apache
  class { '::aodh::api':
    manage_service => false,
    enabled        => false,
    service_name   => 'httpd',
  }
  class { '::aodh::evaluator':
    manage_service => false,
    enabled        => false,
  }
  class { '::aodh::notifier':
    manage_service => false,
    enabled        => false,
  }
  class { '::aodh::listener':
    manage_service => false,
    enabled        => false,
  }

  # Gnocchi
  $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
  include ::gnocchi::client
  if $sync_db {
    include ::gnocchi::db::sync
  }
  include ::gnocchi::storage
  $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
  case $gnocchi_backend {
    'swift': { include ::gnocchi::storage::swift }
    'file': { include ::gnocchi::storage::file }
    'rbd': { include ::gnocchi::storage::ceph }
    default: { fail('Unrecognized gnocchi_backend parameter.') }
  }
  class { '::gnocchi':
    database_connection => $gnocchi_database_connection,
  }
  class { '::gnocchi::api' :
    manage_service => false,
    enabled        => false,
    service_name   => 'httpd',
  }
  class { '::gnocchi::wsgi::apache' :
    ssl => false,
  }
  class { '::gnocchi::metricd' :
    manage_service => false,
    enabled        => false,
  }
  class { '::gnocchi::statsd' :
    manage_service => false,
    enabled        => false,
  }

  hiera_include('controller_classes')

} #END STEP 4

if hiera('step') >= 5 {
  # We now make sure that the root db password is set to a random one.
  # At first installation /root/.my.cnf will be empty and we connect without a
  # root password. On subsequent runs or updates /root/.my.cnf will already be
  # populated with the proper credentials. This step happens on every node
  # because the SQL statement does not replicate across nodes automatically.
  exec { 'galera-set-root-password':
    command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
  }
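  # The trick here: on the very first run /root/.my.cnf is empty, so the
  # mysql client connects as root without a password and sets one; on later
  # runs --defaults-extra-file supplies the credentials written just below.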
  file { '/root/.my.cnf' :
    ensure  => file,
    mode    => '0600',
    owner   => 'root',
    group   => 'root',
    content => "[client]
user=root
password=\"${mysql_root_password}\"

[mysql]
user=root
password=\"${mysql_root_password}\"",
    require => Exec['galera-set-root-password'],
  }

  $nova_enable_db_purge = hiera('nova_enable_db_purge', true)

  if $nova_enable_db_purge {
    include ::nova::cron::archive_deleted_rows
  }

  if $pacemaker_master {

    pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::apache::params::service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::apache::params::service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
      constraint_type => 'order',
      first_resource  => 'galera-master',
      second_resource => 'openstack-core-clone',
      first_action    => 'promote',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['galera'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }

    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
      pacemaker::resource::service { 'tomcat':
        clone_params => 'interleave=true',
      }
      # midonet chain: keystone-->neutron-server-->dhcp-->metadata-->tomcat
      pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::server_service}-clone",
        second_resource => "${::neutron::params::dhcp_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
                            Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
      }
      pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
        second_resource => "${::neutron::params::metadata_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                            Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
      pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::metadata_agent_service}-clone",
        second_resource => 'tomcat-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
                            Pacemaker::Resource::Service['tomcat']],
      }
      pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
        source  => "${::neutron::params::metadata_agent_service}-clone",
        target  => "${::neutron::params::dhcp_agent_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                    Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
    }

    # Nova
    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::nova::params::consoleauth_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::colocation { 'nova-consoleauth-with-openstack-core':
      source  => "${::nova::params::consoleauth_service_name}-clone",
      target  => 'openstack-core-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
      second_resource => "${::nova::params::vncproxy_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
      source  => "${::nova::params::vncproxy_service_name}-clone",
      target  => "${::nova::params::consoleauth_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::vncproxy_service_name}-clone",
      second_resource => "${::nova::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                          Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
      source  => "${::nova::params::api_service_name}-clone",
      target  => "${::nova::params::vncproxy_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                  Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::api_service_name}-clone",
      second_resource => "${::nova::params::scheduler_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                          Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
      source  => "${::nova::params::scheduler_service_name}-clone",
      target  => "${::nova::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::scheduler_service_name}-clone",
      second_resource => "${::nova::params::conductor_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                          Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
      source  => "${::nova::params::conductor_service_name}-clone",
      target  => "${::nova::params::scheduler_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
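    # Net effect of the chain above: services start in order openstack-core ->
    # consoleauth -> vncproxy -> api -> scheduler -> conductor, each colocated
    # with its predecessor via an INFINITY score.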

    # Ceilometer and Aodh
    case downcase(hiera('ceilometer_backend')) {
      /mysql/: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => Pacemaker::Resource::Ocf['openstack-core'],
        }
      }
      default: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => [Pacemaker::Resource::Ocf['openstack-core'],
                           Pacemaker::Resource::Service[$::mongodb::params::service_name]],
        }
      }
    }
    pacemaker::resource::service { $::ceilometer::params::collector_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
      clone_params => 'interleave=true',
    }
    # Fedora doesn't know the `require-all` parameter for constraints yet
    if $::operatingsystem == 'Fedora' {
      $redis_ceilometer_constraint_params = undef
      $redis_aodh_constraint_params = undef
    } else {
      $redis_ceilometer_constraint_params = 'require-all=false'
      $redis_aodh_constraint_params = 'require-all=false'
    }
    pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
      constraint_type   => 'order',
      first_resource    => 'redis-master',
      second_resource   => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_ceilometer_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
    }
    pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
      constraint_type   => 'order',
      first_resource    => 'redis-master',
      second_resource   => "${::aodh::params::evaluator_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_aodh_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
    }
    pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
      second_resource => "${::ceilometer::params::collector_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::collector_service_name}-clone",
      second_resource => "${::ceilometer::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
      source  => "${::ceilometer::params::api_service_name}-clone",
      target  => "${::ceilometer::params::collector_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    # Aodh
    pacemaker::resource::service { $::aodh::params::evaluator_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::aodh::params::notifier_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::aodh::params::listener_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
      constraint_type => 'order',
      first_resource  => "${::aodh::params::evaluator_service_name}-clone",
      second_resource => "${::aodh::params::notifier_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                          Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
    }
    pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
      source  => "${::aodh::params::notifier_service_name}-clone",
      target  => "${::aodh::params::evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                  Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
    }
    pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
      constraint_type => 'order',
      first_resource  => "${::aodh::params::evaluator_service_name}-clone",
      second_resource => "${::aodh::params::listener_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                          Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
    }
    pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
      source  => "${::aodh::params::listener_service_name}-clone",
      target  => "${::aodh::params::evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                  Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
    }
    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
        constraint_type => 'order',
        first_resource  => "${::mongodb::params::service_name}-clone",
        second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                            Pacemaker::Resource::Service[$::mongodb::params::service_name]],
      }
    }

    # Gnocchi
    pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::gnocchi::params::statsd_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint':
      constraint_type => 'order',
      first_resource  => "${::gnocchi::params::metricd_service_name}-clone",
      second_resource => "${::gnocchi::params::statsd_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
                          Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
    }
    pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation':
      source  => "${::gnocchi::params::statsd_service_name}-clone",
      target  => "${::gnocchi::params::metricd_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
                  Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
    }

    # Horizon and Keystone
    pacemaker::resource::service { $::apache::params::service_name:
      clone_params     => 'interleave=true',
      verify_on_create => true,
      require          => [File['/etc/keystone/ssl/certs/ca.pem'],
                           File['/etc/keystone/ssl/private/signing_key.pem'],
                           File['/etc/keystone/ssl/certs/signing_cert.pem']],
    }

    # VSM
    if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
      pacemaker::resource::ocf { 'vsm-p' :
        ocf_agent_name  => 'heartbeat:VirtualDomain',
        resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
        require         => Class['n1k_vsm'],
        meta_params     => 'resource-stickiness=INFINITY',
      }
      if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
        pacemaker::resource::ocf { 'vsm-s' :
          ocf_agent_name  => 'heartbeat:VirtualDomain',
          resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
          require         => Class['n1k_vsm'],
          meta_params     => 'resource-stickiness=INFINITY',
        }
        pacemaker::constraint::colocation { 'vsm-colocation-constraint':
          source  => 'vsm-p',
          target  => 'vsm-s',
          score   => '-INFINITY',
          require => [Pacemaker::Resource::Ocf['vsm-p'],
                      Pacemaker::Resource::Ocf['vsm-s']],
        }
      }
    }

  }

} #END STEP 5

$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
package_manifest { $package_manifest_name: ensure => present }
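# e.g. at step 5, join() concatenates (no separator) to
# '/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker5'.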