1 # Copyright 2015 Red Hat, Inc.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
21 # TODO(jistr): use pcs resource provider instead of just no-ops
# NOTE(review): this chunk is a partial extraction — each line is prefixed with
# its original file line number, and the gaps in that numbering show that many
# lines (opening collectors, else-branches, closing braces) are missing here.
# Comments below describe only what the visible lines establish.
#
# Fragment of a Service collector override: services tagged as aodh/ceilometer/
# gnocchi get a no-op restart command, presumably so Puppet does not restart
# services that Pacemaker manages — TODO confirm against the full file.
23 tag == 'aodh-service' or
24 tag == 'ceilometer-service' or
25 tag == 'gnocchi-service'
28 restart => '/bin/true',
# Base node plumbing: package handling and firewall rules.
33 include ::tripleo::packages
34 include ::tripleo::firewall
# A single node is elected "pacemaker master": the one whose hostname matches
# the hiera-selected bootstrap node (case-insensitive). The else-branch line
# assigning false is visible at original line 40; the intervening lines are
# missing from this extraction.
36 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
37 $pacemaker_master = true
40 $pacemaker_master = false
# Fencing is only enabled when both requested via hiera AND deployment has
# reached step 5 (all resources exist by then — see stonith ordering below).
44 $enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
45 $enable_load_balancer = hiera('enable_load_balancer', true)
47 # When to start and enable services which haven't been Pacemakerized
48 # FIXME: remove when we start all OpenStack services using Pacemaker
49 # (occurrences of this variable will be gradually replaced with false)
50 $non_pcmk_start = hiera('step') >= 5
# Step 1: base OS tuning and Pacemaker/Corosync cluster formation.
# NOTE(review): closing braces and some lines are missing from this extraction
# (original line numbers are embedded and non-contiguous); code left untouched.
52 if hiera('step') >= 1 {
# Load hiera-declared kernel modules and sysctl settings; ensure every module
# is loaded before any sysctl value is applied (some sysctls need the module).
54 create_resources(kmod::load, hiera('kernel_modules'), {})
55 create_resources(sysctl::value, hiera('sysctl_settings'), {})
56 Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
# Cluster member list: comma-separated hiera value turned into the
# space-separated lowercase form pcs expects.
58 $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
59 $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
# Two variants of the pcs cluster-setup extra args (with/without --ipv6);
# the if/else wrapping them is among the missing lines.
61 $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' }
63 $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) }
65 class { '::pacemaker':
66 hacluster_pwd => hiera('hacluster_pwd'),
# Only the elected master actually runs the cluster-setup command.
68 class { '::pacemaker::corosync':
69 cluster_members => $pacemaker_cluster_members,
70 setup_cluster => $pacemaker_master,
71 cluster_setup_extras => $cluster_setup_extras,
# Stonith stays disabled until fencing is both requested and safe (step >= 5).
73 class { '::pacemaker::stonith':
74 disable => !$enable_fencing,
77 include ::tripleo::fencing
79 # enable stonith after all Pacemaker resources have been created
80 Pcmk_resource<||> -> Class['tripleo::fencing']
81 Pcmk_constraint<||> -> Class['tripleo::fencing']
82 Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing']
83 # enable stonith after all fencing devices have been created
84 Class['tripleo::fencing'] -> Class['pacemaker::stonith']
87 # FIXME(gfidente): sets 200secs as default start timeout op
88 # param; until we can use pcmk global defaults we'll still
89 # need to add it to every resource which redefines op params
# Resource-default block: applies this op_params to every
# Pacemaker::Resource::Service declared in this scope.
90 Pacemaker::Resource::Service {
91 op_params => 'start timeout=200s stop timeout=200s',
# MongoDB params are only needed when Ceilometer uses the mongodb backend.
94 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
95 include ::mongodb::params
# Pick the MySQL config file: galera.cnf when Galera clustering is enabled
# (the default), plain server.cnf otherwise. The else line is visible at
# original line 102; surrounding braces are missing from this extraction.
99 if str2bool(hiera('enable_galera', true)) {
100 $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
102 $mysql_config_file = '/etc/my.cnf.d/server.cnf'
# Galera membership, lowercased; node count is used later for master-max.
104 $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
105 $galera_nodes_count = count(split($galera_nodes, ','))
107 # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
108 # set bind-address to a hostname instead of an ip address; to move Mysql
109 # from internal_api on another network we'll have to customize both
110 # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
111 $mysql_bind_host = hiera('mysql_bind_host')
# mysqld/Galera override options (the enclosing hash declaration and its
# 'mysqld'/'galera' section keys appear to be among the missing lines).
114 'skip-name-resolve' => '1',
115 'binlog_format' => 'ROW',
116 'default-storage-engine' => 'innodb',
117 'innodb_autoinc_lock_mode' => '2',
118 'innodb_locks_unsafe_for_binlog'=> '1',
119 'query_cache_size' => '0',
120 'query_cache_type' => '0',
121 'bind-address' => $::hostname,
122 'max_connections' => hiera('mysql_max_connections'),
123 'open_files_limit' => '-1',
125 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so',
126 'wsrep_cluster_name' => 'galera_cluster',
127 'wsrep_cluster_address' => "gcomm://${galera_nodes}",
128 'wsrep_slave_threads' => '1',
129 'wsrep_certify_nonPK' => '1',
130 'wsrep_max_ws_rows' => '131072',
131 'wsrep_max_ws_size' => '1073741824',
132 'wsrep_debug' => '0',
133 'wsrep_convert_LOCK_to_trx' => '0',
134 'wsrep_retry_autocommit' => '1',
135 'wsrep_auto_increment_control' => '1',
136 'wsrep_drupal_282555_workaround'=> '0',
137 'wsrep_causal_reads' => '0',
138 'wsrep_sst_method' => 'rsync',
# Brackets around the bind host make this IPv6-safe in listen_addr.
139 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
# The MySQL server itself: service is NOT managed/enabled by Puppet here —
# Pacemaker (the 'galera' OCF resource below) owns the service lifecycle.
# Default accounts are removed only on the pacemaker master node.
143 class { '::mysql::server':
144 create_root_user => false,
145 create_root_my_cnf => false,
146 config_file => $mysql_config_file,
147 override_options => $mysqld_options,
148 remove_default_accounts => $pacemaker_master,
149 service_manage => false,
150 service_enabled => false,
# Step 2: cluster-wide connection data plus master-only Pacemaker resources,
# clustercheck plumbing and database schema creation.
# NOTE(review): this extraction omits many lines (else-branches, closing
# braces, some resource parameters); code left byte-identical.
155 if hiera('step') >= 2 {
158 # NOTE(gfidente): the following vars are needed on all nodes so they
159 # need to stay out of pacemaker_master conditional.
160 # The addresses mangling will hopefully go away when we'll be able to
161 # configure the connection string via hostnames, until then, we need to pass
162 # the list of IPv6 addresses *with* port and without the brackets as 'members'
163 # argument for the 'mongodb_replset' resource.
164 if str2bool(hiera('mongodb::server::ipv6', false)) {
# IPv6: wrap each address in [] before appending :27017; also keep a
# bracket-free variant ("nobr") for the replset 'members' argument.
165 $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
166 $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
167 $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
# IPv4 branch (the else line is missing): both variants are identical.
169 $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
170 $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
172 $mongodb_replset = hiera('mongodb::server::replset')
# Resources below are created on the elected master node only; pcs
# replicates cluster state to the other members.
174 if $pacemaker_master {
176 include ::pacemaker::resource_defaults
178 # Create an openstack-core dummy resource. See RHBZ 1290121
179 pacemaker::resource::ocf { 'openstack-core':
180 ocf_agent_name => 'heartbeat:Dummy',
181 clone_params => true,
# Galera as a multi-master OCF resource; master-max equals the node count
# so every Galera node can be promoted. Gated on mysql::server and ordered
# before the galera-ready readiness probe.
184 pacemaker::resource::ocf { 'galera' :
185 ocf_agent_name => 'heartbeat:galera',
186 op_params => 'promote timeout=300s on-fail=block',
188 meta_params => "master-max=${galera_nodes_count} ordered=true",
189 resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
190 require => Class['::mysql::server'],
191 before => Exec['galera-ready'],
194 $mysql_root_password = hiera('mysql::server::root_password')
195 $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
196 # This step is to create a sysconfig clustercheck file with the root user and empty password
197 # on the first install only (because later on the clustercheck db user will be used)
198 # We are using exec and not file in order to not have duplicate definition errors in puppet
199 # when we later set the the file to contain the clustercheck data
200 exec { 'create-root-sysconfig-clustercheck':
201 command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
202 unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
# Readiness gate: succeeds only once clustercheck reports Galera synced;
# schema-creation resources below depend on it.
205 exec { 'galera-ready' :
206 command => '/usr/bin/clustercheck >/dev/null',
210 environment => ['AVAILABLE_WHEN_READONLY=0'],
211 require => Exec['create-root-sysconfig-clustercheck'],
# Expose clustercheck over xinetd so HAProxy can health-check Galera.
214 xinetd::service { 'galera-monitor' :
216 server => '/usr/bin/clustercheck',
217 per_source => 'UNLIMITED',
218 log_on_success => '',
219 log_on_failure => 'HOST',
221 service_type => 'UNLISTED',
224 require => Exec['create-root-sysconfig-clustercheck'],
226 # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
227 # to it in a later step. We do this only on one node as it will replicate on
228 # the other members. We also make sure that the permissions are the minimum necessary
229 if $pacemaker_master {
230 mysql_user { 'clustercheck@localhost':
232 password_hash => mysql_password($mysql_clustercheck_password),
233 require => Exec['galera-ready'],
# PROCESS is the only privilege clustercheck needs (wsrep status query).
235 mysql_grant { 'clustercheck@localhost/*.*':
237 options => ['GRANT'],
238 privileges => ['PROCESS'],
240 user => 'clustercheck@localhost',
244 # Create all the database schemas
# Each schema class waits for Galera to be synced before running.
246 if downcase(hiera('ceilometer_backend')) == 'mysql' {
247 class { '::ceilometer::db::mysql':
248 require => Exec['galera-ready'],
252 if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
253 class { '::gnocchi::db::mysql':
254 require => Exec['galera-ready'],
258 class { '::aodh::db::mysql':
259 require => Exec['galera-ready'],
# Step 3/4: switch clustercheck to its dedicated DB user, then configure the
# (non-Pacemaker-managed) OpenStack service classes: nova config, MidoNet,
# Ceilometer, Horizon, Aodh and Gnocchi. All 'manage_service => false'
# classes are started by Pacemaker resources declared at step 5 instead.
# NOTE(review): extraction gaps — else lines, braces and some resource
# titles/parameters are missing; code left byte-identical.
265 if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
266 # At this stage we are guaranteed that the clustercheck db user exists
267 # so we switch the resource agent to use it.
268 file { '/etc/sysconfig/clustercheck' :
273 content => "MYSQL_USERNAME=clustercheck\n
274 MYSQL_PASSWORD='${mysql_clustercheck_password}'\n
275 MYSQL_HOST=localhost\n",
# Memcached server list for nova, chosen from the v6 or v4 hiera key
# depending on nova::use_ipv6 (the if/else lines are partially missing).
278 $nova_ipv6 = hiera('nova::use_ipv6', false)
280 $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
282 $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
286 memcached_servers => $memcached_servers
289 include ::nova::config
# MidoNet-specific infrastructure (zookeeper/cassandra/agent/api), only when
# the MidoNet core plugin is selected for neutron.
291 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
293 # TODO(devvesa) provide non-controller ips for these services
294 $zookeeper_node_ips = hiera('neutron_api_node_ips')
295 $cassandra_node_ips = hiera('neutron_api_node_ips')
297 # Run zookeeper in the controller if configured
298 if hiera('enable_zookeeper_on_controller') {
299 class {'::tripleo::cluster::zookeeper':
300 zookeeper_server_ips => $zookeeper_node_ips,
301 # TODO: create a 'bind' hiera key for zookeeper
302 zookeeper_client_ip => hiera('neutron::bind_host'),
303 zookeeper_hostnames => split(hiera('controller_node_names'), ',')
307 # Run cassandra in the controller if configured
308 if hiera('enable_cassandra_on_controller') {
309 class {'::tripleo::cluster::cassandra':
310 cassandra_servers => $cassandra_node_ips,
311 # TODO: create a 'bind' hiera key for cassandra
312 cassandra_ip => hiera('neutron::bind_host'),
316 class {'::tripleo::network::midonet::agent':
317 zookeeper_servers => $zookeeper_node_ips,
318 cassandra_seeds => $cassandra_node_ips
321 class {'::tripleo::network::midonet::api':
322 zookeeper_servers => $zookeeper_node_ips,
323 vip => hiera('public_virtual_ip'),
324 keystone_ip => hiera('public_virtual_ip'),
325 keystone_admin_token => hiera('keystone::admin_token'),
326 # TODO: create a 'bind' hiera key for api
327 bind_address => hiera('neutron::bind_host'),
328 admin_password => hiera('admin_password')
332 # TODO: when doing the composable midonet plugin, don't forget to
333 # set service_plugins to an empty array in Hiera.
335 service_plugins => []
340 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
341 class {'::neutron::plugins::midonet':
342 midonet_api_ip => hiera('public_virtual_ip'),
343 keystone_tenant => hiera('neutron::server::auth_tenant'),
344 keystone_password => hiera('neutron::server::password')
# Ceilometer DB connection string: mysql conn string or a mongodb URI built
# from the replset member list computed at step 2.
349 case downcase(hiera('ceilometer_backend')) {
351 $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
354 $mongo_node_string = join($mongo_node_ips_with_port, ',')
355 $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
# Ceilometer services: configured here, lifecycle handed to Pacemaker.
359 include ::ceilometer::config
360 class { '::ceilometer::api' :
361 manage_service => false,
364 class { '::ceilometer::agent::notification' :
365 manage_service => false,
368 class { '::ceilometer::agent::central' :
369 manage_service => false,
372 class { '::ceilometer::collector' :
373 manage_service => false,
376 include ::ceilometer::expirer
377 class { '::ceilometer::db' :
378 database_connection => $ceilometer_database_connection,
381 include ::ceilometer::agent::auth
382 include ::ceilometer::dispatcher::gnocchi
# Spread the expirer cron start over a random second within a day to avoid
# all controllers expiring at once ($(( ... % 86400 )) at runtime).
384 Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
386 # httpd/apache and horizon
387 # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
389 service_enable => false,
390 # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
392 include ::apache::mod::remoteip
393 include ::apache::mod::status
394 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
395 $_profile_support = 'cisco'
397 $_profile_support = 'None'
399 $neutron_options = merge({'profile_support' => $_profile_support },hiera('horizon::neutron_options',undef))
401 $memcached_ipv6 = hiera('memcached_ipv6', false)
403 $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
405 $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
409 cache_server_ip => $horizon_memcached_servers,
410 neutron_options => $neutron_options,
# Aodh: API runs under httpd via wsgi; workers unmanaged (Pacemaker owns them).
415 database_connection => hiera('aodh_mysql_conn_string'),
417 include ::aodh::config
419 include ::aodh::client
420 include ::aodh::wsgi::apache
421 class { '::aodh::api':
422 manage_service => false,
424 service_name => 'httpd',
426 class { '::aodh::evaluator':
427 manage_service => false,
430 class { '::aodh::notifier':
431 manage_service => false,
434 class { '::aodh::listener':
435 manage_service => false,
# Gnocchi: storage backend selected from hiera ('swift' default); API under httpd.
440 $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
441 include ::gnocchi::client
443 include ::gnocchi::db::sync
445 include ::gnocchi::storage
446 $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
447 case $gnocchi_backend {
448 'swift': { include ::gnocchi::storage::swift }
449 'file': { include ::gnocchi::storage::file }
450 'rbd': { include ::gnocchi::storage::ceph }
451 default: { fail('Unrecognized gnocchi_backend parameter.') }
454 database_connection => $gnocchi_database_connection,
456 class { '::gnocchi::api' :
457 manage_service => false,
459 service_name => 'httpd',
461 class { '::gnocchi::wsgi::apache' :
464 class { '::gnocchi::metricd' :
465 manage_service => false,
468 class { '::gnocchi::statsd' :
469 manage_service => false,
# Extension hook: extra per-deployment classes from hiera.
473 hiera_include('controller_classes')
# Step 5: lock down the Galera root account and persist its credentials,
# then optionally enable nova DB archiving.
477 if hiera('step') >= 5 {
478 # We now make sure that the root db password is set to a random one
479 # At first installation /root/.my.cnf will be empty and we connect without a root
480 # password. On second runs or updates /root/.my.cnf will already be populated
481 # with proper credentials. This step happens on every node because this sql
482 # statement does not automatically replicate across nodes.
483 exec { 'galera-set-root-password':
484 command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
# Persist the credentials so subsequent runs authenticate; the file body
# between these lines (client/mysql sections, mode/owner) is missing from
# this extraction — only the password lines are visible.
486 file { '/root/.my.cnf' :
493 password=\"${mysql_root_password}\"
497 password=\"${mysql_root_password}\"",
498 require => Exec['galera-set-root-password'],
# Nova deleted-row archiving cron, on by default via hiera flag.
501 $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
503 if $nova_enable_db_purge {
504 include ::nova::cron::archive_deleted_rows
# Step 5, master only: Pacemaker ordering/colocation constraints.
# Base chain: galera (promote) -> openstack-core -> httpd.
# NOTE(review): closing braces between resources are missing from this
# extraction; code left byte-identical.
507 if $pacemaker_master {
509 pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
510 constraint_type => 'order',
511 first_resource => 'openstack-core-clone',
512 second_resource => "${::apache::params::service_name}-clone",
513 first_action => 'start',
514 second_action => 'start',
515 require => [Pacemaker::Resource::Service[$::apache::params::service_name],
516 Pacemaker::Resource::Ocf['openstack-core']],
518 pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
519 constraint_type => 'order',
520 first_resource => 'galera-master',
521 second_resource => 'openstack-core-clone',
522 first_action => 'promote',
523 second_action => 'start',
524 require => [Pacemaker::Resource::Ocf['galera'],
525 Pacemaker::Resource::Ocf['openstack-core']],
# MidoNet only: tomcat resource plus the neutron-server -> dhcp -> metadata
# -> tomcat ordering chain with a dhcp/metadata colocation.
528 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
529 pacemaker::resource::service {'tomcat':
530 clone_params => 'interleave=true',
533 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
534 #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
535 pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
536 constraint_type => 'order',
537 first_resource => "${::neutron::params::server_service}-clone",
538 second_resource => "${::neutron::params::dhcp_agent_service}-clone",
539 first_action => 'start',
540 second_action => 'start',
541 require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
542 Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
544 pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
545 constraint_type => 'order',
546 first_resource => "${::neutron::params::dhcp_agent_service}-clone",
547 second_resource => "${::neutron::params::metadata_agent_service}-clone",
548 first_action => 'start',
549 second_action => 'start',
550 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
551 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
553 pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
554 constraint_type => 'order',
555 first_resource => "${::neutron::params::metadata_agent_service}-clone",
556 second_resource => 'tomcat-clone',
557 first_action => 'start',
558 second_action => 'start',
559 require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
560 Pacemaker::Resource::Service['tomcat']],
562 pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
563 source => "${::neutron::params::metadata_agent_service}-clone",
564 target => "${::neutron::params::dhcp_agent_service}-clone",
566 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
567 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
# Nova start-order chain with pairwise colocations:
# openstack-core -> consoleauth -> vncproxy -> api -> scheduler -> conductor.
# Each ordering constraint is paired with a colocation keeping the dependent
# service on the same node as its predecessor.
572 pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
573 constraint_type => 'order',
574 first_resource => 'openstack-core-clone',
575 second_resource => "${::nova::params::consoleauth_service_name}-clone",
576 first_action => 'start',
577 second_action => 'start',
578 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
579 Pacemaker::Resource::Ocf['openstack-core']],
581 pacemaker::constraint::colocation { 'nova-consoleauth-with-openstack-core':
582 source => "${::nova::params::consoleauth_service_name}-clone",
583 target => 'openstack-core-clone',
585 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
586 Pacemaker::Resource::Ocf['openstack-core']],
588 pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
589 constraint_type => 'order',
590 first_resource => "${::nova::params::consoleauth_service_name}-clone",
591 second_resource => "${::nova::params::vncproxy_service_name}-clone",
592 first_action => 'start',
593 second_action => 'start',
594 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
595 Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
597 pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
598 source => "${::nova::params::vncproxy_service_name}-clone",
599 target => "${::nova::params::consoleauth_service_name}-clone",
601 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
602 Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
604 pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
605 constraint_type => 'order',
606 first_resource => "${::nova::params::vncproxy_service_name}-clone",
607 second_resource => "${::nova::params::api_service_name}-clone",
608 first_action => 'start',
609 second_action => 'start',
610 require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
611 Pacemaker::Resource::Service[$::nova::params::api_service_name]],
613 pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
614 source => "${::nova::params::api_service_name}-clone",
615 target => "${::nova::params::vncproxy_service_name}-clone",
617 require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
618 Pacemaker::Resource::Service[$::nova::params::api_service_name]],
620 pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
621 constraint_type => 'order',
622 first_resource => "${::nova::params::api_service_name}-clone",
623 second_resource => "${::nova::params::scheduler_service_name}-clone",
624 first_action => 'start',
625 second_action => 'start',
626 require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
627 Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
629 pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
630 source => "${::nova::params::scheduler_service_name}-clone",
631 target => "${::nova::params::api_service_name}-clone",
633 require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
634 Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
636 pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
637 constraint_type => 'order',
638 first_resource => "${::nova::params::scheduler_service_name}-clone",
639 second_resource => "${::nova::params::conductor_service_name}-clone",
640 first_action => 'start',
641 second_action => 'start',
642 require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
643 Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
645 pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
646 source => "${::nova::params::conductor_service_name}-clone",
647 target => "${::nova::params::scheduler_service_name}-clone",
649 require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
650 Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
653 # Ceilometer and Aodh
# The central agent resource depends on mongodb's service only when the
# mongodb backend is in use (the case branch labels are missing from this
# extraction; the two variants below are the per-backend declarations).
654 case downcase(hiera('ceilometer_backend')) {
656 pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
657 clone_params => 'interleave=true',
658 require => Pacemaker::Resource::Ocf['openstack-core'],
662 pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
663 clone_params => 'interleave=true',
664 require => [Pacemaker::Resource::Ocf['openstack-core'],
665 Pacemaker::Resource::Service[$::mongodb::params::service_name]],
# Remaining ceilometer services as interleaved clones.
669 pacemaker::resource::service { $::ceilometer::params::collector_service_name :
670 clone_params => 'interleave=true',
672 pacemaker::resource::service { $::ceilometer::params::api_service_name :
673 clone_params => 'interleave=true',
675 pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
676 clone_params => 'interleave=true',
678 # Fedora doesn't know `require-all` parameter for constraints yet
679 if $::operatingsystem == 'Fedora' {
680 $redis_ceilometer_constraint_params = undef
681 $redis_aodh_constraint_params = undef
683 $redis_ceilometer_constraint_params = 'require-all=false'
684 $redis_aodh_constraint_params = 'require-all=false'
# redis must be promoted before the ceilometer central agent and the aodh
# evaluator start (require-all=false relaxes this where supported).
686 pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
687 constraint_type => 'order',
688 first_resource => 'redis-master',
689 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
690 first_action => 'promote',
691 second_action => 'start',
692 constraint_params => $redis_ceilometer_constraint_params,
693 require => [Pacemaker::Resource::Ocf['redis'],
694 Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
696 pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
697 constraint_type => 'order',
698 first_resource => 'redis-master',
699 second_resource => "${::aodh::params::evaluator_service_name}-clone",
700 first_action => 'promote',
701 second_action => 'start',
702 constraint_params => $redis_aodh_constraint_params,
703 require => [Pacemaker::Resource::Ocf['redis'],
704 Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
# keystone (openstack-core) before ceilometer central/notification; then
# central -> collector -> api ordering with an api/collector colocation.
706 pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
707 constraint_type => 'order',
708 first_resource => 'openstack-core-clone',
709 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
710 first_action => 'start',
711 second_action => 'start',
712 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
713 Pacemaker::Resource::Ocf['openstack-core']],
715 pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
716 constraint_type => 'order',
717 first_resource => 'openstack-core-clone',
718 second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
719 first_action => 'start',
720 second_action => 'start',
721 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
722 Pacemaker::Resource::Ocf['openstack-core']],
724 pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
725 constraint_type => 'order',
726 first_resource => "${::ceilometer::params::agent_central_service_name}-clone",
727 second_resource => "${::ceilometer::params::collector_service_name}-clone",
728 first_action => 'start',
729 second_action => 'start',
730 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
731 Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
733 pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
734 constraint_type => 'order',
735 first_resource => "${::ceilometer::params::collector_service_name}-clone",
736 second_resource => "${::ceilometer::params::api_service_name}-clone",
737 first_action => 'start',
738 second_action => 'start',
739 require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
740 Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
742 pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
743 source => "${::ceilometer::params::api_service_name}-clone",
744 target => "${::ceilometer::params::collector_service_name}-clone",
746 require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
747 Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
# Aodh worker resources and their evaluator -> notifier / listener chains.
750 pacemaker::resource::service { $::aodh::params::evaluator_service_name :
751 clone_params => 'interleave=true',
753 pacemaker::resource::service { $::aodh::params::notifier_service_name :
754 clone_params => 'interleave=true',
756 pacemaker::resource::service { $::aodh::params::listener_service_name :
757 clone_params => 'interleave=true',
759 pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
760 constraint_type => 'order',
761 first_resource => "${::aodh::params::evaluator_service_name}-clone",
762 second_resource => "${::aodh::params::notifier_service_name}-clone",
763 first_action => 'start',
764 second_action => 'start',
765 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
766 Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
768 pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
769 source => "${::aodh::params::notifier_service_name}-clone",
770 target => "${::aodh::params::evaluator_service_name}-clone",
772 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
773 Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
775 pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
776 constraint_type => 'order',
777 first_resource => "${::aodh::params::evaluator_service_name}-clone",
778 second_resource => "${::aodh::params::listener_service_name}-clone",
779 first_action => 'start',
780 second_action => 'start',
781 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
782 Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
784 pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
785 source => "${::aodh::params::listener_service_name}-clone",
786 target => "${::aodh::params::evaluator_service_name}-clone",
788 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
789 Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
# With the mongodb backend, mongod must also start before the central agent.
791 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
792 pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
793 constraint_type => 'order',
794 first_resource => "${::mongodb::params::service_name}-clone",
795 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
796 first_action => 'start',
797 second_action => 'start',
798 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
799 Pacemaker::Resource::Service[$::mongodb::params::service_name]],
# Gnocchi: metricd and statsd as interleaved clones, metricd ordered before
# statsd and statsd colocated with metricd.
804 pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
805 clone_params => 'interleave=true',
807 pacemaker::resource::service { $::gnocchi::params::statsd_service_name :
808 clone_params => 'interleave=true',
810 pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint':
811 constraint_type => 'order',
812 first_resource => "${::gnocchi::params::metricd_service_name}-clone",
813 second_resource => "${::gnocchi::params::statsd_service_name}-clone",
814 first_action => 'start',
815 second_action => 'start',
816 require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
817 Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
819 pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation':
820 source => "${::gnocchi::params::statsd_service_name}-clone",
821 target => "${::gnocchi::params::metricd_service_name}-clone",
823 require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
824 Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
827 # Horizon and Keystone
# httpd clone gated on the keystone SSL/cert files being in place;
# verify_on_create makes the resource creation fail fast if pcs rejects it.
828 pacemaker::resource::service { $::apache::params::service_name:
829 clone_params => 'interleave=true',
830 verify_on_create => true,
831 require => [File['/etc/keystone/ssl/certs/ca.pem'],
832 File['/etc/keystone/ssl/private/signing_key.pem'],
833 File['/etc/keystone/ssl/certs/signing_cert.pem']],
# Cisco N1KV only: primary/secondary VSM virtual machines as VirtualDomain
# OCF resources, kept apart by a -INFINITY colocation so both VSMs never land
# on the same node. resource-stickiness=INFINITY prevents migration.
837 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
838 pacemaker::resource::ocf { 'vsm-p' :
839 ocf_agent_name => 'heartbeat:VirtualDomain',
840 resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
841 require => Class['n1k_vsm'],
842 meta_params => 'resource-stickiness=INFINITY',
844 if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
845 pacemaker::resource::ocf { 'vsm-s' :
846 ocf_agent_name => 'heartbeat:VirtualDomain',
847 resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
848 require => Class['n1k_vsm'],
849 meta_params => 'resource-stickiness=INFINITY',
# Anti-colocation: source/target lines are missing from this extraction;
# only the score and requires are visible.
851 pacemaker::constraint::colocation { 'vsm-colocation-contraint':
854 score => '-INFINITY',
855 require => [Pacemaker::Resource::Ocf['vsm-p'],
856 Pacemaker::Resource::Ocf['vsm-s']],
# Record the set of installed packages for this manifest/step combination
# under /var/lib/tripleo/installed-packages (used for update accounting).
865 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
866 package_manifest{$package_manifest_name: ensure => present}