# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(jistr): use pcs resource provider instead of just no-ops
Service <|
  tag == 'aodh-service' or
  tag == 'ceilometer-service' or
  tag == 'gnocchi-service' or
  tag == 'neutron-service' or
  tag == 'nova-service'
|> {
  hasrestart => true,
  restart    => '/bin/true',
  start      => '/bin/true',
  stop       => '/bin/true',
}

include ::tripleo::packages
include ::tripleo::firewall

if $::hostname == downcase(hiera('bootstrap_nodeid')) {
  $pacemaker_master = true
  $sync_db = true
} else {
  $pacemaker_master = false
  $sync_db = false
}

$enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
$enable_load_balancer = hiera('enable_load_balancer', true)

# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
$non_pcmk_start = hiera('step') >= 5
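
# NOTE: this manifest is applied several times during a deployment, once per
# deployment step; hiera('step') increases on each pass, so every
# `if hiera('step') >= N` block below builds on the state left by earlier steps.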

if hiera('step') >= 1 {

  create_resources(kmod::load, hiera('kernel_modules'), {})
  create_resources(sysctl::value, hiera('sysctl_settings'), {})
  Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
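
  # create_resources() expands each hash entry from hiera into a resource of
  # the named type; e.g. (illustrative hiera data, not shipped by this file)
  # a kernel_modules value of {'nf_conntrack': {}} becomes the equivalent of:
  #   kmod::load { 'nf_conntrack': }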

  $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
  $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
  if $corosync_ipv6 {
    $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' }
  } else {
    $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) }
  }
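  # The extras hash is handed to the pacemaker module, which turns each
  # key/value pair into a flag on the underlying cluster setup command;
  # e.g. (illustrative) the IPv6 case above adds `--token 1000 --ipv6`.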
  class { '::pacemaker':
    hacluster_pwd => hiera('hacluster_pwd'),
  } ->
  class { '::pacemaker::corosync':
    cluster_members      => $pacemaker_cluster_members,
    setup_cluster        => $pacemaker_master,
    cluster_setup_extras => $cluster_setup_extras,
  }
  class { '::pacemaker::stonith':
    disable => !$enable_fencing,
  }
  if $enable_fencing {
    include ::tripleo::fencing

    # enable stonith after all Pacemaker resources have been created
    Pcmk_resource<||> -> Class['tripleo::fencing']
    Pcmk_constraint<||> -> Class['tripleo::fencing']
    Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing']
    # enable stonith after all fencing devices have been created
    Class['tripleo::fencing'] -> Class['pacemaker::stonith']
  }

  # FIXME(gfidente): sets 200secs as the default start timeout op
  # param; until we can use pcmk global defaults we'll still
  # need to add it to every resource which redefines op params
  Pacemaker::Resource::Service {
    op_params => 'start timeout=200s stop timeout=200s',
  }

  if downcase(hiera('ceilometer_backend')) == 'mongodb' {
    include ::mongodb::params
  }

  # Galera
  if str2bool(hiera('enable_galera', true)) {
    $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
  } else {
    $mysql_config_file = '/etc/my.cnf.d/server.cnf'
  }
  $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
  $galera_nodes_count = count(split($galera_nodes, ','))

  # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
  # set bind-address to a hostname instead of an ip address; to move MySQL
  # from internal_api to another network we'll have to customize both
  # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
  $mysql_bind_host = hiera('mysql_bind_host')
  $mysqld_options = {
    'mysqld' => {
      'skip-name-resolve'             => '1',
      'binlog_format'                 => 'ROW',
      'default-storage-engine'        => 'innodb',
      'innodb_autoinc_lock_mode'      => '2',
      'innodb_locks_unsafe_for_binlog'=> '1',
      'query_cache_size'              => '0',
      'query_cache_type'              => '0',
      'bind-address'                  => $::hostname,
      'max_connections'               => hiera('mysql_max_connections'),
      'open_files_limit'              => '-1',
      'wsrep_provider'                => '/usr/lib64/galera/libgalera_smm.so',
      'wsrep_cluster_name'            => 'galera_cluster',
      'wsrep_cluster_address'         => "gcomm://${galera_nodes}",
      'wsrep_slave_threads'           => '1',
      'wsrep_certify_nonPK'           => '1',
      'wsrep_max_ws_rows'             => '131072',
      'wsrep_max_ws_size'             => '1073741824',
      'wsrep_debug'                   => '0',
      'wsrep_convert_LOCK_to_trx'     => '0',
      'wsrep_retry_autocommit'        => '1',
      'wsrep_auto_increment_control'  => '1',
      'wsrep_drupal_282555_workaround'=> '0',
      'wsrep_causal_reads'            => '0',
      'wsrep_sst_method'              => 'rsync',
      'wsrep_provider_options'        => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
    },
  }
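  # galera_node_names is a comma-separated list of controller hostnames, so
  # wsrep_cluster_address would look like (illustrative node names):
  #   gcomm://overcloud-controller-0,overcloud-controller-1,overcloud-controller-2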

  class { '::mysql::server':
    create_root_user        => false,
    create_root_my_cnf      => false,
    config_file             => $mysql_config_file,
    override_options        => $mysqld_options,
    remove_default_accounts => $pacemaker_master,
    service_manage          => false,
    service_enabled         => false,
  }

}

if hiera('step') >= 2 {

  # NOTE(gfidente): the following vars are needed on all nodes so they
  # need to stay out of the pacemaker_master conditional.
  # The address mangling will hopefully go away once we can configure the
  # connection string via hostnames; until then, we need to pass the list
  # of IPv6 addresses *with* port and without the brackets as the
  # 'members' argument for the 'mongodb_replset' resource.
  if str2bool(hiera('mongodb::server::ipv6', false)) {
    $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
    $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  } else {
    $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
    $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
  }
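  # e.g. (illustrative address) for a node ip of fd00:fd00:fd00:2000::10 the
  # two forms are '[fd00:fd00:fd00:2000::10]:27017' (bracketed, URL-style)
  # and 'fd00:fd00:fd00:2000::10:27017' (bracketless, for 'members').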
  $mongodb_replset = hiera('mongodb::server::replset')

  if $pacemaker_master {

    include ::pacemaker::resource_defaults

    # Create an openstack-core dummy resource. See RHBZ 1290121
    pacemaker::resource::ocf { 'openstack-core':
      ocf_agent_name => 'heartbeat:Dummy',
      clone_params   => true,
    }

    pacemaker::resource::ocf { 'galera' :
      ocf_agent_name  => 'heartbeat:galera',
      op_params       => 'promote timeout=300s on-fail=block',
      master_params   => '',
      meta_params     => "master-max=${galera_nodes_count} ordered=true",
      resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
      require         => Class['::mysql::server'],
      before          => Exec['galera-ready'],
    }
  }

  $mysql_root_password = hiera('mysql::server::root_password')
  $mysql_clustercheck_password = hiera('mysql_clustercheck_password')

  # This step creates a sysconfig clustercheck file with the root user and an
  # empty password, on the first install only (later on, the clustercheck db
  # user is used). We use exec rather than file to avoid duplicate definition
  # errors in puppet when we later set the file to contain the clustercheck data.
  exec { 'create-root-sysconfig-clustercheck':
    command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
    unless  => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
  }

  exec { 'galera-ready' :
    command     => '/usr/bin/clustercheck >/dev/null',
    timeout     => 30,
    tries       => 180,
    try_sleep   => 10,
    environment => ['AVAILABLE_WHEN_READONLY=0'],
    require     => Exec['create-root-sysconfig-clustercheck'],
  }

  xinetd::service { 'galera-monitor' :
    port           => '9200',
    server         => '/usr/bin/clustercheck',
    per_source     => 'UNLIMITED',
    log_on_success => '',
    log_on_failure => 'HOST',
    flags          => 'REUSE',
    service_type   => 'UNLISTED',
    user           => 'root',
    group          => 'root',
    require        => Exec['create-root-sysconfig-clustercheck'],
  }
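
  # The xinetd service exposes clustercheck over TCP so the load balancer can
  # health-check each galera node; in TripleO the HAProxy mysql backend is
  # typically configured with an httpchk against this port.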

  # We add a clustercheck db user and switch /etc/sysconfig/clustercheck to
  # use it in a later step. We do this only on one node as it will replicate
  # to the other members. We also make sure that the permissions are the
  # minimum necessary.
  if $pacemaker_master {
    mysql_user { 'clustercheck@localhost':
      ensure        => 'present',
      password_hash => mysql_password($mysql_clustercheck_password),
      require       => Exec['galera-ready'],
    }
    mysql_grant { 'clustercheck@localhost/*.*':
      ensure     => 'present',
      options    => ['GRANT'],
      privileges => ['PROCESS'],
      table      => '*.*',
      user       => 'clustercheck@localhost',
    }
  }
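
  # The grant above is roughly equivalent to running (illustrative):
  #   GRANT PROCESS ON *.* TO 'clustercheck'@'localhost' WITH GRANT OPTION;
  # PROCESS is enough for clustercheck to read the wsrep status variables.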

  # Create all the database schemas
  if $sync_db {
    if downcase(hiera('ceilometer_backend')) == 'mysql' {
      class { '::ceilometer::db::mysql':
        require => Exec['galera-ready'],
      }
    }

    if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
      class { '::gnocchi::db::mysql':
        require => Exec['galera-ready'],
      }
    }

    class { '::aodh::db::mysql':
      require => Exec['galera-ready'],
    }
  }

}

if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
  # At this stage we are guaranteed that the clustercheck db user exists,
  # so we switch the resource agent to use it.
  file { '/etc/sysconfig/clustercheck' :
    ensure  => file,
    mode    => '0600',
    owner   => 'root',
    group   => 'root',
    content => "MYSQL_USERNAME=clustercheck\n
MYSQL_PASSWORD='${mysql_clustercheck_password}'\n
MYSQL_HOST=localhost\n",
  }

  $nova_ipv6 = hiera('nova::use_ipv6', false)
  if $nova_ipv6 {
    $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
  } else {
    $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
  }

  class { '::nova' :
    memcached_servers => $memcached_servers
  }

  include ::nova::config

  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {

    # TODO(devvesa) provide non-controller ips for these services
    $zookeeper_node_ips = hiera('neutron_api_node_ips')
    $cassandra_node_ips = hiera('neutron_api_node_ips')

    # Run zookeeper in the controller if configured
    if hiera('enable_zookeeper_on_controller') {
      class { '::tripleo::cluster::zookeeper':
        zookeeper_server_ips => $zookeeper_node_ips,
        # TODO: create a 'bind' hiera key for zookeeper
        zookeeper_client_ip  => hiera('neutron::bind_host'),
        zookeeper_hostnames  => split(hiera('controller_node_names'), ',')
      }
    }

    # Run cassandra in the controller if configured
    if hiera('enable_cassandra_on_controller') {
      class { '::tripleo::cluster::cassandra':
        cassandra_servers => $cassandra_node_ips,
        # TODO: create a 'bind' hiera key for cassandra
        cassandra_ip      => hiera('neutron::bind_host'),
      }
    }

    class { '::tripleo::network::midonet::agent':
      zookeeper_servers => $zookeeper_node_ips,
      cassandra_seeds   => $cassandra_node_ips
    }

    class { '::tripleo::network::midonet::api':
      zookeeper_servers    => $zookeeper_node_ips,
      vip                  => hiera('public_virtual_ip'),
      keystone_ip          => hiera('public_virtual_ip'),
      keystone_admin_token => hiera('keystone::admin_token'),
      # TODO: create a 'bind' hiera key for api
      bind_address         => hiera('neutron::bind_host'),
      admin_password       => hiera('admin_password')
    }

    # TODO: when doing the composable midonet plugin, don't forget to
    # set service_plugins to an empty array in Hiera.
    class { '::neutron':
      service_plugins => []
    }
  }

  if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
    class { '::neutron::plugins::midonet':
      midonet_api_ip    => hiera('public_virtual_ip'),
      keystone_tenant   => hiera('neutron::server::auth_tenant'),
      keystone_password => hiera('neutron::server::password')
    }
  }

  # Ceilometer
  case downcase(hiera('ceilometer_backend')) {
    /mysql/: {
      $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
    }
    default: {
      $mongo_node_string = join($mongo_node_ips_with_port, ',')
      $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
    }
  }
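  # e.g. (illustrative addresses and replset name) with three mongo nodes the
  # resulting connection string would be:
  #   mongodb://192.0.2.10:27017,192.0.2.11:27017,192.0.2.12:27017/ceilometer?replicaSet=tripleo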

  include ::ceilometer::config
  class { '::ceilometer::api' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::notification' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::agent::central' :
    manage_service => false,
    enabled        => false,
  }
  class { '::ceilometer::collector' :
    manage_service => false,
    enabled        => false,
  }
  include ::ceilometer::expirer
  class { '::ceilometer::db' :
    database_connection => $ceilometer_database_connection,
    sync_db             => $sync_db,
  }
  include ::ceilometer::agent::auth
  include ::ceilometer::dispatcher::gnocchi
  Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
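  # The Cron override above prepends a random sleep of up to 86400 seconds
  # (od reads 3 random bytes, i.e. 0..16777215, taken modulo 86400) so the
  # expirer does not fire at the same moment on every controller.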

  # httpd/apache and horizon
  # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
  class { '::apache' :
    service_enable => false,
    # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
  }
  include ::apache::mod::remoteip
  include ::apache::mod::status
  if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
    $_profile_support = 'cisco'
  } else {
    $_profile_support = 'None'
  }
  $neutron_options = {'profile_support' => $_profile_support }

  $memcached_ipv6 = hiera('memcached_ipv6', false)
  if $memcached_ipv6 {
    $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
  } else {
    $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
  }

  class { '::horizon':
    cache_server_ip => $horizon_memcached_servers,
    neutron_options => $neutron_options,
  }

  # Aodh
  class { '::aodh' :
    database_connection => hiera('aodh_mysql_conn_string'),
  }
  include ::aodh::config
  include ::aodh::auth
  include ::aodh::client
  include ::aodh::wsgi::apache
  class { '::aodh::api':
    manage_service => false,
    enabled        => false,
    service_name   => 'httpd',
  }
  class { '::aodh::evaluator':
    manage_service => false,
    enabled        => false,
  }
  class { '::aodh::notifier':
    manage_service => false,
    enabled        => false,
  }
  class { '::aodh::listener':
    manage_service => false,
    enabled        => false,
  }

  # Gnocchi
  $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
  include ::gnocchi::client
  if $sync_db {
    include ::gnocchi::db::sync
  }
  include ::gnocchi::storage
  $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
  case $gnocchi_backend {
    'swift': { include ::gnocchi::storage::swift }
    'file': { include ::gnocchi::storage::file }
    'rbd': { include ::gnocchi::storage::ceph }
    default: { fail('Unrecognized gnocchi_backend parameter.') }
  }
  class { '::gnocchi':
    database_connection => $gnocchi_database_connection,
  }
  class { '::gnocchi::api' :
    manage_service => false,
    enabled        => false,
    service_name   => 'httpd',
  }
  class { '::gnocchi::wsgi::apache' :
    ssl => false,
  }
  class { '::gnocchi::metricd' :
    manage_service => false,
    enabled        => false,
  }
  class { '::gnocchi::statsd' :
    manage_service => false,
    enabled        => false,
  }

  hiera_include('controller_classes')

}

if hiera('step') >= 5 {
  # We now make sure that the root db password is set to a random one.
  # At first installation /root/.my.cnf will be empty and we connect without a
  # root password. On second runs or updates /root/.my.cnf will already be
  # populated with proper credentials. This step happens on every node because
  # this SQL statement does not automatically replicate across nodes.
  exec { 'galera-set-root-password':
    command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
  }
  file { '/root/.my.cnf' :
    ensure  => file,
    mode    => '0600',
    owner   => 'root',
    group   => 'root',
    content => "[client]
user=root
password=\"${mysql_root_password}\"

[mysql]
user=root
password=\"${mysql_root_password}\"",
    require => Exec['galera-set-root-password'],
  }

  $nova_enable_db_purge = hiera('nova_enable_db_purge', true)

  if $nova_enable_db_purge {
    include ::nova::cron::archive_deleted_rows
  }

  if $pacemaker_master {

    pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::apache::params::service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::apache::params::service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
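    # Each pacemaker::constraint::base with constraint_type 'order' maps to a
    # pcs order rule; the one above is roughly equivalent to (illustrative):
    #   pcs constraint order start openstack-core-clone then httpd-clone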
    pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
      constraint_type => 'order',
      first_resource  => 'galera-master',
      second_resource => 'openstack-core-clone',
      first_action    => 'promote',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Ocf['galera'],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }

    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
      pacemaker::resource::service { 'tomcat':
        clone_params => 'interleave=true',
      }
    }
    if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
      # midonet chain: keystone --> neutron-server --> dhcp --> metadata --> tomcat
      pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::server_service}-clone",
        second_resource => "${::neutron::params::dhcp_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::server_service],
                            Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
      }
      pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::dhcp_agent_service}-clone",
        second_resource => "${::neutron::params::metadata_agent_service}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                            Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
      pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
        constraint_type => 'order',
        first_resource  => "${::neutron::params::metadata_agent_service}-clone",
        second_resource => 'tomcat-clone',
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
                            Pacemaker::Resource::Service['tomcat']],
      }
      pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
        source  => "${::neutron::params::metadata_agent_service}-clone",
        target  => "${::neutron::params::dhcp_agent_service}-clone",
        score   => 'INFINITY',
        require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
                    Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
      }
    }

    # Nova
    pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::nova::params::consoleauth_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::colocation { 'nova-consoleauth-with-openstack-core':
      source  => "${::nova::params::consoleauth_service_name}-clone",
      target  => 'openstack-core-clone',
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::consoleauth_service_name}-clone",
      second_resource => "${::nova::params::vncproxy_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                          Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
      source  => "${::nova::params::vncproxy_service_name}-clone",
      target  => "${::nova::params::consoleauth_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
                  Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
    }
    pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::vncproxy_service_name}-clone",
      second_resource => "${::nova::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                          Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
      source  => "${::nova::params::api_service_name}-clone",
      target  => "${::nova::params::vncproxy_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
                  Pacemaker::Resource::Service[$::nova::params::api_service_name]],
    }
    pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::api_service_name}-clone",
      second_resource => "${::nova::params::scheduler_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                          Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
      source  => "${::nova::params::scheduler_service_name}-clone",
      target  => "${::nova::params::api_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
                  Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
    }
    pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
      constraint_type => 'order',
      first_resource  => "${::nova::params::scheduler_service_name}-clone",
      second_resource => "${::nova::params::conductor_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                          Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
    pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
      source  => "${::nova::params::conductor_service_name}-clone",
      target  => "${::nova::params::scheduler_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
                  Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
    }
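    # Net effect of the chain above: nova services are started in the order
    # openstack-core -> consoleauth -> vncproxy -> api -> scheduler ->
    # conductor, and each colocation pins a service to the nodes where its
    # predecessor in the chain is running.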

    # Ceilometer and Aodh
    case downcase(hiera('ceilometer_backend')) {
      /mysql/: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => Pacemaker::Resource::Ocf['openstack-core'],
        }
      }
      default: {
        pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
          clone_params => 'interleave=true',
          require      => [Pacemaker::Resource::Ocf['openstack-core'],
                           Pacemaker::Resource::Service[$::mongodb::params::service_name]],
        }
      }
    }
    pacemaker::resource::service { $::ceilometer::params::collector_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::api_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
      clone_params => 'interleave=true',
    }

    # Fedora doesn't know the `require-all` parameter for constraints yet
    if $::operatingsystem == 'Fedora' {
      $redis_ceilometer_constraint_params = undef
      $redis_aodh_constraint_params = undef
    } else {
      $redis_ceilometer_constraint_params = 'require-all=false'
      $redis_aodh_constraint_params = 'require-all=false'
    }
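    # With require-all=false the ordering is relaxed for clone instances:
    # ceilometer-central / aodh-evaluator may start as soon as at least one
    # redis instance is promoted, instead of waiting for all of them.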
    pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
      constraint_type   => 'order',
      first_resource    => 'redis-master',
      second_resource   => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_ceilometer_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
    }
    pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
      constraint_type   => 'order',
      first_resource    => 'redis-master',
      second_resource   => "${::aodh::params::evaluator_service_name}-clone",
      first_action      => 'promote',
      second_action     => 'start',
      constraint_params => $redis_aodh_constraint_params,
      require           => [Pacemaker::Resource::Ocf['redis'],
                            Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
    }
    pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
      constraint_type => 'order',
      first_resource  => 'openstack-core-clone',
      second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
                          Pacemaker::Resource::Ocf['openstack-core']],
    }
    pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::agent_central_service_name}-clone",
      second_resource => "${::ceilometer::params::collector_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
    pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
      constraint_type => 'order',
      first_resource  => "${::ceilometer::params::collector_service_name}-clone",
      second_resource => "${::ceilometer::params::api_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
                          Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
    }
    pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
      source  => "${::ceilometer::params::api_service_name}-clone",
      target  => "${::ceilometer::params::collector_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
                  Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
    }
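    # Resulting ceilometer start order: openstack-core -> agent-central ->
    # collector -> api (with redis and, when used, mongodb ordered before
    # agent-central), and the api colocated with the collector.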

    pacemaker::resource::service { $::aodh::params::evaluator_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::aodh::params::notifier_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::aodh::params::listener_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
      constraint_type => 'order',
      first_resource  => "${::aodh::params::evaluator_service_name}-clone",
      second_resource => "${::aodh::params::notifier_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                          Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
    }
    pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
      source  => "${::aodh::params::notifier_service_name}-clone",
      target  => "${::aodh::params::evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                  Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
    }
    pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
      constraint_type => 'order',
      first_resource  => "${::aodh::params::evaluator_service_name}-clone",
      second_resource => "${::aodh::params::listener_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                          Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
    }
    pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
      source  => "${::aodh::params::listener_service_name}-clone",
      target  => "${::aodh::params::evaluator_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
                  Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
    }
    if downcase(hiera('ceilometer_backend')) == 'mongodb' {
      pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
        constraint_type => 'order',
        first_resource  => "${::mongodb::params::service_name}-clone",
        second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
        first_action    => 'start',
        second_action   => 'start',
        require         => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
                            Pacemaker::Resource::Service[$::mongodb::params::service_name]],
      }
    }

    # Gnocchi
    pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::resource::service { $::gnocchi::params::statsd_service_name :
      clone_params => 'interleave=true',
    }
    pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint':
      constraint_type => 'order',
      first_resource  => "${::gnocchi::params::metricd_service_name}-clone",
      second_resource => "${::gnocchi::params::statsd_service_name}-clone",
      first_action    => 'start',
      second_action   => 'start',
      require         => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
                          Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
    }
    pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation':
      source  => "${::gnocchi::params::statsd_service_name}-clone",
      target  => "${::gnocchi::params::metricd_service_name}-clone",
      score   => 'INFINITY',
      require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
                  Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
    }

    # Horizon and Keystone
    pacemaker::resource::service { $::apache::params::service_name:
      clone_params     => 'interleave=true',
      verify_on_create => true,
      require          => [File['/etc/keystone/ssl/certs/ca.pem'],
                           File['/etc/keystone/ssl/private/signing_key.pem'],
                           File['/etc/keystone/ssl/certs/signing_cert.pem']],
    }

    # VSM
    if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
      pacemaker::resource::ocf { 'vsm-p' :
        ocf_agent_name  => 'heartbeat:VirtualDomain',
        resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
        require         => Class['n1k_vsm'],
        meta_params     => 'resource-stickiness=INFINITY',
      }
      if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
        pacemaker::resource::ocf { 'vsm-s' :
          ocf_agent_name  => 'heartbeat:VirtualDomain',
          resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
          require         => Class['n1k_vsm'],
          meta_params     => 'resource-stickiness=INFINITY',
        }
        pacemaker::constraint::colocation { 'vsm-colocation-contraint':
          source  => 'vsm-p',
          target  => 'vsm-s',
          score   => '-INFINITY',
          require => [Pacemaker::Resource::Ocf['vsm-p'],
                      Pacemaker::Resource::Ocf['vsm-s']],
        }
      }
    }

  }

}

$package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
package_manifest{$package_manifest_name: ensure => present}