1 # Copyright 2015 Red Hat, Inc.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
21 # TODO(jistr): use pcs resource provider instead of just no-ops
# NOTE(review): this chunk is a lossy extraction — the numeric prefixes are
# original-file line numbers and interior lines (the collector opening,
# closing braces, other attributes) are missing. Do not edit blindly.
#
# Fragment of a resource-collector override: per the TODO above, every
# OpenStack service resource carrying one of these tags is turned into a
# no-op (restart => '/bin/true') instead of being managed directly,
# because Pacemaker owns the service lifecycle on this node.
23 tag == 'aodh-service' or
24 tag == 'cinder-service' or
25 tag == 'ceilometer-service' or
26 tag == 'gnocchi-service' or
27 tag == 'neutron-service' or
28 tag == 'nova-service' or
29 tag == 'sahara-service'
# Override applied to the collected Service resources: restarts become no-ops.
32 restart => '/bin/true',
# Base includes and node-role flags (extraction artifact: numeric prefixes
# are original line numbers; some lines, e.g. the else branch at 42-43,
# are missing from this view).
37 include ::tripleo::packages
38 include ::tripleo::firewall
# This node is the Pacemaker master iff its hostname matches the hiera
# bootstrap node id (case-insensitive compare via downcase).
40 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
41 $pacemaker_master = true
# presumably the else branch of the bootstrap_nodeid check — confirm
44 $pacemaker_master = false
# Fencing is only enabled once deployment reaches step 5 AND the
# enable_fencing hiera flag is set.
48 $enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 5
49 $enable_load_balancer = hiera('enable_load_balancer', true)
51 # When to start and enable services which haven't been Pacemakerized
52 # FIXME: remove when we start all OpenStack services using Pacemaker
53 # (occurrences of this variable will be gradually replaced with false)
54 $non_pcmk_start = hiera('step') >= 5
# Step 1: kernel modules, sysctl, NTP, and Pacemaker/Corosync cluster
# bootstrap. (Lossy extraction: closing braces and some lines between the
# numeric prefixes are missing from this view.)
56 if hiera('step') >= 1 {
58 create_resources(kmod::load, hiera('kernel_modules'), {})
59 create_resources(sysctl::value, hiera('sysctl_settings'), {})
# Load kernel modules before applying any sysctl values.
60 Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
64 if count(hiera('ntp::servers')) > 0 {
# Corosync expects a space-separated member list; hiera provides
# comma-separated controller_node_names ('G' = replace all occurrences).
68 $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
69 $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
# IPv6 clusters pass an extra '--ipv6' flag to cluster setup.
71 $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' }
73 $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) }
75 class { '::pacemaker':
76 hacluster_pwd => hiera('hacluster_pwd'),
# Only the pacemaker master actually runs cluster setup (setup_cluster).
78 class { '::pacemaker::corosync':
79 cluster_members => $pacemaker_cluster_members,
80 setup_cluster => $pacemaker_master,
81 cluster_setup_extras => $cluster_setup_extras,
# STONITH stays disabled until $enable_fencing is true (step >= 5).
83 class { '::pacemaker::stonith':
84 disable => !$enable_fencing,
87 include ::tripleo::fencing
89 # enable stonith after all Pacemaker resources have been created
90 Pcmk_resource<||> -> Class['tripleo::fencing']
91 Pcmk_constraint<||> -> Class['tripleo::fencing']
92 Exec <| tag == 'pacemaker_constraint' |> -> Class['tripleo::fencing']
93 # enable stonith after all fencing devices have been created
94 Class['tripleo::fencing'] -> Class['pacemaker::stonith']
97 # FIXME(gfidente): sets 200secs as default start timeout op
98 # param; until we can use pcmk global defaults we'll still
99 # need to add it to every resource which redefines op params
# Resource default applied to every Pacemaker::Resource::Service below.
100 Pacemaker::Resource::Service {
101 op_params => 'start timeout=200s stop timeout=200s',
# MongoDB (when ceilometer backend is mongodb) and Galera/MySQL server
# configuration. service_manage => false throughout: Pacemaker, not
# systemd/puppet, controls these services. (Lossy extraction — interior
# lines missing.)
104 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
105 include ::mongodb::globals
106 include ::mongodb::client
107 class { '::mongodb::server' :
108 service_manage => false,
# presumably part of a redis (or similar) class declaration — the
# opening line is missing from this view; confirm against full file.
114 service_manage => false,
115 notify_service => false,
# Galera vs. plain MySQL decides which config file mysql::server writes.
119 if str2bool(hiera('enable_galera', true)) {
120 $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
122 $mysql_config_file = '/etc/my.cnf.d/server.cnf'
124 $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
125 $galera_nodes_count = count(split($galera_nodes, ','))
127 # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
128 # set bind-address to a hostname instead of an ip address; to move Mysql
129 # from internal_api on another network we'll have to customize both
130 # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
131 $mysql_bind_host = hiera('mysql_bind_host')
# mysqld option hash (the surrounding hash declaration line is missing
# from this view). Galera/wsrep settings follow the mysqld section.
134 'skip-name-resolve' => '1',
135 'binlog_format' => 'ROW',
136 'default-storage-engine' => 'innodb',
137 'innodb_autoinc_lock_mode' => '2',
138 'innodb_locks_unsafe_for_binlog'=> '1',
139 'query_cache_size' => '0',
140 'query_cache_type' => '0',
141 'bind-address' => $::hostname,
142 'max_connections' => hiera('mysql_max_connections'),
143 'open_files_limit' => '-1',
145 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so',
146 'wsrep_cluster_name' => 'galera_cluster',
147 'wsrep_cluster_address' => "gcomm://${galera_nodes}",
148 'wsrep_slave_threads' => '1',
149 'wsrep_certify_nonPK' => '1',
150 'wsrep_max_ws_rows' => '131072',
151 'wsrep_max_ws_size' => '1073741824',
152 'wsrep_debug' => '0',
153 'wsrep_convert_LOCK_to_trx' => '0',
154 'wsrep_retry_autocommit' => '1',
155 'wsrep_auto_increment_control' => '1',
156 'wsrep_drupal_282555_workaround'=> '0',
157 'wsrep_causal_reads' => '0',
158 'wsrep_sst_method' => 'rsync',
# Brackets around the bind host support IPv6 gcomm listen addresses.
159 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
# MySQL server itself: default accounts are only removed on the
# pacemaker master; service not managed/enabled here (Pacemaker owns it).
163 class { '::mysql::server':
164 create_root_user => false,
165 create_root_my_cnf => false,
166 config_file => $mysql_config_file,
167 override_options => $mysqld_options,
168 remove_default_accounts => $pacemaker_master,
169 service_manage => false,
170 service_enabled => false,
# Step 2: mongo connection-string vars (all nodes) plus, on the pacemaker
# master only, creation of the core Pacemaker resources: openstack-core
# dummy, mongod, galera, redis. (Lossy extraction — closing braces and
# some attribute lines are missing from this view.)
175 if hiera('step') >= 2 {
177 # NOTE(gfidente): the following vars are needed on all nodes so they
178 # need to stay out of pacemaker_master conditional.
179 # The addresses mangling will hopefully go away when we'll be able to
180 # configure the connection string via hostnames, until then, we need to pass
181 # the list of IPv6 addresses *with* port and without the brackets as 'members'
182 # argument for the 'mongodb_replset' resource.
183 if str2bool(hiera('mongodb::server::ipv6', false)) {
# IPv6: bracket each IP for the conn-validator ([ip]:27017), but keep a
# bracket-free variant (_nobr) for the replset 'members' argument.
184 $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
185 $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
186 $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
188 $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
189 $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
191 $mongodb_replset = hiera('mongodb::server::replset')
193 if $pacemaker_master {
195 include ::pacemaker::resource_defaults
197 # Create an openstack-core dummy resource. See RHBZ 1290121
198 pacemaker::resource::ocf { 'openstack-core':
199 ocf_agent_name => 'heartbeat:Dummy',
200 clone_params => true,
203 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
# mongod under Pacemaker; 370s start timeout overrides the 200s default
# set by the Pacemaker::Resource::Service resource default above.
204 pacemaker::resource::service { $::mongodb::params::service_name :
205 op_params => 'start timeout=370s stop timeout=200s',
206 clone_params => true,
207 require => Class['::mongodb::server'],
209 # NOTE (spredzy) : The replset can only be run
210 # once all the nodes have joined the cluster.
211 mongodb_conn_validator { $mongo_node_ips_with_port :
213 require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
214 before => Mongodb_replset[$mongodb_replset],
216 mongodb_replset { $mongodb_replset :
217 members => $mongo_node_ips_with_port_nobr,
# Galera as a master/slave OCF resource; creation enabled and cluster
# address injected through resource_params.
221 pacemaker::resource::ocf { 'galera' :
222 ocf_agent_name => 'heartbeat:galera',
223 op_params => 'promote timeout=300s on-fail=block',
225 meta_params => "master-max=${galera_nodes_count} ordered=true",
226 resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
227 require => Class['::mysql::server'],
228 before => Exec['galera-ready'],
231 pacemaker::resource::ocf { 'redis':
232 ocf_agent_name => 'heartbeat:redis',
234 meta_params => 'notify=true ordered=true interleave=true',
235 resource_params => 'wait_last_known_master=true',
236 require => Class['::redis'],
# Clustercheck bootstrap: first-run /etc/sysconfig/clustercheck with root
# credentials, the galera-ready gate used by all db-schema classes, the
# xinetd monitor service, and (master only) the dedicated clustercheck
# db user with minimal PROCESS privilege. (Lossy extraction — some lines
# are missing between the numeric prefixes.)
240 $mysql_root_password = hiera('mysql::server::root_password')
241 $mysql_clustercheck_password = hiera('mysql_clustercheck_password')
242 # This step is to create a sysconfig clustercheck file with the root user and empty password
243 # on the first install only (because later on the clustercheck db user will be used)
244 # We are using exec and not file in order to not have duplicate definition errors in puppet
245 # when we later set the the file to contain the clustercheck data
246 exec { 'create-root-sysconfig-clustercheck':
247 command => "/bin/echo 'MYSQL_USERNAME=root\nMYSQL_PASSWORD=\'\'\nMYSQL_HOST=localhost\n' > /etc/sysconfig/clustercheck",
# Skip once the file already contains clustercheck data.
248 unless => '/bin/test -e /etc/sysconfig/clustercheck && grep -q clustercheck /etc/sysconfig/clustercheck',
# Gate: succeeds only when clustercheck reports the local galera node as
# available; schema-creation classes below 'require' this exec.
251 exec { 'galera-ready' :
252 command => '/usr/bin/clustercheck >/dev/null',
256 environment => ['AVAILABLE_WHEN_READONLY=0'],
257 require => Exec['create-root-sysconfig-clustercheck'],
# Expose clustercheck over xinetd (used by the load balancer healthcheck).
260 xinetd::service { 'galera-monitor' :
262 server => '/usr/bin/clustercheck',
263 per_source => 'UNLIMITED',
264 log_on_success => '',
265 log_on_failure => 'HOST',
267 service_type => 'UNLISTED',
270 require => Exec['create-root-sysconfig-clustercheck'],
272 # We add a clustercheck db user and we will switch /etc/sysconfig/clustercheck
273 # to it in a later step. We do this only on one node as it will replicate on
274 # the other members. We also make sure that the permissions are the minimum necessary
275 if $pacemaker_master {
276 mysql_user { 'clustercheck@localhost':
278 password_hash => mysql_password($mysql_clustercheck_password),
279 require => Exec['galera-ready'],
281 mysql_grant { 'clustercheck@localhost/*.*':
283 options => ['GRANT'],
284 privileges => ['PROCESS'],
286 user => 'clustercheck@localhost',
290 # Create all the database schemas
# Each schema class waits for the galera-ready gate so DDL only runs once
# the local galera node is accepting writes. Ceilometer/gnocchi schemas
# are conditional on their hiera-selected backend being mysql.
# (Lossy extraction — closing braces are missing from this view.)
292 class { '::nova::db::mysql':
293 require => Exec['galera-ready'],
295 class { '::nova::db::mysql_api':
296 require => Exec['galera-ready'],
298 class { '::neutron::db::mysql':
299 require => Exec['galera-ready'],
301 class { '::cinder::db::mysql':
302 require => Exec['galera-ready'],
305 if downcase(hiera('ceilometer_backend')) == 'mysql' {
306 class { '::ceilometer::db::mysql':
307 require => Exec['galera-ready'],
311 if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
312 class { '::gnocchi::db::mysql':
313 require => Exec['galera-ready'],
316 class { '::sahara::db::mysql':
317 require => Exec['galera-ready'],
# Ceph: monitor on controllers when ceph storage is enabled, optional
# co-located OSDs (with SELinux set permissive for them), and external
# ceph client support. (Lossy extraction — interior lines missing.)
322 $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
325 $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
# IPv6 deployments read a separate pre-bracketed mon host list.
326 if str2bool(hiera('ceph_ipv6', false)) {
327 $mon_host = hiera('ceph_mon_host_v6')
329 $mon_host = hiera('ceph_mon_host')
331 class { '::ceph::profile::params':
332 mon_initial_members => $mon_initial_members,
333 mon_host => $mon_host,
336 include ::ceph::profile::mon
339 if str2bool(hiera('enable_ceph_storage', false)) {
# ceph-osd needs SELinux permissive: set it both persistently (config
# file) and immediately (setenforce), before the OSD class runs.
340 if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
341 exec { 'set selinux to permissive on boot':
342 command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
343 onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
344 path => ['/usr/bin', '/usr/sbin'],
347 exec { 'set selinux to permissive':
348 command => 'setenforce 0',
349 onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
350 path => ['/usr/bin', '/usr/sbin'],
351 } -> Class['ceph::profile::osd']
355 include ::ceph::profile::osd
# External ceph: only the client profile, with its own mon_host params.
358 if str2bool(hiera('enable_external_ceph', false)) {
359 if str2bool(hiera('ceph_ipv6', false)) {
360 $mon_host = hiera('ceph_mon_host_v6')
362 $mon_host = hiera('ceph_mon_host')
364 class { '::ceph::profile::params':
365 mon_host => $mon_host,
368 include ::ceph::profile::client
# Step 4 (or step 3 on the sync node): switch clustercheck to its
# dedicated db user, compute memcached endpoints, and declare all nova
# services unmanaged (Pacemaker-controlled). (Lossy extraction —
# interior lines missing between the numeric prefixes.)
374 if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
375 # At this stage we are guaranteed that the clustercheck db user exists
376 # so we switch the resource agent to use it.
377 file { '/etc/sysconfig/clustercheck' :
382 content => "MYSQL_USERNAME=clustercheck\n
383 MYSQL_PASSWORD='${mysql_clustercheck_password}'\n
384 MYSQL_HOST=localhost\n",
387 $nova_ipv6 = hiera('nova::use_ipv6', false)
# IPv6 uses the bracketed memcache node list; both get ':11211' appended.
389 $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
391 $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
395 memcached_servers => $memcached_servers
398 include ::nova::config
# All nova services: manage_service => false so Pacemaker owns them.
400 class { '::nova::api' :
402 sync_db_api => $sync_db,
403 manage_service => false,
406 class { '::nova::cert' :
407 manage_service => false,
410 class { '::nova::conductor' :
411 manage_service => false,
414 class { '::nova::consoleauth' :
415 manage_service => false,
418 class { '::nova::vncproxy' :
419 manage_service => false,
422 include ::nova::scheduler::filter
423 class { '::nova::scheduler' :
424 manage_service => false,
427 include ::nova::network::neutron
# Neutron: midonet gets a full zookeeper/cassandra/agent/api stack on the
# controller; then the core neutron server plus one branch per supported
# core plugin (nuage, opencontrail, midonet, plumgrid, ml2) and ml2
# mechanism-driver extras (cisco ucsm/nexus/n1kv, bigswitch).
# (Lossy extraction — closing braces and some lines missing.)
429 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
431 # TODO(devvesa) provide non-controller ips for these services
432 $zookeeper_node_ips = hiera('neutron_api_node_ips')
433 $cassandra_node_ips = hiera('neutron_api_node_ips')
435 # Run zookeeper in the controller if configured
436 if hiera('enable_zookeeper_on_controller') {
437 class {'::tripleo::cluster::zookeeper':
438 zookeeper_server_ips => $zookeeper_node_ips,
439 # TODO: create a 'bind' hiera key for zookeeper
440 zookeeper_client_ip => hiera('neutron::bind_host'),
441 zookeeper_hostnames => split(hiera('controller_node_names'), ',')
445 # Run cassandra in the controller if configured
446 if hiera('enable_cassandra_on_controller') {
447 class {'::tripleo::cluster::cassandra':
448 cassandra_servers => $cassandra_node_ips,
449 # TODO: create a 'bind' hiera key for cassandra
450 cassandra_ip => hiera('neutron::bind_host'),
454 class {'::tripleo::network::midonet::agent':
455 zookeeper_servers => $zookeeper_node_ips,
456 cassandra_seeds => $cassandra_node_ips
# Midonet API fronted by the public VIP; keystone admin token used for
# bootstrap authentication.
459 class {'::tripleo::network::midonet::api':
460 zookeeper_servers => $zookeeper_node_ips,
461 vip => hiera('public_virtual_ip'),
462 keystone_ip => hiera('public_virtual_ip'),
463 keystone_admin_token => hiera('keystone::admin_token'),
464 # TODO: create a 'bind' hiera key for api
465 bind_address => hiera('neutron::bind_host'),
466 admin_password => hiera('admin_password')
# presumably part of a neutron class declaration disabling service
# plugins for midonet — opening line missing from this view; confirm.
471 service_plugins => []
476 # Neutron class definitions
480 include ::neutron::config
481 class { '::neutron::server' :
483 manage_service => false,
486 include ::neutron::server::notifications
487 if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
488 include ::neutron::plugins::nuage
490 if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
491 include ::neutron::plugins::opencontrail
493 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
494 class {'::neutron::plugins::midonet':
495 midonet_api_ip => hiera('public_virtual_ip'),
496 keystone_tenant => hiera('neutron::server::auth_tenant'),
497 keystone_password => hiera('neutron::server::password')
500 if hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
501 class { '::neutron::plugins::plumgrid' :
502 connection => hiera('neutron::server::database_connection'),
503 controller_priv_host => hiera('keystone_admin_api_vip'),
504 admin_password => hiera('admin_password'),
505 metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
# ML2 path: OVS agent unmanaged (Pacemaker owns it), plus optional
# cisco and bigswitch mechanism-driver support.
508 include ::neutron::plugins::ml2
509 class { '::neutron::agents::ml2::ovs':
510 manage_service => false,
514 if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
515 include ::neutron::plugins::ml2::cisco::ucsm
517 if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
518 include ::neutron::plugins::ml2::cisco::nexus
519 include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
521 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
522 include ::neutron::plugins::ml2::cisco::nexus1000v
524 class { '::neutron::agents::n1kv_vem':
525 n1kv_source => hiera('n1kv_vem_source', undef),
526 n1kv_version => hiera('n1kv_vem_version', undef),
# presumably the n1kv VSM class — opening line missing from this view.
530 n1kv_source => hiera('n1kv_vsm_source', undef),
531 n1kv_version => hiera('n1kv_vsm_version', undef),
535 if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
536 include ::neutron::plugins::ml2::bigswitch::restproxy
537 include ::neutron::agents::bigswitch
# Cinder: api/scheduler/volume services unmanaged (Pacemaker owns them),
# followed by one conditional branch per storage backend (iscsi, rbd,
# eqlx, dellsc, netapp, nfs); the enabled-backend names accumulate into
# $cinder_enabled_backends at the end. (Lossy extraction — closing
# braces and some lines missing between numeric prefixes.)
541 include ::cinder::config
542 class { '::cinder::api':
544 manage_service => false,
547 class { '::cinder::scheduler' :
548 manage_service => false,
551 class { '::cinder::volume' :
552 manage_service => false,
555 include ::cinder::glance
556 include ::cinder::ceilometer
557 class { '::cinder::setup_test_volume':
558 size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
561 $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
562 if $cinder_enable_iscsi {
563 $cinder_iscsi_backend = 'tripleo_iscsi'
565 cinder::backend::iscsi { $cinder_iscsi_backend :
566 iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
567 iscsi_helper => hiera('cinder_iscsi_helper'),
# Ceph pools are created (with hiera-driven pg/pgp/size) before the rbd
# backend; when ceph is off, the rbd backend has no pool dependency.
573 $ceph_pools = hiera('ceph_pools')
574 ceph::pool { $ceph_pools :
575 pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'),
576 pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
577 size => hiera('ceph::profile::params::osd_pool_default_size'),
580 $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]
583 $cinder_pool_requires = []
586 if hiera('cinder_enable_rbd_backend', false) {
587 $cinder_rbd_backend = 'tripleo_ceph'
589 cinder::backend::rbd { $cinder_rbd_backend :
590 backend_host => hiera('cinder::host'),
591 rbd_pool => hiera('cinder_rbd_pool_name'),
592 rbd_user => hiera('ceph_client_user_name'),
593 rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
594 require => $cinder_pool_requires,
598 if hiera('cinder_enable_eqlx_backend', false) {
599 $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
601 cinder::backend::eqlx { $cinder_eqlx_backend :
602 volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
603 san_ip => hiera('cinder::backend::eqlx::san_ip', undef),
604 san_login => hiera('cinder::backend::eqlx::san_login', undef),
605 san_password => hiera('cinder::backend::eqlx::san_password', undef),
606 san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef),
607 eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
608 eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef),
609 eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
610 eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
# NOTE(review): chap_password reads hiera key '...eqlx_san_password'
# while every sibling reads 'eqlx_<param>' — looks like a wrong key
# ('eqlx_chap_password' expected); verify against the hieradata.
611 eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_san_password', undef),
615 if hiera('cinder_enable_dellsc_backend', false) {
616 $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
618 cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
619 volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
620 san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
621 san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
622 san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
623 dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
624 iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
625 iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
626 dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
627 dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
628 dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
632 if hiera('cinder_enable_netapp_backend', false) {
633 $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
# nfs_shares arrives as a comma-separated string; split only when set.
635 if hiera('cinder::backend::netapp::nfs_shares', undef) {
636 $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
639 cinder::backend::netapp { $cinder_netapp_backend :
640 netapp_login => hiera('cinder::backend::netapp::netapp_login', undef),
641 netapp_password => hiera('cinder::backend::netapp::netapp_password', undef),
642 netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
643 netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef),
644 netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
645 netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef),
646 netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
647 netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef),
648 netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef),
649 netapp_volume_list => hiera('cinder::backend::netapp::netapp_volume_list', undef),
650 netapp_vserver => hiera('cinder::backend::netapp::netapp_vserver', undef),
651 netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
652 nfs_shares => $cinder_netapp_nfs_shares,
653 nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef),
654 netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
655 netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
656 netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef),
657 netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
658 netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
659 netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
663 if hiera('cinder_enable_nfs_backend', false) {
664 $cinder_nfs_backend = 'tripleo_nfs'
# With SELinux enforcing, allow virt to use NFS before nfs-utils installs.
666 if str2bool($::selinux) {
667 selboolean { 'virt_use_nfs':
670 } -> Package['nfs-utils']
673 package { 'nfs-utils': } ->
674 cinder::backend::nfs { $cinder_nfs_backend:
675 nfs_servers => hiera('cinder_nfs_servers'),
676 nfs_mount_options => hiera('cinder_nfs_mount_options',''),
677 nfs_shares_config => '/etc/cinder/shares-nfs.conf',
# Collect every backend variable that was actually set (undef ones are
# dropped) and merge in user-enabled backends from hiera.
681 $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
682 class { '::cinder::backends' :
683 enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
# Sahara services unmanaged (Pacemaker-controlled); swift storage
# services gated on $non_pcmk_start (i.e. only started at step >= 5).
# (Lossy extraction — closing braces missing from this view.)
689 class { '::sahara::service::api':
690 manage_service => false,
693 class { '::sahara::service::engine':
694 manage_service => false,
699 if str2bool(hiera('enable_swift_storage', true)) {
700 class {'::swift::storage::all':
701 mount_check => str2bool(hiera('swift_mount_check')),
703 class {'::swift::storage::account':
704 manage_service => $non_pcmk_start,
705 enabled => $non_pcmk_start,
707 class {'::swift::storage::container':
708 manage_service => $non_pcmk_start,
709 enabled => $non_pcmk_start,
711 class {'::swift::storage::object':
712 manage_service => $non_pcmk_start,
713 enabled => $non_pcmk_start,
# Guard against a duplicate /srv/node declaration elsewhere in the catalog.
715 if(!defined(File['/srv/node'])) {
720 require => Package['openstack-swift'],
# recon + healthcheck middleware for each storage service type.
723 $swift_components = ['account', 'container', 'object']
724 swift::storage::filter::recon { $swift_components : }
725 swift::storage::filter::healthcheck { $swift_components : }
# Ceilometer: database connection string selected by backend (mysql conn
# string from hiera, or a mongodb replset URI built from the node list),
# then all services unmanaged for Pacemaker. (Lossy extraction — case
# branch labels and closing braces missing from this view.)
729 case downcase(hiera('ceilometer_backend')) {
731 $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
734 $mongo_node_string = join($mongo_node_ips_with_port, ',')
735 $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
739 include ::ceilometer::config
740 class { '::ceilometer::api' :
741 manage_service => false,
744 class { '::ceilometer::agent::notification' :
745 manage_service => false,
748 class { '::ceilometer::agent::central' :
749 manage_service => false,
752 class { '::ceilometer::collector' :
753 manage_service => false,
756 include ::ceilometer::expirer
757 class { '::ceilometer::db' :
758 database_connection => $ceilometer_database_connection,
761 include ::ceilometer::agent::auth
762 include ::ceilometer::dispatcher::gnocchi
# Jitter the expirer cron by a random 0-86399s sleep so all controllers
# don't expire simultaneously ($(...) escaped so cron evaluates it).
764 Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
766 # httpd/apache and horizon
767 # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
# Apache class declaration (opening line missing from this view):
# service left disabled — the pacemaker resource agent starts httpd.
769 service_enable => false,
770 # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
772 include ::apache::mod::remoteip
773 include ::apache::mod::status
# Horizon profile_support is 'cisco' only when the n1kv driver is active.
774 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
775 $_profile_support = 'cisco'
777 $_profile_support = 'None'
779 $neutron_options = {'profile_support' => $_profile_support }
781 $memcached_ipv6 = hiera('memcached_ipv6', false)
783 $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
785 $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
# Horizon class declaration (opening line missing from this view).
789 cache_server_ip => $horizon_memcached_servers,
790 neutron_options => $neutron_options,
# Aodh: shares the ceilometer database connection; API runs under httpd
# (service_name => 'httpd'); all daemons unmanaged for Pacemaker.
# (Lossy extraction — the class opening at 794 and closing braces are
# missing from this view.)
795 database_connection => $ceilometer_database_connection,
797 include ::aodh::config
799 include ::aodh::client
800 include ::aodh::wsgi::apache
801 class { '::aodh::api':
802 manage_service => false,
804 service_name => 'httpd',
806 class { '::aodh::evaluator':
807 manage_service => false,
810 class { '::aodh::notifier':
811 manage_service => false,
814 class { '::aodh::listener':
815 manage_service => false,
# Gnocchi: storage backend dispatched via case (swift/file/rbd), API under
# httpd and unmanaged; then snmpd v3 read-only user and config, and
# finally extra hiera-driven controller classes. (Lossy extraction —
# some lines missing between the numeric prefixes.)
820 $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
821 include ::gnocchi::client
823 include ::gnocchi::db::sync
825 include ::gnocchi::storage
826 $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
827 case $gnocchi_backend {
828 'swift': { include ::gnocchi::storage::swift }
829 'file': { include ::gnocchi::storage::file }
830 'rbd': { include ::gnocchi::storage::ceph }
831 default: { fail('Unrecognized gnocchi_backend parameter.') }
# Gnocchi base class declaration (opening line missing from this view).
834 database_connection => $gnocchi_database_connection,
836 class { '::gnocchi::api' :
837 manage_service => false,
839 service_name => 'httpd',
841 class { '::gnocchi::wsgi::apache' :
844 class { '::gnocchi::metricd' :
845 manage_service => false,
848 class { '::gnocchi::statsd' :
849 manage_service => false,
# snmpd: read-only v3 user plus agent config (createUser/rouser lines
# rebuilt from the same hiera credentials).
853 $snmpd_user = hiera('snmpd_readonly_user_name')
854 snmp::snmpv3_user { $snmpd_user:
856 authpass => hiera('snmpd_readonly_user_password'),
859 agentaddress => ['udp:161','udp6:[::1]:161'],
860 snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
# Deployment-specific extra classes pulled in from hiera.
863 hiera_include('controller_classes')
# Step 5: set the galera root password (runs on every node — the UPDATE
# on mysql.user does not replicate), write /root/.my.cnf with the real
# credentials, and enable the nova/cinder db-purge crons when requested.
# (Lossy extraction — the file content between 876 and 888 is partially
# missing from this view.)
867 if hiera('step') >= 5 {
868 # We now make sure that the root db password is set to a random one
869 # At first installation /root/.my.cnf will be empty and we connect without a root
870 # password. On second runs or updates /root/.my.cnf will already be populated
871 # with proper credentials. This step happens on every node because this sql
872 # statement does not automatically replicate across nodes.
873 exec { 'galera-set-root-password':
874 command => "/bin/touch /root/.my.cnf && /bin/echo \"UPDATE mysql.user SET Password = PASSWORD('${mysql_root_password}') WHERE user = 'root'; flush privileges;\" | /bin/mysql --defaults-extra-file=/root/.my.cnf -u root",
876 file { '/root/.my.cnf' :
883 password=\"${mysql_root_password}\"
887 password=\"${mysql_root_password}\"",
888 require => Exec['galera-set-root-password'],
891 $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
892 $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
894 if $nova_enable_db_purge {
895 include ::nova::cron::archive_deleted_rows
897 if $cinder_enable_db_purge {
898 include ::cinder::cron::db_purge
901 if $pacemaker_master {
903 pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
904 constraint_type => 'order',
905 first_resource => 'openstack-core-clone',
906 second_resource => "${::apache::params::service_name}-clone",
907 first_action => 'start',
908 second_action => 'start',
909 require => [Pacemaker::Resource::Service[$::apache::params::service_name],
910 Pacemaker::Resource::Ocf['openstack-core']],
912 pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
913 constraint_type => 'order',
914 first_resource => 'galera-master',
915 second_resource => 'openstack-core-clone',
916 first_action => 'promote',
917 second_action => 'start',
918 require => [Pacemaker::Resource::Ocf['galera'],
919 Pacemaker::Resource::Ocf['openstack-core']],
923 pacemaker::resource::service { $::cinder::params::api_service :
924 clone_params => 'interleave=true',
925 require => Pacemaker::Resource::Ocf['openstack-core'],
927 pacemaker::resource::service { $::cinder::params::scheduler_service :
928 clone_params => 'interleave=true',
930 pacemaker::resource::service { $::cinder::params::volume_service : }
932 pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
933 constraint_type => 'order',
934 first_resource => 'openstack-core-clone',
935 second_resource => "${::cinder::params::api_service}-clone",
936 first_action => 'start',
937 second_action => 'start',
938 require => [Pacemaker::Resource::Ocf['openstack-core'],
939 Pacemaker::Resource::Service[$::cinder::params::api_service]],
941 pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
942 constraint_type => 'order',
943 first_resource => "${::cinder::params::api_service}-clone",
944 second_resource => "${::cinder::params::scheduler_service}-clone",
945 first_action => 'start',
946 second_action => 'start',
947 require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
948 Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
950 pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
951 source => "${::cinder::params::scheduler_service}-clone",
952 target => "${::cinder::params::api_service}-clone",
954 require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
955 Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
957 pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
958 constraint_type => 'order',
959 first_resource => "${::cinder::params::scheduler_service}-clone",
960 second_resource => $::cinder::params::volume_service,
961 first_action => 'start',
962 second_action => 'start',
963 require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
964 Pacemaker::Resource::Service[$::cinder::params::volume_service]],
966 pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
967 source => $::cinder::params::volume_service,
968 target => "${::cinder::params::scheduler_service}-clone",
970 require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
971 Pacemaker::Resource::Service[$::cinder::params::volume_service]],
# Sahara under Pacemaker: both services run as interleaved clones
# (interleave=true lets a node's instance start as soon as its local
# dependencies are up, instead of waiting cluster-wide).
975 pacemaker::resource::service { $::sahara::params::api_service_name :
976 clone_params => 'interleave=true',
# sahara-api authenticates against keystone, hence the dependency on the
# openstack-core OCF resource.
977 require => Pacemaker::Resource::Ocf['openstack-core'],
979 pacemaker::resource::service { $::sahara::params::engine_service_name :
980 clone_params => 'interleave=true',
# Start ordering: keystone (openstack-core) -> sahara-api -> sahara-engine.
982 pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
983 constraint_type => 'order',
984 first_resource => 'openstack-core-clone',
985 second_resource => "${::sahara::params::api_service_name}-clone",
986 first_action => 'start',
987 second_action => 'start',
988 require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
989 Pacemaker::Resource::Ocf['openstack-core']],
991 pacemaker::constraint::base { 'sahara-api-then-sahara-engine-constraint':
992 constraint_type => 'order',
993 first_resource => "${::sahara::params::api_service_name}-clone",
994 second_resource => "${::sahara::params::engine_service_name}-clone",
995 first_action => 'start',
996 second_action => 'start',
997 require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
998 Pacemaker::Resource::Service[$::sahara::params::engine_service_name]],
# Neutron agents under Pacemaker. The OVS agent and the two cleanup OCF
# resources are only created when the OVS agent is enabled (default true);
# tomcat is only managed when the MidoNet core plugin is in use.
1001 if hiera('neutron::enable_ovs_agent', true) {
1002 pacemaker::resource::service { $::neutron::params::ovs_agent_service:
1003 clone_params => 'interleave=true',
1006 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
# MidoNet's API runs inside tomcat, so tomcat itself becomes a cluster resource.
1007 pacemaker::resource::service {'tomcat':
1008 clone_params => 'interleave=true',
1011 if hiera('neutron::enable_ovs_agent', true) {
# Custom OCF agents (neutron:OVSCleanup / neutron:NetnsCleanup) that purge
# stale OVS ports and network namespaces before the agent starts.
1012 pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
1013 ocf_agent_name => 'neutron:OVSCleanup',
1014 clone_params => 'interleave=true',
1016 pacemaker::resource::ocf { 'neutron-netns-cleanup':
1017 ocf_agent_name => 'neutron:NetnsCleanup',
1018 clone_params => 'interleave=true',
1021 # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
# Each "order" constraint below is paired with an INFINITY colocation so the
# cleanup steps and the agent all run on the same node, in sequence.
1022 pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
1023 constraint_type => 'order',
1024 first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
1025 second_resource => 'neutron-netns-cleanup-clone',
1026 first_action => 'start',
1027 second_action => 'start',
1028 require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1029 Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1031 pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
1032 source => 'neutron-netns-cleanup-clone',
1033 target => "${::neutron::params::ovs_cleanup_service}-clone",
1034 score => 'INFINITY',
1035 require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1036 Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
# netns-cleanup must finish before the OVS agent starts.
1038 pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
1039 constraint_type => 'order',
1040 first_resource => 'neutron-netns-cleanup-clone',
1041 second_resource => "${::neutron::params::ovs_agent_service}-clone",
1042 first_action => 'start',
1043 second_action => 'start',
1044 require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1045 Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1047 pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
1048 source => "${::neutron::params::ovs_agent_service}-clone",
1049 target => 'neutron-netns-cleanup-clone',
1050 score => 'INFINITY',
1051 require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1052 Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
# MidoNet-specific start ordering; only applied when the MidoNet core plugin
# is configured (same guard as the tomcat resource above).
1055 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
1056 #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
1057 pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
1058 constraint_type => 'order',
1059 first_resource => "${::neutron::params::server_service}-clone",
1060 second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1061 first_action => 'start',
1062 second_action => 'start',
1063 require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1064 Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1066 pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
1067 constraint_type => 'order',
1068 first_resource => "${::neutron::params::dhcp_agent_service}-clone",
1069 second_resource => "${::neutron::params::metadata_agent_service}-clone",
1070 first_action => 'start',
1071 second_action => 'start',
1072 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1073 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1075 pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
1076 constraint_type => 'order',
1077 first_resource => "${::neutron::params::metadata_agent_service}-clone",
1078 second_resource => 'tomcat-clone',
1079 first_action => 'start',
1080 second_action => 'start',
1081 require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
1082 Pacemaker::Resource::Service['tomcat']],
# Keep metadata agent with the DHCP agent (source/target naming: the source
# follows the target).
1084 pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
1085 source => "${::neutron::params::metadata_agent_service}-clone",
1086 target => "${::neutron::params::dhcp_agent_service}-clone",
1087 score => 'INFINITY',
1088 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1089 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
# Nova control-plane services as interleaved Pacemaker clones. Only
# consoleauth declares the keystone (openstack-core) dependency here; the
# ordering chain below serializes the rest behind it.
1094 pacemaker::resource::service { $::nova::params::api_service_name :
1095 clone_params => 'interleave=true',
1097 pacemaker::resource::service { $::nova::params::conductor_service_name :
1098 clone_params => 'interleave=true',
1100 pacemaker::resource::service { $::nova::params::consoleauth_service_name :
1101 clone_params => 'interleave=true',
1102 require => Pacemaker::Resource::Ocf['openstack-core'],
1104 pacemaker::resource::service { $::nova::params::vncproxy_service_name :
1105 clone_params => 'interleave=true',
1107 pacemaker::resource::service { $::nova::params::scheduler_service_name :
1108 clone_params => 'interleave=true',
# Nova start chain: keystone -> consoleauth -> vncproxy -> api -> scheduler
# -> conductor, with an INFINITY colocation pinning each link of the chain
# to the same node as its predecessor.
1111 pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
1112 constraint_type => 'order',
1113 first_resource => 'openstack-core-clone',
1114 second_resource => "${::nova::params::consoleauth_service_name}-clone",
1115 first_action => 'start',
1116 second_action => 'start',
1117 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1118 Pacemaker::Resource::Ocf['openstack-core']],
1120 pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
1121 constraint_type => 'order',
1122 first_resource => "${::nova::params::consoleauth_service_name}-clone",
1123 second_resource => "${::nova::params::vncproxy_service_name}-clone",
1124 first_action => 'start',
1125 second_action => 'start',
1126 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1127 Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1129 pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
1130 source => "${::nova::params::vncproxy_service_name}-clone",
1131 target => "${::nova::params::consoleauth_service_name}-clone",
1132 score => 'INFINITY',
1133 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1134 Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1136 pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
1137 constraint_type => 'order',
1138 first_resource => "${::nova::params::vncproxy_service_name}-clone",
1139 second_resource => "${::nova::params::api_service_name}-clone",
1140 first_action => 'start',
1141 second_action => 'start',
1142 require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1143 Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1145 pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
1146 source => "${::nova::params::api_service_name}-clone",
1147 target => "${::nova::params::vncproxy_service_name}-clone",
1148 score => 'INFINITY',
1149 require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1150 Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1152 pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
1153 constraint_type => 'order',
1154 first_resource => "${::nova::params::api_service_name}-clone",
1155 second_resource => "${::nova::params::scheduler_service_name}-clone",
1156 first_action => 'start',
1157 second_action => 'start',
1158 require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1159 Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1161 pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
1162 source => "${::nova::params::scheduler_service_name}-clone",
1163 target => "${::nova::params::api_service_name}-clone",
1164 score => 'INFINITY',
1165 require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1166 Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1168 pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
1169 constraint_type => 'order',
1170 first_resource => "${::nova::params::scheduler_service_name}-clone",
1171 second_resource => "${::nova::params::conductor_service_name}-clone",
1172 first_action => 'start',
1173 second_action => 'start',
1174 require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1175 Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1177 pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
1178 source => "${::nova::params::conductor_service_name}-clone",
1179 target => "${::nova::params::scheduler_service_name}-clone",
1180 score => 'INFINITY',
1181 require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1182 Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1185 # Ceilometer and Aodh
# The central agent's dependencies vary with the configured backend: the
# second (presumably 'mongodb' — the case labels are elided in this excerpt)
# branch additionally waits for mongod.
1186 case downcase(hiera('ceilometer_backend')) {
1188 pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
1189 clone_params => 'interleave=true',
1190 require => Pacemaker::Resource::Ocf['openstack-core'],
1194 pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
1195 clone_params => 'interleave=true',
1196 require => [Pacemaker::Resource::Ocf['openstack-core'],
1197 Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1201 pacemaker::resource::service { $::ceilometer::params::collector_service_name :
1202 clone_params => 'interleave=true',
1204 pacemaker::resource::service { $::ceilometer::params::api_service_name :
1205 clone_params => 'interleave=true',
1207 pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
1208 clone_params => 'interleave=true',
1210 # Fedora doesn't know `require-all` parameter for constraints yet
# On Fedora the constraint_params are left undef so no unsupported option is
# passed to pcs; elsewhere require-all=false lets the dependent service start
# even when redis isn't promoted on every node.
1211 if $::operatingsystem == 'Fedora' {
1212 $redis_ceilometer_constraint_params = undef
1213 $redis_aodh_constraint_params = undef
1215 $redis_ceilometer_constraint_params = 'require-all=false'
1216 $redis_aodh_constraint_params = 'require-all=false'
# Coordination backend ordering: the redis master must be promoted before
# the ceilometer central agent and the aodh evaluator start (they use redis
# for coordination — note first_action is 'promote', not 'start').
1218 pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
1219 constraint_type => 'order',
1220 first_resource => 'redis-master',
1221 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1222 first_action => 'promote',
1223 second_action => 'start',
1224 constraint_params => $redis_ceilometer_constraint_params,
1225 require => [Pacemaker::Resource::Ocf['redis'],
1226 Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
1228 pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
1229 constraint_type => 'order',
1230 first_resource => 'redis-master',
1231 second_resource => "${::aodh::params::evaluator_service_name}-clone",
1232 first_action => 'promote',
1233 second_action => 'start',
1234 constraint_params => $redis_aodh_constraint_params,
1235 require => [Pacemaker::Resource::Ocf['redis'],
1236 Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
# keystone (openstack-core) must be up before the central agent starts.
1238 pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
1239 constraint_type => 'order',
1240 first_resource => 'openstack-core-clone',
1241 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1242 first_action => 'start',
1243 second_action => 'start',
1244 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1245 Pacemaker::Resource::Ocf['openstack-core']],
# keystone (openstack-core) must be up before the ceilometer notification
# agent starts.
# FIX(review): the `require` previously referenced the *central* agent
# service — a copy-paste of the constraint above — even though this
# constraint's second_resource is the *notification* agent. It must depend
# on the notification agent resource (declared earlier in this file) so the
# constraint is only created after that resource exists.
1247 pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
1248 constraint_type => 'order',
1249 first_resource => 'openstack-core-clone',
1250 second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
1251 first_action => 'start',
1252 second_action => 'start',
1253 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name],
1254 Pacemaker::Resource::Ocf['openstack-core']],
# Ceilometer pipeline ordering: central agent -> collector -> api, with the
# api colocated with the collector.
1256 pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
1257 constraint_type => 'order',
1258 first_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1259 second_resource => "${::ceilometer::params::collector_service_name}-clone",
1260 first_action => 'start',
1261 second_action => 'start',
1262 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1263 Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1265 pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
1266 constraint_type => 'order',
1267 first_resource => "${::ceilometer::params::collector_service_name}-clone",
1268 second_resource => "${::ceilometer::params::api_service_name}-clone",
1269 first_action => 'start',
1270 second_action => 'start',
1271 require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
1272 Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
1274 pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
1275 source => "${::ceilometer::params::api_service_name}-clone",
1276 target => "${::ceilometer::params::collector_service_name}-clone",
1277 score => 'INFINITY',
1278 require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1279 Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
# Aodh services as interleaved clones. The evaluator is the hub: both the
# notifier and the listener start after it and are colocated with it.
1282 pacemaker::resource::service { $::aodh::params::evaluator_service_name :
1283 clone_params => 'interleave=true',
1285 pacemaker::resource::service { $::aodh::params::notifier_service_name :
1286 clone_params => 'interleave=true',
1288 pacemaker::resource::service { $::aodh::params::listener_service_name :
1289 clone_params => 'interleave=true',
1291 pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
1292 constraint_type => 'order',
1293 first_resource => "${::aodh::params::evaluator_service_name}-clone",
1294 second_resource => "${::aodh::params::notifier_service_name}-clone",
1295 first_action => 'start',
1296 second_action => 'start',
1297 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1298 Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
1300 pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
1301 source => "${::aodh::params::notifier_service_name}-clone",
1302 target => "${::aodh::params::evaluator_service_name}-clone",
1303 score => 'INFINITY',
1304 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1305 Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
1307 pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
1308 constraint_type => 'order',
1309 first_resource => "${::aodh::params::evaluator_service_name}-clone",
1310 second_resource => "${::aodh::params::listener_service_name}-clone",
1311 first_action => 'start',
1312 second_action => 'start',
1313 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1314 Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
1316 pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
1317 source => "${::aodh::params::listener_service_name}-clone",
1318 target => "${::aodh::params::evaluator_service_name}-clone",
1319 score => 'INFINITY',
1320 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1321 Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
# With the mongodb metering backend, mongod must be started before the
# ceilometer central agent (complements the mongodb branch of the case
# statement above).
1323 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
1324 pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
1325 constraint_type => 'order',
1326 first_resource => "${::mongodb::params::service_name}-clone",
1327 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1328 first_action => 'start',
1329 second_action => 'start',
1330 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1331 Pacemaker::Resource::Service[$::mongodb::params::service_name]],
# Gnocchi: metricd starts before statsd, and statsd is pinned to the same
# node as metricd.
1336 pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
1337 clone_params => 'interleave=true',
1339 pacemaker::resource::service { $::gnocchi::params::statsd_service_name :
1340 clone_params => 'interleave=true',
1342 pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint':
1343 constraint_type => 'order',
1344 first_resource => "${::gnocchi::params::metricd_service_name}-clone",
1345 second_resource => "${::gnocchi::params::statsd_service_name}-clone",
1346 first_action => 'start',
1347 second_action => 'start',
1348 require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
1349 Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
1351 pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation':
1352 source => "${::gnocchi::params::statsd_service_name}-clone",
1353 target => "${::gnocchi::params::metricd_service_name}-clone",
1354 score => 'INFINITY',
1355 require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
1356 Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
1359 # Horizon and Keystone
# httpd hosts both Horizon and Keystone here, so it only becomes a cluster
# resource after the Keystone PKI files are in place. verify_on_create makes
# the resource creation fail fast if the service cannot actually start.
1360 pacemaker::resource::service { $::apache::params::service_name:
1361 clone_params => 'interleave=true',
1362 verify_on_create => true,
1363 require => [File['/etc/keystone/ssl/certs/ca.pem'],
1364 File['/etc/keystone/ssl/private/signing_key.pem'],
1365 File['/etc/keystone/ssl/certs/signing_cert.pem']],
# Cisco Nexus 1000V: the VSM primary/secondary run as libvirt domains via the
# heartbeat:VirtualDomain OCF agent. resource-stickiness=INFINITY keeps each
# VSM where it started; the -INFINITY colocation (targets elided in this
# excerpt, presumably vsm-p vs vsm-s) keeps the two VSMs on different nodes.
1369 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
1370 pacemaker::resource::ocf { 'vsm-p' :
1371 ocf_agent_name => 'heartbeat:VirtualDomain',
1372 resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
1373 require => Class['n1k_vsm'],
1374 meta_params => 'resource-stickiness=INFINITY',
# The secondary VSM is only managed when pacemaker control is enabled.
1376 if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
1377 pacemaker::resource::ocf { 'vsm-s' :
1378 ocf_agent_name => 'heartbeat:VirtualDomain',
1379 resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
1380 require => Class['n1k_vsm'],
1381 meta_params => 'resource-stickiness=INFINITY',
1383 pacemaker::constraint::colocation { 'vsm-colocation-contraint':
1386 score => '-INFINITY',
1387 require => [Pacemaker::Resource::Ocf['vsm-p'],
1388 Pacemaker::Resource::Ocf['vsm-s']],
# Record a per-step marker of installed packages so later runs/steps can
# detect what this manifest already installed.
1397 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
1398 package_manifest{$package_manifest_name: ensure => present}