1 # Copyright 2015 Red Hat, Inc.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
21 # TODO(jistr): use pcs resource provider instead of just no-ops
23 tag == 'aodh-service' or
24 tag == 'cinder-service' or
25 tag == 'ceilometer-service' or
26 tag == 'gnocchi-service' or
27 tag == 'heat-service' or
28 tag == 'neutron-service' or
29 tag == 'nova-service' or
30 tag == 'sahara-service'
33 restart => '/bin/true',
38 include ::tripleo::packages
39 include ::tripleo::firewall
41 if $::hostname == downcase(hiera('bootstrap_nodeid')) {
42 $pacemaker_master = true
45 $pacemaker_master = false
49 $enable_fencing = str2bool(hiera('enable_fencing', false)) and hiera('step') >= 6
50 $enable_load_balancer = hiera('enable_load_balancer', true)
52 # When to start and enable services which haven't been Pacemakerized
53 # FIXME: remove when we start all OpenStack services using Pacemaker
54 # (occurrences of this variable will be gradually replaced with false)
55 $non_pcmk_start = hiera('step') >= 5
57 if hiera('step') >= 1 {
59 create_resources(kmod::load, hiera('kernel_modules'), {})
60 create_resources(sysctl::value, hiera('sysctl_settings'), {})
61 Exec <| tag == 'kmod::load' |> -> Sysctl <| |>
65 if count(hiera('ntp::servers')) > 0 {
69 $controller_node_ips = split(hiera('controller_node_ips'), ',')
70 $controller_node_names = split(downcase(hiera('controller_node_names')), ',')
71 if $enable_load_balancer {
72 class { '::tripleo::loadbalancer' :
73 controller_hosts => $controller_node_ips,
74 controller_hosts_names => $controller_node_names,
76 mysql_clustercheck => true,
77 haproxy_service_manage => false,
81 $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G'))
82 $corosync_ipv6 = str2bool(hiera('corosync_ipv6', false))
84 $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000), '--ipv6' => '' }
86 $cluster_setup_extras = { '--token' => hiera('corosync_token_timeout', 1000) }
88 class { '::pacemaker':
89 hacluster_pwd => hiera('hacluster_pwd'),
91 class { '::pacemaker::corosync':
92 cluster_members => $pacemaker_cluster_members,
93 setup_cluster => $pacemaker_master,
94 cluster_setup_extras => $cluster_setup_extras,
96 class { '::pacemaker::stonith':
97 disable => !$enable_fencing,
100 include ::tripleo::fencing
102 # enable stonith after all fencing devices have been created
103 Class['tripleo::fencing'] -> Class['pacemaker::stonith']
106 # FIXME(gfidente): sets 200secs as default start timeout op
107 # param; until we can use pcmk global defaults we'll still
108 # need to add it to every resource which redefines op params
109 Pacemaker::Resource::Service {
110 op_params => 'start timeout=200s stop timeout=200s',
113 # Only configure RabbitMQ in this step, don't start it yet to
114 # avoid races where non-master nodes attempt to start without
115 # config (eg. binding on 0.0.0.0)
116 # The module ignores erlang_cookie if cluster_config is false
117 $rabbit_ipv6 = str2bool(hiera('rabbit_ipv6', false))
119 $rabbit_env = merge(hiera('rabbitmq_environment'), {
120 'RABBITMQ_SERVER_START_ARGS' => '"-proto_dist inet6_tcp"'
123 $rabbit_env = hiera('rabbitmq_environment')
126 class { '::rabbitmq':
127 service_manage => false,
128 tcp_keepalive => false,
129 config_kernel_variables => hiera('rabbitmq_kernel_variables'),
130 config_variables => hiera('rabbitmq_config_variables'),
131 environment_variables => $rabbit_env,
133 file { '/var/lib/rabbitmq/.erlang.cookie':
138 content => hiera('rabbitmq::erlang_cookie'),
142 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
143 include ::mongodb::globals
144 include ::mongodb::client
145 class { '::mongodb::server' :
146 service_manage => false,
151 class {'::memcached' :
152 service_manage => false,
157 service_manage => false,
158 notify_service => false,
162 if str2bool(hiera('enable_galera', true)) {
163 $mysql_config_file = '/etc/my.cnf.d/galera.cnf'
165 $mysql_config_file = '/etc/my.cnf.d/server.cnf'
167 $galera_nodes = downcase(hiera('galera_node_names', $::hostname))
168 $galera_nodes_count = count(split($galera_nodes, ','))
170 # FIXME: due to https://bugzilla.redhat.com/show_bug.cgi?id=1298671 we
171 # set bind-address to a hostname instead of an ip address; to move Mysql
172 # from internal_api on another network we'll have to customize both
173 # MysqlNetwork and ControllerHostnameResolveNetwork in ServiceNetMap
174 $mysql_bind_host = hiera('mysql_bind_host')
# NOTE(review): these lines are the mysqld/Galera option hash entries; the
# enclosing $mysqld_options assignment is not visible in this extract —
# confirm against the full manifest.
177 'skip-name-resolve' => '1',
# ROW binlog format is required for Galera multi-master replication.
178 'binlog_format' => 'ROW',
179 'default-storage-engine' => 'innodb',
# autoinc_lock_mode=2 (interleaved) avoids auto-increment lock contention
# under Galera.
180 'innodb_autoinc_lock_mode' => '2',
181 'innodb_locks_unsafe_for_binlog'=> '1',
# Query cache is disabled (size=0, type=0).
182 'query_cache_size' => '0',
183 'query_cache_type' => '0',
# Bind to the hostname rather than an IP — see the FIXME above referencing
# https://bugzilla.redhat.com/show_bug.cgi?id=1298671.
184 'bind-address' => $::hostname,
185 'max_connections' => hiera('mysql_max_connections'),
186 'open_files_limit' => '-1',
# Galera wsrep provider settings for the same option hash.
188 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so',
189 'wsrep_cluster_name' => 'galera_cluster',
# Cluster members come from $galera_nodes (downcased galera_node_names).
190 'wsrep_cluster_address' => "gcomm://${galera_nodes}",
191 'wsrep_slave_threads' => '1',
192 'wsrep_certify_nonPK' => '1',
193 'wsrep_max_ws_rows' => '131072',
194 'wsrep_max_ws_size' => '1073741824',
195 'wsrep_debug' => '0',
196 'wsrep_convert_LOCK_to_trx' => '0',
197 'wsrep_retry_autocommit' => '1',
198 'wsrep_auto_increment_control' => '1',
199 'wsrep_drupal_282555_workaround'=> '0',
200 'wsrep_causal_reads' => '0',
# State transfer via rsync; clustercheck/galera-ready gate readiness later.
201 'wsrep_sst_method' => 'rsync',
# Brackets around $mysql_bind_host suggest IPv6-safe listen address —
# TODO(review) confirm for IPv4 deployments.
202 'wsrep_provider_options' => "gmcast.listen_addr=tcp://[${mysql_bind_host}]:4567;",
# Configure MariaDB via puppetlabs-mysql but leave the service unmanaged:
# Pacemaker (the 'galera' OCF resource created at step >= 2) owns
# start/stop of mysqld, so puppet must not enable or manage it here.
206 class { '::mysql::server':
207 create_root_user => false,
208 create_root_my_cnf => false,
# $mysql_config_file is galera.cnf or server.cnf depending on enable_galera.
209 config_file => $mysql_config_file,
210 override_options => $mysqld_options,
# Only the bootstrap (pacemaker master) node removes default accounts.
211 remove_default_accounts => $pacemaker_master,
212 service_manage => false,
213 service_enabled => false,
218 if hiera('step') >= 2 {
220 # NOTE(gfidente): the following vars are needed on all nodes so they
221 # need to stay out of pacemaker_master conditional.
222 # The addresses mangling will hopefully go away when we'll be able to
223 # configure the connection string via hostnames, until then, we need to pass
224 # the list of IPv6 addresses *with* port and without the brackets as 'members'
225 # argument for the 'mongodb_replset' resource.
226 if str2bool(hiera('mongodb::server::ipv6', false)) {
227 $mongo_node_ips_with_port_prefixed = prefix(hiera('mongo_node_ips'), '[')
228 $mongo_node_ips_with_port = suffix($mongo_node_ips_with_port_prefixed, ']:27017')
229 $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
231 $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
232 $mongo_node_ips_with_port_nobr = suffix(hiera('mongo_node_ips'), ':27017')
234 $mongodb_replset = hiera('mongodb::server::replset')
236 if $pacemaker_master {
238 if $enable_load_balancer {
240 include ::pacemaker::resource_defaults
242 # Create an openstack-core dummy resource. See RHBZ 1290121
243 pacemaker::resource::ocf { 'openstack-core':
244 ocf_agent_name => 'heartbeat:Dummy',
245 clone_params => true,
247 # FIXME: we should not have to access tripleo::loadbalancer class
248 # parameters here to configure pacemaker VIPs. The configuration
249 # of pacemaker VIPs could move into puppet-tripleo or we should
250 # make use of less specific hiera parameters here for the settings.
251 pacemaker::resource::service { 'haproxy':
252 clone_params => true,
255 $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
256 tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_control_vip':
257 vip_name => 'control',
258 ip_address => $control_vip,
261 $public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
262 tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_public_vip':
263 ensure => $public_vip and $public_vip != $control_vip,
264 vip_name => 'public',
265 ip_address => $public_vip,
268 $redis_vip = hiera('redis_vip')
269 tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_redis_vip':
270 ensure => $redis_vip and $redis_vip != $control_vip,
272 ip_address => $redis_vip,
276 $internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
277 tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_internal_api_vip':
278 ensure => $internal_api_vip and $internal_api_vip != $control_vip,
279 vip_name => 'internal_api',
280 ip_address => $internal_api_vip,
283 $storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
284 tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_vip':
285 ensure => $storage_vip and $storage_vip != $control_vip,
286 vip_name => 'storage',
287 ip_address => $storage_vip,
290 $storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
291 tripleo::pacemaker::haproxy_with_vip { 'haproxy_and_storage_mgmt_vip':
292 ensure => $storage_mgmt_vip and $storage_mgmt_vip != $control_vip,
293 vip_name => 'storage_mgmt',
294 ip_address => $storage_mgmt_vip,
298 pacemaker::resource::service { $::memcached::params::service_name :
299 clone_params => 'interleave=true',
300 require => Class['::memcached'],
303 pacemaker::resource::ocf { 'rabbitmq':
304 ocf_agent_name => 'heartbeat:rabbitmq-cluster',
305 resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'',
306 clone_params => 'ordered=true interleave=true',
307 meta_params => 'notify=true',
308 require => Class['::rabbitmq'],
311 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
312 pacemaker::resource::service { $::mongodb::params::service_name :
313 op_params => 'start timeout=370s stop timeout=200s',
314 clone_params => true,
315 require => Class['::mongodb::server'],
317 # NOTE (spredzy) : The replset can only be run
318 # once all the nodes have joined the cluster.
319 mongodb_conn_validator { $mongo_node_ips_with_port :
321 require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
322 before => Mongodb_replset[$mongodb_replset],
324 mongodb_replset { $mongodb_replset :
325 members => $mongo_node_ips_with_port_nobr,
329 pacemaker::resource::ocf { 'galera' :
330 ocf_agent_name => 'heartbeat:galera',
331 op_params => 'promote timeout=300s on-fail=block',
333 meta_params => "master-max=${galera_nodes_count} ordered=true",
334 resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'",
335 require => Class['::mysql::server'],
336 before => Exec['galera-ready'],
339 pacemaker::resource::ocf { 'redis':
340 ocf_agent_name => 'heartbeat:redis',
342 meta_params => 'notify=true ordered=true interleave=true',
343 resource_params => 'wait_last_known_master=true',
344 require => Class['::redis'],
349 exec { 'galera-ready' :
350 command => '/usr/bin/clustercheck >/dev/null',
354 environment => ['AVAILABLE_WHEN_READONLY=0'],
355 require => File['/etc/sysconfig/clustercheck'],
358 file { '/etc/sysconfig/clustercheck' :
360 content => "MYSQL_USERNAME=root\n
362 MYSQL_HOST=localhost\n",
365 xinetd::service { 'galera-monitor' :
367 server => '/usr/bin/clustercheck',
368 per_source => 'UNLIMITED',
369 log_on_success => '',
370 log_on_failure => 'HOST',
372 service_type => 'UNLISTED',
375 require => File['/etc/sysconfig/clustercheck'],
378 # Create all the database schemas
380 class { '::keystone::db::mysql':
381 require => Exec['galera-ready'],
383 class { '::glance::db::mysql':
384 require => Exec['galera-ready'],
386 class { '::nova::db::mysql':
387 require => Exec['galera-ready'],
389 class { '::nova::db::mysql_api':
390 require => Exec['galera-ready'],
392 class { '::neutron::db::mysql':
393 require => Exec['galera-ready'],
395 class { '::cinder::db::mysql':
396 require => Exec['galera-ready'],
398 class { '::heat::db::mysql':
399 require => Exec['galera-ready'],
402 if downcase(hiera('ceilometer_backend')) == 'mysql' {
403 class { '::ceilometer::db::mysql':
404 require => Exec['galera-ready'],
408 if downcase(hiera('gnocchi_indexer_backend')) == 'mysql' {
409 class { '::gnocchi::db::mysql':
410 require => Exec['galera-ready'],
413 class { '::sahara::db::mysql':
414 require => Exec['galera-ready'],
418 # pre-install swift here so we can build rings
422 $enable_ceph = hiera('ceph_storage_count', 0) > 0 or hiera('enable_ceph_storage', false)
425 $mon_initial_members = downcase(hiera('ceph_mon_initial_members'))
426 if str2bool(hiera('ceph_ipv6', false)) {
427 $mon_host = hiera('ceph_mon_host_v6')
429 $mon_host = hiera('ceph_mon_host')
431 class { '::ceph::profile::params':
432 mon_initial_members => $mon_initial_members,
433 mon_host => $mon_host,
436 include ::ceph::profile::mon
439 if str2bool(hiera('enable_ceph_storage', false)) {
440 if str2bool(hiera('ceph_osd_selinux_permissive', true)) {
441 exec { 'set selinux to permissive on boot':
442 command => "sed -ie 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config",
443 onlyif => "test -f /etc/selinux/config && ! grep '^SELINUX=permissive' /etc/selinux/config",
444 path => ['/usr/bin', '/usr/sbin'],
447 exec { 'set selinux to permissive':
448 command => 'setenforce 0',
449 onlyif => "which setenforce && getenforce | grep -i 'enforcing'",
450 path => ['/usr/bin', '/usr/sbin'],
451 } -> Class['ceph::profile::osd']
455 include ::ceph::profile::osd
458 if str2bool(hiera('enable_external_ceph', false)) {
459 if str2bool(hiera('ceph_ipv6', false)) {
460 $mon_host = hiera('ceph_mon_host_v6')
462 $mon_host = hiera('ceph_mon_host')
464 class { '::ceph::profile::params':
465 mon_host => $mon_host,
468 include ::ceph::profile::client
474 if hiera('step') >= 4 or ( hiera('step') >= 3 and $sync_db ) {
476 $nova_ipv6 = hiera('nova::use_ipv6', false)
478 $memcached_servers = suffix(hiera('memcache_node_ips_v6'), ':11211')
480 $memcached_servers = suffix(hiera('memcache_node_ips'), ':11211')
484 memcached_servers => $memcached_servers
487 include ::nova::config
489 class { '::nova::api' :
491 sync_db_api => $sync_db,
492 manage_service => false,
495 class { '::nova::cert' :
496 manage_service => false,
499 class { '::nova::conductor' :
500 manage_service => false,
503 class { '::nova::consoleauth' :
504 manage_service => false,
507 class { '::nova::vncproxy' :
508 manage_service => false,
511 include ::nova::scheduler::filter
512 class { '::nova::scheduler' :
513 manage_service => false,
516 include ::nova::network::neutron
518 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
520 # TODO(devvesa) provide non-controller ips for these services
521 $zookeeper_node_ips = hiera('neutron_api_node_ips')
522 $cassandra_node_ips = hiera('neutron_api_node_ips')
524 # Run zookeeper in the controller if configured
525 if hiera('enable_zookeeper_on_controller') {
526 class {'::tripleo::cluster::zookeeper':
527 zookeeper_server_ips => $zookeeper_node_ips,
528 # TODO: create a 'bind' hiera key for zookeeper
529 zookeeper_client_ip => hiera('neutron::bind_host'),
530 zookeeper_hostnames => split(hiera('controller_node_names'), ',')
534 # Run cassandra in the controller if configured
535 if hiera('enable_cassandra_on_controller') {
536 class {'::tripleo::cluster::cassandra':
537 cassandra_servers => $cassandra_node_ips,
538 # TODO: create a 'bind' hiera key for cassandra
539 cassandra_ip => hiera('neutron::bind_host'),
543 class {'::tripleo::network::midonet::agent':
544 zookeeper_servers => $zookeeper_node_ips,
545 cassandra_seeds => $cassandra_node_ips
548 class {'::tripleo::network::midonet::api':
549 zookeeper_servers => $zookeeper_node_ips,
550 vip => hiera('tripleo::loadbalancer::public_virtual_ip'),
551 keystone_ip => hiera('tripleo::loadbalancer::public_virtual_ip'),
552 keystone_admin_token => hiera('keystone::admin_token'),
553 # TODO: create a 'bind' hiera key for api
554 bind_address => hiera('neutron::bind_host'),
555 admin_password => hiera('admin_password')
560 service_plugins => []
565 # Neutron class definitions
569 include ::neutron::config
570 class { '::neutron::server' :
572 manage_service => false,
575 include ::neutron::server::notifications
576 if hiera('neutron::core_plugin') == 'neutron.plugins.nuage.plugin.NuagePlugin' {
577 include ::neutron::plugins::nuage
579 if hiera('neutron::core_plugin') == 'neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2' {
580 include ::neutron::plugins::opencontrail
582 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
583 class {'::neutron::plugins::midonet':
584 midonet_api_ip => hiera('tripleo::loadbalancer::public_virtual_ip'),
585 keystone_tenant => hiera('neutron::server::auth_tenant'),
586 keystone_password => hiera('neutron::server::auth_password')
589 if hiera('neutron::core_plugin') == 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2' {
590 class { '::neutron::plugins::plumgrid' :
591 connection => hiera('neutron::server::database_connection'),
592 controller_priv_host => hiera('keystone_admin_api_vip'),
593 admin_password => hiera('admin_password'),
594 metadata_proxy_shared_secret => hiera('nova::api::neutron_metadata_proxy_shared_secret'),
597 if hiera('neutron::enable_dhcp_agent',true) {
598 class { '::neutron::agents::dhcp' :
599 manage_service => false,
602 file { '/etc/neutron/dnsmasq-neutron.conf':
603 content => hiera('neutron_dnsmasq_options'),
606 notify => Service['neutron-dhcp-service'],
607 require => Package['neutron'],
610 if hiera('neutron::enable_l3_agent',true) {
611 class { '::neutron::agents::l3' :
612 manage_service => false,
616 if hiera('neutron::enable_metadata_agent',true) {
617 class { '::neutron::agents::metadata':
618 manage_service => false,
622 include ::neutron::plugins::ml2
623 class { '::neutron::agents::ml2::ovs':
624 manage_service => false,
628 if 'cisco_ucsm' in hiera('neutron::plugins::ml2::mechanism_drivers') {
629 include ::neutron::plugins::ml2::cisco::ucsm
631 if 'cisco_nexus' in hiera('neutron::plugins::ml2::mechanism_drivers') {
632 include ::neutron::plugins::ml2::cisco::nexus
633 include ::neutron::plugins::ml2::cisco::type_nexus_vxlan
635 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
636 include ::neutron::plugins::ml2::cisco::nexus1000v
638 class { '::neutron::agents::n1kv_vem':
639 n1kv_source => hiera('n1kv_vem_source', undef),
640 n1kv_version => hiera('n1kv_vem_version', undef),
644 n1kv_source => hiera('n1kv_vsm_source', undef),
645 n1kv_version => hiera('n1kv_vsm_version', undef),
649 if 'bsn_ml2' in hiera('neutron::plugins::ml2::mechanism_drivers') {
650 include ::neutron::plugins::ml2::bigswitch::restproxy
651 include ::neutron::agents::bigswitch
653 neutron_l3_agent_config {
654 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
656 neutron_dhcp_agent_config {
657 'DEFAULT/ovs_use_veth': value => hiera('neutron_ovs_use_veth', false);
660 'DEFAULT/notification_driver': value => 'messaging';
664 include ::cinder::config
665 include ::tripleo::ssl::cinder_config
666 class { '::cinder::api':
668 manage_service => false,
671 class { '::cinder::scheduler' :
672 manage_service => false,
675 class { '::cinder::volume' :
676 manage_service => false,
679 include ::cinder::glance
680 include ::cinder::ceilometer
681 class { '::cinder::setup_test_volume':
682 size => join([hiera('cinder_lvm_loop_device_size'), 'M']),
685 $cinder_enable_iscsi = hiera('cinder_enable_iscsi_backend', true)
686 if $cinder_enable_iscsi {
687 $cinder_iscsi_backend = 'tripleo_iscsi'
689 cinder::backend::iscsi { $cinder_iscsi_backend :
690 iscsi_ip_address => hiera('cinder_iscsi_ip_address'),
691 iscsi_helper => hiera('cinder_iscsi_helper'),
697 $ceph_pools = hiera('ceph_pools')
698 ceph::pool { $ceph_pools :
699 pg_num => hiera('ceph::profile::params::osd_pool_default_pg_num'),
700 pgp_num => hiera('ceph::profile::params::osd_pool_default_pgp_num'),
701 size => hiera('ceph::profile::params::osd_pool_default_size'),
704 $cinder_pool_requires = [Ceph::Pool[hiera('cinder_rbd_pool_name')]]
707 $cinder_pool_requires = []
710 if hiera('cinder_enable_rbd_backend', false) {
711 $cinder_rbd_backend = 'tripleo_ceph'
713 cinder::backend::rbd { $cinder_rbd_backend :
714 backend_host => hiera('cinder::host'),
715 rbd_pool => hiera('cinder_rbd_pool_name'),
716 rbd_user => hiera('ceph_client_user_name'),
717 rbd_secret_uuid => hiera('ceph::profile::params::fsid'),
718 require => $cinder_pool_requires,
# Optionally configure a Dell EqualLogic (eqlx) Cinder backend. All
# parameters are passed straight through from hiera (undef when unset) so
# the puppet-cinder class applies its own defaults.
722 if hiera('cinder_enable_eqlx_backend', false) {
723 $cinder_eqlx_backend = hiera('cinder::backend::eqlx::volume_backend_name')
725 cinder::backend::eqlx { $cinder_eqlx_backend :
726 volume_backend_name => hiera('cinder::backend::eqlx::volume_backend_name', undef),
727 san_ip => hiera('cinder::backend::eqlx::san_ip', undef),
728 san_login => hiera('cinder::backend::eqlx::san_login', undef),
729 san_password => hiera('cinder::backend::eqlx::san_password', undef),
730 san_thin_provision => hiera('cinder::backend::eqlx::san_thin_provision', undef),
731 eqlx_group_name => hiera('cinder::backend::eqlx::eqlx_group_name', undef),
732 eqlx_pool => hiera('cinder::backend::eqlx::eqlx_pool', undef),
733 eqlx_use_chap => hiera('cinder::backend::eqlx::eqlx_use_chap', undef),
734 eqlx_chap_login => hiera('cinder::backend::eqlx::eqlx_chap_login', undef),
# BUG FIX: this previously looked up the non-existent hiera key
# 'cinder::backend::eqlx::eqlx_san_password', so the CHAP password was
# always undef; read the matching eqlx_chap_password key instead.
735 eqlx_chap_password => hiera('cinder::backend::eqlx::eqlx_chap_password', undef),
# Optionally configure a Dell Storage Center iSCSI Cinder backend; every
# parameter is forwarded verbatim from hiera (undef when unset).
739 if hiera('cinder_enable_dellsc_backend', false) {
740 $cinder_dellsc_backend = hiera('cinder::backend::dellsc_iscsi::volume_backend_name')
742 cinder::backend::dellsc_iscsi{ $cinder_dellsc_backend :
743 volume_backend_name => hiera('cinder::backend::dellsc_iscsi::volume_backend_name', undef),
744 san_ip => hiera('cinder::backend::dellsc_iscsi::san_ip', undef),
745 san_login => hiera('cinder::backend::dellsc_iscsi::san_login', undef),
746 san_password => hiera('cinder::backend::dellsc_iscsi::san_password', undef),
747 dell_sc_ssn => hiera('cinder::backend::dellsc_iscsi::dell_sc_ssn', undef),
748 iscsi_ip_address => hiera('cinder::backend::dellsc_iscsi::iscsi_ip_address', undef),
749 iscsi_port => hiera('cinder::backend::dellsc_iscsi::iscsi_port', undef),
750 dell_sc_api_port => hiera('cinder::backend::dellsc_iscsi::dell_sc_api_port', undef),
751 dell_sc_server_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_server_folder', undef),
752 dell_sc_volume_folder => hiera('cinder::backend::dellsc_iscsi::dell_sc_volume_folder', undef),
756 if hiera('cinder_enable_netapp_backend', false) {
757 $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
759 if hiera('cinder::backend::netapp::nfs_shares', undef) {
760 $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
# NetApp Cinder backend: all settings are pass-through hiera lookups
# (undef when unset). $cinder_netapp_nfs_shares is only set when the
# nfs_shares hiera key exists (see the preceding conditional).
763 cinder::backend::netapp { $cinder_netapp_backend :
764 netapp_login => hiera('cinder::backend::netapp::netapp_login', undef),
765 netapp_password => hiera('cinder::backend::netapp::netapp_password', undef),
766 netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
767 netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef),
768 netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
769 netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef),
770 netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
771 netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef),
772 netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef),
773 netapp_volume_list => hiera('cinder::backend::netapp::netapp_volume_list', undef),
774 netapp_vserver => hiera('cinder::backend::netapp::netapp_vserver', undef),
775 netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
776 nfs_shares => $cinder_netapp_nfs_shares,
777 nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef),
778 netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
779 netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
780 netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef),
781 netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
782 netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
783 netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
787 if hiera('cinder_enable_nfs_backend', false) {
788 $cinder_nfs_backend = 'tripleo_nfs'
790 if str2bool($::selinux) {
791 selboolean { 'virt_use_nfs':
794 } -> Package['nfs-utils']
797 package { 'nfs-utils': } ->
798 cinder::backend::nfs { $cinder_nfs_backend:
799 nfs_servers => hiera('cinder_nfs_servers'),
800 nfs_mount_options => hiera('cinder_nfs_mount_options',''),
801 nfs_shares_config => '/etc/cinder/shares-nfs.conf',
805 $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_eqlx_backend, $cinder_dellsc_backend, $cinder_netapp_backend, $cinder_nfs_backend])
806 class { '::cinder::backends' :
807 enabled_backends => union($cinder_enabled_backends, hiera('cinder_user_enabled_backends')),
813 class { '::sahara::service::api':
814 manage_service => false,
817 class { '::sahara::service::engine':
818 manage_service => false,
823 class { '::swift::proxy' :
824 manage_service => $non_pcmk_start,
825 enabled => $non_pcmk_start,
827 include ::swift::proxy::proxy_logging
828 include ::swift::proxy::healthcheck
829 include ::swift::proxy::cache
830 include ::swift::proxy::keystone
831 include ::swift::proxy::authtoken
832 include ::swift::proxy::staticweb
833 include ::swift::proxy::ratelimit
834 include ::swift::proxy::catch_errors
835 include ::swift::proxy::tempurl
836 include ::swift::proxy::formpost
839 if str2bool(hiera('enable_swift_storage', true)) {
840 class {'::swift::storage::all':
841 mount_check => str2bool(hiera('swift_mount_check')),
843 class {'::swift::storage::account':
844 manage_service => $non_pcmk_start,
845 enabled => $non_pcmk_start,
847 class {'::swift::storage::container':
848 manage_service => $non_pcmk_start,
849 enabled => $non_pcmk_start,
851 class {'::swift::storage::object':
852 manage_service => $non_pcmk_start,
853 enabled => $non_pcmk_start,
855 if(!defined(File['/srv/node'])) {
860 require => Package['openstack-swift'],
863 $swift_components = ['account', 'container', 'object']
864 swift::storage::filter::recon { $swift_components : }
865 swift::storage::filter::healthcheck { $swift_components : }
869 case downcase(hiera('ceilometer_backend')) {
871 $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
874 $mongo_node_string = join($mongo_node_ips_with_port, ',')
875 $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
879 include ::ceilometer::config
880 class { '::ceilometer::api' :
881 manage_service => false,
884 class { '::ceilometer::agent::notification' :
885 manage_service => false,
888 class { '::ceilometer::agent::central' :
889 manage_service => false,
892 class { '::ceilometer::collector' :
893 manage_service => false,
896 include ::ceilometer::expirer
897 class { '::ceilometer::db' :
898 database_connection => $ceilometer_database_connection,
901 include ::ceilometer::agent::auth
902 include ::ceilometer::dispatcher::gnocchi
904 Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" }
907 include ::heat::config
910 notification_driver => 'messaging',
912 class { '::heat::api' :
913 manage_service => false,
916 class { '::heat::api_cfn' :
917 manage_service => false,
920 class { '::heat::api_cloudwatch' :
921 manage_service => false,
924 class { '::heat::engine' :
925 manage_service => false,
929 # httpd/apache and horizon
930 # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
932 service_enable => false,
933 # service_manage => false, # <-- not supported with horizon&apache mod_wsgi?
935 include ::apache::mod::remoteip
936 include ::apache::mod::status
937 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
938 $_profile_support = 'cisco'
940 $_profile_support = 'None'
942 $neutron_options = {'profile_support' => $_profile_support }
944 $memcached_ipv6 = hiera('memcached_ipv6', false)
946 $horizon_memcached_servers = hiera('memcache_node_ips_v6', '[::1]')
948 $horizon_memcached_servers = hiera('memcache_node_ips', '127.0.0.1')
952 cache_server_ip => $horizon_memcached_servers,
953 neutron_options => $neutron_options,
958 database_connection => $ceilometer_database_connection,
960 include ::aodh::config
962 include ::aodh::client
963 include ::aodh::wsgi::apache
964 class { '::aodh::api':
965 manage_service => false,
967 service_name => 'httpd',
969 class { '::aodh::evaluator':
970 manage_service => false,
973 class { '::aodh::notifier':
974 manage_service => false,
977 class { '::aodh::listener':
978 manage_service => false,
983 $gnocchi_database_connection = hiera('gnocchi_mysql_conn_string')
984 include ::gnocchi::client
986 include ::gnocchi::db::sync
988 include ::gnocchi::storage
989 $gnocchi_backend = downcase(hiera('gnocchi_backend', 'swift'))
990 case $gnocchi_backend {
991 'swift': { include ::gnocchi::storage::swift }
992 'file': { include ::gnocchi::storage::file }
993 'rbd': { include ::gnocchi::storage::ceph }
994 default: { fail('Unrecognized gnocchi_backend parameter.') }
997 database_connection => $gnocchi_database_connection,
999 class { '::gnocchi::api' :
1000 manage_service => false,
1002 service_name => 'httpd',
1004 class { '::gnocchi::wsgi::apache' :
1007 class { '::gnocchi::metricd' :
1008 manage_service => false,
1011 class { '::gnocchi::statsd' :
1012 manage_service => false,
1016 $snmpd_user = hiera('snmpd_readonly_user_name')
1017 snmp::snmpv3_user { $snmpd_user:
1019 authpass => hiera('snmpd_readonly_user_password'),
1022 agentaddress => ['udp:161','udp6:[::1]:161'],
1023 snmpd_config => [ join(['createUser ', hiera('snmpd_readonly_user_name'), ' MD5 "', hiera('snmpd_readonly_user_password'), '"']), join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
1026 hiera_include('controller_classes')
1030 if hiera('step') >= 5 {
1031 $nova_enable_db_purge = hiera('nova_enable_db_purge', true)
1032 $cinder_enable_db_purge = hiera('cinder_enable_db_purge', true)
1033 $heat_enable_db_purge = hiera('heat_enable_db_purge', true)
1035 if $nova_enable_db_purge {
1036 include ::nova::cron::archive_deleted_rows
1038 if $cinder_enable_db_purge {
1039 include ::cinder::cron::db_purge
1041 if $heat_enable_db_purge {
1042 include ::heat::cron::purge_deleted
1045 if $pacemaker_master {
# Pacemaker ordering constraints (step >= 5, bootstrap node only):
# httpd starts only after the openstack-core dummy resource.
1047 pacemaker::constraint::base { 'openstack-core-then-httpd-constraint':
1048 constraint_type => 'order',
1049 first_resource => 'openstack-core-clone',
1050 second_resource => "${::apache::params::service_name}-clone",
1051 first_action => 'start',
1052 second_action => 'start',
1053 require => [Pacemaker::Resource::Service[$::apache::params::service_name],
1054 Pacemaker::Resource::Ocf['openstack-core']],
# memcached must be started before openstack-core.
1056 pacemaker::constraint::base { 'memcached-then-openstack-core-constraint':
1057 constraint_type => 'order',
1058 first_resource => 'memcached-clone',
1059 second_resource => 'openstack-core-clone',
1060 first_action => 'start',
1061 second_action => 'start',
1062 require => [Pacemaker::Resource::Service['memcached'],
1063 Pacemaker::Resource::Ocf['openstack-core']],
# galera must be promoted (master) before openstack-core starts.
1065 pacemaker::constraint::base { 'galera-then-openstack-core-constraint':
1066 constraint_type => 'order',
1067 first_resource => 'galera-master',
1068 second_resource => 'openstack-core-clone',
1069 first_action => 'promote',
1070 second_action => 'start',
1071 require => [Pacemaker::Resource::Ocf['galera'],
1072 Pacemaker::Resource::Ocf['openstack-core']],
1076 pacemaker::resource::service { $::cinder::params::api_service :
1077 clone_params => 'interleave=true',
1078 require => Pacemaker::Resource::Ocf['openstack-core'],
1080 pacemaker::resource::service { $::cinder::params::scheduler_service :
1081 clone_params => 'interleave=true',
1083 pacemaker::resource::service { $::cinder::params::volume_service : }
1085 pacemaker::constraint::base { 'keystone-then-cinder-api-constraint':
1086 constraint_type => 'order',
1087 first_resource => 'openstack-core-clone',
1088 second_resource => "${::cinder::params::api_service}-clone",
1089 first_action => 'start',
1090 second_action => 'start',
1091 require => [Pacemaker::Resource::Ocf['openstack-core'],
1092 Pacemaker::Resource::Service[$::cinder::params::api_service]],
1094 pacemaker::constraint::base { 'cinder-api-then-cinder-scheduler-constraint':
1095 constraint_type => 'order',
1096 first_resource => "${::cinder::params::api_service}-clone",
1097 second_resource => "${::cinder::params::scheduler_service}-clone",
1098 first_action => 'start',
1099 second_action => 'start',
1100 require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
1101 Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
1103 pacemaker::constraint::colocation { 'cinder-scheduler-with-cinder-api-colocation':
1104 source => "${::cinder::params::scheduler_service}-clone",
1105 target => "${::cinder::params::api_service}-clone",
1106 score => 'INFINITY',
1107 require => [Pacemaker::Resource::Service[$::cinder::params::api_service],
1108 Pacemaker::Resource::Service[$::cinder::params::scheduler_service]],
1110 pacemaker::constraint::base { 'cinder-scheduler-then-cinder-volume-constraint':
1111 constraint_type => 'order',
1112 first_resource => "${::cinder::params::scheduler_service}-clone",
1113 second_resource => $::cinder::params::volume_service,
1114 first_action => 'start',
1115 second_action => 'start',
1116 require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
1117 Pacemaker::Resource::Service[$::cinder::params::volume_service]],
1119 pacemaker::constraint::colocation { 'cinder-volume-with-cinder-scheduler-colocation':
1120 source => $::cinder::params::volume_service,
1121 target => "${::cinder::params::scheduler_service}-clone",
1122 score => 'INFINITY',
1123 require => [Pacemaker::Resource::Service[$::cinder::params::scheduler_service],
1124 Pacemaker::Resource::Service[$::cinder::params::volume_service]],
1128 pacemaker::resource::service { $::sahara::params::api_service_name :
1129 clone_params => 'interleave=true',
1130 require => Pacemaker::Resource::Ocf['openstack-core'],
1132 pacemaker::resource::service { $::sahara::params::engine_service_name :
1133 clone_params => 'interleave=true',
1135 pacemaker::constraint::base { 'keystone-then-sahara-api-constraint':
1136 constraint_type => 'order',
1137 first_resource => 'openstack-core-clone',
1138 second_resource => "${::sahara::params::api_service_name}-clone",
1139 first_action => 'start',
1140 second_action => 'start',
1141 require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
1142 Pacemaker::Resource::Ocf['openstack-core']],
1144 pacemaker::constraint::base { 'sahara-api-then-sahara-engine-constraint':
1145 constraint_type => 'order',
1146 first_resource => "${::sahara::params::api_service_name}-clone",
1147 second_resource => "${::sahara::params::engine_service_name}-clone",
1148 first_action => 'start',
1149 second_action => 'start',
1150 require => [Pacemaker::Resource::Service[$::sahara::params::api_service_name],
1151 Pacemaker::Resource::Service[$::sahara::params::engine_service_name]],
1154 if hiera('step') == 5 {
1156 # NOTE(gfidente): Neutron will try to populate the database with some data
1157 # as soon as neutron-server is started; to avoid races we want to make this
1158 # happen only on one node, before normal Pacemaker initialization
1159 # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
1160 # NOTE(emilien): we need to run this Exec only at Step 4 otherwise this exec
1161 # will try to start the service while it's already started by Pacemaker
1162 # It would result to a deployment failure since systemd would return 1 to Puppet
1163 # and the overcloud would fail to deploy (6 would be returned).
1164 # This conditional prevents from a race condition during the deployment.
1165 # https://bugzilla.redhat.com/show_bug.cgi?id=1290582
# NOTE(review): the note above says "only at Step 4" but the guard on line
# 1154 tests hiera('step') == 5 — one of the two is stale; confirm which
# step this exec is intended to run at.
1166 exec { 'neutron-server-systemd-start-sleep' :
# NOTE(review): 'systemctl' is not fully qualified here while 'sleep' and
# 'pcs' are; Puppet Exec needs either absolute paths or a 'path' parameter.
# The listing omits line 1168, which presumably carries the 'path' setting —
# confirm against the full file.
1167 command => 'systemctl start neutron-server && /usr/bin/sleep 5',
# Skip the manual start once Pacemaker already manages a neutron-server
# resource.
1169 unless => '/sbin/pcs resource show neutron-server',
1171 pacemaker::resource::service { $::neutron::params::server_service:
1172 clone_params => 'interleave=true',
1173 require => Pacemaker::Resource::Ocf['openstack-core']
# NOTE(review): this declaration repeats the identical resource title as the
# one above, which would be a duplicate-declaration error if both were in the
# same scope; the omitted lines 1174-1175 most likely close the first branch
# and open an 'else' (e.g. a midonet vs. default split) — verify in the full
# file.
1176 pacemaker::resource::service { $::neutron::params::server_service:
1177 clone_params => 'interleave=true',
1178 require => Pacemaker::Resource::Ocf['openstack-core']
1181 if hiera('neutron::enable_l3_agent', true) {
1182 pacemaker::resource::service { $::neutron::params::l3_agent_service:
1183 clone_params => 'interleave=true',
1186 if hiera('neutron::enable_dhcp_agent', true) {
1187 pacemaker::resource::service { $::neutron::params::dhcp_agent_service:
1188 clone_params => 'interleave=true',
1191 if hiera('neutron::enable_ovs_agent', true) {
1192 pacemaker::resource::service { $::neutron::params::ovs_agent_service:
1193 clone_params => 'interleave=true',
1196 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
1197 pacemaker::resource::service {'tomcat':
1198 clone_params => 'interleave=true',
1201 if hiera('neutron::enable_metadata_agent', true) {
1202 pacemaker::resource::service { $::neutron::params::metadata_agent_service:
1203 clone_params => 'interleave=true',
1206 if hiera('neutron::enable_ovs_agent', true) {
1207 pacemaker::resource::ocf { $::neutron::params::ovs_cleanup_service:
1208 ocf_agent_name => 'neutron:OVSCleanup',
1209 clone_params => 'interleave=true',
1211 pacemaker::resource::ocf { 'neutron-netns-cleanup':
1212 ocf_agent_name => 'neutron:NetnsCleanup',
1213 clone_params => 'interleave=true',
1216 # neutron - one chain ovs-cleanup-->netns-cleanup-->ovs-agent
1217 pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
1218 constraint_type => 'order',
1219 first_resource => "${::neutron::params::ovs_cleanup_service}-clone",
1220 second_resource => 'neutron-netns-cleanup-clone',
1221 first_action => 'start',
1222 second_action => 'start',
1223 require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1224 Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1226 pacemaker::constraint::colocation { 'neutron-ovs-cleanup-to-netns-cleanup-colocation':
1227 source => 'neutron-netns-cleanup-clone',
1228 target => "${::neutron::params::ovs_cleanup_service}-clone",
1229 score => 'INFINITY',
1230 require => [Pacemaker::Resource::Ocf[$::neutron::params::ovs_cleanup_service],
1231 Pacemaker::Resource::Ocf['neutron-netns-cleanup']],
1233 pacemaker::constraint::base { 'neutron-netns-cleanup-to-openvswitch-agent-constraint':
1234 constraint_type => 'order',
1235 first_resource => 'neutron-netns-cleanup-clone',
1236 second_resource => "${::neutron::params::ovs_agent_service}-clone",
1237 first_action => 'start',
1238 second_action => 'start',
1239 require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1240 Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1242 pacemaker::constraint::colocation { 'neutron-netns-cleanup-to-openvswitch-agent-colocation':
1243 source => "${::neutron::params::ovs_agent_service}-clone",
1244 target => 'neutron-netns-cleanup-clone',
1245 score => 'INFINITY',
1246 require => [Pacemaker::Resource::Ocf['neutron-netns-cleanup'],
1247 Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1250 pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
1251 constraint_type => 'order',
1252 first_resource => 'openstack-core-clone',
1253 second_resource => "${::neutron::params::server_service}-clone",
1254 first_action => 'start',
1255 second_action => 'start',
1256 require => [Pacemaker::Resource::Ocf['openstack-core'],
1257 Pacemaker::Resource::Service[$::neutron::params::server_service]],
1259 if hiera('neutron::enable_ovs_agent',true) {
1260 pacemaker::constraint::base { 'neutron-openvswitch-agent-to-dhcp-agent-constraint':
1261 constraint_type => 'order',
1262 first_resource => "${::neutron::params::ovs_agent_service}-clone",
1263 second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1264 first_action => 'start',
1265 second_action => 'start',
1266 require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1267 Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1270 if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_ovs_agent',true) {
1271 pacemaker::constraint::base { 'neutron-server-to-openvswitch-agent-constraint':
1272 constraint_type => 'order',
1273 first_resource => "${::neutron::params::server_service}-clone",
1274 second_resource => "${::neutron::params::ovs_agent_service}-clone",
1275 first_action => 'start',
1276 second_action => 'start',
1277 require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1278 Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service]],
1281 pacemaker::constraint::colocation { 'neutron-openvswitch-agent-to-dhcp-agent-colocation':
1282 source => "${::neutron::params::dhcp_agent_service}-clone",
1283 target => "${::neutron::params::ovs_agent_service}-clone",
1284 score => 'INFINITY',
1285 require => [Pacemaker::Resource::Service[$::neutron::params::ovs_agent_service],
1286 Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1289 if hiera('neutron::enable_dhcp_agent',true) and hiera('neutron::enable_l3_agent',true) {
1290 pacemaker::constraint::base { 'neutron-dhcp-agent-to-l3-agent-constraint':
1291 constraint_type => 'order',
1292 first_resource => "${::neutron::params::dhcp_agent_service}-clone",
1293 second_resource => "${::neutron::params::l3_agent_service}-clone",
1294 first_action => 'start',
1295 second_action => 'start',
1296 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1297 Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
1299 pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-l3-agent-colocation':
1300 source => "${::neutron::params::l3_agent_service}-clone",
1301 target => "${::neutron::params::dhcp_agent_service}-clone",
1302 score => 'INFINITY',
1303 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1304 Pacemaker::Resource::Service[$::neutron::params::l3_agent_service]]
1307 if hiera('neutron::enable_l3_agent',true) and hiera('neutron::enable_metadata_agent',true) {
1308 pacemaker::constraint::base { 'neutron-l3-agent-to-metadata-agent-constraint':
1309 constraint_type => 'order',
1310 first_resource => "${::neutron::params::l3_agent_service}-clone",
1311 second_resource => "${::neutron::params::metadata_agent_service}-clone",
1312 first_action => 'start',
1313 second_action => 'start',
1314 require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
1315 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
1317 pacemaker::constraint::colocation { 'neutron-l3-agent-to-metadata-agent-colocation':
1318 source => "${::neutron::params::metadata_agent_service}-clone",
1319 target => "${::neutron::params::l3_agent_service}-clone",
1320 score => 'INFINITY',
1321 require => [Pacemaker::Resource::Service[$::neutron::params::l3_agent_service],
1322 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]]
1325 if hiera('neutron::core_plugin') == 'midonet.neutron.plugin_v1.MidonetPluginV2' {
1326 #midonet-chain chain keystone-->neutron-server-->dhcp-->metadata->tomcat
1327 pacemaker::constraint::base { 'neutron-server-to-dhcp-agent-constraint':
1328 constraint_type => 'order',
1329 first_resource => "${::neutron::params::server_service}-clone",
1330 second_resource => "${::neutron::params::dhcp_agent_service}-clone",
1331 first_action => 'start',
1332 second_action => 'start',
1333 require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
1334 Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service]],
1336 pacemaker::constraint::base { 'neutron-dhcp-agent-to-metadata-agent-constraint':
1337 constraint_type => 'order',
1338 first_resource => "${::neutron::params::dhcp_agent_service}-clone",
1339 second_resource => "${::neutron::params::metadata_agent_service}-clone",
1340 first_action => 'start',
1341 second_action => 'start',
1342 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1343 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1345 pacemaker::constraint::base { 'neutron-metadata-agent-to-tomcat-constraint':
1346 constraint_type => 'order',
1347 first_resource => "${::neutron::params::metadata_agent_service}-clone",
1348 second_resource => 'tomcat-clone',
1349 first_action => 'start',
1350 second_action => 'start',
1351 require => [Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service],
1352 Pacemaker::Resource::Service['tomcat']],
1354 pacemaker::constraint::colocation { 'neutron-dhcp-agent-to-metadata-agent-colocation':
1355 source => "${::neutron::params::metadata_agent_service}-clone",
1356 target => "${::neutron::params::dhcp_agent_service}-clone",
1357 score => 'INFINITY',
1358 require => [Pacemaker::Resource::Service[$::neutron::params::dhcp_agent_service],
1359 Pacemaker::Resource::Service[$::neutron::params::metadata_agent_service]],
1364 pacemaker::resource::service { $::nova::params::api_service_name :
1365 clone_params => 'interleave=true',
1367 pacemaker::resource::service { $::nova::params::conductor_service_name :
1368 clone_params => 'interleave=true',
1370 pacemaker::resource::service { $::nova::params::consoleauth_service_name :
1371 clone_params => 'interleave=true',
1372 require => Pacemaker::Resource::Ocf['openstack-core'],
1374 pacemaker::resource::service { $::nova::params::vncproxy_service_name :
1375 clone_params => 'interleave=true',
1377 pacemaker::resource::service { $::nova::params::scheduler_service_name :
1378 clone_params => 'interleave=true',
1381 pacemaker::constraint::base { 'keystone-then-nova-consoleauth-constraint':
1382 constraint_type => 'order',
1383 first_resource => 'openstack-core-clone',
1384 second_resource => "${::nova::params::consoleauth_service_name}-clone",
1385 first_action => 'start',
1386 second_action => 'start',
1387 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1388 Pacemaker::Resource::Ocf['openstack-core']],
1390 pacemaker::constraint::base { 'nova-consoleauth-then-nova-vncproxy-constraint':
1391 constraint_type => 'order',
1392 first_resource => "${::nova::params::consoleauth_service_name}-clone",
1393 second_resource => "${::nova::params::vncproxy_service_name}-clone",
1394 first_action => 'start',
1395 second_action => 'start',
1396 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1397 Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1399 pacemaker::constraint::colocation { 'nova-vncproxy-with-nova-consoleauth-colocation':
1400 source => "${::nova::params::vncproxy_service_name}-clone",
1401 target => "${::nova::params::consoleauth_service_name}-clone",
1402 score => 'INFINITY',
1403 require => [Pacemaker::Resource::Service[$::nova::params::consoleauth_service_name],
1404 Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name]],
1406 pacemaker::constraint::base { 'nova-vncproxy-then-nova-api-constraint':
1407 constraint_type => 'order',
1408 first_resource => "${::nova::params::vncproxy_service_name}-clone",
1409 second_resource => "${::nova::params::api_service_name}-clone",
1410 first_action => 'start',
1411 second_action => 'start',
1412 require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1413 Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1415 pacemaker::constraint::colocation { 'nova-api-with-nova-vncproxy-colocation':
1416 source => "${::nova::params::api_service_name}-clone",
1417 target => "${::nova::params::vncproxy_service_name}-clone",
1418 score => 'INFINITY',
1419 require => [Pacemaker::Resource::Service[$::nova::params::vncproxy_service_name],
1420 Pacemaker::Resource::Service[$::nova::params::api_service_name]],
1422 pacemaker::constraint::base { 'nova-api-then-nova-scheduler-constraint':
1423 constraint_type => 'order',
1424 first_resource => "${::nova::params::api_service_name}-clone",
1425 second_resource => "${::nova::params::scheduler_service_name}-clone",
1426 first_action => 'start',
1427 second_action => 'start',
1428 require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1429 Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1431 pacemaker::constraint::colocation { 'nova-scheduler-with-nova-api-colocation':
1432 source => "${::nova::params::scheduler_service_name}-clone",
1433 target => "${::nova::params::api_service_name}-clone",
1434 score => 'INFINITY',
1435 require => [Pacemaker::Resource::Service[$::nova::params::api_service_name],
1436 Pacemaker::Resource::Service[$::nova::params::scheduler_service_name]],
1438 pacemaker::constraint::base { 'nova-scheduler-then-nova-conductor-constraint':
1439 constraint_type => 'order',
1440 first_resource => "${::nova::params::scheduler_service_name}-clone",
1441 second_resource => "${::nova::params::conductor_service_name}-clone",
1442 first_action => 'start',
1443 second_action => 'start',
1444 require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1445 Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1447 pacemaker::constraint::colocation { 'nova-conductor-with-nova-scheduler-colocation':
1448 source => "${::nova::params::conductor_service_name}-clone",
1449 target => "${::nova::params::scheduler_service_name}-clone",
1450 score => 'INFINITY',
1451 require => [Pacemaker::Resource::Service[$::nova::params::scheduler_service_name],
1452 Pacemaker::Resource::Service[$::nova::params::conductor_service_name]],
1455 # Ceilometer and Aodh
1456 case downcase(hiera('ceilometer_backend')) {
1458 pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
1459 clone_params => 'interleave=true',
1460 require => Pacemaker::Resource::Ocf['openstack-core'],
1464 pacemaker::resource::service { $::ceilometer::params::agent_central_service_name:
1465 clone_params => 'interleave=true',
1466 require => [Pacemaker::Resource::Ocf['openstack-core'],
1467 Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1471 pacemaker::resource::service { $::ceilometer::params::collector_service_name :
1472 clone_params => 'interleave=true',
1474 pacemaker::resource::service { $::ceilometer::params::api_service_name :
1475 clone_params => 'interleave=true',
1477 pacemaker::resource::service { $::ceilometer::params::agent_notification_service_name :
1478 clone_params => 'interleave=true',
1480 pacemaker::resource::ocf { 'delay' :
1481 ocf_agent_name => 'heartbeat:Delay',
1482 clone_params => 'interleave=true',
1483 resource_params => 'startdelay=10',
1485 # Fedora doesn't know `require-all` parameter for constraints yet
1486 if $::operatingsystem == 'Fedora' {
# On Fedora, pass no extra constraint parameters at all.
1487 $redis_ceilometer_constraint_params = undef
1488 $redis_aodh_constraint_params = undef
# NOTE(review): the 'else' keyword (omitted listing line 1489) is assumed to
# sit between these assignments and the ones above — confirm in the full
# file. Elsewhere, relax the ordering constraints so ceilometer/aodh may
# start even when redis is not promoted on every node.
1490 $redis_ceilometer_constraint_params = 'require-all=false'
1491 $redis_aodh_constraint_params = 'require-all=false'
1493 pacemaker::constraint::base { 'redis-then-ceilometer-central-constraint':
1494 constraint_type => 'order',
1495 first_resource => 'redis-master',
1496 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1497 first_action => 'promote',
1498 second_action => 'start',
1499 constraint_params => $redis_ceilometer_constraint_params,
1500 require => [Pacemaker::Resource::Ocf['redis'],
1501 Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name]],
1503 pacemaker::constraint::base { 'redis-then-aodh-evaluator-constraint':
1504 constraint_type => 'order',
1505 first_resource => 'redis-master',
1506 second_resource => "${::aodh::params::evaluator_service_name}-clone",
1507 first_action => 'promote',
1508 second_action => 'start',
1509 constraint_params => $redis_aodh_constraint_params,
1510 require => [Pacemaker::Resource::Ocf['redis'],
1511 Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name]],
1513 pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
1514 constraint_type => 'order',
1515 first_resource => 'openstack-core-clone',
1516 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1517 first_action => 'start',
1518 second_action => 'start',
1519 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1520 Pacemaker::Resource::Ocf['openstack-core']],
1522 pacemaker::constraint::base { 'keystone-then-ceilometer-notification-constraint':
1523 constraint_type => 'order',
1524 first_resource => 'openstack-core-clone',
1525 second_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
1526 first_action => 'start',
1527 second_action => 'start',
1528 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1529 Pacemaker::Resource::Ocf['openstack-core']],
1531 pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
1532 constraint_type => 'order',
1533 first_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1534 second_resource => "${::ceilometer::params::collector_service_name}-clone",
1535 first_action => 'start',
1536 second_action => 'start',
1537 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1538 Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1540 pacemaker::constraint::base { 'ceilometer-collector-then-ceilometer-api-constraint':
1541 constraint_type => 'order',
1542 first_resource => "${::ceilometer::params::collector_service_name}-clone",
1543 second_resource => "${::ceilometer::params::api_service_name}-clone",
1544 first_action => 'start',
1545 second_action => 'start',
1546 require => [Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name],
1547 Pacemaker::Resource::Service[$::ceilometer::params::api_service_name]],
1549 pacemaker::constraint::colocation { 'ceilometer-api-with-ceilometer-collector-colocation':
1550 source => "${::ceilometer::params::api_service_name}-clone",
1551 target => "${::ceilometer::params::collector_service_name}-clone",
1552 score => 'INFINITY',
1553 require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1554 Pacemaker::Resource::Service[$::ceilometer::params::collector_service_name]],
1556 pacemaker::constraint::base { 'ceilometer-api-then-ceilometer-delay-constraint':
1557 constraint_type => 'order',
1558 first_resource => "${::ceilometer::params::api_service_name}-clone",
1559 second_resource => 'delay-clone',
1560 first_action => 'start',
1561 second_action => 'start',
1562 require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1563 Pacemaker::Resource::Ocf['delay']],
1565 pacemaker::constraint::colocation { 'ceilometer-delay-with-ceilometer-api-colocation':
1566 source => 'delay-clone',
1567 target => "${::ceilometer::params::api_service_name}-clone",
1568 score => 'INFINITY',
1569 require => [Pacemaker::Resource::Service[$::ceilometer::params::api_service_name],
1570 Pacemaker::Resource::Ocf['delay']],
1573 pacemaker::resource::service { $::aodh::params::evaluator_service_name :
1574 clone_params => 'interleave=true',
1576 pacemaker::resource::service { $::aodh::params::notifier_service_name :
1577 clone_params => 'interleave=true',
1579 pacemaker::resource::service { $::aodh::params::listener_service_name :
1580 clone_params => 'interleave=true',
1582 pacemaker::constraint::base { 'aodh-delay-then-aodh-evaluator-constraint':
1583 constraint_type => 'order',
1584 first_resource => 'delay-clone',
1585 second_resource => "${::aodh::params::evaluator_service_name}-clone",
1586 first_action => 'start',
1587 second_action => 'start',
1588 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1589 Pacemaker::Resource::Ocf['delay']],
1591 pacemaker::constraint::colocation { 'aodh-evaluator-with-aodh-delay-colocation':
1592 source => "${::aodh::params::evaluator_service_name}-clone",
1593 target => 'delay-clone',
1594 score => 'INFINITY',
1595 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1596 Pacemaker::Resource::Ocf['delay']],
1598 pacemaker::constraint::base { 'aodh-evaluator-then-aodh-notifier-constraint':
1599 constraint_type => 'order',
1600 first_resource => "${::aodh::params::evaluator_service_name}-clone",
1601 second_resource => "${::aodh::params::notifier_service_name}-clone",
1602 first_action => 'start',
1603 second_action => 'start',
1604 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1605 Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
1607 pacemaker::constraint::colocation { 'aodh-notifier-with-aodh-evaluator-colocation':
1608 source => "${::aodh::params::notifier_service_name}-clone",
1609 target => "${::aodh::params::evaluator_service_name}-clone",
1610 score => 'INFINITY',
1611 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1612 Pacemaker::Resource::Service[$::aodh::params::notifier_service_name]],
1614 pacemaker::constraint::base { 'aodh-evaluator-then-aodh-listener-constraint':
1615 constraint_type => 'order',
1616 first_resource => "${::aodh::params::evaluator_service_name}-clone",
1617 second_resource => "${::aodh::params::listener_service_name}-clone",
1618 first_action => 'start',
1619 second_action => 'start',
1620 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1621 Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
1623 pacemaker::constraint::colocation { 'aodh-listener-with-aodh-evaluator-colocation':
1624 source => "${::aodh::params::listener_service_name}-clone",
1625 target => "${::aodh::params::evaluator_service_name}-clone",
1626 score => 'INFINITY',
1627 require => [Pacemaker::Resource::Service[$::aodh::params::evaluator_service_name],
1628 Pacemaker::Resource::Service[$::aodh::params::listener_service_name]],
1630 if downcase(hiera('ceilometer_backend')) == 'mongodb' {
1631 pacemaker::constraint::base { 'mongodb-then-ceilometer-central-constraint':
1632 constraint_type => 'order',
1633 first_resource => "${::mongodb::params::service_name}-clone",
1634 second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
1635 first_action => 'start',
1636 second_action => 'start',
1637 require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
1638 Pacemaker::Resource::Service[$::mongodb::params::service_name]],
1643 pacemaker::resource::service { $::gnocchi::params::metricd_service_name :
1644 clone_params => 'interleave=true',
1646 pacemaker::resource::service { $::gnocchi::params::statsd_service_name :
1647 clone_params => 'interleave=true',
1649 pacemaker::constraint::base { 'gnocchi-metricd-then-gnocchi-statsd-constraint':
1650 constraint_type => 'order',
1651 first_resource => "${::gnocchi::params::metricd_service_name}-clone",
1652 second_resource => "${::gnocchi::params::statsd_service_name}-clone",
1653 first_action => 'start',
1654 second_action => 'start',
1655 require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
1656 Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
1658 pacemaker::constraint::colocation { 'gnocchi-statsd-with-metricd-colocation':
1659 source => "${::gnocchi::params::statsd_service_name}-clone",
1660 target => "${::gnocchi::params::metricd_service_name}-clone",
1661 score => 'INFINITY',
1662 require => [Pacemaker::Resource::Service[$::gnocchi::params::metricd_service_name],
1663 Pacemaker::Resource::Service[$::gnocchi::params::statsd_service_name]],
1667 pacemaker::resource::service { $::heat::params::api_service_name :
1668 clone_params => 'interleave=true',
1670 pacemaker::resource::service { $::heat::params::api_cloudwatch_service_name :
1671 clone_params => 'interleave=true',
1673 pacemaker::resource::service { $::heat::params::api_cfn_service_name :
1674 clone_params => 'interleave=true',
1676 pacemaker::resource::service { $::heat::params::engine_service_name :
1677 clone_params => 'interleave=true',
1679 pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
1680 constraint_type => 'order',
1681 first_resource => "${::heat::params::api_service_name}-clone",
1682 second_resource => "${::heat::params::api_cfn_service_name}-clone",
1683 first_action => 'start',
1684 second_action => 'start',
1685 require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1686 Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1688 pacemaker::constraint::colocation { 'heat-api-cfn-with-heat-api-colocation':
1689 source => "${::heat::params::api_cfn_service_name}-clone",
1690 target => "${::heat::params::api_service_name}-clone",
1691 score => 'INFINITY',
1692 require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
1693 Pacemaker::Resource::Service[$::heat::params::api_service_name]],
1695 pacemaker::constraint::base { 'heat-api-cfn-then-heat-api-cloudwatch-constraint':
1696 constraint_type => 'order',
1697 first_resource => "${::heat::params::api_cfn_service_name}-clone",
1698 second_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
1699 first_action => 'start',
1700 second_action => 'start',
1701 require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1702 Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name]],
1704 pacemaker::constraint::colocation { 'heat-api-cloudwatch-with-heat-api-cfn-colocation':
1705 source => "${::heat::params::api_cloudwatch_service_name}-clone",
1706 target => "${::heat::params::api_cfn_service_name}-clone",
1707 score => 'INFINITY',
1708 require => [Pacemaker::Resource::Service[$::heat::params::api_cfn_service_name],
1709 Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name]],
1711 pacemaker::constraint::base { 'heat-api-cloudwatch-then-heat-engine-constraint':
1712 constraint_type => 'order',
1713 first_resource => "${::heat::params::api_cloudwatch_service_name}-clone",
1714 second_resource => "${::heat::params::engine_service_name}-clone",
1715 first_action => 'start',
1716 second_action => 'start',
1717 require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1718 Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1720 pacemaker::constraint::colocation { 'heat-engine-with-heat-api-cloudwatch-colocation':
1721 source => "${::heat::params::engine_service_name}-clone",
1722 target => "${::heat::params::api_cloudwatch_service_name}-clone",
1723 score => 'INFINITY',
1724 require => [Pacemaker::Resource::Service[$::heat::params::api_cloudwatch_service_name],
1725 Pacemaker::Resource::Service[$::heat::params::engine_service_name]],
1727 pacemaker::constraint::base { 'ceilometer-notification-then-heat-api-constraint':
1728 constraint_type => 'order',
1729 first_resource => "${::ceilometer::params::agent_notification_service_name}-clone",
1730 second_resource => "${::heat::params::api_service_name}-clone",
1731 first_action => 'start',
1732 second_action => 'start',
1733 require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
1734 Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
1737 # Horizon and Keystone
1738 pacemaker::resource::service { $::apache::params::service_name:
1739 clone_params => 'interleave=true',
1740 verify_on_create => true,
1741 require => [File['/etc/keystone/ssl/certs/ca.pem'],
1742 File['/etc/keystone/ssl/private/signing_key.pem'],
1743 File['/etc/keystone/ssl/certs/signing_cert.pem']],
1747 if 'cisco_n1kv' in hiera('neutron::plugins::ml2::mechanism_drivers') {
1748 pacemaker::resource::ocf { 'vsm-p' :
1749 ocf_agent_name => 'heartbeat:VirtualDomain',
1750 resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_primary_deploy.xml',
1751 require => Class['n1k_vsm'],
1752 meta_params => 'resource-stickiness=INFINITY',
1754 if str2bool(hiera('n1k_vsm::pacemaker_control', true)) {
1755 pacemaker::resource::ocf { 'vsm-s' :
1756 ocf_agent_name => 'heartbeat:VirtualDomain',
1757 resource_params => 'force_stop=true config=/var/spool/cisco/vsm/vsm_secondary_deploy.xml',
1758 require => Class['n1k_vsm'],
1759 meta_params => 'resource-stickiness=INFINITY',
1761 pacemaker::constraint::colocation { 'vsm-colocation-contraint':
1764 score => '-INFINITY',
1765 require => [Pacemaker::Resource::Ocf['vsm-p'],
1766 Pacemaker::Resource::Ocf['vsm-s']],
# Record a per-step marker of the packages installed by this manifest under
# /var/lib/tripleo/installed-packages/, so later runs and tooling can tell
# which deployment steps have completed on this node.
1775 $package_manifest_name = join(['/var/lib/tripleo/installed-packages/overcloud_controller_pacemaker', hiera('step')])
1776 package_manifest{$package_manifest_name: ensure => present}