$sync_db = false
}
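+# NB: fencing is only switched on from step 5 onwards so that, as done in the
+# block below, all fencing devices can be created by tripleo::fencing before
+# stonith is enabled.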
+$enable_fencing = str2bool(hiera('enable_fencing', 'false')) and hiera('step') >= 5
+
# When to start and enable services which haven't been Pacemakerized
# FIXME: remove when we start all OpenStack services using Pacemaker
# (occurrences of this variable will be gradually replaced with false)
controller_hosts => $controller_node_ips,
controller_hosts_names => $controller_node_names,
manage_vip => false,
+ mysql_clustercheck => true,
haproxy_service_manage => false,
}
setup_cluster => $pacemaker_master,
}
class { '::pacemaker::stonith':
- disable => true,
+ disable => !$enable_fencing,
+ }
+ if $enable_fencing {
+ include tripleo::fencing
+
+ # enable stonith after all fencing devices have been created
+ Class['tripleo::fencing'] -> Class['pacemaker::stonith']
}
# Only configure RabbitMQ in this step, don't start it yet to
if downcase(hiera('ceilometer_backend')) == 'mongodb' {
include ::mongodb::globals
- # FIXME: replace with service_manage => false on ::mongodb::server
- # when this is merged: https://github.com/puppetlabs/puppetlabs-mongodb/pull/198
class { '::mongodb::server' :
- service_ensure => undef,
- service_enable => false,
+ service_manage => false,
}
}
service_manage => false,
}
+ # Redis
+ class { '::redis' :
+ service_manage => false,
+ notify_service => false,
+ }
+
# Galera
if str2bool(hiera('enable_galera', 'true')) {
$mysql_config_file = '/etc/my.cnf.d/galera.cnf'
'query_cache_size' => '0',
'query_cache_type' => '0',
'bind-address' => hiera('mysql_bind_host'),
- 'max_connections' => '1024',
+ 'max_connections' => hiera('mysql_max_connections'),
'open_files_limit' => '-1',
'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so',
'wsrep_cluster_name' => 'galera_cluster',
config_file => $mysql_config_file,
override_options => $mysqld_options,
service_manage => false,
+ service_enabled => false,
}
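+ # NB: with service_manage/service_enabled both false, mysqld is left for the
+ # pacemaker galera resource agent to start and bootstrap rather than puppet.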
}
if hiera('step') >= 2 {
+ # NOTE(gfidente): the following vars are needed on all nodes so they
+ # need to stay out of the pacemaker_master conditional
+ $mongo_node_ips_with_port = suffix(hiera('mongo_node_ips'), ':27017')
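+ # e.g. suffix(['192.0.2.10', '192.0.2.11'], ':27017')
+ #   => ['192.0.2.10:27017', '192.0.2.11:27017'] (addresses illustrative)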
+ $mongodb_replset = hiera('mongodb::server::replset')
+
if $pacemaker_master {
# FIXME: we should not have to access tripleo::loadbalancer class
# parameters here to configure pacemaker VIPs. The configuration
# of pacemaker VIPs could move into puppet-tripleo or we should
# make use of less specific hiera parameters here for the settings.
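+      # haproxy is cloned so one instance runs on every controller; pacemaker
+      # names clone resources "<name>-clone", hence the "haproxy-clone"
+      # references in the constraints below.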
+ pacemaker::resource::service { 'haproxy':
+ clone_params => true,
+ }
+
$control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip')
pacemaker::resource::ip { 'control_vip':
ip_address => $control_vip,
}
+ pacemaker::constraint::base { 'control_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${control_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['control_vip']],
+ }
+ pacemaker::constraint::colocation { 'control_vip-with-haproxy':
+ source => "ip-${control_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['control_vip']],
+ }
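+      # The order + colocation pair above is repeated for each VIP below; it
+      # is roughly equivalent to these pcs commands (illustrative only, the
+      # actual invocations are assembled by the pacemaker module):
+      #   pcs constraint order start ip-<VIP> then start haproxy-clone kind=Optional
+      #   pcs constraint colocation add ip-<VIP> with haproxy-clone INFINITY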
+
$public_vip = hiera('tripleo::loadbalancer::public_virtual_ip')
- pacemaker::resource::ip { 'public_vip':
- ip_address => $public_vip,
+ if $public_vip and $public_vip != $control_vip {
+ pacemaker::resource::ip { 'public_vip':
+ ip_address => $public_vip,
+ }
+ pacemaker::constraint::base { 'public_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${public_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['public_vip']],
+ }
+ pacemaker::constraint::colocation { 'public_vip-with-haproxy':
+ source => "ip-${public_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['public_vip']],
+ }
+ }
+
+ $redis_vip = hiera('redis_vip')
+ if $redis_vip and $redis_vip != $control_vip {
+ pacemaker::resource::ip { 'redis_vip':
+ ip_address => $redis_vip,
+ }
+ pacemaker::constraint::base { 'redis_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${redis_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['redis_vip']],
+ }
+ pacemaker::constraint::colocation { 'redis_vip-with-haproxy':
+ source => "ip-${redis_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['redis_vip']],
+ }
}
$internal_api_vip = hiera('tripleo::loadbalancer::internal_api_virtual_ip')
pacemaker::resource::ip { 'internal_api_vip':
ip_address => $internal_api_vip,
}
+ pacemaker::constraint::base { 'internal_api_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${internal_api_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['internal_api_vip']],
+ }
+ pacemaker::constraint::colocation { 'internal_api_vip-with-haproxy':
+ source => "ip-${internal_api_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['internal_api_vip']],
+ }
}
$storage_vip = hiera('tripleo::loadbalancer::storage_virtual_ip')
pacemaker::resource::ip { 'storage_vip':
ip_address => $storage_vip,
}
+ pacemaker::constraint::base { 'storage_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${storage_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_vip']],
+ }
+ pacemaker::constraint::colocation { 'storage_vip-with-haproxy':
+ source => "ip-${storage_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_vip']],
+ }
}
$storage_mgmt_vip = hiera('tripleo::loadbalancer::storage_mgmt_virtual_ip')
pacemaker::resource::ip { 'storage_mgmt_vip':
ip_address => $storage_mgmt_vip,
}
+ pacemaker::constraint::base { 'storage_mgmt_vip-then-haproxy':
+ constraint_type => 'order',
+ first_resource => "ip-${storage_mgmt_vip}",
+ second_resource => 'haproxy-clone',
+ first_action => 'start',
+ second_action => 'start',
+ constraint_params => 'kind=Optional',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_mgmt_vip']],
+ }
+ pacemaker::constraint::colocation { 'storage_mgmt_vip-with-haproxy':
+ source => "ip-${storage_mgmt_vip}",
+ target => 'haproxy-clone',
+ score => 'INFINITY',
+ require => [Pacemaker::Resource::Service['haproxy'],
+ Pacemaker::Resource::Ip['storage_mgmt_vip']],
+ }
}
- pacemaker::resource::service { 'haproxy':
- clone_params => true,
- }
pacemaker::resource::service { $::memcached::params::service_name :
clone_params => true,
require => Class['::memcached'],
op_params => 'start timeout=120s',
clone_params => true,
require => Class['::mongodb::server'],
- before => Exec['mongodb-ready'],
}
# NOTE (spredzy) : The replset can only be configured
# once all the nodes have joined the cluster.
- $mongo_node_ips = hiera('mongo_node_ips')
- $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017')
- $mongo_node_string = join($mongo_node_ips_with_port, ',')
- $mongodb_replset = hiera('mongodb::server::replset')
- $mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ')
- $mongodb_pacemaker_resource = Pacemaker::Resource::Service[$::mongodb::params::service_name]
- exec { 'mongodb-ready' :
- command => $mongodb_cluster_ready_command,
- timeout => 30,
- tries => 180,
- try_sleep => 10,
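+ # wait (up to `timeout` seconds) until every mongod accepts connections,
+ # so the replset is only configured once all members are reachable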
+ mongodb_conn_validator { $mongo_node_ips_with_port :
+ timeout => '600',
+ require => Pacemaker::Resource::Service[$::mongodb::params::service_name],
+ before => Mongodb_replset[$mongodb_replset],
}
mongodb_replset { $mongodb_replset :
members => $mongo_node_ips_with_port,
- require => Exec['mongodb-ready'],
}
}
require => Class['::mysql::server'],
before => Exec['galera-ready'],
}
- }
-
- # Redis
- $redis_node_ips = hiera('redis_node_ips')
- $redis_master_hostname = downcase(hiera('bootstrap_nodeid'))
- if $redis_master_hostname == $::hostname {
- $slaveof = undef
- } else {
- $slaveof = "${redis_master_hostname} 6379"
- }
- class {'::redis' :
- slaveof => $slaveof,
- }
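+ # redis replication is now driven by the OCF heartbeat:redis agent: the
+ # empty master_params creates a master/slave resource and the agent
+ # promotes one node to master, replacing the static slaveof wiring
+ # removed above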
+ pacemaker::resource::ocf { 'redis':
+ ocf_agent_name => 'heartbeat:redis',
+ master_params => '',
+ meta_params => 'notify=true ordered=true interleave=true',
+ resource_params => 'wait_last_known_master=true',
+ require => Class['::redis'],
+ }
- if count($redis_node_ips) > 1 {
- Class['::tripleo::redis_notification'] -> Service['redis-sentinel']
- include ::redis::sentinel
- include ::tripleo::redis_notification
}
exec { 'galera-ready' :
$glance_backend = downcase(hiera('glance_backend', 'swift'))
case $glance_backend {
- swift: { $glance_store = 'glance.store.swift.Store' }
- file: { $glance_store = 'glance.store.filesystem.Store' }
- rbd: { $glance_store = 'glance.store.rbd.Store' }
+ swift: { $backend_store = 'glance.store.swift.Store' }
+ file: { $backend_store = 'glance.store.filesystem.Store' }
+ rbd: { $backend_store = 'glance.store.rbd.Store' }
default: { fail('Unrecognized glance_backend parameter.') }
}
+ $http_store = ['glance.store.http.Store']
+ $glance_store = concat($http_store, $backend_store)
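+ # the http store is enabled unconditionally so glance can handle images
+ # referenced by URL alongside the configured default backend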
# TODO: notifications, scrubber, etc.
include ::glance
class { 'glance::api':
- known_stores => [$glance_store],
+ known_stores => $glance_store,
manage_service => false,
enabled => false,
}
tenant_network_types => [hiera('neutron_tenant_network_type')],
}
class { 'neutron::agents::ml2::ovs':
- # manage_service => false # not implemented
+ manage_service => false,
enabled => false,
bridge_mappings => split(hiera('neutron_bridge_mappings'), ','),
tunnel_types => split(hiera('neutron_tunnel_types'), ','),
}
}
- $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend])
+ if hiera('cinder_enable_netapp_backend', false) {
+ $cinder_netapp_backend = hiera('cinder::backend::netapp::title')
+
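+    # share a single "host" value across the controllers so volumes created
+    # via any node stay manageable by all of them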
+ cinder_config {
+ "${cinder_netapp_backend}/host": value => 'hostgroup';
+ }
+
+ if hiera('cinder::backend::netapp::nfs_shares', undef) {
+ $cinder_netapp_nfs_shares = split(hiera('cinder::backend::netapp::nfs_shares', undef), ',')
+ }
+
+ cinder::backend::netapp { $cinder_netapp_backend :
+ netapp_login => hiera('cinder::backend::netapp::netapp_login', undef),
+ netapp_password => hiera('cinder::backend::netapp::netapp_password', undef),
+ netapp_server_hostname => hiera('cinder::backend::netapp::netapp_server_hostname', undef),
+ netapp_server_port => hiera('cinder::backend::netapp::netapp_server_port', undef),
+ netapp_size_multiplier => hiera('cinder::backend::netapp::netapp_size_multiplier', undef),
+ netapp_storage_family => hiera('cinder::backend::netapp::netapp_storage_family', undef),
+ netapp_storage_protocol => hiera('cinder::backend::netapp::netapp_storage_protocol', undef),
+ netapp_transport_type => hiera('cinder::backend::netapp::netapp_transport_type', undef),
+ netapp_vfiler => hiera('cinder::backend::netapp::netapp_vfiler', undef),
+ netapp_volume_list => hiera('cinder::backend::netapp::netapp_volume_list', undef),
+ netapp_vserver => hiera('cinder::backend::netapp::netapp_vserver', undef),
+ netapp_partner_backend_name => hiera('cinder::backend::netapp::netapp_partner_backend_name', undef),
+ nfs_shares => $cinder_netapp_nfs_shares,
+ nfs_shares_config => hiera('cinder::backend::netapp::nfs_shares_config', undef),
+ netapp_copyoffload_tool_path => hiera('cinder::backend::netapp::netapp_copyoffload_tool_path', undef),
+ netapp_controller_ips => hiera('cinder::backend::netapp::netapp_controller_ips', undef),
+ netapp_sa_password => hiera('cinder::backend::netapp::netapp_sa_password', undef),
+ netapp_storage_pools => hiera('cinder::backend::netapp::netapp_storage_pools', undef),
+ netapp_eseries_host_type => hiera('cinder::backend::netapp::netapp_eseries_host_type', undef),
+ netapp_webservice_path => hiera('cinder::backend::netapp::netapp_webservice_path', undef),
+ }
+ }
+
+ $cinder_enabled_backends = delete_undef_values([$cinder_iscsi_backend, $cinder_rbd_backend, $cinder_netapp_backend])
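+ # delete_undef_values drops the backends left undef above, so only the
+ # configured ones are passed to enabled_backends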
class { '::cinder::backends' :
enabled_backends => $cinder_enabled_backends,
}
include ::swift::proxy::keystone
include ::swift::proxy::authtoken
include ::swift::proxy::staticweb
- include ::swift::proxy::ceilometer
include ::swift::proxy::ratelimit
include ::swift::proxy::catch_errors
include ::swift::proxy::tempurl
$ceilometer_database_connection = hiera('ceilometer_mysql_conn_string')
}
default : {
+ $mongo_node_string = join($mongo_node_ips_with_port, ',')
$ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}"
}
}
enabled => false,
}
- # Horizon
- $vhost_params = { add_listen => false }
+ # httpd/apache and horizon
+ # NOTE(gfidente): server-status can be consumed by the pacemaker resource agent
+ include ::apache
+ include ::apache::mod::status
+ $vhost_params = {
+ add_listen => false,
+ priority => 10,
+ }
class { 'horizon':
cache_server_ip => hiera('memcache_node_ips', '127.0.0.1'),
vhost_extra_params => $vhost_params,
+ server_aliases => $::hostname,
}
$snmpd_user = hiera('snmpd_readonly_user_name')
snmpd_config => [ join(['rouser ', hiera('snmpd_readonly_user_name')]), 'proc cron', 'includeAllDisks 10%', 'master agentx', 'trapsink localhost public', 'iquerySecName internalUser', 'rouser internalUser', 'defaultMonitors yes', 'linkUpDownNotifications yes' ],
}
+ hiera_include('controller_classes')
+
} #END STEP 3
if hiera('step') >= 4 {
}
# Neutron
+ # NOTE(gfidente): Neutron will try to populate the database with some data
+ # as soon as neutron-server is started; to avoid races we want to make this
+ # happen only on one node, before normal Pacemaker initialization
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1233061
+ exec { '/usr/bin/systemctl start neutron-server && /usr/bin/sleep 5' : } ->
pacemaker::resource::service { $::neutron::params::server_service:
op_params => "start timeout=90",
clone_params => "interleave=true",
ocf_agent_name => "neutron:NetnsCleanup",
clone_params => "interleave=true",
}
- pacemaker::resource::ocf { 'neutron-scale':
- ocf_agent_name => "neutron:NeutronScale",
- clone_params => "globally-unique=true clone-max=3 interleave=true",
- }
pacemaker::constraint::base { 'keystone-to-neutron-server-constraint':
constraint_type => "order",
first_resource => "${::keystone::params::service_name}-clone",
require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
Pacemaker::Resource::Service[$::neutron::params::server_service]],
}
- pacemaker::constraint::base { 'neutron-server-to-neutron-scale-constraint':
+ pacemaker::constraint::base { 'neutron-server-to-neutron-ovs-cleanup-constraint':
constraint_type => "order",
first_resource => "${::neutron::params::server_service}-clone",
- second_resource => "neutron-scale-clone",
- first_action => "start",
- second_action => "start",
- require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
- Pacemaker::Resource::Ocf['neutron-scale']],
- }
- pacemaker::constraint::base { 'neutron-scale-to-ovs-cleanup-constraint':
- constraint_type => "order",
- first_resource => "neutron-scale-clone",
second_resource => "${::neutron::params::ovs_cleanup_service}-clone",
first_action => "start",
second_action => "start",
- require => [Pacemaker::Resource::Ocf['neutron-scale'],
- Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]],
- }
- pacemaker::constraint::colocation { 'neutron-scale-to-ovs-cleanup-colocation':
- source => "${::neutron::params::ovs_cleanup_service}-clone",
- target => "neutron-scale-clone",
- score => "INFINITY",
- require => [Pacemaker::Resource::Ocf['neutron-scale'],
+ require => [Pacemaker::Resource::Service[$::neutron::params::server_service],
Pacemaker::Resource::Ocf["${::neutron::params::ovs_cleanup_service}"]],
}
pacemaker::constraint::base { 'neutron-ovs-cleanup-to-netns-cleanup-constraint':
pacemaker::resource::service { $::ceilometer::params::agent_central_service_name :
clone_params => 'interleave=true',
require => [Pacemaker::Resource::Service[$::keystone::params::service_name],
- $mongodb_pacemaker_resource],
+ Pacemaker::Resource::Service[$::mongodb::params::service_name]],
}
pacemaker::resource::service { $::ceilometer::params::collector_service_name :
clone_params => 'interleave=true',
clone_params => 'interleave=true',
resource_params => 'startdelay=10',
}
+ pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
+ constraint_type => 'order',
+ first_resource => "${::keystone::params::service_name}-clone",
+ second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
+ Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ }
pacemaker::constraint::base { 'ceilometer-central-then-ceilometer-collector-constraint':
constraint_type => 'order',
first_resource => "${::ceilometer::params::agent_central_service_name}-clone",
Pacemaker::Resource::Service[$::mongodb::params::service_name]],
}
}
- pacemaker::constraint::base { 'keystone-then-ceilometer-central-constraint':
- constraint_type => 'order',
- first_resource => "${::keystone::params::service_name}-clone",
- second_resource => "${::ceilometer::params::agent_central_service_name}-clone",
- first_action => 'start',
- second_action => 'start',
- require => [Pacemaker::Resource::Service[$::ceilometer::params::agent_central_service_name],
- Pacemaker::Resource::Service[$::keystone::params::service_name]],
- }
# Heat
pacemaker::resource::service { $::heat::params::api_service_name :
pacemaker::resource::service { $::heat::params::engine_service_name :
clone_params => 'interleave=true',
}
+ pacemaker::constraint::base { 'keystone-then-heat-api-constraint':
+ constraint_type => 'order',
+ first_resource => "${::keystone::params::service_name}-clone",
+ second_resource => "${::heat::params::api_service_name}-clone",
+ first_action => 'start',
+ second_action => 'start',
+ require => [Pacemaker::Resource::Service[$::heat::params::api_service_name],
+ Pacemaker::Resource::Service[$::keystone::params::service_name]],
+ }
pacemaker::constraint::base { 'heat-api-then-heat-api-cfn-constraint':
constraint_type => 'order',
first_resource => "${::heat::params::api_service_name}-clone",
Pacemaker::Resource::Service[$::ceilometer::params::agent_notification_service_name]],
}
+ # Horizon
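+ # NOTE: httpd is cloned across the controllers; the ::apache::mod::status
+ # include in step 3 exposes server-status for the pacemaker resource agent
+ # to consume (see the NOTE there)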
+ pacemaker::resource::service { $::horizon::params::http_service:
+ clone_params => "interleave=true",
+ }
+
}
} #END STEP 4