X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=puppet%2Fmanifests%2Fovercloud_controller_pacemaker.pp;h=ce8e81ee077aac95e1ecfa8e565b12db13586be0;hb=eaa66742a7de7be8df0f75c8fcb28b5e89572451;hp=6512dc109ae09247f93697c95ef4e30d12194aa9;hpb=ada9662b8b92fd4ec925407fd675c7941f6a3267;p=apex-tripleo-heat-templates.git diff --git a/puppet/manifests/overcloud_controller_pacemaker.pp b/puppet/manifests/overcloud_controller_pacemaker.pp index 6512dc10..ce8e81ee 100644 --- a/puppet/manifests/overcloud_controller_pacemaker.pp +++ b/puppet/manifests/overcloud_controller_pacemaker.pp @@ -13,6 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. +Pcmk_resource <| |> { + tries => 10, + try_sleep => 3, +} + if !str2bool(hiera('enable_package_install', 'false')) { case $::osfamily { 'RedHat': { @@ -26,20 +31,35 @@ if !str2bool(hiera('enable_package_install', 'false')) { if $::hostname == downcase(hiera('bootstrap_nodeid')) { $pacemaker_master = true + $sync_db = true } else { $pacemaker_master = false + $sync_db = false } +# When to start and enable services which haven't been Pacemakerized +# FIXME: remove when we start all OpenStack services using Pacemaker +# (occurences of this variable will be gradually replaced with false) +$non_pcmk_start = hiera('step') >= 4 + if hiera('step') >= 1 { - $controller_node_ips = split(hiera('controller_node_ips'), ',') + create_resources(sysctl::value, hiera('sysctl_settings'), {}) + + if count(hiera('ntp::servers')) > 0 { + include ::ntp + } + $controller_node_ips = split(hiera('controller_node_ips'), ',') + $controller_node_names = split(downcase(hiera('controller_node_names')), ',') class { '::tripleo::loadbalancer' : - controller_hosts => $controller_node_ips, - manage_vip => false, + controller_hosts => $controller_node_ips, + controller_hosts_names => $controller_node_names, + manage_vip => false, + haproxy_service_manage => false, } - $pacemaker_cluster_members = regsubst(hiera('controller_node_ips'), ',', ' ', 'G') + $pacemaker_cluster_members = downcase(regsubst(hiera('controller_node_names'), ',', ' ', 'G')) user { 'hacluster': ensure => present, } -> @@ -53,6 +73,87 @@ if hiera('step') >= 1 { class { '::pacemaker::stonith': disable => true, } + + # Only configure RabbitMQ in this step, don't start it yet to + # avoid races where non-master nodes attempt to start without + # config (eg. 
binding on 0.0.0.0) + # The module ignores erlang_cookie if cluster_config is false + class { '::rabbitmq': + service_manage => false, + tcp_keepalive => false, + config_kernel_variables => hiera('rabbitmq_kernel_variables'), + config_variables => hiera('rabbitmq_config_variables'), + environment_variables => hiera('rabbitmq_environment'), + } -> + file { '/var/lib/rabbitmq/.erlang.cookie': + ensure => 'present', + owner => 'rabbitmq', + group => 'rabbitmq', + mode => '0400', + content => hiera('rabbitmq::erlang_cookie'), + replace => true, + } + + # MongoDB + include ::mongodb::globals + + # FIXME: replace with service_manage => false on ::mongodb::server + # when this is merged: https://github.com/puppetlabs/pupp etlabs-mongodb/pull/198 + class { '::mongodb::server' : + service_ensure => undef, + service_enable => false, + } + + # Galera + if str2bool(hiera('enable_galera', 'true')) { + $mysql_config_file = '/etc/my.cnf.d/galera.cnf' + } else { + $mysql_config_file = '/etc/my.cnf.d/server.cnf' + } + $galera_nodes = downcase(hiera('galera_node_names', $::hostname)) + $galera_nodes_count = count(split($galera_nodes, ',')) + + $mysqld_options = { + 'mysqld' => { + 'skip-name-resolve' => '1', + 'binlog_format' => 'ROW', + 'default-storage-engine' => 'innodb', + 'innodb_autoinc_lock_mode' => '2', + 'innodb_locks_unsafe_for_binlog'=> '1', + 'query_cache_size' => '0', + 'query_cache_type' => '0', + 'bind-address' => hiera('controller_host'), + 'max_connections' => '1024', + 'open_files_limit' => '-1', + 'wsrep_provider' => '/usr/lib64/galera/libgalera_smm.so', + 'wsrep_cluster_name' => 'galera_cluster', + 'wsrep_slave_threads' => '1', + 'wsrep_certify_nonPK' => '1', + 'wsrep_max_ws_rows' => '131072', + 'wsrep_max_ws_size' => '1073741824', + 'wsrep_debug' => '0', + 'wsrep_convert_LOCK_to_trx' => '0', + 'wsrep_retry_autocommit' => '1', + 'wsrep_auto_increment_control' => '1', + 'wsrep_drupal_282555_workaround'=> '0', + 'wsrep_causal_reads' => '0', + 'wsrep_notify_cmd' => '', + 'wsrep_sst_method' => 'rsync', + } + } + + class { '::mysql::server': + create_root_user => false, + create_root_my_cnf => false, + config_file => $mysql_config_file, + override_options => $mysqld_options, + service_manage => false, + } + +} + +if hiera('step') >= 2 { + if $pacemaker_master { $control_vip = hiera('tripleo::loadbalancer::controller_virtual_ip') pacemaker::resource::ip { 'control_vip': @@ -63,55 +164,54 @@ if hiera('step') >= 1 { ip_address => $public_vip, } pacemaker::resource::service { 'haproxy': - clone => true, + clone_params => true, } - } - - Class['::pacemaker::corosync'] -> Pacemaker::Resource::Service <| |> - -} -if hiera('step') >= 2 { - - if count(hiera('ntp::servers')) > 0 { - include ::ntp - } - - # MongoDB - if downcase(hiera('ceilometer_backend')) == 'mongodb' { - include ::mongodb::globals - - class {'::mongodb::server' : - service_ensure => undef + pacemaker::resource::ocf { 'rabbitmq': + ocf_agent_name => 'heartbeat:rabbitmq-cluster', + resource_params => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'', + clone_params => 'ordered=true interleave=true', + require => Class['::rabbitmq'], } - $mongo_node_ips = split(hiera('mongo_node_ips'), ',') - $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017') - $mongo_node_string = join($mongo_node_ips_with_port, ',') - - $mongodb_replset = hiera('mongodb::server::replset') - $ceilometer_mongodb_conn_string = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" - if downcase(hiera('bootstrap_nodeid')) == $::hostname 
{ - - pacemaker::resource::service { 'mongod' : - options => "op start timeout=120s", - clone => true, - before => Exec['mongodb-ready'], + + if downcase(hiera('ceilometer_backend')) == 'mongodb' { + pacemaker::resource::service { $::mongodb::params::service_name : + op_params => 'start timeout=120s', + clone_params => true, + require => Class['::mongodb::server'], + before => Exec['mongodb-ready'], } # NOTE (spredzy) : The replset can only be run # once all the nodes have joined the cluster. + $mongo_node_ips = split(hiera('mongo_node_ips'), ',') + $mongo_node_ips_with_port = suffix($mongo_node_ips, ':27017') + $mongo_node_string = join($mongo_node_ips_with_port, ',') + $mongodb_replset = hiera('mongodb::server::replset') $mongodb_cluster_ready_command = join(suffix(prefix($mongo_node_ips, '/bin/nc -w1 '), ' 27017 < /dev/null'), ' && ') exec { 'mongodb-ready' : command => $mongodb_cluster_ready_command, - timeout => 600, - tries => 60, + timeout => 30, + tries => 180, try_sleep => 10, - before => Mongodb_replset[$mongodb_replset], } - mongodb_replset { $mongodb_replset : members => $mongo_node_ips_with_port, + require => Exec['mongodb-ready'], } } + + pacemaker::resource::ocf { 'galera' : + ocf_agent_name => 'heartbeat:galera', + op_params => 'promote timeout=300s on-fail=block --master', + meta_params => "master-max=${galera_nodes_count} ordered=true", + resource_params => "additional_parameters='--open-files-limit=16384' enable_creation=true wsrep_cluster_address='gcomm://${galera_nodes}'", + require => Class['::mysql::server'], + before => Exec['galera-ready'], + } + mysql_user { 'clustercheckuser@localhost' : + password_hash => mysql_password($clustercheck_password), + require => Exec['galera-ready'], + } } # Redis @@ -135,111 +235,110 @@ if hiera('step') >= 2 { } } - if str2bool(hiera('enable_galera', 'true')) { - $mysql_config_file = '/etc/my.cnf.d/galera.cnf' - } else { - $mysql_config_file = '/etc/my.cnf.d/server.cnf' + exec { 'galera-ready' : + command => '/usr/bin/clustercheck >/dev/null', + timeout => 30, + tries => 180, + try_sleep => 10, + environment => ["AVAILABLE_WHEN_READONLY=0"], + require => File['/etc/sysconfig/clustercheck'], } - # TODO Galara - class { 'mysql::server': - config_file => $mysql_config_file, - override_options => { - 'mysqld' => { - 'bind-address' => hiera('controller_host') - } - } + + file { '/etc/sysconfig/clustercheck' : + ensure => file, + content => "MYSQL_USERNAME=root\n +MYSQL_PASSWORD=''\n +MYSQL_HOST=localhost\n", + } + + xinetd::service { 'galera-monitor' : + port => '9200', + server => '/usr/bin/clustercheck', + per_source => 'UNLIMITED', + log_on_success => '', + log_on_failure => 'HOST', + flags => 'REUSE', + service_type => 'UNLISTED', + user => 'root', + group => 'root', + require => File['/etc/sysconfig/clustercheck'], } - # FIXME: this should only occur on the bootstrap host (ditto for db syncs) # Create all the database schemas # Example DSN format: mysql://user:password@host/dbname - $allowed_hosts = ['%',hiera('controller_host')] - $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]') - class { 'keystone::db::mysql': - user => $keystone_dsn[3], - password => $keystone_dsn[4], - host => $keystone_dsn[5], - dbname => $keystone_dsn[6], - allowed_hosts => $allowed_hosts, - } - $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]') - class { 'glance::db::mysql': - user => $glance_dsn[3], - password => $glance_dsn[4], - host => $glance_dsn[5], - dbname => $glance_dsn[6], - allowed_hosts => 
$allowed_hosts, - } - $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]') - class { 'nova::db::mysql': - user => $nova_dsn[3], - password => $nova_dsn[4], - host => $nova_dsn[5], - dbname => $nova_dsn[6], - allowed_hosts => $allowed_hosts, - } - $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]') - class { 'neutron::db::mysql': - user => $neutron_dsn[3], - password => $neutron_dsn[4], - host => $neutron_dsn[5], - dbname => $neutron_dsn[6], - allowed_hosts => $allowed_hosts, - } - $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]') - class { 'cinder::db::mysql': - user => $cinder_dsn[3], - password => $cinder_dsn[4], - host => $cinder_dsn[5], - dbname => $cinder_dsn[6], - allowed_hosts => $allowed_hosts, - } - $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]') - class { 'heat::db::mysql': - user => $heat_dsn[3], - password => $heat_dsn[4], - host => $heat_dsn[5], - dbname => $heat_dsn[6], - allowed_hosts => $allowed_hosts, - } - if downcase(hiera('ceilometer_backend')) == 'mysql' { - $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]') - class { 'ceilometer::db::mysql': - user => $ceilometer_dsn[3], - password => $ceilometer_dsn[4], - host => $ceilometer_dsn[5], - dbname => $ceilometer_dsn[6], + if $sync_db { + $allowed_hosts = ['%',hiera('controller_host')] + $keystone_dsn = split(hiera('keystone::database_connection'), '[@:/?]') + class { 'keystone::db::mysql': + user => $keystone_dsn[3], + password => $keystone_dsn[4], + host => $keystone_dsn[5], + dbname => $keystone_dsn[6], allowed_hosts => $allowed_hosts, + require => Exec['galera-ready'], } - } - - # the module ignores erlang_cookie if cluster_config is false - file { '/var/lib/rabbitmq/.erlang.cookie': - ensure => 'present', - owner => 'rabbitmq', - group => 'rabbitmq', - mode => '0400', - content => hiera('rabbitmq::erlang_cookie'), - replace => true, - } -> - class { '::rabbitmq': - service_manage => false, - environment_variables => { - 'RABBITMQ_NODENAME' => "rabbit@$::hostname", - }, - } - if $pacemaker_master { - pacemaker::resource::ocf { 'rabbitmq': - resource_name => 'heartbeat:rabbitmq-cluster', - options => 'set_policy=\'ha-all ^(?!amq\.).* {"ha-mode":"all"}\'', - clone => true, - require => Class['::rabbitmq'], + $glance_dsn = split(hiera('glance::api::database_connection'), '[@:/?]') + class { 'glance::db::mysql': + user => $glance_dsn[3], + password => $glance_dsn[4], + host => $glance_dsn[5], + dbname => $glance_dsn[6], + allowed_hosts => $allowed_hosts, + require => Exec['galera-ready'], + } + $nova_dsn = split(hiera('nova::database_connection'), '[@:/?]') + class { 'nova::db::mysql': + user => $nova_dsn[3], + password => $nova_dsn[4], + host => $nova_dsn[5], + dbname => $nova_dsn[6], + allowed_hosts => $allowed_hosts, + require => Exec['galera-ready'], + } + $neutron_dsn = split(hiera('neutron::server::database_connection'), '[@:/?]') + class { 'neutron::db::mysql': + user => $neutron_dsn[3], + password => $neutron_dsn[4], + host => $neutron_dsn[5], + dbname => $neutron_dsn[6], + allowed_hosts => $allowed_hosts, + require => Exec['galera-ready'], + } + $cinder_dsn = split(hiera('cinder::database_connection'), '[@:/?]') + class { 'cinder::db::mysql': + user => $cinder_dsn[3], + password => $cinder_dsn[4], + host => $cinder_dsn[5], + dbname => $cinder_dsn[6], + allowed_hosts => $allowed_hosts, + require => Exec['galera-ready'], + } + $heat_dsn = split(hiera('heat::database_connection'), '[@:/?]') + class { 'heat::db::mysql': + user 
=> $heat_dsn[3], + password => $heat_dsn[4], + host => $heat_dsn[5], + dbname => $heat_dsn[6], + allowed_hosts => $allowed_hosts, + require => Exec['galera-ready'], + } + if downcase(hiera('ceilometer_backend')) == 'mysql' { + $ceilometer_dsn = split(hiera('ceilometer_mysql_conn_string'), '[@:/?]') + class { 'ceilometer::db::mysql': + user => $ceilometer_dsn[3], + password => $ceilometer_dsn[4], + host => $ceilometer_dsn[5], + dbname => $ceilometer_dsn[6], + allowed_hosts => $allowed_hosts, + require => Exec['galera-ready'], + } } } # pre-install swift here so we can build rings include ::swift + # Ceph $cinder_enable_rbd_backend = hiera('cinder_enable_rbd_backend', false) $enable_ceph = $cinder_enable_rbd_backend @@ -255,11 +354,18 @@ if hiera('step') >= 2 { include ::ceph::profile::osd } + # Memcached + include ::memcached + } #END STEP 2 if hiera('step') >= 3 { - include ::keystone + class { '::keystone': + sync_db => $sync_db, + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } #TODO: need a cleanup-keystone-tokens.sh solution here keystone_config { @@ -304,27 +410,62 @@ if hiera('step') >= 3 { # TODO: notifications, scrubber, etc. include ::glance class { 'glance::api': - known_stores => [$glance_store] + known_stores => [$glance_store], + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::glance::registry' : + sync_db => $sync_db, + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, } - include ::glance::registry include join(['::glance::backend::', $glance_backend]) class { 'nova': glance_api_servers => join([hiera('glance_protocol'), '://', hiera('controller_virtual_ip'), ':', hiera('glance_port')]), } - include ::nova::api - include ::nova::cert - include ::nova::conductor - include ::nova::consoleauth + class { '::nova::api' : + sync_db => $sync_db, + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::nova::cert' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::nova::conductor' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::nova::consoleauth' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::nova::vncproxy' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::nova::scheduler' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } include ::nova::network::neutron - include ::nova::vncproxy - include ::nova::scheduler include ::neutron - include ::neutron::server - include ::neutron::agents::dhcp - include ::neutron::agents::l3 + class { '::neutron::server' : + sync_db => $sync_db, + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::neutron::agents::dhcp' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::neutron::agents::l3' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } file { '/etc/neutron/dnsmasq-neutron.conf': content => hiera('neutron_dnsmasq_options'), @@ -341,11 +482,15 @@ if hiera('step') >= 3 { } class { 'neutron::agents::ml2::ovs': + # manage_service => $non_pcmk_start, -- not implemented + enabled => $non_pcmk_start, bridge_mappings => split(hiera('neutron_bridge_mappings'), ','), tunnel_types => split(hiera('neutron_tunnel_types'), ','), } class { 'neutron::agents::metadata': + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, auth_url => join(['http://', hiera('controller_virtual_ip'), ':35357/v2.0']), } @@ 
-355,10 +500,20 @@ if hiera('step') >= 3 { Service['neutron-server'] -> Service['neutron-metadata'] include ::cinder - include ::cinder::api + class { '::cinder::api': + sync_db => $sync_db, + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::cinder::scheduler' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::cinder::volume' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } include ::cinder::glance - include ::cinder::scheduler - include ::cinder::volume class {'cinder::setup_test_volume': size => join([hiera('cinder_lvm_loop_device_size'), 'M']), } @@ -406,8 +561,10 @@ if hiera('step') >= 3 { } # swift proxy - include ::memcached - include ::swift::proxy + class { '::swift::proxy' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } include ::swift::proxy::proxy_logging include ::swift::proxy::healthcheck include ::swift::proxy::cache @@ -422,9 +579,21 @@ if hiera('step') >= 3 { # swift storage if str2bool(hiera('enable_swift_storage', 'true')) { - class {'swift::storage::all': + class {'::swift::storage::all': mount_check => str2bool(hiera('swift_mount_check')) } + class {'::swift::storage::account': + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class {'::swift::storage::container': + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class {'::swift::storage::object': + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } if(!defined(File['/srv/node'])) { file { '/srv/node': ensure => directory, @@ -445,19 +614,38 @@ if hiera('step') >= 3 { $ceilometer_database_connection = hiera('ceilometer_mysql_conn_string') } default : { - $ceilometer_database_connection = $ceilometer_mongodb_conn_string + $ceilometer_database_connection = "mongodb://${mongo_node_string}/ceilometer?replicaSet=${mongodb_replset}" } } include ::ceilometer - include ::ceilometer::api - include ::ceilometer::agent::notification - include ::ceilometer::agent::central - include ::ceilometer::alarm::notifier - include ::ceilometer::alarm::evaluator + class { '::ceilometer::api' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::ceilometer::agent::notification' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::ceilometer::agent::central' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::ceilometer::alarm::notifier' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::ceilometer::alarm::evaluator' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::ceilometer::collector' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } include ::ceilometer::expirer - include ::ceilometer::collector class { '::ceilometer::db' : database_connection => $ceilometer_database_connection, + sync_db => $sync_db, } class { 'ceilometer::agent::auth': auth_url => join(['http://', hiera('controller_virtual_ip'), ':5000/v2.0']), @@ -466,11 +654,25 @@ if hiera('step') >= 3 { Cron <| title == 'ceilometer-expirer' |> { command => "sleep $((\$(od -A n -t d -N 3 /dev/urandom) % 86400)) && ${::ceilometer::params::expirer_command}" } # Heat - include ::heat - include ::heat::api - include ::heat::api_cfn - include ::heat::api_cloudwatch - include ::heat::engine + class { '::heat' : + sync_db => $sync_db, + } + class { '::heat::api' : + manage_service => $non_pcmk_start, + enabled => 
$non_pcmk_start, + } + class { '::heat::api_cfn' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::heat::api_cloudwatch' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } + class { '::heat::engine' : + manage_service => $non_pcmk_start, + enabled => $non_pcmk_start, + } # Horizon $vhost_params = { add_listen => false } @@ -490,3 +692,7 @@ if hiera('step') >= 3 { } } #END STEP 3 + +if hiera('step') >= 4 { + # TODO: pacemaker::resource::service for OpenStack services go here +} #END STEP 4
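
A minimal sketch of what could eventually fill the step-4 TODO above, reusing the pacemaker::resource::service define already applied to haproxy and mongod earlier in this change; the openstack-keystone service name, the clone_params value, and the $pacemaker_master guard are illustrative assumptions, not part of this patch:

  if $pacemaker_master {
    # Illustrative only: register an already-configured OpenStack service as a
    # cloned Pacemaker resource, mirroring the haproxy and mongod handling above.
    pacemaker::resource::service { 'openstack-keystone':
      clone_params => 'interleave=true',
    }
  }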