set -eu
-cluster_sync_timeout=1800
-
-if pcs status 2>&1 | grep -E '(cluster is not currently running)|(OFFLINE:)'; then
- echo_error "ERROR: upgrade cannot start with some cluster nodes being offline"
- exit 1
+check_cluster
+check_pcsd
+if [[ -n $(is_bootstrap_node) ]]; then
+ check_clean_cluster
fi
-
+check_python_rpm
+check_galera_root_password
+check_disk_for_mysql_dump
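The `check_*` preflight helpers and `is_bootstrap_node` introduced here are defined in the common-functions file this script sources, not in this diff. For orientation, `is_bootstrap_node` can be inferred from the hiera/facter comparison the patch removes below: it prints a non-empty string only on the bootstrap node, which is why callers test it with `[[ -n $(is_bootstrap_node) ]]`. A minimal sketch under that assumption:

```bash
# Hedged sketch -- the real helper lives in the sourced common functions.
# Prints a non-empty marker only on the bootstrap node, so callers can
# test it with [[ -n $(is_bootstrap_node) ]].
function is_bootstrap_node {
  if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
    echo "true"
  fi
}
```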
# We want to disable fencing during the cluster --stop as it might fence
# nodes where a service fails to stop, which could be fatal during an upgrade
-# procedure. So we remember the stonith state. If it was enabled we reenable it
-# at the end of this script
+# procedure. So we remember the stonith state. If it was enabled we re-enable
+# it at the end of the upgrade, in step 2.
-STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
-pcs property set stonith-enabled=false
-
-if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
- pcs resource disable httpd
- check_resource httpd stopped 1800
- pcs resource disable openstack-core
- check_resource openstack-core stopped 1800
- pcs resource disable redis
- check_resource redis stopped 600
- pcs resource disable mongod
- check_resource mongod stopped 600
- pcs resource disable rabbitmq
- check_resource rabbitmq stopped 600
- pcs resource disable memcached
- check_resource memcached stopped 600
- pcs resource disable galera
- check_resource galera stopped 600
- # Disable all VIPs before stopping the cluster, so that pcs doesn't use one as a source address:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
- for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
- pcs resource disable $vip
- check_resource $vip stopped 60
- done
- pcs cluster stop --all
-fi
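The removed block above leans on a `check_resource` helper (also defined in the sourced common functions) that polls pacemaker until a resource reaches the requested state or a timeout expires. A hedged sketch of its shape, inferred only from the `check_resource <name> <state> <timeout>` call sites:

```bash
# Hedged sketch of check_resource, inferred from its call sites; the real
# implementation lives in the sourced common functions.
function check_resource {
  local resource=$1 state=$2 timeout=$3
  local tstart
  tstart=$(date +%s)
  while true; do
    if [ "$state" = "stopped" ]; then
      # Done once pcs no longer reports the resource as Started anywhere.
      if ! pcs status --full | grep "$resource" | grep -q Started; then
        return 0
      fi
    else
      if pcs status --full | grep "$resource" | grep -q Started; then
        return 0
      fi
    fi
    if (( $(date +%s) - tstart > timeout )); then
      echo_error "ERROR: $resource failed to reach state '$state' within ${timeout}s"
      exit 1
    fi
    sleep 4
  done
}
```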
-
-# Swift isn't controlled by pacemaker
-systemctl_swift stop
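`systemctl_swift` is another helper from the common functions; the comment explains why it exists: the swift services are managed by systemd rather than pacemaker, so stopping the cluster does not touch them. A hypothetical sketch, with the unit list reduced to a representative subset:

```bash
# Hedged sketch -- the unit names are a representative subset, and the
# real helper (in the sourced common functions) may cover more services.
function systemctl_swift {
  local action=$1
  for unit in openstack-swift-account openstack-swift-container \
              openstack-swift-object openstack-swift-proxy; do
    systemctl "$action" "$unit"
  done
}
```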
-
-tstart=$(date +%s)
-while systemctl is-active pacemaker; do
- sleep 5
- tnow=$(date +%s)
- if (( tnow-tstart > cluster_sync_timeout )) ; then
- echo_error "ERROR: cluster shutdown timed out"
- exit 1
+if [[ -n $(is_bootstrap_node) ]]; then
+ STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
+ # We create this empty file if stonith was set to true so we can re-enable stonith in step 2
+ rm -f /var/tmp/stonith-true
+ if [ "$STONITH_STATE" = "true" ]; then
+ touch /var/tmp/stonith-true
fi
-done
-
-yum -y install python-zaqarclient # needed for os-collect-config
-yum -y -q update
-
+ pcs property set stonith-enabled=false
+fi
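The marker file replaces the in-script re-enable that this patch deletes below: the script's state is gone by the time the cluster comes back, so step 2 has to rediscover whether stonith was on. A sketch of the step-2 counterpart implied by the marker file, mirroring the removed re-enable logic (the actual step-2 script is not part of this diff):

```bash
# Hedged sketch of the expected step-2 counterpart: re-enable stonith
# only if step 1 recorded that it had been enabled.
if [ -f /var/tmp/stonith-true ]; then
  pcs property set stonith-enabled=true
  rm -f /var/tmp/stonith-true
fi
```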
-# Let's reset the stonith back to true if it was true, before starting the cluster
-if [ $STONITH_STATE == "true" ]; then
- pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
+# Migrate to HA NG and fix up rabbitmq queues
+# We fix up the rabbitmq ha queues after the migration because doing so
+# restarts the rabbitmq resource. Running it after the migration means no
+# other services will be restarted, as there are no other constraints
+# left on rabbitmq at that point.
+if [[ -n $(is_bootstrap_node) ]]; then
+ migrate_full_to_ng_ha
+ rabbitmq_mitaka_newton_upgrade
fi
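`migrate_full_to_ng_ha` and `rabbitmq_mitaka_newton_upgrade` are likewise defined in the sourced common functions. The ordering argument in the comment rests on one mechanism: the queue fix-up changes a parameter on the pacemaker-managed rabbitmq resource, and updating a resource parameter is what makes pacemaker restart it. A hedged sketch of that mechanism, with an illustrative (not authoritative) policy value:

```bash
# Hedged sketch: updating a parameter on the rabbitmq resource makes
# pacemaker restart it, which is why this runs only after the migration
# has removed the constraints tying other services to rabbitmq.
# The set_policy value below is illustrative, not the real one.
function rabbitmq_mitaka_newton_upgrade {
  pcs resource update rabbitmq set_policy='ha-all ^(?!amq\.).* {"ha-mode":"all"}'
  check_resource rabbitmq started 600
}
```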
-# Pin messages sent to compute nodes to kilo, these will be upgraded later
-crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
-# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
-crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
-# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
-# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
-crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"