cluster_sync_timeout=1800
if [[ -n $(is_bootstrap_node) ]]; then
    check_clean_cluster
fi
check_galera_root_password
check_disk_for_mysql_dump
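# (Assumption: the check_* helpers above, and check_resource /
# manage_systemd_service below, come from the common pacemaker upgrade
# functions sourced by this script; each aborts early if its precondition
# is not met.)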
# We want to disable fencing during the cluster --stop as it might fence
# nodes where a service fails to stop, which could be fatal during an upgrade
# procedure. So we remember the stonith state: if it was enabled, we re-enable
# it at the end of this script.
STONITH_STATE=$(pcs property show stonith-enabled | grep "stonith-enabled" | awk '{ print $2 }')
pcs property set stonith-enabled=false
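# Note: `pcs property show stonith-enabled` prints a line of the form
# "stonith-enabled: <true|false>", so the grep/awk pair extracts the current
# value. If the property was never set explicitly, STONITH_STATE ends up
# empty and the re-enable check at the end of this script leaves stonith off.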
# Migrate to HA NG
if [[ -n $(is_bootstrap_node) ]]; then
    migrate_full_to_ng_ha
fi
# After migrating the cluster to HA-NG, the services no longer under
# pacemaker's control are still up and running. We need to stop them
# explicitly, otherwise during the yum upgrade the rpm %post sections will
# try to do a systemctl try-restart <service>, which is going to take a long
# time because rabbit is down. With the services already stopped,
# systemctl try-restart is a no-op.
for service in $(services_to_migrate); do
    manage_systemd_service stop "${service%%-clone}"
    # We do not reuse check_resource_systemd here because systemctl is-active
    # has been observed returning "unknown" for at least one service that was
    # actually stopped (see LP 1627254)
    timeout=60
    check_interval=3
    tstart=$(date +%s)
    tend=$(( $tstart + $timeout ))
    while (( $(date +%s) < $tend )); do
        if [[ "$(systemctl is-active ${service%%-clone})" = "active" ]]; then
            echo "$service still active, sleeping $check_interval seconds."
            sleep $check_interval
        else
            # we do not care if it is inactive, unknown or failed, as long
            # as it is not running
            break
        fi
    done
done
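# (services_to_migrate is another helper from the shared migration functions;
# assumption: it echoes the list of "<service>-clone" resource names that were
# moved from pacemaker to plain systemd control above.)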
# In case the mysql package is updated, the database on disk must be
# upgraded as well. This typically needs to happen during major
# version upgrades (e.g. 5.5 -> 5.6, 5.5 -> 10.1, ...)
#
# Because in-place upgrades are not supported across 2+ major versions
# (e.g. 5.5 -> 10.1), we rely on logical upgrades via a dump/restore cycle
# https://bugzilla.redhat.com/show_bug.cgi?id=1341968
#
# The default is to determine automatically whether an upgrade is needed
# based on mysql package versioning, but this can be overridden manually
# to support specific upgrade scenarios.
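# (Assumption: DO_MYSQL_UPGRADE, $backup_flags and $MYSQL_BACKUP_DIR are all
# set in the elided preamble of this script; DO_MYSQL_UPGRADE is 1 when the
# dump/restore path described above has to run.)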
if [[ -n $(is_bootstrap_node) ]]; then
    if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
        mysqldump $backup_flags > "$MYSQL_BACKUP_DIR/openstack_database.sql"
        cp -rdp /etc/my.cnf* "$MYSQL_BACKUP_DIR"
    fi
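    # check_resource <resource> <state> <timeout> polls pcs until the
    # resource reaches the given state or the timeout (in seconds) expires,
    # so each disable below is fully synchronous.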
    pcs resource disable redis
    check_resource redis stopped 600
    pcs resource disable rabbitmq
    check_resource rabbitmq stopped 600
    pcs resource disable galera
    check_resource galera stopped 600
    pcs resource disable openstack-cinder-volume
    check_resource openstack-cinder-volume stopped 600
    # Disable all VIPs before stopping the cluster, so that pcs doesn't use
    # one as a source address:
    # https://bugzilla.redhat.com/show_bug.cgi?id=1330688
    for vip in $(pcs resource show | grep ocf::heartbeat:IPaddr2 | grep Started | awk '{ print $1 }'); do
        pcs resource disable $vip
        check_resource $vip stopped 60
    done
    pcs cluster stop --all
fi
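# For reference, the VIP loop above matches `pcs resource show` lines such as
#   ip-192.0.2.6   (ocf::heartbeat:IPaddr2):   Started controller-0
# (illustrative address and hostname; exact spacing varies with the pcs
# version) and takes the first field as the resource name.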
# Swift isn't controlled by pacemaker
systemctl_swift stop

tstart=$(date +%s)
while systemctl is-active pacemaker; do
    sleep 5
    tnow=$(date +%s)
    if (( tnow-tstart > cluster_sync_timeout )) ; then
        echo_error "ERROR: cluster shutdown timed out"
        exit 1
    fi
done
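# The loop above exits as soon as `systemctl is-active pacemaker` returns
# non-zero, i.e. once the local pacemaker service has fully stopped following
# the cluster-wide stop issued earlier.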
# We do an sql dump *and* move the old datadir out of the way because
# that gives us an extra level of safety in case something goes wrong
# during the upgrade. Once the restore is successful we go ahead and
# remove it. If the directory already exists we bail out, as it means
# the upgrade process had issues in the last run.
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
    if [ -d $MYSQL_TEMP_UPGRADE_BACKUP_DIR ]; then
        echo_error "ERROR: mysql backup dir already exists"
        exit 1
    fi
    mv /var/lib/mysql $MYSQL_TEMP_UPGRADE_BACKUP_DIR
fi
yum -y install python-zaqarclient  # needed for os-collect-config
yum -y -q update
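# (The package update above is the step the earlier comments refer to: with
# the migrated services already stopped, the rpm %post try-restarts triggered
# by the update are no-ops.)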
# We need to ensure at least those two configuration settings, otherwise
# mariadb 10.1+ won't activate galera replication.
# wsrep_cluster_address only needs to be set; its value does not
# matter, because it is overridden by the galera resource agent.
cat >> /etc/my.cnf.d/galera.cnf <<EOF
[mysqld]
wsrep_on = ON
wsrep_cluster_address = gcomm://localhost
EOF
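# Note: `cat >>` appends, so a re-run of this script would duplicate the
# [mysqld] section. MariaDB tolerates that (the last occurrence of a setting
# wins), so it is harmless here.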
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
    # Scripts run via heat have no HOME variable set and this confuses
    # mysqladmin
    export HOME=/root

    mkdir /var/lib/mysql || /bin/true
    chown mysql:mysql /var/lib/mysql
    chmod 0755 /var/lib/mysql
    restorecon -R /var/lib/mysql/
    mysql_install_db --datadir=/var/lib/mysql --user=mysql
    chown -R mysql:mysql /var/lib/mysql/
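    # mysql_install_db creates a fresh datadir containing only the system
    # tables; on the bootstrap node it is repopulated from the dump below.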
    if [ "$(hiera -c /etc/puppet/hiera.yaml bootstrap_nodeid)" = "$(facter hostname)" ]; then
        mysqld_safe --wsrep-new-cluster &
        # We have a populated /root/.my.cnf with root/password here, so
        # we need to temporarily rename it, because the newly created
        # db is empty and no root password is set yet
        mv /root/.my.cnf /root/.my.cnf.temporary
        timeout 60 sh -c 'while ! mysql -e "" &> /dev/null; do sleep 1; done'
        mysql -u root < "$MYSQL_BACKUP_DIR/openstack_database.sql"
        mv /root/.my.cnf.temporary /root/.my.cnf
        mysqladmin -u root shutdown
        # The import was successful so we may remove the folder
        rm -r "$MYSQL_BACKUP_DIR"
    fi
fi
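# (Only the bootstrap node restores the dump: it starts a temporary
# single-node galera cluster with --wsrep-new-cluster, imports the data and
# shuts mysql down again. Assumption: the remaining controllers get the data
# back via galera state transfer once pacemaker restarts the cluster later
# in the upgrade.)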
# If we reached here without error, we can safely blow away the original
# mysql dir from every controller

# TODO: What if the upgrade fails on the bootstrap node, but not on
# this controller? Data may be lost.
if [ $DO_MYSQL_UPGRADE -eq 1 ]; then
    rm -r $MYSQL_TEMP_UPGRADE_BACKUP_DIR
fi
# Reset stonith back to true, if it was true before we disabled it, prior to
# starting the cluster (quoting $STONITH_STATE avoids a test error when the
# property was unset and the variable is empty)
if [ "$STONITH_STATE" == "true" ]; then
    pcs -f /var/lib/pacemaker/cib/cib.xml property set stonith-enabled=true
fi
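# The -f flag makes pcs edit the CIB file on disk directly, which is required
# here because the cluster is stopped; the restored property takes effect
# when pacemaker starts again.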
# Pin messages sent to compute nodes to kilo; these will be upgraded later
crudini --set /etc/nova/nova.conf upgrade_levels compute "$upgrade_level_nova_compute"
# https://bugzilla.redhat.com/show_bug.cgi?id=1284047
# Change-Id: Ib3f6c12ff5471e1f017f28b16b1e6496a4a4b435
crudini --set /etc/ceilometer/ceilometer.conf DEFAULT rpc_backend rabbit
# https://bugzilla.redhat.com/show_bug.cgi?id=1284058
# Ifd1861e3df46fad0e44ff9b5cbd58711bbc87c97 Swift Ceilometer middleware no longer exists
crudini --set /etc/swift/proxy-server.conf pipeline:main pipeline "catch_errors healthcheck cache ratelimit tempurl formpost authtoken keystone staticweb proxy-logging proxy-server"
# LP: 1615035, required only for M/N upgrade.
crudini --set /etc/nova/nova.conf DEFAULT scheduler_host_manager host_manager
# LP: 1627450, required only for M/N upgrade.
crudini --set /etc/nova/nova.conf DEFAULT scheduler_driver filter_scheduler

crudini --set /etc/sahara/sahara.conf DEFAULT plugins ambari,cdh,mapr,vanilla,spark,storm
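# For reference, each `crudini --set <file> <section> <key> <value>` call
# above creates or updates a single ini entry, e.g. the nova pin results in:
#   [upgrade_levels]
#   compute = <value of $upgrade_level_nova_compute>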