3 # Special pieces of upgrade migration logic go into this
4 # file. E.g. Pacemaker cluster transitions for existing deployments,
5 # matching changes to overcloud_controller_pacemaker.pp (Puppet
6 # handles deployment, this file handles migrations).
8 # This file shouldn't execute any action on its own, all logic should
9 # be wrapped into bash functions. Upgrade scripts will source this
10 # file and call the functions defined in this file where appropriate.
12 # The migration functions should be idempotent. If the migration has
13 # been already applied, it should be possible to call the function
14 # again without damaging the deployment or failing the upgrade.
16 # If the major version of mysql is going to change after the major
17 # upgrade, the database must be upgraded on disk to avoid failures
18 # due to internal incompatibilities between major mysql versions
19 # https://bugs.launchpad.net/tripleo/+bug/1587449
20 # This function detects whether a database upgrade is required
21 # after a mysql package upgrade. It returns 0 when no major upgrade
22 # has to take place, 1 otherwise.
23 function is_mysql_upgrade_needed {
# Detect whether a major mysql/mariadb upgrade is pending after the package
# upgrade; per the header comment above it reports "0" when no major
# upgrade is needed and "1" otherwise.
# NOTE(review): this chunk elides several lines (the embedded numbering
# jumps 26->31, 31->34, 35->40 and 53->60): $name, $ret, the early-exit
# body of the check below and the final echo statements live on those
# missing lines -- confirm against the full file before editing.
24 # The name of the package which provides mysql might differ
25 # after the upgrade. Consider the generic package name, which
26 # should capture the major version change (e.g. 5.5 -> 10.1)
31 output=$(yum -q check-update $name)
# yum check-update exits with status 100 when updates are available for
# the package; any other status means there is no pending update.
34 if [ $ret -ne 100 ]; then
35 # no updates so we exit
# Installed epoch/version/release; "cut -d. -f-2" truncates the version
# to its first two dot-separated components (major.minor).
40 local currentepoch=$(rpm -q --qf "%{epoch}" $name)
41 local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2)
42 local currentrelease=$(rpm -q --qf "%{release}" $name)
# Candidate update's epoch/version/release as seen in the enabled repos.
43 local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name)
44 local newepoch=$(echo "$newoutput" | awk '{ print $1 }')
45 local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2)
46 local newrelease=$(echo "$newoutput" | awk '{ print $3 }')
48 # With this we trigger the dump restore/path if we change either epoch or
49 # version in the package. If only the release tag changes we do not do it.
50 # FIXME: we could refine this by trying to parse the mariadb version
51 # into X.Y.Z and trigger the update only if X and/or Y change.
# rpm.labelCompare returns -1 when the installed (epoch, version) sorts
# before the candidate one, i.e. a real epoch/version bump is pending;
# release is deliberately passed as None so release-only bumps compare equal.
# NOTE(review): "print rc" is Python 2 syntax -- this requires a Python 2
# "python" binary on the host.
52 output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc")
53 if [ "$output" != "-1" ]; then
60 # This function returns the list of services to be migrated away from pacemaker
61 # and to systemd. The reason to have these services in a separate function is because
62 # this list is needed in three different places: major_upgrade_controller_pacemaker_{1,2}
63 # and in the function to migrate the cluster from full HA to HA NG
64 function services_to_migrate {
# Emit the list of pacemaker resources that are to be migrated away from
# pacemaker and on to systemd; per the header comment above this list is
# consumed by the major_upgrade_controller_pacemaker_{1,2} scripts and by
# the full-HA -> HA-NG migration function.
# NOTE(review): the closing quote of the list below and this function's
# closing brace are not visible in this chunk (the embedded line
# numbering jumps 75->77 and 100->102) -- confirm against the full file.
# No comments are inserted inside the quoted list on purpose: they would
# become part of the string's value.
65 # The following PCMK resources are the ones that we are going to delete
66 PCMK_RESOURCE_TODELETE="
70 neutron-dhcp-agent-clone
71 neutron-l3-agent-clone
72 neutron-metadata-agent-clone
73 neutron-netns-cleanup-clone
74 neutron-openvswitch-agent-clone
75 neutron-ovs-cleanup-clone
77 openstack-aodh-evaluator-clone
78 openstack-aodh-listener-clone
79 openstack-aodh-notifier-clone
80 openstack-ceilometer-api-clone
81 openstack-ceilometer-central-clone
82 openstack-ceilometer-collector-clone
83 openstack-ceilometer-notification-clone
84 openstack-cinder-api-clone
85 openstack-cinder-scheduler-clone
86 openstack-glance-api-clone
87 openstack-glance-registry-clone
88 openstack-gnocchi-metricd-clone
89 openstack-gnocchi-statsd-clone
90 openstack-heat-api-cfn-clone
91 openstack-heat-api-clone
92 openstack-heat-api-cloudwatch-clone
93 openstack-heat-engine-clone
94 openstack-nova-api-clone
95 openstack-nova-conductor-clone
96 openstack-nova-consoleauth-clone
97 openstack-nova-novncproxy-clone
98 openstack-nova-scheduler-clone
99 openstack-sahara-api-clone
100 openstack-sahara-engine-clone
102 echo $PCMK_RESOURCE_TODELETE
# NOTE(review): the unquoted expansion above is deliberate --
# word-splitting collapses the newline-separated entries into a single
# space-separated list for the callers' "for" loops.
105 # This function will migrate a mitaka system where all the resources are managed
106 # via pacemaker to a newton setup where only a few services will be managed by pacemaker
107 # On a high-level it will operate as follows:
108 # 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
109 # during the conversion
110 # 2. Remove all the colocation constraints and then the ordering constraints, except the
111 # ones related to haproxy/VIPs which exist in Newton as well
112 # 3. Take the cluster out of maintenance-mode and do a resource cleanup
113 # 4. Remove all the resources that won't be managed by pacemaker in newton, making sure
115 # that they are stopped and removed from pacemaker's control
116 # 5. Do a resource cleanup to make sure the cluster is in a clean state
117 function migrate_full_to_ng_ha {
# Migrate a mitaka full-HA cluster to the newton HA-NG layout, following
# the five-step plan in the header comment above.
# NOTE(review): this chunk elides several lines (the embedded numbering
# has gaps, e.g. 122->125, 130->133, 137->139, 141->143, 156->159 and
# 161->165) which presumably hold the matching exit/fi/done terminators
# and the "pcs resource cleanup" calls; the function also continues past
# the end of this chunk. Confirm against the full file before editing.
118 if [[ -n $(pcmk_running) ]]; then
# Freeze the cluster: while maintenance-mode is set, CIB edits below do
# not trigger actual service starts/stops.
119 pcs property set maintenance-mode=true
120 # We are making sure here that the property has propagated everywhere
121 if ! timeout -k 10 300 crm_resource --wait; then
122 echo_error "ERROR: cluster remained unstable after setting maintenance-mode for more than 300 seconds, exiting."
125 # First we go through all the colocation constraints (except the ones we want to keep, i.e. the haproxy/ip ones)
126 # and we remove those
# Extract the constraint ids from "pcs config show": the id is the last
# field, between the ':' and the closing ')'.
127 COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
128 for constraint in $COL_CONSTRAINTS; do
129 log_debug "Deleting colocation constraint $constraint from CIB"
130 pcs constraint remove "$constraint"
133 # Now we kill all the ordering constraints (except the haproxy/ip ones)
134 ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: |cut -f1 -d\))
135 for constraint in $ORD_CONSTRAINTS; do
136 log_debug "Deleting ordering constraint $constraint from CIB"
137 pcs constraint remove "$constraint"
139 # At this stage all the pacemaker resources are removed from the CIB.
140 # Once we remove the maintenance-mode those systemd resources will keep
141 # on running. They shall be systemd enabled via the puppet converge
143 pcs property set maintenance-mode=false
145 # At this stage there are no constraints whatsoever except the haproxy/ip ones
146 # which we want to keep. We now disable and then delete each resource
147 # that will move to systemd.
148 # We want the systemd resources to be stopped before doing "yum update",
149 # that way "systemctl try-restart <service>" is no-op because the
150 # service was down already
# Snapshot pcs status once so the grep below does not re-query the
# cluster for every resource in the loop.
151 PCS_STATUS_OUTPUT="$(pcs status)"
152 for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
153 if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
154 log_debug "Deleting $resource from the CIB"
155 if ! pcs resource disable "$resource" --wait=600; then
156 echo_error "ERROR: resource $resource failed to be disabled"
159 pcs resource delete --force "$resource"
# NOTE(review): $service is never assigned in the visible part of this
# function -- the loop variable is $resource, so this message very likely
# logs an empty name. Looks like it should read "$resource"; confirm
# against the full file and fix.
161 log_debug "Service $service not found as a pacemaker resource, not trying to delete."
165 # We need to do a pcs resource cleanup here + crm_resource --wait to
166 # make sure the cluster is in a clean state before we stop everything,
167 # upgrade and restart everything
169 # We are making sure here that the cluster is stable before proceeding
170 if ! timeout -k 10 600 crm_resource --wait; then
171 echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."