# Special pieces of upgrade migration logic go into this
# file. E.g. Pacemaker cluster transitions for existing deployments,
# matching changes to overcloud_controller_pacemaker.pp (Puppet
# handles deployment, this file handles migrations).
#
# This file shouldn't execute any action on its own; all logic should
# be wrapped in bash functions. Upgrade scripts will source this
# file and call the functions defined here where appropriate.
#
# The migration functions should be idempotent. If the migration has
# already been applied, it should be possible to call the function
# again without damaging the deployment or failing the upgrade.
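
# Illustrative usage (not part of the original file): upgrade step scripts
# are expected to source this file and then call the relevant function, e.g.
#
#   source major_upgrade_pacemaker_migrations.sh   # file name/path assumed
#   rabbitmq_newton_ocata_upgrade
#
# Since the functions are idempotent, re-running an upgrade step that calls
# them is safe.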

# If the major version of mysql is going to change after the major
# upgrade, the database must be upgraded on disk to avoid failures
# due to internal incompatibilities between major mysql versions
# https://bugs.launchpad.net/tripleo/+bug/1587449
# This function detects whether a database upgrade is required
# after a mysql package upgrade. It prints 0 when no major upgrade
# has to take place and 1 otherwise (callers read the echoed value,
# not the exit status).
function is_mysql_upgrade_needed {
    # The name of the package which provides mysql might differ
    # after the upgrade. Consider the generic package name, which
    # should capture the major version change (e.g. 5.5 -> 10.1)
    local name="mariadb"
    local output
    local ret
    # yum check-update exits with 100 when updates are available, so
    # tolerate a non-zero exit status around this call
    set +e
    output=$(yum -q check-update $name)
    ret=$?
    set -e
    if [ $ret -ne 100 ]; then
        # no updates, so no on-disk upgrade is needed
        echo "0"
        return
    fi

    local currentepoch=$(rpm -q --qf "%{epoch}" $name)
    local currentversion=$(rpm -q --qf "%{version}" $name | cut -d. -f-2)
    local currentrelease=$(rpm -q --qf "%{release}" $name)
    local newoutput=$(repoquery -a --pkgnarrow=updates --qf "%{epoch} %{version} %{release}\n" $name)
    local newepoch=$(echo "$newoutput" | awk '{ print $1 }')
    local newversion=$(echo "$newoutput" | awk '{ print $2 }' | cut -d. -f-2)
    local newrelease=$(echo "$newoutput" | awk '{ print $3 }')

    # This triggers the dump/restore path if either the epoch or the
    # version of the package changes; if only the release tag changes,
    # we do not trigger it.
    # FIXME: we could refine this by trying to parse the mariadb version
    # into X.Y.Z and trigger the update only if X and/or Y change.
    output=$(python -c "import rpm; rc = rpm.labelCompare((\"$currentepoch\", \"$currentversion\", None), (\"$newepoch\", \"$newversion\", None)); print rc")
    if [ "$output" != "-1" ]; then
        # the installed package is not older than the update candidate
        echo "0"
        return
    fi
    echo "1"
}

# This function returns the list of services to be migrated away from pacemaker
# and over to systemd. The reason to keep these services in a separate function
# is that the list is needed in three different places:
# major_upgrade_controller_pacemaker_{1,2} and the function that migrates the
# cluster from full HA to HA NG.
function services_to_migrate {
    # The following PCMK resources are the ones we are going to delete
    PCMK_RESOURCE_TODELETE="
    neutron-dhcp-agent-clone
    neutron-l3-agent-clone
    neutron-metadata-agent-clone
    neutron-netns-cleanup-clone
    neutron-openvswitch-agent-clone
    neutron-ovs-cleanup-clone
    openstack-aodh-evaluator-clone
    openstack-aodh-listener-clone
    openstack-aodh-notifier-clone
    openstack-ceilometer-central-clone
    openstack-ceilometer-collector-clone
    openstack-ceilometer-notification-clone
    openstack-cinder-api-clone
    openstack-cinder-scheduler-clone
    openstack-glance-api-clone
    openstack-glance-registry-clone
    openstack-gnocchi-metricd-clone
    openstack-gnocchi-statsd-clone
    openstack-heat-api-cfn-clone
    openstack-heat-api-clone
    openstack-heat-api-cloudwatch-clone
    openstack-heat-engine-clone
    openstack-nova-api-clone
    openstack-nova-conductor-clone
    openstack-nova-consoleauth-clone
    openstack-nova-novncproxy-clone
    openstack-nova-scheduler-clone
    openstack-sahara-api-clone
    openstack-sahara-engine-clone
    "
    echo $PCMK_RESOURCE_TODELETE
}
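
# Illustrative only: one consumer of this list (an upgrade step script; the
# manage_systemd_service helper name is assumed here) stops the matching
# systemd units by stripping the "-clone" suffix, roughly:
#
#   for service in $(services_to_migrate); do
#       manage_systemd_service stop "${service%%-clone}"
#   done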

# This function will migrate a mitaka system where all the resources are managed
# via pacemaker to a newton setup where only a few services will be managed by pacemaker.
# At a high level it will operate as follows:
# 1. Set the cluster in maintenance-mode so no start/stop action will actually take place
#    during the conversion
# 2. Remove all the colocation constraints and then the ordering constraints, except the
#    ones related to haproxy/VIPs which exist in Newton as well
# 3. Take the cluster out of maintenance-mode
# 4. Remove all the resources that won't be managed by pacemaker in newton. The
#    expected outcome is that they are stopped and removed from pacemaker's control
# 5. Do a resource cleanup to make sure the cluster is in a clean state
function migrate_full_to_ng_ha {
    if [[ -n $(pcmk_running) ]]; then
        pcs property set maintenance-mode=true

        # First we go through all the colocation constraints (except the ones
        # we want to keep, i.e. the haproxy/ip ones) and we remove those
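        # Illustration (the exact "pcs config show" layout is an assumption):
        # a colocation constraint is listed roughly as
        #   resource-A-clone with resource-B-clone (score:INFINITY) (id:colocation-resource-A-resource-B-INFINITY)
        # and the awk/cut chain below keeps only the value inside "(id:...)",
        # which is the constraint id that "pcs constraint remove" expects.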
        COL_CONSTRAINTS=$(pcs config show | sed -n '/^Colocation Constraints:$/,/^$/p' | grep -v "Colocation Constraints:" | egrep -v "ip-.*haproxy" | awk '{print $NF}' | cut -f2 -d: | cut -f1 -d\))
        for constraint in $COL_CONSTRAINTS; do
            log_debug "Deleting colocation constraint $constraint from CIB"
            pcs constraint remove "$constraint"
        done

        # Now we kill all the ordering constraints (except the haproxy/ip ones)
        ORD_CONSTRAINTS=$(pcs config show | sed -n '/^Ordering Constraints:/,/^Colocation Constraints:$/p' | grep -v "Ordering Constraints:" | awk '{print $NF}' | cut -f2 -d: | cut -f1 -d\))
        for constraint in $ORD_CONSTRAINTS; do
            log_debug "Deleting ordering constraint $constraint from CIB"
            pcs constraint remove "$constraint"
        done

        # At this stage all the pacemaker resources are removed from the CIB.
        # Once we remove the maintenance-mode those systemd resources will keep
        # on running. They will be systemd-enabled later, via the puppet
        # converge step.
        pcs property set maintenance-mode=false

        # At this stage there are no constraints whatsoever except the haproxy/ip ones
        # which we want to keep. We now disable and then delete each resource
        # that will move to systemd.
        # We want the systemd resources to be stopped before doing "yum update",
        # so that "systemctl try-restart <service>" is a no-op because the
        # service is already down.
        PCS_STATUS_OUTPUT="$(pcs status)"
        for resource in $(services_to_migrate) "delay-clone" "openstack-core-clone"; do
            if echo "$PCS_STATUS_OUTPUT" | grep "$resource"; then
                log_debug "Deleting $resource from the CIB"
                if ! pcs resource disable "$resource" --wait=600; then
                    echo_error "ERROR: resource $resource failed to be disabled"
                    exit 1
                fi
                pcs resource delete --force "$resource"
            else
                log_debug "Service $resource not found as a pacemaker resource, not trying to delete."
            fi
        done

        # We need to do a pcs resource cleanup here + crm_resource --wait to
        # make sure the cluster is in a clean state before we stop everything,
        # upgrade and restart everything
        pcs resource cleanup
        # We are making sure here that the cluster is stable before proceeding
        if ! timeout -k 10 600 crm_resource --wait; then
            echo_error "ERROR: cluster remained unstable after resource cleanup for more than 600 seconds, exiting."
            exit 1
        fi
    fi
}

function disable_standalone_ceilometer_api {
    if [[ -n $(is_bootstrap_node) ]]; then
        if [[ -n $(is_pacemaker_managed openstack-ceilometer-api) ]]; then
            # Disable pacemaker resources for ceilometer-api
            manage_pacemaker_service disable openstack-ceilometer-api
            check_resource_pacemaker openstack-ceilometer-api stopped 600
            pcs resource delete openstack-ceilometer-api --wait=600
        fi
    fi
}

# This function makes sure that the rabbitmq ha policies are converted from newton to ocata.
# In newton we had:  Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"all"}"
# In ocata we want:  Attributes: set_policy="ha-all ^(?!amq\.).* {"ha-mode":"exactly","ha-params":2}"
# The number "2" above should be CEIL(N/2), where N is the number of controllers (i.e. rabbit instances).
# Note that changing an attribute like this makes the rabbitmq resource restart.
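# Worked example (illustrative): with 3 controllers, CEIL(3/2) = 2, so the
# policy becomes {"ha-mode":"exactly","ha-params":2}; with a single
# controller it stays at ha-params=1.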
function rabbitmq_newton_ocata_upgrade {
    if pcs resource show rabbitmq-clone | grep -q -E "Attributes:.*\"ha-mode\":\"all\""; then
        # The number of controllers is obtained by counting how many hostnames
        # we have in the controller_node_names hiera key
        nr_controllers=$(($(hiera controller_node_names | grep -o "," | wc -l) + 1))
        nr_queues=$(($nr_controllers / 2 + ($nr_controllers % 2)))
        if ! [ $nr_queues -gt 0 -a $nr_queues -le $nr_controllers ]; then
            echo_error "ERROR: The number of HA queues computed for this upgrade is out of range: $nr_queues"
            exit 1
        fi
        pcs resource update rabbitmq set_policy='ha-all ^(?!amq\\.).* {"ha-mode":"exactly","ha-params":'"$nr_queues}" --wait=600