For hybrid PODs (e.g. x86_64 jumpserver + control nodes, aarch64
baremetal compute nodes), the virtual nodes rely on MaaS DHCP being
up when their OS boots, so issue a `virsh reset` on them right after
the 'maas' state is applied, forcing a new DHCP request.
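
A minimal sketch of the idea (node names are illustrative only):
`virsh reset` is a hard reset, so on its next boot the guest sends a
fresh DHCP request, which the now-running MaaS DHCP server can answer.

  virsh reset ctl01   # hard-reset a virtual node (no graceful shutdown)
  virsh reset cmp001  # on boot, each guest re-DHCPs against MaaS
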
Instead of checking for online nodes using `test.ping`, use
`saltutil.sync_all`, which also syncs Salt state modules to the
virtual nodes (a step usually handled by the baremetal_init state in
HA deploys).
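
For illustration, the difference between the two readiness checks
(minion targets are examples only):

  salt 'cmp*' test.ping          # old: only proves the minion responds
  salt 'cmp*' saltutil.sync_all  # new: also pushes custom modules,
                                 #      states and grains to the minion
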
JIRA: FUEL-338
Change-Id: If689d057dc4438102c3a7428a97b9638e21bfdc5
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
/root/fuel/mcp/config/states/${state}"
+ if [ "${state}" = 'maas' ]; then
+ # For hybrid PODs (virtual + baremetal nodes), the virtual nodes
+ # should be reset to force a DHCP request from MaaS DHCP
+ reset_vms "${virtual_nodes[@]}"
+ fi
done
fi
# Check all baremetal nodes are available
-wait_for 5.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
+wait_for 10.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
wait_for 10.0 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"
"<host mac='${amac}' name='mas01' ip='${MAAS_IP}'/>" --live --config
}
+function reset_vms {
+ local vnodes=("$@")
+ local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
+
+ # reset non-infrastructure vms, wait for them to come back online
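+  # (reset all nodes first, then poll, so the guests boot in parallel)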
+ for node in "${vnodes[@]}"; do
+ if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
+ virsh reset "${node}"
+ fi
+ done
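+  # wait for each reset node's minion to respond and resync its modules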
+ for node in "${vnodes[@]}"; do
+ if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
+ wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all"
+ fi
+ done
+}
+
function start_vms {
local vnodes=("$@")