From 827d8e0ea6f083f3b2082c8906a41258ed52f51a Mon Sep 17 00:00:00 2001
From: Alexandru Avadanii
Date: Thu, 2 Aug 2018 02:59:12 +0200
Subject: [PATCH] [lib.sh] Reset virtual nodes after MaaS install

For hybrid PODs (e.g. x86_64 jumpserver + control nodes, aarch64
baremetal compute nodes), the virtual nodes rely on MaaS DHCP to be
up when the OS boots, so issue a `virsh reset` accordingly.

Instead of checking for online nodes using `test.ping`, use
`saltutil.sync_all` to also sync Salt state modules to the virtual
nodes (usually handled by the baremetal_init state in HA deploys).

JIRA: FUEL-338

Change-Id: If689d057dc4438102c3a7428a97b9638e21bfdc5
Signed-off-by: Alexandru Avadanii
---
 ci/deploy.sh           |  5 +++++
 mcp/config/states/maas |  2 +-
 mcp/scripts/lib.sh     | 17 +++++++++++++++++
 3 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/ci/deploy.sh b/ci/deploy.sh
index 10b639e3c..40176073d 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -313,6 +313,11 @@ else
     wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
       CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
       /root/fuel/mcp/config/states/${state}"
+    if [ "${state}" = 'maas' ]; then
+      # For hybrid PODs (virtual + baremetal nodes), the virtual nodes
+      # should be reset to force a DHCP request from MaaS DHCP
+      reset_vms "${virtual_nodes[@]}"
+    fi
   done
 fi

diff --git a/mcp/config/states/maas b/mcp/config/states/maas
index ec2458234..f321b7160 100755
--- a/mcp/config/states/maas
+++ b/mcp/config/states/maas
@@ -101,6 +101,6 @@ salt -C 'mas01*' pillar.item\
 maas:region:admin:password

 # Check all baremetal nodes are available
-wait_for 5.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
+wait_for 10.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"

 wait_for 10.0 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"
diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
index c9c1bbd0a..9c12bff64 100644
--- a/mcp/scripts/lib.sh
+++ b/mcp/scripts/lib.sh
@@ -452,6 +452,23 @@ function update_mcpcontrol_network {
     "" --live --config
 }

+function reset_vms {
+  local vnodes=("$@")
+  local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
+
+  # reset non-infrastructure vms, wait for them to come back online
+  for node in "${vnodes[@]}"; do
+    if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
+      virsh reset "${node}"
+    fi
+  done
+  for node in "${vnodes[@]}"; do
+    if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
+      wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all"
+    fi
+  done
+}
+
 function start_vms {
   local vnodes=("$@")

-- 
2.16.6
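
A minimal standalone sketch of the reset-and-resync flow that reset_vms
implements, for rehearsing the logic outside the deploy scripts. The node
names and the simplified wait_for stub below are illustrative assumptions;
the real wait_for helper lives in mcp/scripts/lib.sh, and the real function
runs the salt command over ssh to the Salt master rather than locally:

#!/usr/bin/env bash
# Simplified stand-in for lib.sh's wait_for: retry a command string a
# number of times, sleeping between attempts (fractional counts truncate)
function wait_for {
  local attempts=${1%.*}; shift
  local i
  for ((i = 1; i <= attempts; i++)); do
    eval "$*" && return 0
    sleep 10
  done
  return 1
}

# Illustrative node list for a hybrid POD (names are assumptions)
virtual_nodes=('cfg01' 'mas01' 'ctl01' 'gtw01')

# Mirror reset_vms: skip the infrastructure VMs (cfg01, mas01), reset the
# rest so their next boot picks up a DHCP lease from MaaS, then wait for
# each minion to answer a saltutil.sync_all
for node in "${virtual_nodes[@]}"; do
  if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
    virsh reset "${node}"
  fi
done
for node in "${virtual_nodes[@]}"; do
  if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
    wait_for 20.0 "salt -C '${node}*' saltutil.sync_all"
  fi
done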
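
Once the VMs are back online, the same compound targeting used by the maas
state above can double-check the result by hand; test.ping is the standard
Salt test module, shown here as an assumed manual verification step run on
the Salt master (cfg01):

# Sync modules to, then ping, everything except the infrastructure VMs
salt -C '* and not cfg01* and not mas01*' saltutil.sync_all
salt -C '* and not cfg01* and not mas01*' test.ping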