While at it, fix some shellcheck warnings, and s/fgrep/grep -F/g.
Change-Id: I093b7b4c196731b1ecc0c27a4111955b2e412762
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
#!/bin/bash
set -x
# Sourced path is computed at runtime, so ShellCheck cannot follow it.
# shellcheck disable=SC1090
source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"

# Reboot all compute minions, then block (bounded: 90 retries via lib.sh's
# wait_for) until every minion answers test.ping again, i.e. until the salt
# output no longer contains 'Not connected'.
salt -I 'nova:compute' system.reboot
wait_for 90 "! salt -I 'nova:compute' test.ping | " \
  "tee /dev/stderr | grep -Fq 'Not connected'"
# Re-apply network configuration after the reboot.
salt -I 'nova:compute' state.sls linux.network
# switch to UCA repos since fuel-infra packages have bugs
#!/bin/bash
set -x
# SC1090 (non-constant source) must sit directly above the 'source' line it
# silences — not above 'set -x' — for ShellCheck to honour it.
# shellcheck disable=SC1090
source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
# Wait for MaaS commissioning/deploy to finish, retry on failure
# Wait for MaaS commissioning/deploy to finish, retry on failure
#!/bin/bash
set -x
+# shellcheck disable=SC1090
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
+
salt -I 'keepalived:cluster' state.sls keepalived -b 1
salt -I 'keepalived:cluster' pillar.get keepalived:cluster:instance:VIP:address
salt -I 'nova:compute' state.sls nova
salt -I 'mongodb:server' state.sls mongodb
-while true; do sleep 5; salt -C 'I@mongodb:server and *01*' cmd.run 'mongo localhost:27017/admin' && break; done
+wait_for 90 "salt -C 'I@mongodb:server and *01*' cmd.run 'mongo localhost:27017/admin'"
salt -C 'I@mongodb:server and *01*' cmd.run 'mongo localhost:27017/admin --eval "rs.initiate()"'
salt -C 'I@mongodb:server and *01*' state.sls mongodb
#!/bin/bash
set -x
# Sourced path is computed at runtime, so ShellCheck cannot follow it.
# shellcheck disable=SC1090
source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
# KVM, compute node prereqs (libvirt first), VCP deployment
salt -C 'kvm*' pkg.install bridge-utils
salt -C 'kvm*' state.apply linux.network
salt -C 'kvm*' system.reboot
# Block until all kvm minions answer test.ping again after the reboot.
wait_for 90 "! salt 'kvm*' test.ping | tee /dev/stderr | grep -Fq 'Not connected'"
salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp
salt -C '* and not cfg01* and not mas01*' state.apply salt
salt -C 'kvm*' saltutil.sync_all
# wait_for concatenates its trailing arguments into one command string.
wait_for 10 "! salt -C 'kvm*' state.sls salt.control | " \
  "tee /dev/stderr | grep -Fq 'Not connected'"
# Collect VCP VM name globs from kvm01's pillar (':' suffix -> '*').
# NOTE(review): vcp_nodes is not referenced in this visible chunk —
# presumably consumed further down; confirm, else SC2034 applies.
vcp_nodes=$(salt --out yaml 'kvm01*' pillar.get salt:control:cluster:internal:node | \
            awk '/\s+\w+:$/ {gsub(/:$/, "*"); print $1}')
wait_for 10 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"
wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply salt"
wait_for 10 "! salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply linux,ntp | " \
  "tee /dev/stderr | grep -Fq 'Not connected'"
# Push the invoking sudo user's first authorized key to the new VCP minions.
# NOTE(review): 'eval echo ~user' only expands the home directory and
# SUDO_USER is set by sudo itself, but keep this pattern away from any
# untrusted input.
wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' ssh.set_auth_key ${SUDO_USER} \
  $(awk 'NR==1{print $2}' "$(eval echo "~${SUDO_USER}/.ssh/authorized_keys")")"
# Point the OVS database at the ODL manager, unless it is already set
# (grep -Fx: fixed-string, whole-line match of the existing manager entry).
ovs_set_manager:
  cmd.run:
    - name: "ovs-vsctl set-manager {{ client.ovsdb_server_iface }} {{ client.ovsdb_odl_iface }}"
    - unless: "ovs-vsctl get-manager | grep -Fx {{ client.ovsdb_odl_iface }}"
ovs_set_tunnel_endpoint:
cmd.run:
#######################################
# Poll ssh on the Salt master until it answers, or give up.
# Globals:   SSH_OPTS (read), SALT_MASTER (read)
# Outputs:   progress messages to stdout
# Returns:   always 0 (best-effort probe; -e is restored on exit)
#######################################
function check_connection {
  local total_attempts=60
  local sleep_time=5
  set +e
  echo '[INFO] Attempting to get into Salt master ...'
  # wait until ssh on Salt master is available
  # shellcheck disable=SC2034
  for attempt in $(seq "${total_attempts}"); do
    # SSH_OPTS is a deliberately word-split option string.
    # shellcheck disable=SC2086
    ssh ${SSH_OPTS} "ubuntu@${SALT_MASTER}" uptime
    case $? in
      # BUGFIX: the case had only a '*' arm, so a successful ssh never
      # broke the loop and all attempts always ran; stop on success.
      0) echo "${attempt}> Success"; break ;;
      *) echo "${attempt}/${total_attempts}> ssh server ain't ready yet, waiting for ${sleep_time} seconds ..." ;;
    esac
    sleep $sleep_time
  done
  set -e
}
# Apply the salt state locally on the master, sync custom modules to all
# minions, then apply baseline linux everywhere.
salt-call state.apply salt
salt '*' saltutil.sync_all
# Minions can briefly drop off while salt reconfigures itself; if any
# reported 'No response', run the state once more as a retry.
salt '*' state.apply salt | grep -Fq 'No response' && salt '*' state.apply salt
salt -C 'I@salt:master' state.sls linux
salt -C '* and not cfg01*' state.sls linux