# NOTE(review): fragment of the 'wait_for' retry helper — the function header,
# the loop terminator and the final success/failure handling are elided from
# this view; comments below describe only the visible lines.
5 local total_attempts=$1; shift
# ${cmdstr} is presumably built from the remaining "$@" args upstream — the
# assignment is not visible here; TODO confirm against the full function.
8 echo "[NOTE] Waiting for cmd to return success: ${cmdstr}"
9 for attempt in $(seq "${total_attempts}"); do
# 'eval' re-parses the command string (needed because callers pass pipelines
# as strings); '&& break' stops retrying on first success, '|| true' keeps a
# failing attempt from aborting the script under 'set -e'.
10 eval "${cmdstr}" && break || true
# Progress dot between attempts; ${sleep_time} is set outside this fragment —
# presumably a second positional parameter; verify in the full source.
11 echo -n '.'; sleep "${sleep_time}"
15 # Wait for MaaS commissioning/deploy to finish, retry on failure
# maas_fixup: polls MAAS machine status via Salt, then repairs nodes stuck in
# failure states — deletes failed-commissioning nodes and re-commissions them,
# marks failed-deployment/Allocated nodes broken→fixed and redeploys them.
# NOTE(review): several structural lines ('done', 'fi', closing '}') are
# elided from this view; comments annotate only the visible statements.
16 function maas_fixup() {
# Status query runs the maas.machines.status formula on the MAAS node,
# YAML-formatted so the grep patterns below can match 'status:' fields.
17 local statuscmd="salt 'mas01*' --out yaml state.apply maas.machines.status"
# Poll (up to 180 attempts) until either all 5 machines reach Deployed/Ready
# or some machine reports Failed/Allocated — both outcomes end the wait so
# the fix-up logic below can react. 'tee /dev/stderr' keeps the raw status
# visible in the log while grep consumes stdout.
18 wait_for 180 "${statuscmd} | tee /dev/stderr | " \
19 "grep -Eq '((Deployed|Ready): 5|status:Failed|status:Allocated)'"
# Snapshot of the final status output, parsed twice below.
# NOTE(review): 'local var=$(cmd)' masks the command's exit status; left
# unchanged because the surrounding error handling is elided from this view.
20 local statusout=$(eval "${statuscmd}")
# Extract system_ids of nodes that failed commissioning (PCRE lookbehind/
# lookahead keep only the id between 'system_id:' and ',status:...').
22 local fcnodes=$(echo "${statusout}" | \
23 grep -Po '(?<=system_id:)(.*)(?=,status:Failed commissioning)')
# Delete each failed-commissioning node so MAAS can re-enlist it.
24 for node_system_id in ${fcnodes}; do
25 salt -C 'mas01*' state.apply maas.machines.delete \
26 pillar="{'system_id': '${node_system_id}'}"
# If anything was deleted, re-apply machine definitions to re-commission.
28 if [ -n "${fcnodes}" ]; then
29 salt -C 'mas01*' state.apply maas.machines
# Same pattern for deployment failures: nodes in 'Failed deployment' or
# stuck 'Allocated' are marked broken then fixed, which returns them to a
# deployable state, then redeployed below.
33 local fdnodes=$(echo "${statusout}" | \
34 grep -Po '(?<=system_id:)(.*)(?=,status:(Failed deployment|Allocated))')
35 for node_system_id in ${fdnodes}; do
36 salt -C 'mas01*' state.apply maas.machines.mark_broken_fixed \
37 pillar="{'system_id': '${node_system_id}'}"
39 if [ -n "${fdnodes}" ]; then
40 salt -C 'mas01*' state.apply maas.machines.deploy
47 # MaaS rack/region controller, node commissioning
# Install MAAS from its stable PPA, then lay down base OS config
# (linux/salt/openssh/ntp), networking, PXE NAT and the MAAS cluster role.
48 salt -C 'mas01*' cmd.run "add-apt-repository ppa:maas/stable"
50 salt -C 'mas01*' state.apply linux,salt,openssh,ntp
51 salt -C 'mas01*' state.apply linux.network.interface
52 salt -C 'mas01*' state.apply maas.pxe_nat
53 salt -C 'mas01*' state.apply maas.cluster
# Static route on the Salt master so it can reach the PXE/admin network
# (192.168.11.0/24) via the MAAS node; MAAS_IP defaults to 192.168.10.3.
54 salt -C 'cfg01*' cmd.run \
55 "route add -net 192.168.11.0/24 gw ${MAAS_IP:-192.168.10.3}"
# Region controller setup can fail transiently — retry up to 10 times.
57 wait_for 10 "salt -C 'mas01*' state.apply maas.region"
# Enlist/commission the machines, then let maas_fixup retry any failures.
59 salt -C 'mas01*' state.apply maas.machines
60 wait_for 10 maas_fixup
62 # cleanup outdated salt keys
# Drop every accepted minion key except cfg01/mas01 (stale keys from earlier
# runs would shadow the freshly deployed nodes). '-y' skips confirmation.
63 salt-key --out yaml | awk '!/^(minions|- cfg01|- mas01)/ {print $2}' | \
64 xargs -I{} salt-key -yd {}
66 # MaaS node deployment
# Deploy the OS onto commissioned machines; maas_fixup again retries nodes
# that end up in Failed/Allocated states.
67 salt -C 'mas01*' state.apply maas.machines.deploy
68 wait_for 10 maas_fixup
# Log the MAAS admin credentials (from pillar) for later use/debugging.
70 salt -C 'mas01*' pillar.item\
71 maas:region:admin:username \
72 maas:region:admin:password
74 # KVM, compute node prereqs (libvirt first), VCP deployment
# Sync Salt modules/states to every freshly deployed node (everything except
# the master cfg01 and the MAAS node mas01).
75 salt -C '* and not cfg01* and not mas01*' saltutil.sync_all
# KVM hosts: bridge networking, then reboot so the new interface layout
# takes effect.
77 salt -C 'kvm*' pkg.install bridge-utils
78 salt -C 'kvm*' state.apply linux.network
79 salt -C 'kvm*' system.reboot
# Wait (up to 90 attempts) until no minion reports 'Not connected', i.e. all
# rebooted nodes answer test.ping again. Note the leading '!' inverts grep.
80 wait_for 90 "! salt '*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'"
82 salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp
# libvirt must be up before salt.control can define the VCP VMs on the KVMs.
84 salt -C 'kvm*' state.sls libvirt
86 salt -C '* and not cfg01* and not mas01*' state.apply salt
87 salt -C 'kvm*' saltutil.sync_all
# salt.control spawns the virtualized control-plane (VCP) VMs.
88 salt -C 'kvm*' state.sls salt.control
# Build the list of expected VCP minion-name globs from the kvm01 pillar:
# awk picks indented 'name:' keys from the YAML and turns 'name:' → 'name*'.
90 vcp_nodes=$(salt --out yaml 'kvm01*' pillar.get salt:control:cluster:internal:node | \
91 awk '/\s+\w+:$/ {gsub(/:$/, "*"); print $1}')
93 # Check all vcp nodes are available
# Loop until a full pass where every VCP node answers test.ping; a single
# failure sets rc and restarts the pass.
# NOTE(review): rc's initialization and the loop's 'done' are elided from
# this view — presumably rc is preset non-zero and reset to 0 each pass.
95 while [ $rc -ne 0 ]; do
97 for node in $vcp_nodes; do
98 salt "$node" test.ping 2>/dev/null || { rc=$?; break; };
# Push the jump user's SSH key (first key in their authorized_keys) to all
# nodes, then converge salt/linux/ntp everywhere — each step retried x10.
103 wait_for 10 "salt -C '* and not cfg01* and not mas01*' ssh.set_auth_key ${SUDO_USER} \
104 $(awk 'NR==1{print $2}' $(eval echo ~${SUDO_USER}/.ssh/authorized_keys))"
106 wait_for 10 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"
107 wait_for 10 "salt -C '* and not cfg01* and not mas01*' state.apply salt"
108 wait_for 10 "salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp"