2 ##############################################################################
3 # Copyright (c) 2017 Mirantis Inc., Enea AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
10 # Library of shell functions
# Generate (or reuse) the SSH keypair used to access MCP nodes, and install
# the private key at ${SSH_KEY}.
# Globals read: SSH_KEY, SUDO_USER.
# NOTE(review): listing is elided (original line numbers skip); some
# statements and the closing brace are not visible here.
13 function generate_ssh_key {
14 # shellcheck disable=SC2155
# Work with the key's basename in the current directory.
15 local mcp_ssh_key=$(basename "${SSH_KEY}")
# When invoked via sudo, act on behalf of the invoking (non-root) user.
17 if [ -n "${SUDO_USER}" ] && [ "${SUDO_USER}" != 'root' ]; then
# If a private key already exists at ${SSH_KEY}, regenerate its public half.
21 if [ -f "${SSH_KEY}" ]; then
23 ssh-keygen -f "${mcp_ssh_key}" -y > "${mcp_ssh_key}.pub"
# Create a fresh, passphrase-less keypair only if none exists yet.
26 [ -f "${mcp_ssh_key}" ] || ssh-keygen -f "${mcp_ssh_key}" -N ''
# Install the private key at ${SSH_KEY}, mode 0600, owned by ${user}
# (presumably derived from SUDO_USER on an elided line — TODO confirm).
27 sudo install -D -o "${user}" -m 0600 "${mcp_ssh_key}" "${SSH_KEY}"
# Download the base VM image into a local image directory.
# NOTE(review): the locals (presumably base_image=$1, image_dir=$2) are on
# elided lines — confirm against the full source.
30 function get_base_image {
34 mkdir -p "${image_dir}"
# wget -N (timestamping) skips the download if the local copy is up to date.
35 wget -P "${image_dir}" -N "${base_image}"
38 function cleanup_uefi {
39 # Clean up Ubuntu boot entry if cfg01, kvm nodes online from previous deploy
# Run the cleanup via ssh to the Salt master by default ...
40 local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
# ... but run it locally (eval) when we are already on cfg01.
41 [ ! "$(hostname)" = 'cfg01' ] || cmd_str='eval'
# On all kvm*/cmp* minions: delete every UEFI boot entry whose label contains
# "ubuntu" (only where efibootmgr exists), then wipe the EFI system partition.
# Best-effort: '|| true' keeps a previous-deploy-less run from failing.
42 ${cmd_str} "sudo salt -C 'kvm* or cmp*' cmd.run \
43 \"which efibootmgr > /dev/null 2>&1 && \
44 efibootmgr | grep -oP '(?<=Boot)[0-9]+(?=.*ubuntu)' | \
45 xargs -I{} efibootmgr --delete-bootnum --bootnum {}; \
46 rm -rf /boot/efi/*\"" || true
49 function cleanup_vms {
50 # clean up existing nodes
# Node names match 3 letters + 2 digits (e.g. cfg01, kvm01, cmp01).
# First pass: force power-off all running matching domains.
51 for node in $(virsh list --name | grep -P '\w{3}\d{2}'); do
52 virsh destroy "${node}"
# Second pass (all domains, running or not): remove their disk images and
# undefine them.
54 for node in $(virsh list --name --all | grep -P '\w{3}\d{2}'); do
# Delete backing files of *da block devices (vda/sda) listed by domblklist.
55 virsh domblklist "${node}" | awk '/^.da/ {print $2}' | \
56 xargs --no-run-if-empty -I{} sudo rm -f {}
# --nvram also removes the per-domain UEFI varstore (relevant on AArch64).
57 virsh undefine "${node}" --remove-all-storage --nvram
# Prepare per-node VM artifacts: base image, cloud-init user-data, config
# drive ISO and a resized qcow2 disk per node.
# Globals read: SSH_KEY, SALT_MASTER, CLUSTER_DOMAIN.
# NOTE(review): vnodes is presumably populated on an elided line — confirm.
61 function prepare_vms {
62 local base_image=$1; shift
63 local image_dir=$1; shift
68 get_base_image "${base_image}" "${image_dir}"
69 # shellcheck disable=SC2016
# Substitute only these two variables; everything else in the template stays
# literal (hence the single-quoted '${...}' list and the SC2016 waiver).
70 envsubst '${SALT_MASTER},${CLUSTER_DOMAIN}' < \
71 user-data.template > user-data.sh
73 for node in "${vnodes[@]}"; do
74 # create/prepare images
# Build a cloud-init config-drive ISO carrying the pub key + user-data.
75 ./create-config-drive.sh -k "$(basename "${SSH_KEY}").pub" -u user-data.sh \
76 -h "${node}" "${image_dir}/mcp_${node}.iso"
# ${base_image/*\/} strips everything up to the last '/' (basename of URL).
77 cp "${image_dir}/${base_image/*\/}" "${image_dir}/mcp_${node}.qcow2"
# Grow each node's disk; qcow2 so this does not consume 100G up front.
78 qemu-img resize "${image_dir}/mcp_${node}.qcow2" 100G
# (Re)create the virsh networks required by the deploy.
# Arguments: list of network names (in addition to the constant ones below).
82 function create_networks {
83 local vnode_networks=("$@")
84 # create required networks, including constant "mcpcontrol"
85 # FIXME(alav): since we renamed "pxe" to "mcpcontrol", we need to make sure
86 # we delete the old "pxe" virtual network, or it would cause IP conflicts.
87 # FIXME(alav): The same applies for "fuel1" virsh network.
88 for net in "fuel1" "pxe" "mcpcontrol" "${vnode_networks[@]}"; do
# Tear down any pre-existing definition of this network first.
89 if virsh net-info "${net}" >/dev/null 2>&1; then
# net-destroy fails if the net is not running; that's fine, hence '|| true'.
90 virsh net-destroy "${net}" || true
91 virsh net-undefine "${net}"
93 # in case of custom network, host should already have the bridge in place
# Only define/start a net if we ship an XML for it AND no host bridge with
# the same name already exists.
94 if [ -f "net_${net}.xml" ] && [ ! -d "/sys/class/net/${net}/bridge" ]; then
95 virsh net-define "net_${net}.xml"
96 virsh net-autostart "${net}"
97 virsh net-start "${net}"
# Define and create all VMs via virt-install.
# Arguments: image_dir, then a '|'-separated list of node descriptors (each
# 'name,ram,vcpus'), then the list of vnode networks.
# NOTE(review): listing is elided; the tail of the virt-install invocation
# and loop/function closers are not visible here.
102 function create_vms {
103 local image_dir=$1; shift
# Split the serialized node list on '|' into an array.
104 IFS='|' read -r -a vnodes <<< "$1"; shift
105 local vnode_networks=("$@")
107 # AArch64: prepare arch specific arguments
108 local virt_extra_args=""
109 if [ "$(uname -i)" = "aarch64" ]; then
110 # No Cirrus VGA on AArch64, use virtio instead
111 virt_extra_args="$virt_extra_args --video=virtio"
114 # create vms with specified options
115 for serialized_vnode_data in "${vnodes[@]}"; do
# vnode_data: [0]=name, [1]=RAM (MiB), [2]=vCPU count.
116 IFS=',' read -r -a vnode_data <<< "${serialized_vnode_data}"
118 # prepare network args
# First NIC of every node is always on the "mcpcontrol" virsh network.
119 net_args=" --network network=mcpcontrol,model=virtio"
120 if [ "${vnode_data[0]}" = "mas01" ]; then
121 # MaaS node's 3rd interface gets connected to PXE/Admin Bridge
122 vnode_networks[2]="${vnode_networks[0]}"
# Remaining NICs attach to host bridges (skipping vnode_networks[0]).
124 for net in "${vnode_networks[@]:1}"; do
125 net_args="${net_args} --network bridge=${net},model=virtio"
128 # shellcheck disable=SC2086
# net_args is intentionally unquoted: it expands to multiple --network args.
129 virt-install --name "${vnode_data[0]}" \
130 --ram "${vnode_data[1]}" --vcpus "${vnode_data[2]}" \
131 --cpu host-passthrough --accelerate ${net_args} \
132 --disk path="${image_dir}/mcp_${vnode_data[0]}.qcow2",format=qcow2,bus=virtio,cache=none,io=native \
133 --os-type linux --os-variant none \
134 --boot hd --vnc --console pty --autostart --noreboot \
# Config-drive ISO (cloud-init) attached as cdrom.
135 --disk path="${image_dir}/mcp_${vnode_data[0]}.iso",device=cdrom \
141 function update_mcpcontrol_network {
142 # set static ip address for salt master node, MaaS node
143 # shellcheck disable=SC2155
# MAC of cfg01's NIC on the "mcpcontrol" network (5th domiflist column).
144 local cmac=$(virsh domiflist cfg01 2>&1| awk '/mcpcontrol/ {print $5; exit}')
145 # shellcheck disable=SC2155
# Same for the (optional) mas01 MaaS node; empty if mas01 does not exist.
146 local amac=$(virsh domiflist mas01 2>&1| awk '/mcpcontrol/ {print $5; exit}')
# Pin cfg01 to ${SALT_MASTER} via a static DHCP host entry, both in the
# running network (--live) and its persistent definition (--config).
147 virsh net-update "mcpcontrol" add ip-dhcp-host \
148 "<host mac='${cmac}' name='cfg01' ip='${SALT_MASTER}'/>" --live --config
# Only add the mas01 entry when the node (and hence its MAC) exists.
149 [ -z "${amac}" ] || virsh net-update "mcpcontrol" add ip-dhcp-host \
150 "<host mac='${amac}' name='mas01' ip='${MAAS_IP}'/>" --live --config
# NOTE(review): fragment of a VM-start loop; the enclosing function's header
# is on an elided line (presumably a start_vms-style function — confirm).
# Boot every node, staggering starts by a random 1-5s to soften host load.
157 for node in "${vnodes[@]}"; do
158 virsh start "${node}"
159 sleep $((RANDOM%5+1))
# Poll until SSH to the Salt master answers, up to a fixed attempt budget.
# Globals read: SSH_OPTS, SALT_MASTER.
# NOTE(review): the 'case' on the ssh exit status and sleep_time's assignment
# are on elided lines — confirm against the full source.
163 function check_connection {
164 local total_attempts=60
168 echo '[INFO] Attempting to get into Salt master ...'
170 # wait until ssh on Salt master is available
171 # shellcheck disable=SC2034
172 for attempt in $(seq "${total_attempts}"); do
173 # shellcheck disable=SC2086
# SSH_OPTS intentionally unquoted: it carries multiple ssh options.
174 ssh ${SSH_OPTS} "ubuntu@${SALT_MASTER}" uptime
# Exit status 0 -> reachable, stop retrying; anything else -> keep waiting.
176 0) echo "${attempt}> Success"; break ;;
177 *) echo "${attempt}/${total_attempts}> ssh server ain't ready yet, waiting for ${sleep_time} seconds ..." ;;
# Convert a (simple, 2-space-indented) YAML file into shell variable
# assignments, nesting levels joined by '_' and prefixed by "$prefix".
# NOTE(review): heavily elided — the $s/$w/$prefix setup and parts of the awk
# program are on missing lines; do not modify without the full source.
184 function parse_yaml {
# \034 (ASCII FS) is used as a field separator unlikely to occur in data.
191 fs="$(echo @|tr @ '\034')"
# Strip document markers, then capture "indent / key / value" triples for
# quoted and unquoted scalar entries.
192 sed -e 's|---||g' -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
193 -e "s|^\($s\)\($w\)$s[:-]$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
# awk: derive nesting depth from indentation (2 spaces per level) ...
195 indent = length($1)/2;
# ... drop stale deeper-level name components when the depth decreases ...
197 for (i in vname) {if (i > indent) {delete vname[i]}}
198 if (length($3) > 0) {
# ... and emit prefix + joined path + key as an array assignment.
199 vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
200 printf("%s%s%s=(\"%s\")\n", "'"$prefix"'",vn, $2, $3);
206 # Execute in a subshell to prevent local variable override during recursion
# NOTE(review): the function header is on an elided line (presumably
# 'function wait_for' opening a subshell body — confirm); retries an
# arbitrary command string until it succeeds or attempts run out.
208 local total_attempts=$1; shift
# NOTE(review): cmdstr and sleep_time are assigned on elided lines.
211 echo "[NOTE] Waiting for cmd to return success: ${cmdstr}"
212 # shellcheck disable=SC2034
213 for attempt in $(seq "${total_attempts}"); do
214 # shellcheck disable=SC2015
# Success returns immediately; '|| true' keeps 'set -e' callers alive
# between attempts (SC2015 waived deliberately).
215 eval "${cmdstr}" && return 0 || true
# Progress dot, then back off before the next attempt.
216 echo -n '.'; sleep "${sleep_time}"