-#!/bin/bash -ex
+#!/bin/bash -e
# shellcheck disable=SC2034,SC2154,SC1090,SC1091
##############################################################################
# Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
# BEGIN of Exit handlers
#
do_exit () {
+ local RC=$?
clean
- echo "Exiting ..."
+ cleanup_mounts
+ if [ ${RC} -eq 0 ]; then
+ notify "\n[OK] MCP: Openstack installation finished successfully!\n\n" 2
+ else
+ notify "\n[ERROR] MCP: Openstack installation threw a fatal error!\n\n"
+ fi
}
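+# NOTE: do_exit is wired to the EXIT trap further below, so RC captures the
+# script's overall exit code; cleanup_mounts is assumed to come from lib.sh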
#
# End of Exit handlers
$(notify "USAGE:" 2)
$(basename "$0") -b base-uri -l lab-name -p pod-name -s deploy-scenario \\
[-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]] \\
- [-S storage-dir] [-L /path/to/log/file.tar.gz] [-f] [-F] [-e] [-d]
+ [-S storage-dir] [-L /path/to/log/file.tar.gz] \\
+ [-f[f]] [-F] [-e | -E[E]] [-d] [-D]
$(notify "OPTIONS:" 2)
-b Base-uri for the stack-configuration structure
-B Bridge(s): 1st usage = PXE, 2nd = Mgmt, 3rd = Internal, 4th = Public
-d Dry-run
+ -D Debug logging
-e Do not launch environment deployment
- -f Deploy on existing Salt master
+ -E Remove existing VCP VMs (use twice to redeploy baremetal nodes)
+ -f Deploy on existing Salt master (use twice to also skip config sync)
-F Only create a Salt master
-h Print this message and exit
-l Lab-name
while "mcpcontrol" is used to provision the infrastructure VMs only.
The default is 'pxebr'.
-d Dry-run - Produce deploy config files, but do not execute deploy
+-D Debug logging - Enable extra logging in the shell deploy scripts (set -x)
-e Do not launch environment deployment
--f Deploy on existing Salt master
+-E Remove existing VCP VMs. It will destroy and undefine all VCP VMs
+ currently defined on cluster KVM nodes. If specified twice (e.g. -E -E),
+ baremetal nodes (VCP too, implicitly) will be removed, then reprovisioned.
+ Only applicable to baremetal deploys.
+-f Deploy on existing Salt master. It will skip infrastructure VM creation,
+ but it will still sync the reclass configuration from the current repo to
+ the Salt Master node. If specified twice (e.g. -f -f), the config sync
+ will also be skipped.
-F Only create a Salt master
-h Print this message and exit
-L Deployment log path and name, e.g. -L /home/jenkins/job.log.tar.gz
##############################################################################
# BEGIN of variables to customize
#
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
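+# e.g. invoking 'CI_DEBUG=1 ./deploy.sh ...' (illustrative) enables 'set -x'
+# tracing here; the same flag is later passed through to the state scripts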
REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd)
STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd)
USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
+ERASE_ENV=${ERASE_ENV:-0}
source "${DEPLOY_DIR}/globals.sh"
+source "${DEPLOY_DIR}/lib.sh"
#
# END of variables to customize
#
set +x
OPNFV_BRIDGE_IDX=0
-while getopts "b:B:dfFl:L:p:s:S:he" OPTION
+while getopts "b:B:dDfEFl:L:p:s:S:he" OPTION
do
case $OPTION in
b)
if [ -n "${bridge}" ]; then
OPNFV_BRIDGES[${OPNFV_BRIDGE_IDX}]="${bridge}"
fi
- OPNFV_BRIDGE_IDX=$((OPNFV_BRIDGE_IDX + 1))
+ ((OPNFV_BRIDGE_IDX+=1))
done
IFS=${OIFS}
;;
d)
DRY_RUN=1
;;
+ D)
+ CI_DEBUG=1
+ ;;
f)
- USE_EXISTING_INFRA=1
+ ((USE_EXISTING_INFRA+=1))
;;
F)
INFRA_CREATION_ONLY=1
e)
NO_DEPLOY_ENVIRONMENT=1
;;
+ E)
+ ((ERASE_ENV+=1))
+ ;;
l)
TARGET_LAB=${OPTARG}
;;
exit 1
fi
-if ! virsh list >/dev/null 2>&1; then
- notify "[ERROR] This script requires hypervisor access\n" 1>&2
- exit 1
-fi
-
# Validate mandatory arguments are set
if [ -z "${TARGET_LAB}" ] || [ -z "${TARGET_POD}" ] || \
[ -z "${DEPLOY_SCENARIO}" ]; then
exit 1
fi
-set -x
+[[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
# Enable the automatic exit trap
trap do_exit SIGINT SIGTERM EXIT
# Install required packages
[ -n "$(command -v apt-get)" ] && sudo apt-get install -y \
- git make rsync mkisofs curl virtinst cpu-checker qemu-kvm
+ git make rsync mkisofs curl virtinst cpu-checker qemu-kvm uuid-runtime \
+ libvirt-bin
[ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \
- git make rsync genisoimage curl virt-install qemu-kvm
+ git make rsync genisoimage curl virt-install qemu-kvm util-linux \
+ libvirt
+
+# For baremetal deploys, Python is indirectly required for parsing the
+# PDF (Pod Descriptor File)
+if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
+ [ -n "$(command -v apt-get)" ] && sudo apt-get install -y \
+ python python-ipaddress python-jinja2 python-yaml
+ [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \
+ python python-ipaddress python-jinja2 python-yaml
+fi
-if [ "$(uname -i)" = "aarch64" ]; then
- [ -n "$(command -v apt-get)" ] && sudo apt-get install -y vgabios && \
- sudo ln -sf /usr/share/vgabios/vgabios.bin /usr/share/qemu/vgabios-stdvga.bin
- [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken vgabios
+# AArch64 VMs use AAVMF (guest UEFI)
+if [ "$(uname -m)" = 'aarch64' ]; then
+ [ -n "$(command -v apt-get)" ] && sudo apt-get install -y qemu-efi
+ [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken AAVMF
+fi
+
+if ! virsh list >/dev/null 2>&1; then
+ notify "[ERROR] This script requires hypervisor access\n" 1>&2
+ exit 1
fi
# Clone git submodules and apply our patches
# Check scenario file existence
SCENARIO_DIR="../config/scenario"
if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then
- notify "[WARN] ${DEPLOY_SCENARIO}.yaml not found! \
- Setting simplest scenario (os-nosdn-nofeature-noha)\n" 3
+ notify "[WARN] ${DEPLOY_SCENARIO}.yaml not found!\n" 3
+ notify "[WARN] Setting simplest scenario (os-nosdn-nofeature-noha)\n" 3
DEPLOY_SCENARIO='os-nosdn-nofeature-noha'
if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then
notify "[ERROR] Scenario definition file is missing!\n" 1>&2
# Get required infra deployment data
set +x
-source lib.sh
eval "$(parse_yaml "${SCENARIO_DIR}/defaults-$(uname -i).yaml")"
eval "$(parse_yaml "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml")"
eval "$(parse_yaml "${LOCAL_PDF_RECLASS}")"
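+# parse_yaml (lib.sh) is assumed to flatten YAML into shell variables by
+# joining nested keys with '_', e.g. a hypothetical 'virtual: cfg01: ram: 4'
+# would yield virtual_cfg01_ram=4, matching the lookups below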
-set -x
+[[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
export CLUSTER_DOMAIN=${cluster_domain}
-declare -A virtual_nodes_ram virtual_nodes_vcpus
+# Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
for node in "${virtual_nodes[@]}"; do
virtual_custom_ram="virtual_${node}_ram"
virtual_custom_vcpus="virtual_${node}_vcpus"
- virtual_nodes_ram[$node]=${!virtual_custom_ram:-$virtual_default_ram}
- virtual_nodes_vcpus[$node]=${!virtual_custom_vcpus:-$virtual_default_vcpus}
+ virtual_nodes_data+="${node},"
+ virtual_nodes_data+="${!virtual_custom_ram:-$virtual_default_ram},"
+ virtual_nodes_data+="${!virtual_custom_vcpus:-$virtual_default_vcpus}|"
done
+virtual_nodes_data=${virtual_nodes_data%|}
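+# e.g. virtual_nodes_data='cfg01,4096,2|mas01,6144,4' (hypothetical values)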
+
+# Serialize repos, packages to (pre-)install/remove for:
+# - foundation node VM base image (virtual: all VMs, baremetal: cfg01|mas01)
+# - virtualized control plane VM base image (only when VCP is used)
+base_image_flavors=common
+if [[ "${cluster_states[*]}" =~ virtual_control ]]; then
+ base_image_flavors+=" control"
+fi
+for sc in ${base_image_flavors}; do
+ for va in apt_keys apt_repos pkg_install pkg_remove; do
+ key=virtual_${sc}_${va}
+ eval "${key}=\${${key}[@]// /|}"
+ eval "${key}=\${${key}// /,}"
+ virtual_repos_pkgs+="${!key}^"
+ done
+done
+virtual_repos_pkgs=${virtual_repos_pkgs%^}
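+# Net effect of the eval juggling above: spaces inside an array element
+# become '|', elements are joined with ',', and the four serialized
+# variables per flavor are joined with '^', e.g. (sketch, made-up values):
+# virtual_repos_pkgs='key1,key2^deb|http://r1,deb|http://r2^pkg1,pkg2^pkg3'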
# Expand reclass and virsh network templates
-for tp in "${RECLASS_CLUSTER_DIR}/all-mcp-ocata-common/opnfv/"*.template \
+for tp in "${RECLASS_CLUSTER_DIR}/all-mcp-arch-common/opnfv/"*.template \
net_*.template; do
eval "cat <<-EOF
$(<"${tp}")
EOF" 2> /dev/null > "${tp%.template}"
done
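+# The eval/heredoc above expands shell variables in-place, so a template
+# line such as 'domain: ${CLUSTER_DOMAIN}' (hypothetical) ends up rendered
+# in the sibling file with the '.template' suffix stripped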
+# Convert Pharos-compatible PDF to reclass network definitions
+if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
+ find "${RECLASS_CLUSTER_DIR}/${CLUSTER_DOMAIN%.local}" \
+ "${RECLASS_CLUSTER_DIR}/${DEPLOY_TYPE}-mcp-ocata-common" \
+ -name '*.j2' | while read -r tp
+ do
+ if ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
+ -j "${tp}" > "${tp%.j2}"; then
+ notify "[ERROR] Could not convert PDF to reclass network defs!\n"
+ exit 1
+ fi
+ done
+fi
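+# e.g. a hypothetical 'networks.yml.j2' is rendered against the PDF and
+# written next to the template as 'networks.yml' ('${tp%.j2}')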
+
# Map PDF networks 'admin', 'mgmt', 'private' and 'public' to bridge names
BR_NAMES=('admin' 'mgmt' 'private' 'public')
BR_NETS=( \
if [ ${DRY_RUN} -eq 1 ]; then
notify "[NOTE] Dry run, skipping all deployment tasks\n" 2 1>&2
exit 0
-elif [ ${USE_EXISTING_INFRA} -eq 1 ]; then
+elif [ ${USE_EXISTING_INFRA} -gt 0 ]; then
notify "[NOTE] Use existing infra\n" 2 1>&2
check_connection
else
generate_ssh_key
- prepare_vms virtual_nodes "${base_image}" "${STORAGE_DIR}"
- create_networks OPNFV_BRIDGES
- create_vms virtual_nodes virtual_nodes_ram virtual_nodes_vcpus \
- OPNFV_BRIDGES "${STORAGE_DIR}"
+ prepare_vms "${base_image}" "${STORAGE_DIR}" "${virtual_repos_pkgs}" \
+ "${virtual_nodes[@]}"
+ create_networks "${OPNFV_BRIDGES[@]}"
+ create_vms "${STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}"
update_mcpcontrol_network
- start_vms virtual_nodes
+ start_vms "${virtual_nodes[@]}"
check_connection
- ./salt.sh "${LOCAL_PDF_RECLASS}"
+fi
+if [ ${USE_EXISTING_INFRA} -lt 2 ]; then
+ wait_for 5 "./salt.sh ${LOCAL_PDF_RECLASS}"
fi
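+# wait_for (lib.sh) is assumed to retry its quoted command, here up to 5
+# attempts, to ride out transient SSH/network failures during bring-up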
# Openstack cluster setup
for state in "${cluster_states[@]}"; do
notify "[STATE] Applying state: ${state}\n" 2
# shellcheck disable=SC2086,2029
- ssh ${SSH_OPTS} "${SSH_SALT}" \
- sudo "/root/fuel/mcp/config/states/${state} || true"
+ wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
+ CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
+ /root/fuel/mcp/config/states/${state}"
done
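+# CI_DEBUG/ERASE_ENV are set on the sudo command line above so they survive
+# sudo's environment scrubbing on the Salt master (illustrative note)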
fi
popd > /dev/null
-notify "\n[DONE] MCP: Openstack installation finished succesfully!\n\n" 2
-
#
# END of main
##############################################################################