X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=ci%2Fdeploy.sh;h=1b4dd95c8f97a671d8d13f2c4f094b5b3743d0f9;hb=2050647bd70043be2eb90a45b7173a25570b8c8d;hp=a39d4946b40e57bcafee5b4c55785b7288acc696;hpb=4023a639de3873bbae3e2b112f652791c5962ffa;p=fuel.git

diff --git a/ci/deploy.sh b/ci/deploy.sh
index a39d4946b..1b4dd95c8 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -1,6 +1,5 @@
-#!/bin/bash
-# shellcheck disable=SC2034,SC2154,SC1091
-set -ex
+#!/bin/bash -e
+# shellcheck disable=SC2034,SC2154,SC1090,SC1091
 ##############################################################################
 # Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
 # jonas.bjurel@ericsson.com
@@ -14,8 +13,14 @@ set -ex
 # BEGIN of Exit handlers
 #
 do_exit () {
+    local RC=$?
     clean
-    echo "Exiting ..."
+    cleanup_mounts
+    if [ ${RC} -eq 0 ]; then
+        notify "\n[OK] MCP: Openstack installation finished succesfully!\n\n" 2
+    else
+        notify "\n[ERROR] MCP: Openstack installation threw a fatal error!\n\n"
+    fi
 }
 #
 # End of Exit handlers
@@ -32,25 +37,25 @@ $(notify "$(basename "$0"): Deploy the Fuel@OPNFV MCP stack" 3)
 
 $(notify "USAGE:" 2)
   $(basename "$0") -b base-uri -l lab-name -p pod-name -s deploy-scenario \\
-  [-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]]
+  [-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]] \\
+  [-S storage-dir] [-L /path/to/log/file.tar.gz] \\
+  [-f[f]] [-F] [-e | -E[E]] [-d] [-D]
 
 $(notify "OPTIONS:" 2)
   -b  Base-uri for the stack-configuration structure
   -B  Bridge(s): 1st usage = PXE, 2nd = Mgmt, 3rd = Internal, 4th = Public
+  -d  Dry-run
+  -D  Debug logging
+  -e  Do not launch environment deployment
+  -E  Remove existing VCP VMs (use twice to redeploy baremetal nodes)
+  -f  Deploy on existing Salt master (use twice to also skip config sync)
+  -F  Do only create a Salt master
   -h  Print this message and exit
   -l  Lab-name
   -p  Pod-name
   -s  Deploy-scenario short-name
-
-$(notify "DISABLED OPTIONS (not yet supported with MCP):" 3)
-  -d  (disabled) Dry-run
-  -e  (disabled) Do not launch environment deployment
-  -f  (disabled) Deploy on existing Salt master
-  -F  (disabled) Do only create a Salt master
-  -i  (disabled) iso url
-  -L  (disabled) Deployment log path and file name
-  -S  (disabled) Storage dir for VM images
-  -T  (disabled) Timeout, in minutes, for the deploy.
+  -S  Storage dir for VM images
+  -L  Deployment log path and file name
 
 $(notify "Description:" 2)
 Deploys the Fuel@OPNFV stack on the indicated lab resource.
@@ -60,41 +65,50 @@ It depends on the OPNFV official configuration directory/file structure
 and provides a fairly simple mechanism to execute a deployment.
 
 $(notify "Input parameters to the build script are:" 2)
--b Base URI to the configuration directory (needs to be provided in a URI
-   style, it can be a local resource: file:// or a remote resource http(s)://)
+-b Base URI to the configuration directory (needs to be provided in URI style,
+   it can be a local resource: file:// or a remote resource http(s)://).
+   A POD Descriptor File (PDF) should be available at:
+   <base-uri>/labs/<lab-name>/<pod-name>.yaml
+   The default is './mcp/config'.
 -B Bridges to be used by deploy script. It can be specified several times,
    or as a comma separated list of bridges, or both: -B br1 -B br2,br3
    First occurence sets PXE Brige, next Mgmt, then Internal and Public.
   For an empty value, the deploy script will use virsh to create the default
   expected network (e.g. -B pxe,,,public will use existing "pxe" and "public"
   bridges, respectively create "mgmt" and "internal"). 
- The default is pxebr. + Note that a virtual network "mcpcontrol" is always created. For virtual + deploys, "mcpcontrol" is also used for PXE, leaving the PXE bridge unused. + For baremetal deploys, PXE bridge is used for baremetal node provisioning, + while "mcpcontrol" is used to provision the infrastructure VMs only. + The default is 'pxebr'. +-d Dry-run - Produce deploy config files, but do not execute deploy +-D Debug logging - Enable extra logging in sh deploy scripts (set -x) +-e Do not launch environment deployment +-E Remove existing VCP VMs. It will destroy and undefine all VCP VMs + currently defined on cluster KVM nodes. If specified twice (e.g. -E -E), + baremetal nodes (VCP too, implicitly) will be removed, then reprovisioned. + Only applicable for baremetal deploys. +-f Deploy on existing Salt master. It will skip infrastructure VM creation, + but it will still sync reclass configuration from current repo to Salt + Master node. If specified twice (e.g. -f -f), config sync will also be + skipped. +-F Do only create a Salt master -h Print this message and exit +-L Deployment log path and name, eg. -L /home/jenkins/job.log.tar.gz -l Lab name as defined in the configuration directory, e.g. lf --p POD name as defined in the configuration directory, e.g. pod-1 +-p POD name as defined in the configuration directory, e.g. pod2 -s Deployment-scenario, this points to a short deployment scenario name, which - has to be defined in config directory (e.g. os-odl_l2-nofeature-noha). - -$(notify "Disabled input parameters (not yet supported with MCP):" 3) --d (disabled) Dry-run - Produce deploy config files, but do not execute deploy --f (disabled) Deploy on existing Salt master --e (disabled) Do not launch environment deployment --F (disabled) Do only create a Salt master --L (disabled) Deployment log path and name, eg. -L /home/jenkins/job.log.tar.gz --S (disabled) Storage dir for VM images, default is fuel/deploy/images --T (disabled) Timeout, in minutes, for the deploy. - It defaults to using the DEPLOY_TIMEOUT environment variable when defined. --i (disabled) .iso image to be deployed (needs to be provided in a URI - style, it can be a local resource: file:// or a remote resource http(s)://) + has to be defined in config directory (e.g. os-odl-nofeature-ha). 
+-S Storage dir for VM images, default is mcp/deploy/images $(notify "[NOTE] sudo & virsh priviledges are needed for this script to run" 3) Example: $(notify "sudo $(basename "$0") \\ - -b file:///home/jenkins/lab-config \\ - -l lf -p pod1 \\ - -s os-odl_l2-nofeature-noha" 2) + -b file:///home/jenkins/securedlab \\ + -l lf -p pod2 \\ + -s os-odl-nofeature-ha" 2) EOF } @@ -127,29 +141,26 @@ clean() { ############################################################################## # BEGIN of variables to customize # -SCRIPT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")") -DEPLOY_DIR=$(cd "${SCRIPT_PATH}/../mcp/scripts"; pwd) +CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x +REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..") +DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd) +STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd) +RECLASS_CLUSTER_DIR=$(cd "${REPO_ROOT_PATH}/mcp/reclass/classes/cluster"; pwd) DEPLOY_TYPE='baremetal' -OPNFV_BRIDGES=('pxe' 'mgmt' 'internal' 'public') +OPNFV_BRIDGES=('pxebr' 'mgmt' 'internal' 'public') URI_REGEXP='(file|https?|ftp)://.*' +BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/config" -export SSH_KEY=${SSH_KEY:-mcp.rsa} -export SALT_MASTER=${SALT_MASTER_IP:-192.168.10.100} -export SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${SSH_KEY}" +# Customize deploy workflow +DRY_RUN=${DRY_RUN:-0} +USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0} +INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0} +NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0} +ERASE_ENV=${ERASE_ENV:-0} + +source "${DEPLOY_DIR}/globals.sh" +source "${DEPLOY_DIR}/lib.sh" -# Variables below are disabled for now, to be re-introduced or removed later -set +x -USE_EXISTING_FUEL='' -FUEL_CREATION_ONLY='' -NO_DEPLOY_ENVIRONMENT='' -STORAGE_DIR='' -DRY_RUN=0 -if ! [ -z "${DEPLOY_TIMEOUT}" ]; then - DEPLOY_TIMEOUT="-dt ${DEPLOY_TIMEOUT}" -else - DEPLOY_TIMEOUT="" -fi -set -x # # END of variables to customize ############################################################################## @@ -159,7 +170,7 @@ set -x # set +x OPNFV_BRIDGE_IDX=0 -while getopts "b:B:dfFl:L:p:s:S:T:i:he" OPTION +while getopts "b:B:dDfEFl:L:p:s:S:he" OPTION do case $OPTION in b) @@ -178,32 +189,33 @@ do if [ -n "${bridge}" ]; then OPNFV_BRIDGES[${OPNFV_BRIDGE_IDX}]="${bridge}" fi - OPNFV_BRIDGE_IDX=$[OPNFV_BRIDGE_IDX + 1] + ((OPNFV_BRIDGE_IDX+=1)) done IFS=${OIFS} ;; d) - notify '' 3 "${OPTION}"; continue DRY_RUN=1 ;; + D) + CI_DEBUG=1 + ;; f) - notify '' 3 "${OPTION}"; continue - USE_EXISTING_FUEL='-nf' + ((USE_EXISTING_INFRA+=1)) ;; F) - notify '' 3 "${OPTION}"; continue - FUEL_CREATION_ONLY='-fo' + INFRA_CREATION_ONLY=1 ;; e) - notify '' 3 "${OPTION}"; continue - NO_DEPLOY_ENVIRONMENT='-nde' + NO_DEPLOY_ENVIRONMENT=1 + ;; + E) + ((ERASE_ENV+=1)) ;; l) TARGET_LAB=${OPTARG} ;; L) - notify '' 3 "${OPTION}"; continue - DEPLOY_LOG="-log ${OPTARG}" + DEPLOY_LOG="${OPTARG}" ;; p) TARGET_POD=${OPTARG} @@ -215,22 +227,8 @@ do DEPLOY_SCENARIO=${OPTARG} ;; S) - notify '' 3 "${OPTION}"; continue if [[ ${OPTARG} ]]; then - STORAGE_DIR="-s ${OPTARG}" - fi - ;; - T) - notify '' 3 "${OPTION}"; continue - DEPLOY_TIMEOUT="-dt ${OPTARG}" - ;; - i) - notify '' 3 "${OPTION}"; continue - ISO=${OPTARG} - if [[ ! $ISO =~ ${URI_REGEXP} ]]; then - notify "[ERROR] -i $ISO - invalid URI\n" - usage - exit 1 + STORAGE_DIR="${OPTARG}" fi ;; h) @@ -245,17 +243,11 @@ do done if [[ "$(sudo whoami)" != 'root' ]]; then - notify "This script requires sudo rights\n" 1>&2 - exit 1 -fi - -if ! 
virsh list >/dev/null 2>&1; then - notify "This script requires hypervisor access\n" 1>&2 + notify "[ERROR] This script requires sudo rights\n" 1>&2 exit 1 fi # Validate mandatory arguments are set -# FIXME(armband): Bring back support for BASE_CONFIG_URI if [ -z "${TARGET_LAB}" ] || [ -z "${TARGET_POD}" ] || \ [ -z "${DEPLOY_SCENARIO}" ]; then notify "[ERROR] At least one of the mandatory args is missing!\n" 1>&2 @@ -263,12 +255,12 @@ if [ -z "${TARGET_LAB}" ] || [ -z "${TARGET_POD}" ] || \ exit 1 fi -set -x +[[ "${CI_DEBUG}" =~ (false|0) ]] || set -x # Enable the automatic exit trap trap do_exit SIGINT SIGTERM EXIT -# Set no restrictive umask so that Jenkins can removeeee any residuals +# Set no restrictive umask so that Jenkins can remove any residuals umask 0000 clean @@ -279,56 +271,191 @@ pushd "${DEPLOY_DIR}" > /dev/null # Install required packages [ -n "$(command -v apt-get)" ] && sudo apt-get install -y \ - git make rsync mkisofs curl virtinst cpu-checker qemu-kvm -[ -n "$(command -v yum)" ] && sudo yum install -y \ - git make rsync genisoimage curl virt-install qemu-kvm - -if [ "$(uname -i)" = "aarch64" ]; then - [ -n "$(command -v apt-get)" ] && sudo apt-get install -y vgabios && \ - sudo ln -sf /usr/share/vgabios/vgabios.bin /usr/share/qemu/vgabios-stdvga.bin - [ -n "$(command -v yum)" ] && sudo yum install -y vgabios + git make rsync mkisofs curl virtinst cpu-checker qemu-kvm uuid-runtime \ + libvirt-bin cloud-guest-utils e2fsprogs kpartx +[ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \ + git make rsync genisoimage curl virt-install qemu-kvm util-linux \ + libvirt cloud-utils-growpart e2fsprogs kpartx + +# For baremetal, python is indirectly required for PDF parsing +if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then + [ -n "$(command -v apt-get)" ] && sudo apt-get install -y \ + python python-ipaddress python-jinja2 python-yaml + [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \ + python python-ipaddress python-jinja2 python-yaml +fi + +# AArch64 VMs use AAVMF (guest UEFI) +if [ "$(uname -m)" = 'aarch64' ]; then + [ -n "$(command -v apt-get)" ] && sudo apt-get install -y qemu-efi + [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken AAVMF +fi + +if ! virsh list >/dev/null 2>&1; then + notify "[ERROR] This script requires hypervisor access\n" 1>&2 + exit 1 +fi + +# Clone git submodules and apply our patches +make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import + +# Convert Pharos-compatible POD Descriptor File (PDF) to reclass model input +PHAROS_GEN_CONFIG_SCRIPT="./pharos/config/utils/generate_config.py" +PHAROS_INSTALLER_ADAPTER="./pharos/config/installers/fuel/pod_config.yml.j2" +BASE_CONFIG_PDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}.yaml" +BASE_CONFIG_IDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/idf-${TARGET_POD}.yaml" +LOCAL_PDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_PDF}")" +LOCAL_IDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_IDF}")" +LOCAL_PDF_RECLASS="${STORAGE_DIR}/pod_config.yml" +if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then + if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then + notify "[ERROR] Could not retrieve PDF (Pod Descriptor File)!\n" 1>&2 + exit 1 + else + notify "[WARN] Could not retrieve PDF (Pod Descriptor File)!\n" 3 + fi +elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then + notify "[WARN] POD has no IDF (Installer Descriptor File)!\n" 3 +elif ! 
"${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \ + -j "${PHAROS_INSTALLER_ADAPTER}" > "${LOCAL_PDF_RECLASS}"; then + notify "[ERROR] Could not convert PDF to reclass model input!\n" 1>&2 + exit 1 fi # Check scenario file existence -if [[ ! -f ../config/scenario/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml ]]; then - notify "[WARN] ${DEPLOY_SCENARIO}.yaml not found! \ - Setting simplest scenario (os-nosdn-nofeature-noha)\n" 3 +SCENARIO_DIR="../config/scenario" +if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then + notify "[WARN] ${DEPLOY_SCENARIO}.yaml not found!\n" 3 + notify "[WARN] Setting simplest scenario (os-nosdn-nofeature-noha)\n" 3 DEPLOY_SCENARIO='os-nosdn-nofeature-noha' + if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then + notify "[ERROR] Scenario definition file is missing!\n" 1>&2 + exit 1 + fi +fi + +# Check defaults file existence +if [ ! -f "${SCENARIO_DIR}/defaults-$(uname -i).yaml" ]; then + notify "[ERROR] Scenario defaults file is missing!\n" 1>&2 + exit 1 fi # Get required infra deployment data -source lib.sh -eval "$(parse_yaml "../config/scenario/${DEPLOY_TYPE}/defaults.yaml")" -eval "$(parse_yaml "../config/scenario/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml")" +set +x +eval "$(parse_yaml "${SCENARIO_DIR}/defaults-$(uname -i).yaml")" +eval "$(parse_yaml "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml")" +eval "$(parse_yaml "${LOCAL_PDF_RECLASS}")" +[[ "${CI_DEBUG}" =~ (false|0) ]] || set -x export CLUSTER_DOMAIN=${cluster_domain} -declare -A virtual_nodes_ram virtual_nodes_vcpus +# Serialize vnode data as ',,|,,[...]' for node in "${virtual_nodes[@]}"; do virtual_custom_ram="virtual_${node}_ram" virtual_custom_vcpus="virtual_${node}_vcpus" - virtual_nodes_ram[$node]=${!virtual_custom_ram:-$virtual_default_ram} - virtual_nodes_vcpus[$node]=${!virtual_custom_vcpus:-$virtual_default_vcpus} + virtual_nodes_data+="${node}," + virtual_nodes_data+="${!virtual_custom_ram:-$virtual_default_ram}," + virtual_nodes_data+="${!virtual_custom_vcpus:-$virtual_default_vcpus}|" +done +virtual_nodes_data=${virtual_nodes_data%|} + +# Serialize repos, packages to (pre-)install/remove for: +# - foundation node VM base image (virtual: all VMs, baremetal: cfg01|mas01) +# - virtualized control plane VM base image (only when VCP is used) +base_image_flavors=common +if [[ "${cluster_states[*]}" =~ virtual_control ]]; then + base_image_flavors+=" control" +fi +for sc in ${base_image_flavors}; do + for va in apt_keys apt_repos pkg_install pkg_remove; do + key=virtual_${sc}_${va} + eval "${key}=\${${key}[@]// /|}" + eval "${key}=\${${key}// /,}" + virtual_repos_pkgs+="${!key}^" + done +done +virtual_repos_pkgs=${virtual_repos_pkgs%^} + +# Expand reclass and virsh network templates +for tp in "${RECLASS_CLUSTER_DIR}/all-mcp-arch-common/opnfv/"*.template \ + net_*.template; do + eval "cat <<-EOF + $(<"${tp}") + EOF" 2> /dev/null > "${tp%.template}" done -# Infra setup -generate_ssh_key -prepare_vms virtual_nodes "${base_image}" -create_networks OPNFV_BRIDGES -create_vms virtual_nodes virtual_nodes_ram virtual_nodes_vcpus OPNFV_BRIDGES -update_pxe_network OPNFV_BRIDGES -start_vms virtual_nodes -check_connection +# Convert Pharos-compatible PDF to reclass network definitions +if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then + find "${RECLASS_CLUSTER_DIR}" -name '*.j2' | while read -r tp + do + if ! 
"${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \ + -j "${tp}" > "${tp%.j2}"; then + notify "[ERROR] Could not convert PDF to reclass network defs!\n" + exit 1 + fi + done +fi -./salt.sh +# Map PDF networks 'admin', 'mgmt', 'private' and 'public' to bridge names +BR_NAMES=('admin' 'mgmt' 'private' 'public') +BR_NETS=( \ + "${parameters__param_opnfv_maas_pxe_address}" \ + "${parameters__param_opnfv_infra_config_address}" \ + "${parameters__param_opnfv_openstack_compute_node01_tenant_address}" \ + "${parameters__param_opnfv_openstack_compute_node01_external_address}" \ +) +for ((i = 0; i < ${#BR_NETS[@]}; i++)); do + br_jump=$(eval echo "\$parameters__param_opnfv_jump_bridge_${BR_NAMES[i]}") + if [ -n "${br_jump}" ] && [ "${br_jump}" != 'None' ] && \ + [ -d "/sys/class/net/${br_jump}/bridge" ]; then + notify "[OK] Bridge found for '${BR_NAMES[i]}': ${br_jump}\n" 2 + OPNFV_BRIDGES[${i}]="${br_jump}" + elif [ -n "${BR_NETS[i]}" ]; then + bridge=$(ip addr | awk "/${BR_NETS[i]%.*}./ {print \$NF; exit}") + if [ -n "${bridge}" ] && [ -d "/sys/class/net/${bridge}/bridge" ]; then + notify "[OK] Bridge found for net ${BR_NETS[i]%.*}.0: ${bridge}\n" 2 + OPNFV_BRIDGES[${i}]="${bridge}" + fi + fi +done +notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}\n" 2 + +# Infra setup +if [ ${DRY_RUN} -eq 1 ]; then + notify "[NOTE] Dry run, skipping all deployment tasks\n" 2 1>&2 + exit 0 +elif [ ${USE_EXISTING_INFRA} -gt 0 ]; then + notify "[NOTE] Use existing infra\n" 2 1>&2 + check_connection +else + generate_ssh_key + prepare_vms "${base_image}" "${STORAGE_DIR}" "${virtual_repos_pkgs}" \ + "${virtual_nodes[@]}" + create_networks "${OPNFV_BRIDGES[@]}" + create_vms "${STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}" + update_mcpcontrol_network + start_vms "${virtual_nodes[@]}" + check_connection +fi +if [ ${USE_EXISTING_INFRA} -lt 2 ]; then + wait_for 5 "./salt.sh ${LOCAL_PDF_RECLASS}" +fi # Openstack cluster setup -for state in "${cluster_states[@]}"; do - notify "STATE: ${state}\n" 2 - # shellcheck disable=SC2086,2029 - ssh ${SSH_OPTS} "ubuntu@${SALT_MASTER}" \ - sudo "/root/fuel/mcp/config/states/${state}" -done +set +x +if [ ${INFRA_CREATION_ONLY} -eq 1 ] || [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then + notify "[NOTE] Skip openstack cluster setup\n" 2 +else + for state in "${cluster_states[@]}"; do + notify "[STATE] Applying state: ${state}\n" 2 + # shellcheck disable=SC2086,2029 + wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \ + CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \ + /root/fuel/mcp/config/states/${state}" + done +fi + +./log.sh "${DEPLOY_LOG}" popd > /dev/null