diff --git a/ci/deploy.sh b/ci/deploy.sh
index e2e4a1c6f..41ac60237 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -1,7 +1,7 @@
 #!/bin/bash -e
-# shellcheck disable=SC2034,SC2154,SC1090,SC1091
+# shellcheck disable=SC2034,SC2154,SC1090,SC1091,SC2155
 ##############################################################################
-# Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
 # jonas.bjurel@ericsson.com
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
@@ -14,12 +14,11 @@
 #
 do_exit () {
     local RC=$?
-    clean
-    cleanup_mounts
+    cleanup_mounts > /dev/null 2>&1
    if [ ${RC} -eq 0 ]; then
-        notify "\n[OK] MCP: Openstack installation finished successfully!\n\n" 2
+        notify_n "[OK] MCP: Openstack installation finished successfully!" 2
    else
-        notify "\n[ERROR] MCP: Openstack installation threw a fatal error!\n\n"
+        notify_n "[ERROR] MCP: Openstack installation threw a fatal error!"
    fi
 }
 #
@@ -36,51 +35,47 @@ xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

$(notify "$(basename "$0"): Deploy the Fuel@OPNFV MCP stack" 3)

$(notify "USAGE:" 2)
-  $(basename "$0") -b base-uri -l lab-name -p pod-name -s deploy-scenario \\
-    [-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]] \\
+  $(basename "$0") -l lab-name -p pod-name -s deploy-scenario \\
+    [-b Lab Config Base URI] \\
     [-S storage-dir] [-L /path/to/log/file.tar.gz] \\
-    [-f[f]] [-F] [-e | -E[E]] [-d] [-D]
+    [-f] [-F] [-e | -E[E]] [-d] [-D] [-N] [-m]

$(notify "OPTIONS:" 2)
  -b  Base-uri for the stack-configuration structure
-  -B  Bridge(s): 1st usage = PXE, 2nd = Mgmt, 3rd = Internal, 4th = Public
  -d  Dry-run
  -D  Debug logging
  -e  Do not launch environment deployment
  -E  Remove existing VCP VMs (use twice to redeploy baremetal nodes)
-  -f  Deploy on existing Salt master (use twice to also skip config sync)
-  -F  Do only create a Salt master
+  -f  Deploy on existing Salt master (use twice or more to skip states)
+  -F  Same as -e, do not launch environment deployment (legacy option)
  -h  Print this message and exit
  -l  Lab-name
  -p  Pod-name
+  -P  Skip installation of package dependencies
  -s  Deploy-scenario short-name
-  -S  Storage dir for VM images
+  -S  Storage dir for VM images and other deploy artifacts
  -L  Deployment log path and file name
+  -m  Use single socket CPU compute nodes (only affects virtual computes)
+  -N  Experimental: Do not virtualize control plane (novcp)

-$(notify "Description:" 2)
+$(notify_i "Description:" 2)
Deploys the Fuel@OPNFV stack on the indicated lab resource.

This script provides the Fuel@OPNFV deployment abstraction.
It depends on the OPNFV official configuration directory/file structure
and provides a fairly simple mechanism to execute a deployment.

-$(notify "Input parameters to the build script are:" 2)
+$(notify_i "Input parameters to the build script are:" 2)
-b Base URI to the configuration directory (needs to be provided in URI style,
   it can be a local resource: file:// or a remote resource http(s)://).
-   A POD Descriptor File (PDF) should be available at:
+   A POD Descriptor File (PDF) and its Installer Descriptor File (IDF)
+   companion should be available at:
    <base-uri>/labs/<lab-name>/<pod-name>.yaml
-   The default is './mcp/config'.
--B Bridges to be used by deploy script. It can be specified several times,
-   or as a comma separated list of bridges, or both: -B br1 -B br2,br3
-   First occurrence sets PXE Bridge, next Mgmt, then Internal and Public.
-   For an empty value, the deploy script will use virsh to create the default
-   expected network (e.g. -B pxe,,,public will use existing "pxe" and "public"
-   bridges, respectively create "mgmt" and "internal").
-   Note that a virtual network "mcpcontrol" is always created. For virtual
-   deploys, "mcpcontrol" is also used for PXE, leaving the PXE bridge unused.
-   For baremetal deploys, PXE bridge is used for baremetal node provisioning,
-   while "mcpcontrol" is used to provision the infrastructure VMs only.
-   The default is 'pxebr'.
+   <base-uri>/labs/<lab-name>/idf-<pod-name>.yaml
+   The default is using the git submodule tracking 'OPNFV Pharos' in
+   <./mcp/scripts/pharos>.
+   An example config is provided inside the current repo in
+   <./mcp/config>, automatically linked as <./mcp/scripts/pharos/labs/local>.
-d Dry-run - Produce deploy config files, but do not execute deploy
-D Debug logging - Enable extra logging in sh deploy scripts (set -x)
-e Do not launch environment deployment
@@ -90,25 +85,41 @@ $(notify "Input parameters to the build script are:" 2)
   Only applicable for baremetal deploys.
-f Deploy on existing Salt master. It will skip infrastructure VM creation,
   but it will still sync reclass configuration from current repo to Salt
-   Master node. If specified twice (e.g. -f -f), config sync will also be
-   skipped.
--F Do only create a Salt master
+   Master node.
+   Each additional use skips one more state file. For example, -fff would
+   skip the first 3 state files (e.g. virtual_init, maas, baremetal_init).
+-F Same as -e, do not launch environment deployment (legacy option)
-h Print this message and exit
-L Deployment log path and name, e.g. -L /home/jenkins/job.log.tar.gz
-l Lab name as defined in the configuration directory, e.g. lf
+   For the sample configuration in <./mcp/config>, lab name is 'local'.
-p POD name as defined in the configuration directory, e.g. pod2
+   For the sample configuration in <./mcp/config>, POD name is 'virtual1'
+   for virtual deployments or 'pod1' for baremetal (based on lf-pod2).
+-m Use single socket compute nodes. Instead of using the default NUMA-enabled
+   topology for virtual compute nodes created via libvirt, configure a
+   single guest CPU socket.
+-N Experimental: Instead of virtualizing the control plane (VCP), deploy
+   the control plane directly on baremetal nodes.
+-P Skip installing dependency distro packages on the current host.
+   This flag should only be used if you have kept back older packages that
+   would otherwise be upgraded, which is undesirable on the current system.
+   Note that without the required packages, deploy will fail.
-s Deployment-scenario, this points to a short deployment scenario name, which
   has to be defined in config directory (e.g. os-odl-nofeature-ha).
--S Storage dir for VM images, default is mcp/deploy/images
+-S Storage dir for VM images, default is /var/lib/opnfv/tmpdir
+   It is recommended to store the deploy artifacts on a fast disk, outside of
+   the current git repository (so clean operations won't erase it).

-$(notify "[NOTE] sudo & virsh privileges are needed for this script to run" 3)
+$(notify_i "[NOTE] sudo & virsh privileges are needed for this script to run" 3)

Example:

-$(notify "sudo $(basename "$0") \\
+$(notify_i "sudo $(basename "$0") \\
  -b file:///home/jenkins/securedlab \\
  -l lf -p pod2 \\
-  -s os-odl-nofeature-ha" 2)
+  -s os-odl-nofeature-ha \\
+  -S /home/jenkins/tmpdir" 2)

EOF
}

@@ -116,50 +127,29 @@ EOF
 # END of usage description
 ##############################################################################

-##############################################################################
-# BEGIN of colored notification wrapper
-#
-notify() {
-    tput setaf "${2:-1}" || true
-    echo -en "${1:-"[WARN] Unsupported opt arg: $3\\n"}"
-    tput sgr0
-}
-#
-# END of colored notification wrapper
-##############################################################################
-
-##############################################################################
-# BEGIN of deployment clean-up
-#
-clean() {
-    echo "Cleaning up deploy tmp directories"
-}
-#
-# END of deployment clean-up
-##############################################################################
-
 ##############################################################################
 # BEGIN of variables to customize
 #
 CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
-REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
-DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd)
-STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd)
-RECLASS_CLUSTER_DIR=$(cd "${REPO_ROOT_PATH}/mcp/reclass/classes/cluster"; pwd)
-DEPLOY_TYPE='baremetal'
-OPNFV_BRIDGES=('pxebr' 'mgmt' 'internal' 'public')
+MCP_REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
+DEPLOY_DIR=$(cd "${MCP_REPO_ROOT_PATH}/mcp/scripts"; pwd)
+MCP_STORAGE_DIR='/var/lib/opnfv/tmpdir'
 URI_REGEXP='(file|https?|ftp)://.*'
-BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/config"
+BASE_CONFIG_URI="file://${MCP_REPO_ROOT_PATH}/mcp/scripts/pharos"

 # Customize deploy workflow
 DRY_RUN=${DRY_RUN:-0}
+USE_EXISTING_PKGS=${USE_EXISTING_PKGS:-0}
 USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
-INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
 NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
 ERASE_ENV=${ERASE_ENV:-0}
+MCP_VCP=${MCP_VCP:-1}
+MCP_DOCKER_TAG=${MCP_DOCKER_TAG:-latest}
+MCP_CMP_SS=${MCP_CMP_SS:-0}

 source "${DEPLOY_DIR}/globals.sh"
 source "${DEPLOY_DIR}/lib.sh"
+source "${DEPLOY_DIR}/lib_template.sh"

 #
 # END of variables to customize
@@ -169,30 +159,17 @@ source "${DEPLOY_DIR}/lib.sh"
 # BEGIN of main
 #
 set +x
-OPNFV_BRIDGE_IDX=0
-while getopts "b:B:dDfEFl:L:p:s:S:he" OPTION
+while getopts "b:dDfEFl:L:mNp:Ps:S:he" OPTION
 do
     case $OPTION in
         b)
             BASE_CONFIG_URI=${OPTARG}
             if [[ ! 
$BASE_CONFIG_URI =~ ${URI_REGEXP} ]]; then - notify "[ERROR] -b $BASE_CONFIG_URI - invalid URI\n" + notify "[ERROR] -b $BASE_CONFIG_URI - invalid URI" usage exit 1 fi ;; - B) - OIFS=${IFS} - IFS=',' - OPT_BRIDGES=($OPTARG) - for bridge in "${OPT_BRIDGES[@]}"; do - if [ -n "${bridge}" ]; then - OPNFV_BRIDGES[${OPNFV_BRIDGE_IDX}]="${bridge}" - fi - ((OPNFV_BRIDGE_IDX+=1)) - done - IFS=${OIFS} - ;; d) DRY_RUN=1 ;; @@ -202,10 +179,7 @@ do f) ((USE_EXISTING_INFRA+=1)) ;; - F) - INFRA_CREATION_ONLY=1 - ;; - e) + F|e) NO_DEPLOY_ENVIRONMENT=1 ;; E) @@ -217,18 +191,24 @@ do L) DEPLOY_LOG="${OPTARG}" ;; + m) + MCP_CMP_SS=1 + ;; + N) + MCP_VCP=0 + ;; p) TARGET_POD=${OPTARG} - if [[ "${TARGET_POD}" =~ "virtual" ]]; then - DEPLOY_TYPE='virtual' - fi + ;; + P) + USE_EXISTING_PKGS=1 ;; s) DEPLOY_SCENARIO=${OPTARG} ;; S) if [[ ${OPTARG} ]]; then - STORAGE_DIR="${OPTARG}" + MCP_STORAGE_DIR="${OPTARG}" fi ;; h) @@ -236,23 +216,20 @@ do exit 0 ;; *) - notify "[ERROR] Arguments not according to new argument style\n" - exit 1 + notify_e "[ERROR] Unsupported arg, see -h for help" ;; esac done if [[ "$(sudo whoami)" != 'root' ]]; then - notify "[ERROR] This script requires sudo rights\n" 1>&2 - exit 1 + notify_e "[ERROR] This script requires sudo rights" fi # Validate mandatory arguments are set if [ -z "${TARGET_LAB}" ] || [ -z "${TARGET_POD}" ] || \ [ -z "${DEPLOY_SCENARIO}" ]; then - notify "[ERROR] At least one of the mandatory args is missing!\n" 1>&2 usage - exit 1 + notify_e "[ERROR] At least one of the mandatory args is missing!" fi [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x @@ -263,197 +240,102 @@ trap do_exit SIGINT SIGTERM EXIT # Set no restrictive umask so that Jenkins can remove any residuals umask 0000 -clean - pushd "${DEPLOY_DIR}" > /dev/null # Prepare the deploy config files based on lab/pod information, deployment # scenario, etc. -# Install required packages -[ -n "$(command -v apt-get)" ] && sudo apt-get install -y \ - git make rsync mkisofs curl virtinst cpu-checker qemu-kvm uuid-runtime \ - libvirt-bin cloud-guest-utils e2fsprogs -[ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \ - git make rsync genisoimage curl virt-install qemu-kvm util-linux \ - libvirt cloud-utils-growpart e2fsprogs - -# For baremetal, python is indirectly required for PDF parsing -if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then - [ -n "$(command -v apt-get)" ] && sudo apt-get install -y \ - python python-ipaddress python-jinja2 python-yaml - [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \ - python python-ipaddress python-jinja2 python-yaml -fi - -# AArch64 VMs use AAVMF (guest UEFI) -if [ "$(uname -m)" = 'aarch64' ]; then - [ -n "$(command -v apt-get)" ] && sudo apt-get install -y qemu-efi - [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken AAVMF +# Install required packages on jump server +sudo mkdir -p "${MCP_STORAGE_DIR}" +sudo chown "${USER}:${USER}" "${MCP_STORAGE_DIR}" +if [ ${USE_EXISTING_PKGS} -eq 1 ]; then + notify "[NOTE] Skipping distro pkg installation" 2 +else + notify "[NOTE] Installing required distro pkgs" 2 + jumpserver_pkg_install 'deploy' + docker_install "${MCP_STORAGE_DIR}" + virtinst_install "${MCP_STORAGE_DIR}" fi if ! 
virsh list >/dev/null 2>&1; then - notify "[ERROR] This script requires hypervisor access\n" 1>&2 - exit 1 + notify_e "[ERROR] This script requires hypervisor access" fi +# Collect jump server system information for deploy debugging +./sysinfo_print.sh + # Clone git submodules and apply our patches -make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import - -# Convert Pharos-compatible POD Descriptor File (PDF) to reclass model input -PHAROS_GEN_CONFIG_SCRIPT="./pharos/config/utils/generate_config.py" -PHAROS_INSTALLER_ADAPTER="./pharos/config/installers/fuel/pod_config.yml.j2" -BASE_CONFIG_PDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}.yaml" -BASE_CONFIG_IDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/idf-${TARGET_POD}.yaml" -LOCAL_PDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_PDF}")" -LOCAL_IDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_IDF}")" -LOCAL_PDF_RECLASS="${STORAGE_DIR}/pod_config.yml" -if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then - if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then - notify "[ERROR] Could not retrieve PDF (Pod Descriptor File)!\n" 1>&2 - exit 1 - else - notify "[WARN] Could not retrieve PDF (Pod Descriptor File)!\n" 3 - fi -elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then - notify "[WARN] POD has no IDF (Installer Descriptor File)!\n" 3 -elif ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \ - -j "${PHAROS_INSTALLER_ADAPTER}" > "${LOCAL_PDF_RECLASS}"; then - notify "[ERROR] Could not convert PDF to reclass model input!\n" 1>&2 - exit 1 -fi +make -C "${MCP_REPO_ROOT_PATH}/mcp/patches" deepclean patches-import # Check scenario file existence -SCENARIO_DIR="../config/scenario" -if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then - notify "[WARN] ${DEPLOY_SCENARIO}.yaml not found!\n" 3 - notify "[WARN] Setting simplest scenario (os-nosdn-nofeature-noha)\n" 3 - DEPLOY_SCENARIO='os-nosdn-nofeature-noha' - if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then - notify "[ERROR] Scenario definition file is missing!\n" 1>&2 - exit 1 - fi +SCENARIO_DIR="$(readlink -f "../config/scenario")" +if [ ! -f "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml" ] && \ + [ ! -f "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml.j2" ]; then + notify_e "[ERROR] Scenario definition file is missing!" fi -# Check defaults file existence -if [ ! -f "${SCENARIO_DIR}/defaults-$(uname -i).yaml" ]; then - notify "[ERROR] Scenario defaults file is missing!\n" 1>&2 - exit 1 -fi +# key might not exist yet ... +generate_ssh_key +export MAAS_SSH_KEY="$(cat "$(basename "${SSH_KEY}").pub")" -# Get required infra deployment data -set +x -eval "$(parse_yaml "${SCENARIO_DIR}/defaults-$(uname -i).yaml")" -eval "$(parse_yaml "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml")" -eval "$(parse_yaml "${LOCAL_PDF_RECLASS}")" -[[ "${CI_DEBUG}" =~ (false|0) ]] || set -x +# Expand jinja2 templates based on PDF data and env vars +export MCP_REPO_ROOT_PATH MCP_VCP MCP_STORAGE_DIR MCP_DOCKER_TAG MCP_CMP_SS \ + MCP_JUMP_ARCH=$(uname -i) MCP_DEPLOY_SCENARIO="${DEPLOY_SCENARIO}" +do_templates_scenario "${MCP_STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \ + "${BASE_CONFIG_URI}" "${SCENARIO_DIR}" \ + "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml" +do_templates_cluster "${MCP_STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \ + "${MCP_REPO_ROOT_PATH}" \ + "${SCENARIO_DIR}/defaults.yaml" -export CLUSTER_DOMAIN=${cluster_domain} +# Determine additional data (e.g. 
jump bridge names) based on XDF
+source "${DEPLOY_DIR}/xdf_data.sh"

-# Serialize vnode data as '<name0>,<ram0>,<vcpus0>|<name1>,<ram1>,<vcpus1>[...]'
-for node in "${virtual_nodes[@]}"; do
-    virtual_custom_ram="virtual_${node}_ram"
-    virtual_custom_vcpus="virtual_${node}_vcpus"
-    virtual_nodes_data+="${node},"
-    virtual_nodes_data+="${!virtual_custom_ram:-$virtual_default_ram},"
-    virtual_nodes_data+="${!virtual_custom_vcpus:-$virtual_default_vcpus}|"
-done
-virtual_nodes_data=${virtual_nodes_data%|}
-
-# Serialize repos, packages to (pre-)install/remove for:
-# - foundation node VM base image (virtual: all VMs, baremetal: cfg01|mas01)
-# - virtualized control plane VM base image (only when VCP is used)
-base_image_flavors=common
-if [[ "${cluster_states[*]}" =~ virtual_control ]]; then
-    base_image_flavors+=" control"
-fi
-for sc in ${base_image_flavors}; do
-    for va in apt_keys apt_repos pkg_install pkg_remove; do
-        key=virtual_${sc}_${va}
-        eval "${key}=\${${key}[@]// /|}"
-        eval "${key}=\${${key}// /,}"
-        virtual_repos_pkgs+="${!key}^"
-    done
-done
-virtual_repos_pkgs=${virtual_repos_pkgs%^}
-
-# Expand reclass and virsh network templates
-for tp in "${RECLASS_CLUSTER_DIR}/all-mcp-ocata-common/opnfv/"*.template \
-    net_*.template; do
-        eval "cat <<-EOF
-        $(<"${tp}")
-        EOF" 2> /dev/null > "${tp%.template}"
-done
-
-# Convert Pharos-compatible PDF to reclass network definitions
-if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
-    find "${RECLASS_CLUSTER_DIR}/${CLUSTER_DOMAIN%.local}" \
-         "${RECLASS_CLUSTER_DIR}/${DEPLOY_TYPE}-mcp-ocata-common" \
-         -name '*.j2' | while read -r tp
-    do
-        if ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
-          -j "${tp}" > "${tp%.j2}"; then
-             notify "[ERROR] Could not convert PDF to reclass network defs!\n"
-             exit 1
-        fi
-    done
-fi
-
-# Map PDF networks 'admin', 'mgmt', 'private' and 'public' to bridge names
-BR_NAMES=('admin' 'mgmt' 'private' 'public')
-BR_NETS=( \
-    "${parameters__param_opnfv_maas_pxe_address}" \
-    "${parameters__param_opnfv_infra_config_address}" \
-    "${parameters__param_opnfv_openstack_compute_node01_tenant_address}" \
-    "${parameters__param_opnfv_openstack_compute_node01_external_address}" \
-)
-for ((i = 0; i < ${#BR_NETS[@]}; i++)); do
-    br_jump=$(eval echo "\$parameters__param_opnfv_jump_bridge_${BR_NAMES[i]}")
-    if [ -n "${br_jump}" ] && [ "${br_jump}" != 'None' ] && \
-       [ -d "/sys/class/net/${br_jump}/bridge" ]; then
-        notify "[OK] Bridge found for '${BR_NAMES[i]}': ${br_jump}\n" 2
-        OPNFV_BRIDGES[${i}]="${br_jump}"
-    elif [ -n "${BR_NETS[i]}" ]; then
-        bridge=$(ip addr | awk "/${BR_NETS[i]%.*}./ {print \$NF; exit}")
-        if [ -n "${bridge}" ] && [ -d "/sys/class/net/${bridge}/bridge" ]; then
-            notify "[OK] Bridge found for net ${BR_NETS[i]%.*}.0: ${bridge}\n" 2
-            OPNFV_BRIDGES[${i}]="${bridge}"
-        fi
-    fi
-done
-notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}\n" 2
+# Jumpserver prerequisites check
+notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}" 2
+jumpserver_check_requirements "${virtual_nodes[*]}" "${OPNFV_BRIDGES[@]}"

 # Infra setup
 if [ ${DRY_RUN} -eq 1 ]; then
-    notify "[NOTE] Dry run, skipping all deployment tasks\n" 2 1>&2
+    notify "[NOTE] Dry run, skipping all deployment tasks" 2
     exit 0
 elif [ ${USE_EXISTING_INFRA} -gt 0 ]; then
-    notify "[NOTE] Use existing infra\n" 2 1>&2
-    check_connection
+    notify "[NOTE] Use existing infra: skip first ${USE_EXISTING_INFRA} states" 2
+    notify "[STATE] Skipping: ${cluster_states[*]::${USE_EXISTING_INFRA}}" 2
 else
-    generate_ssh_key
-    prepare_vms "${base_image}" "${STORAGE_DIR}" "${virtual_repos_pkgs}" \
+    prepare_vms 
"${base_image}" "${MCP_STORAGE_DIR}" "${virtual_repos_pkgs}" \ "${virtual_nodes[@]}" create_networks "${OPNFV_BRIDGES[@]}" - create_vms "${STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}" + do_sysctl_cfg + do_udev_cfg + create_vms "${MCP_STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}" update_mcpcontrol_network start_vms "${virtual_nodes[@]}" - check_connection -fi -if [ ${USE_EXISTING_INFRA} -lt 2 ]; then - wait_for 5 "./salt.sh ${LOCAL_PDF_RECLASS}" + + # https://github.com/docker/libnetwork/issues/1743 + # rm -f /var/lib/docker/network/files/local-kv.db + sudo systemctl restart docker + prepare_containers "${MCP_STORAGE_DIR}" fi +start_containers "${MCP_STORAGE_DIR}" +check_connection + # Openstack cluster setup set +x -if [ ${INFRA_CREATION_ONLY} -eq 1 ] || [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then - notify "[NOTE] Skip openstack cluster setup\n" 2 +if [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then + notify "[NOTE] Skip openstack cluster setup" 2 else - for state in "${cluster_states[@]}"; do - notify "[STATE] Applying state: ${state}\n" 2 + for state in "${cluster_states[@]:${USE_EXISTING_INFRA}}"; do + notify "[STATE] Applying state: ${state}" 2 # shellcheck disable=SC2086,2029 wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \ CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \ /root/fuel/mcp/config/states/${state}" + if [ "${state}" = 'maas' ]; then + # For hybrid PODs (virtual + baremetal nodes), the virtual nodes + # should be reset to force a DHCP request from MaaS DHCP + reset_vms "${virtual_nodes[@]}" + fi done fi