#!/bin/bash -e
# shellcheck disable=SC2034,SC2154,SC1090,SC1091,SC2155
##############################################################################
# jonas.bjurel@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
$(notify "USAGE:" 2)
$(basename "$0") -l lab-name -p pod-name -s deploy-scenario \\
[-b Lab Config Base URI] \\
[-S storage-dir] [-L /path/to/log/file.tar.gz] \\
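
Example (using the lab, POD and scenario names shown in the options below):

$(basename "$0") -l lf -p pod2 -s os-odl-nofeature-ha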
- -f Deploy on existing Salt master (use twice to also skip config sync)
- -F Do only create a Salt master
+ -f Deploy on existing Salt master (use twice or more to skip states)
+ -F Same as -e, do not launch environment deployment (legacy option)
-h Print this message and exit
-l Lab-name
-p Pod-name
-P Skip installation of package dependencies
-s Deploy-scenario short-name
It depends on the OPNFV official configuration directory/file structure
and provides a fairly simple mechanism to execute a deployment.
<base-uri>/labs/<lab-name>/idf-<pod-name>.yaml
The default is to use the git submodule tracking 'OPNFV Pharos' in
<./mcp/scripts/pharos>.
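For example, a local clone can be used instead (illustrative path):
  -b file:///home/jenkins/securedlab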
-d Dry-run - Produce deploy config files, but do not execute deploy
-D Debug logging - Enable extra logging in sh deploy scripts (set -x)
-e Do not launch environment deployment
-E Remove existing VCP VMs. It will destroy and undefine all VCP VMs
currently defined on cluster KVM nodes. If specified twice (e.g. -E -E),
baremetal nodes (VCP too, implicitly) will be removed, then reprovisioned.
Only applicable for baremetal deploys.
-f Deploy on existing Salt master. It will skip infrastructure VM creation,
but it will still sync reclass configuration from current repo to Salt
- Master node. If specified twice (e.g. -f -f), config sync will also be
- skipped.
--F Do only create a Salt master
+ Master node.
+ Each additional use skips one more state file. For example, -fff would
+ skip the first 3 state files (e.g. virtual_init, maas, baremetal_init).
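+   e.g. reuse the Salt master and skip its first 3 state files:
+     $(basename "$0") -l lf -p pod2 -s os-odl-nofeature-ha -fff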
+-F Same as -e, do not launch environment deployment (legacy option)
-h Print this message and exit
-L Deployment log path and name, e.g. -L /home/jenkins/job.log.tar.gz
-l Lab name as defined in the configuration directory, e.g. lf
-p POD name as defined in the configuration directory, e.g. pod2
+-m Use single socket compute nodes. Instead of using default NUMA-enabled
+ topology for virtual compute nodes created via libvirt, configure a
+ single guest CPU socket.
+-N Experimental: Instead of virtualizing the control plane (VCP), deploy
+ control plane directly on baremetal nodes
-P Skip installing dependency distro packages on current host
Use this flag only if you have deliberately held back older packages
whose upgrade would be undesirable on the current system.
Note that without the required packages, deploy will fail.
-s Deployment-scenario: points to a short deployment scenario name, which
has to be defined in the config directory (e.g. os-odl-nofeature-ha).
--S Storage dir for VM images, default is mcp/deploy/images
+-S Storage dir for VM images, default is /var/lib/opnfv/tmpdir
+ It is recommended to store the deploy artifacts on a fast disk, outside of
+ the current git repository (so clean operations won't erase it).
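+   e.g. -S /mnt/ssd/fuel_tmpdir (an illustrative path on a fast local disk)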
# BEGIN of variables to customize
#
CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
-REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
-DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd)
-STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd)
+MCP_REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
+DEPLOY_DIR=$(cd "${MCP_REPO_ROOT_PATH}/mcp/scripts"; pwd)
+MCP_STORAGE_DIR='/var/lib/opnfv/tmpdir'
-BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/scripts/pharos"
+BASE_CONFIG_URI="file://${MCP_REPO_ROOT_PATH}/mcp/scripts/pharos"
+OPNFV_BRANCH=$(sed -ne 's/defaultbranch=//p' "${MCP_REPO_ROOT_PATH}/.gitreview")
+DEF_DOCKER_TAG=$(basename "${OPNFV_BRANCH/master/latest}")
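+# e.g. a .gitreview containing 'defaultbranch=stable/gambia' yields
+# OPNFV_BRANCH='stable/gambia' and DEF_DOCKER_TAG='gambia', while
+# 'defaultbranch=master' maps to DEF_DOCKER_TAG='latest'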
# Customize deploy workflow
DRY_RUN=${DRY_RUN:-0}
USE_EXISTING_PKGS=${USE_EXISTING_PKGS:-0}
USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
source "${DEPLOY_DIR}/globals.sh"
source "${DEPLOY_DIR}/lib.sh"
source "${DEPLOY_DIR}/lib_template.sh"
if [ ${USE_EXISTING_PKGS} -eq 1 ]; then
notify "[NOTE] Skipping distro pkg installation" 2
else
notify "[NOTE] Installing required distro pkgs" 2
export MAAS_SSH_KEY="$(cat "$(basename "${SSH_KEY}").pub")"
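# e.g. with SSH_KEY='mcp.rsa' (an illustrative value), the line above reads
# './mcp.rsa.pub' from the current working directory; presumably consumed
# by the MaaS templates when provisioning baremetal nodes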
# Expand jinja2 templates based on PDF data and env vars
-export MCP_JUMP_ARCH=$(uname -i)
-do_templates_scenario "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
- "${BASE_CONFIG_URI}" "${SCENARIO_DIR}"
-do_templates_cluster "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
- "${REPO_ROOT_PATH}" \
- "${SCENARIO_DIR}/defaults.yaml" \
+[[ "${DEPLOY_SCENARIO}" =~ -ha$ ]] || MCP_VCP=0
+export MCP_REPO_ROOT_PATH MCP_VCP MCP_STORAGE_DIR MCP_DOCKER_TAG MCP_CMP_SS \
+ MCP_JUMP_ARCH=$(uname -i) MCP_DEPLOY_SCENARIO="${DEPLOY_SCENARIO}" \
+ MCP_NO_DEPLOY_ENVIRONMENT
+do_templates_scenario "${MCP_STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
+ "${BASE_CONFIG_URI}" "${SCENARIO_DIR}" \
# Determine additional data (e.g. jump bridge names) based on XDF
source "${DEPLOY_DIR}/xdf_data.sh"
# Jumpserver prerequisites check
notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}" 2
# Infra setup
if [ ${DRY_RUN} -eq 1 ]; then
notify "[NOTE] Dry run, skipping all deployment tasks" 2
exit 0
elif [ ${USE_EXISTING_INFRA} -gt 0 ]; then
- check_connection
-fi
-if [ ${USE_EXISTING_INFRA} -lt 2 ]; then
- wait_for 5 "./salt.sh ${STORAGE_DIR}/pod_config.yml ${virtual_nodes[*]}"
+
+ # https://github.com/docker/libnetwork/issues/1743
+ # rm -f /var/lib/docker/network/files/local-kv.db
+ sudo systemctl restart docker
+ prepare_containers "${MCP_STORAGE_DIR}"
notify "[STATE] Applying state: ${state}" 2
# shellcheck disable=SC2086,2029
wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
/root/fuel/mcp/config/states/${state}"
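# e.g. for state='virtual_init' (CI_DEBUG/ERASE_ENV forwarded as-is),
# the remote side effectively runs:
#   sudo CI_DEBUG=0 ERASE_ENV=0 /root/fuel/mcp/config/states/virtual_init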
notify "[STATE] Applying state: ${state}" 2
# shellcheck disable=SC2086,2029
wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
/root/fuel/mcp/config/states/${state}"
+ if [ "${state}" = 'maas' ]; then
+ # For hybrid PODs (virtual + baremetal nodes), the virtual nodes
+ # should be reset to force a DHCP request from MaaS DHCP
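+        # (reset_vms is expected to come from the sourced lib.sh: it power
+        #  cycles the domains so they re-request DHCP leases from MaaS)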
+ reset_vms "${virtual_nodes[@]}"
+ fi