2 # shellcheck disable=SC2034,SC2154,SC1090,SC1091
3 ##############################################################################
4 # Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
5 # jonas.bjurel@ericsson.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 ##############################################################################
12 ##############################################################################
13 # BEGIN of Exit handlers
18 if [ ${RC} -eq 0 ]; then
19 notify "\n[OK] MCP: Openstack installation finished succesfully!\n\n" 2
21 notify "\n[ERROR] MCP: Openstack installation threw a fatal error!\n\n"
25 # End of Exit handlers
26 ##############################################################################
28 ##############################################################################
29 # BEGIN of usage description
34 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
35 $(notify "$(basename "$0"): Deploy the Fuel@OPNFV MCP stack" 3)
38 $(basename "$0") -b base-uri -l lab-name -p pod-name -s deploy-scenario \\
39 [-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]] \\
40 [-S storage-dir] [-L /path/to/log/file.tar.gz] \\
41 [-f[f]] [-F] [-e | -E[E]] [-d] [-D]
43 $(notify "OPTIONS:" 2)
44 -b Base-uri for the stack-configuration structure
45 -B Bridge(s): 1st usage = PXE, 2nd = Mgmt, 3rd = Internal, 4th = Public
48 -e Do not launch environment deployment
49 -E Remove existing VCP VMs (use twice to redeploy baremetal nodes)
50 -f Deploy on existing Salt master (use twice to also skip config sync)
51 -F Do only create a Salt master
52 -h Print this message and exit
55 -s Deploy-scenario short-name
56 -S Storage dir for VM images
57 -L Deployment log path and file name
59 $(notify "Description:" 2)
60 Deploys the Fuel@OPNFV stack on the indicated lab resource.
62 This script provides the Fuel@OPNFV deployment abstraction.
63 It depends on the OPNFV official configuration directory/file structure
64 and provides a fairly simple mechanism to execute a deployment.
66 $(notify "Input parameters to the build script are:" 2)
67 -b Base URI to the configuration directory (needs to be provided in URI style,
68 it can be a local resource: file:// or a remote resource http(s)://).
69 A POD Descriptor File (PDF) should be available at:
70 <base-uri>/labs/<lab-name>/<pod-name>.yaml
71 The default is './mcp/config'.
72 -B Bridges to be used by deploy script. It can be specified several times,
73 or as a comma separated list of bridges, or both: -B br1 -B br2,br3
First occurrence sets PXE Bridge, next Mgmt, then Internal and Public.
75 For an empty value, the deploy script will use virsh to create the default
76 expected network (e.g. -B pxe,,,public will use existing "pxe" and "public"
77 bridges, respectively create "mgmt" and "internal").
78 Note that a virtual network "mcpcontrol" is always created. For virtual
79 deploys, "mcpcontrol" is also used for PXE, leaving the PXE bridge unused.
80 For baremetal deploys, PXE bridge is used for baremetal node provisioning,
81 while "mcpcontrol" is used to provision the infrastructure VMs only.
82 The default is 'pxebr'.
83 -d Dry-run - Produce deploy config files, but do not execute deploy
84 -D Debug logging - Enable extra logging in sh deploy scripts (set -x)
85 -e Do not launch environment deployment
86 -E Remove existing VCP VMs. It will destroy and undefine all VCP VMs
87 currently defined on cluster KVM nodes. If specified twice (e.g. -E -E),
88 baremetal nodes (VCP too, implicitly) will be removed, then reprovisioned.
89 Only applicable for baremetal deploys.
90 -f Deploy on existing Salt master. It will skip infrastructure VM creation,
91 but it will still sync reclass configuration from current repo to Salt
92 Master node. If specified twice (e.g. -f -f), config sync will also be
94 -F Do only create a Salt master
95 -h Print this message and exit
96 -L Deployment log path and name, eg. -L /home/jenkins/job.log.tar.gz
97 -l Lab name as defined in the configuration directory, e.g. lf
98 -p POD name as defined in the configuration directory, e.g. pod2
99 -s Deployment-scenario, this points to a short deployment scenario name, which
100 has to be defined in config directory (e.g. os-odl-nofeature-ha).
101 -S Storage dir for VM images, default is mcp/deploy/images
$(notify "[NOTE] sudo & virsh privileges are needed for this script to run" 3)
107 $(notify "sudo $(basename "$0") \\
108 -b file:///home/jenkins/securedlab \\
110 -s os-odl-nofeature-ha" 2)
115 # END of usage description
116 ##############################################################################
118 ##############################################################################
119 # BEGIN of colored notification wrapper
# Colored notification helper body: set terminal foreground color to $2
# (defaults to 1, i.e. red); '|| true' tolerates terminals where tput fails.
tput setaf "${2:-1}" || true
# Print $1 with escape interpretation (-e) and no trailing newline (-n);
# if $1 is absent, fall back to a warning mentioning the unsupported arg $3.
echo -en "${1:-"[WARN] Unsupported opt arg: $3\\n"}"
127 # END of colored notification wrapper
128 ##############################################################################
130 ##############################################################################
131 # BEGIN of deployment clean-up
134 echo "Cleaning up deploy tmp directories"
137 # END of deployment clean-up
138 ##############################################################################
140 ##############################################################################
141 # BEGIN of variables to customize
# Honor CI_DEBUG from the environment; anything other than "false"/"0"
# enables shell tracing (set -x) for the rest of the script.
CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
# Resolve the repo root relative to this script's own location.
REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd)
STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd)
RECLASS_CLUSTER_DIR=$(cd "${REPO_ROOT_PATH}/mcp/reclass/classes/cluster"; pwd)
# Defaults; '-p *virtual*' switches DEPLOY_TYPE, '-B' overrides bridges.
DEPLOY_TYPE='baremetal'
OPNFV_BRIDGES=('pxebr' 'mgmt' 'internal' 'public')
# Accepted schemes for the '-b' base config URI.
URI_REGEXP='(file|https?|ftp)://.*'
BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/config"
# Customize deploy workflow — all overridable from the environment and
# further adjusted by the corresponding CLI flags (-d, -f, -F, -e, -E).
DRY_RUN=${DRY_RUN:-0}
USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
ERASE_ENV=${ERASE_ENV:-0}
# Shared constants/helpers; SSH_OPTS/SSH_SALT/wait_for used below presumably
# come from here — TODO confirm against mcp/scripts/globals.sh.
source "${DEPLOY_DIR}/globals.sh"
163 # END of variables to customize
164 ##############################################################################
166 ##############################################################################
# CLI option parsing. NOTE(review): sampled chunk — the 'do case ... esac
# done' scaffolding and several option arms are not visible here; the arm
# comments below are inferred from the usage text and should be confirmed.
while getopts "b:B:dDfEFl:L:p:s:S:he" OPTION
# -b: override base config URI; must match URI_REGEXP (file/http(s)/ftp).
BASE_CONFIG_URI=${OPTARG}
if [[ ! $BASE_CONFIG_URI =~ ${URI_REGEXP} ]]; then
notify "[ERROR] -b $BASE_CONFIG_URI - invalid URI\n"
# -B: repeated flags and/or comma-separated lists; empty fields keep the
# default bridge for that position (word-splitting of $OPTARG is relied on).
OPT_BRIDGES=($OPTARG)
for bridge in "${OPT_BRIDGES[@]}"; do
if [ -n "${bridge}" ]; then
OPNFV_BRIDGES[${OPNFV_BRIDGE_IDX}]="${bridge}"
((OPNFV_BRIDGE_IDX+=1))
# -f: counted — per usage text, passing it twice also skips config sync.
((USE_EXISTING_INFRA+=1))
# -F: only create the Salt master.
INFRA_CREATION_ONLY=1
# -e: skip environment deployment.
NO_DEPLOY_ENVIRONMENT=1
# -L: path of the tar.gz where deployment logs are collected.
DEPLOY_LOG="${OPTARG}"
# -p: POD name; names containing "virtual" switch to a virtual deploy.
if [[ "${TARGET_POD}" =~ "virtual" ]]; then
DEPLOY_TYPE='virtual'
# -s: deploy scenario short name.
DEPLOY_SCENARIO=${OPTARG}
# -S: override the VM image storage dir, only when the value is non-empty.
if [[ ${OPTARG} ]]; then
STORAGE_DIR="${OPTARG}"
# Reached for unknown/invalid options: legacy argument style is rejected.
notify "[ERROR] Arguments not according to new argument style\n"
# Fail early unless we can escalate: later steps require root via sudo.
if [[ "$(sudo whoami)" != 'root' ]]; then
notify "[ERROR] This script requires sudo rights\n" 1>&2
# Validate mandatory arguments are set
if [ -z "${TARGET_LAB}" ] || [ -z "${TARGET_POD}" ] || \
[ -z "${DEPLOY_SCENARIO}" ]; then
notify "[ERROR] At least one of the mandatory args is missing!\n" 1>&2
# Re-assert tracing after option parsing (a -D arm presumably toggles
# CI_DEBUG — not visible in this sampled view; confirm).
[[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
# Enable the automatic exit trap
trap do_exit SIGINT SIGTERM EXIT
# Set no restrictive umask so that Jenkins can remove any residuals
261 # Set no restrictive umask so that Jenkins can remove any residuals
# Work from the deploy scripts dir for the remainder of the script.
pushd "${DEPLOY_DIR}" > /dev/null
# Prepare the deploy config files based on lab/pod information, deployment
# Install required packages
# Best-effort per distro family: apt-get on Debian/Ubuntu, yum on RPM hosts.
[ -n "$(command -v apt-get)" ] && sudo apt-get install -y \
git make rsync mkisofs curl virtinst cpu-checker qemu-kvm uuid-runtime \
[ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \
git make rsync genisoimage curl virt-install qemu-kvm util-linux \
# For baremetal, python is indirectly required for PDF parsing
if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
[ -n "$(command -v apt-get)" ] && sudo apt-get install -y \
python python-ipaddress python-jinja2
[ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \
python python-ipaddress python-jinja2
# AArch64 VMs use AAVMF (guest UEFI)
if [ "$(uname -m)" = 'aarch64' ]; then
# Debian packages the guest UEFI firmware as 'qemu-efi', RPM as 'AAVMF'.
[ -n "$(command -v apt-get)" ] && sudo apt-get install -y qemu-efi
[ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken AAVMF
# Bail out early if we cannot talk to libvirt at all.
if ! virsh list >/dev/null 2>&1; then
notify "[ERROR] This script requires hypervisor access\n" 1>&2
# Clone git submodules and apply our patches
make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
# Convert Pharos-compatible POD Descriptor File (PDF) to reclass model input
# Paths are relative to DEPLOY_DIR (we pushd'ed there above).
PHAROS_GEN_CONFIG_SCRIPT="./pharos/config/utils/generate_config.py"
PHAROS_INSTALLER_ADAPTER="./pharos/config/installers/fuel/pod_config.yml.j2"
# Remote descriptor locations under <base-uri>/labs/<lab>/.
BASE_CONFIG_PDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}.yaml"
BASE_CONFIG_IDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/idf-${TARGET_POD}.yaml"
# Local download targets plus the generated reclass model input file.
LOCAL_PDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_PDF}")"
LOCAL_IDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_IDF}")"
LOCAL_PDF_RECLASS="${STORAGE_DIR}/pod_config.yml"
# Fetch the PDF; missing PDF is fatal for baremetal (mandatory there) but
# only a warning otherwise. Then fetch the optional IDF and finally render
# the reclass model input from the PDF via the Pharos generator.
if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
notify "[ERROR] Could not retrieve PDF (Pod Descriptor File)!\n" 1>&2
notify "[WARN] Could not retrieve PDF (Pod Descriptor File)!\n" 3
elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then
notify "[WARN] POD has no IDF (Installer Descriptor File)!\n" 3
elif ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
-j "${PHAROS_INSTALLER_ADAPTER}" > "${LOCAL_PDF_RECLASS}"; then
notify "[ERROR] Could not convert PDF to reclass model input!\n" 1>&2
# Check scenario file existence
SCENARIO_DIR="../config/scenario"
if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then
notify "[WARN] ${DEPLOY_SCENARIO}.yaml not found!\n" 3
notify "[WARN] Setting simplest scenario (os-nosdn-nofeature-noha)\n" 3
# Fall back to the simplest scenario; if even that is absent, give up.
DEPLOY_SCENARIO='os-nosdn-nofeature-noha'
if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then
notify "[ERROR] Scenario definition file is missing!\n" 1>&2
# Check defaults file existence
# NOTE(review): 'uname -i' prints "unknown" on some systems; confirm it
# matches the shipped defaults-*.yaml file names on all supported hosts.
if [ ! -f "${SCENARIO_DIR}/defaults-$(uname -i).yaml" ]; then
notify "[ERROR] Scenario defaults file is missing!\n" 1>&2
# Get required infra deployment data
# Source defaults, scenario and PDF-derived data as shell variables;
# parse_yaml flattens YAML keys into names like 'virtual_<node>_ram'.
eval "$(parse_yaml "${SCENARIO_DIR}/defaults-$(uname -i).yaml")"
eval "$(parse_yaml "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml")"
eval "$(parse_yaml "${LOCAL_PDF_RECLASS}")"
# Re-assert debug tracing (the eval'd content may have altered shell opts —
# presumably; confirm against parse_yaml output).
[[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
# Export the cluster domain for downstream consumers (templates / state
# scripts — exact consumers are outside this sampled view).
export CLUSTER_DOMAIN=${cluster_domain}
# Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
for node in "${virtual_nodes[@]}"; do
# Per-node overrides are flat parse_yaml vars (e.g. 'virtual_cmp01_ram');
# the ${!var:-default} indirection falls back to the scenario-wide default.
virtual_custom_ram="virtual_${node}_ram"
virtual_custom_vcpus="virtual_${node}_vcpus"
virtual_nodes_data+="${node},"
virtual_nodes_data+="${!virtual_custom_ram:-$virtual_default_ram},"
virtual_nodes_data+="${!virtual_custom_vcpus:-$virtual_default_vcpus}|"
# Strip the trailing '|' separator left by the loop above.
virtual_nodes_data=${virtual_nodes_data%|}
# Expand reclass and virsh network templates
# NOTE(review): sampled chunk — the heredoc-based eval that renders each
# *.template into its final file is only partially visible here.
for tp in "${RECLASS_CLUSTER_DIR}/all-mcp-ocata-common/opnfv/"*.template \
EOF" 2> /dev/null > "${tp%.template}"
# Convert Pharos-compatible PDF to reclass network definitions
if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
find "${RECLASS_CLUSTER_DIR}/${CLUSTER_DOMAIN%.local}" \
"${RECLASS_CLUSTER_DIR}/${DEPLOY_TYPE}-mcp-ocata-common" \
-name '*.j2' | while read -r tp
# Render each Jinja2 template against the PDF; report conversion failures.
if ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
-j "${tp}" > "${tp%.j2}"; then
notify "[ERROR] Could not convert PDF to reclass network defs!\n"
# Map PDF networks 'admin', 'mgmt', 'private' and 'public' to bridge names
# NOTE(review): sampled chunk — the 'BR_NETS=(' opener and closing ')' of
# the array literal below are not visible here.
BR_NAMES=('admin' 'mgmt' 'private' 'public')
"${parameters__param_opnfv_maas_pxe_address}" \
"${parameters__param_opnfv_infra_config_address}" \
"${parameters__param_opnfv_openstack_compute_node01_tenant_address}" \
"${parameters__param_opnfv_openstack_compute_node01_external_address}" \
for ((i = 0; i < ${#BR_NETS[@]}; i++)); do
# Prefer a jump-host bridge explicitly configured in the PDF parameters.
br_jump=$(eval echo "\$parameters__param_opnfv_jump_bridge_${BR_NAMES[i]}")
if [ -n "${br_jump}" ] && [ "${br_jump}" != 'None' ] && \
[ -d "/sys/class/net/${br_jump}/bridge" ]; then
notify "[OK] Bridge found for '${BR_NAMES[i]}': ${br_jump}\n" 2
OPNFV_BRIDGES[${i}]="${br_jump}"
# Otherwise autodetect: grep 'ip addr' for the network's first three
# octets and use the matching interface if it is a Linux bridge.
elif [ -n "${BR_NETS[i]}" ]; then
bridge=$(ip addr | awk "/${BR_NETS[i]%.*}./ {print \$NF; exit}")
if [ -n "${bridge}" ] && [ -d "/sys/class/net/${bridge}/bridge" ]; then
notify "[OK] Bridge found for net ${BR_NETS[i]%.*}.0: ${bridge}\n" 2
OPNFV_BRIDGES[${i}]="${bridge}"
notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}\n" 2
# Deployment workflow dispatch (sampled: some branch delimiters are not
# visible in this chunk).
if [ ${DRY_RUN} -eq 1 ]; then
notify "[NOTE] Dry run, skipping all deployment tasks\n" 2 1>&2
elif [ ${USE_EXISTING_INFRA} -gt 0 ]; then
# -f given: reuse the already-running Salt master / infra VMs.
notify "[NOTE] Use existing infra\n" 2 1>&2
# Fresh infra: prepare images, create virsh networks and VMs, then boot
# them (helpers presumably defined in globals.sh or a sibling — confirm).
prepare_vms "${base_image}" "${STORAGE_DIR}" "${virtual_nodes[@]}"
create_networks "${OPNFV_BRIDGES[@]}"
create_vms "${STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}"
update_mcpcontrol_network
start_vms "${virtual_nodes[@]}"
# Unless -f was passed twice, (re)run Salt master setup / config sync.
if [ ${USE_EXISTING_INFRA} -lt 2 ]; then
wait_for 5 "./salt.sh ${LOCAL_PDF_RECLASS}"
# Openstack cluster setup
if [ ${INFRA_CREATION_ONLY} -eq 1 ] || [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then
notify "[NOTE] Skip openstack cluster setup\n" 2
# Apply each scenario-defined state script on the Salt master over SSH,
# forwarding CI_DEBUG/ERASE_ENV into the remote environment; wait_for's
# retry/timeout semantics are defined elsewhere (globals.sh presumably).
for state in "${cluster_states[@]}"; do
notify "[STATE] Applying state: ${state}\n" 2
# shellcheck disable=SC2086,2029
wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
/root/fuel/mcp/config/states/${state}"
# Collect deployment logs into the tarball given via -L.
./log.sh "${DEPLOY_LOG}"
447 ##############################################################################