2 # shellcheck disable=SC2034,SC2154,SC1090,SC1091
3 ##############################################################################
4 # Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
5 # jonas.bjurel@ericsson.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 ##############################################################################
12 ##############################################################################
13 # BEGIN of Exit handlers
19 if [ ${RC} -eq 0 ]; then
20 notify "\n[OK] MCP: Openstack installation finished succesfully!\n\n" 2
22 notify "\n[ERROR] MCP: Openstack installation threw a fatal error!\n\n"
26 # End of Exit handlers
27 ##############################################################################
29 ##############################################################################
30 # BEGIN of usage description
35 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
36 $(notify "$(basename "$0"): Deploy the Fuel@OPNFV MCP stack" 3)
39 $(basename "$0") -b base-uri -l lab-name -p pod-name -s deploy-scenario \\
40 [-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]] \\
41 [-S storage-dir] [-L /path/to/log/file.tar.gz] \\
42 [-f[f]] [-F] [-e | -E[E]] [-d] [-D]
44 $(notify "OPTIONS:" 2)
45 -b Base-uri for the stack-configuration structure
46 -B Bridge(s): 1st usage = PXE, 2nd = Mgmt, 3rd = Internal, 4th = Public
49 -e Do not launch environment deployment
50 -E Remove existing VCP VMs (use twice to redeploy baremetal nodes)
51 -f Deploy on existing Salt master (use twice to also skip config sync)
52 -F Do only create a Salt master
53 -h Print this message and exit
56 -s Deploy-scenario short-name
57 -S Storage dir for VM images
58 -L Deployment log path and file name
60 $(notify "Description:" 2)
61 Deploys the Fuel@OPNFV stack on the indicated lab resource.
63 This script provides the Fuel@OPNFV deployment abstraction.
64 It depends on the OPNFV official configuration directory/file structure
65 and provides a fairly simple mechanism to execute a deployment.
67 $(notify "Input parameters to the build script are:" 2)
68 -b Base URI to the configuration directory (needs to be provided in URI style,
69 it can be a local resource: file:// or a remote resource http(s)://).
70 A POD Descriptor File (PDF) should be available at:
71 <base-uri>/labs/<lab-name>/<pod-name>.yaml
72 The default is './mcp/config'.
73 -B Bridges to be used by deploy script. It can be specified several times,
74 or as a comma separated list of bridges, or both: -B br1 -B br2,br3
First occurrence sets PXE Bridge, next Mgmt, then Internal and Public.
76 For an empty value, the deploy script will use virsh to create the default
77 expected network (e.g. -B pxe,,,public will use existing "pxe" and "public"
78 bridges, respectively create "mgmt" and "internal").
79 Note that a virtual network "mcpcontrol" is always created. For virtual
80 deploys, "mcpcontrol" is also used for PXE, leaving the PXE bridge unused.
81 For baremetal deploys, PXE bridge is used for baremetal node provisioning,
82 while "mcpcontrol" is used to provision the infrastructure VMs only.
83 The default is 'pxebr'.
84 -d Dry-run - Produce deploy config files, but do not execute deploy
85 -D Debug logging - Enable extra logging in sh deploy scripts (set -x)
86 -e Do not launch environment deployment
87 -E Remove existing VCP VMs. It will destroy and undefine all VCP VMs
88 currently defined on cluster KVM nodes. If specified twice (e.g. -E -E),
89 baremetal nodes (VCP too, implicitly) will be removed, then reprovisioned.
90 Only applicable for baremetal deploys.
91 -f Deploy on existing Salt master. It will skip infrastructure VM creation,
92 but it will still sync reclass configuration from current repo to Salt
93 Master node. If specified twice (e.g. -f -f), config sync will also be
95 -F Do only create a Salt master
96 -h Print this message and exit
97 -L Deployment log path and name, eg. -L /home/jenkins/job.log.tar.gz
98 -l Lab name as defined in the configuration directory, e.g. lf
99 -p POD name as defined in the configuration directory, e.g. pod2
100 -s Deployment-scenario, this points to a short deployment scenario name, which
101 has to be defined in config directory (e.g. os-odl-nofeature-ha).
102 -S Storage dir for VM images, default is mcp/deploy/images
$(notify "[NOTE] sudo & virsh privileges are needed for this script to run" 3)
108 $(notify "sudo $(basename "$0") \\
109 -b file:///home/jenkins/securedlab \\
111 -s os-odl-nofeature-ha" 2)
116 # END of usage description
117 ##############################################################################
119 ##############################################################################
120 # BEGIN of colored notification wrapper
123 tput setaf "${2:-1}" || true
124 echo -en "${1:-"[WARN] Unsupported opt arg: $3\\n"}"
128 # END of colored notification wrapper
129 ##############################################################################
131 ##############################################################################
132 # BEGIN of deployment clean-up
135 echo "Cleaning up deploy tmp directories"
138 # END of deployment clean-up
139 ##############################################################################
141 ##############################################################################
142 # BEGIN of variables to customize
144 CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
145 REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
146 DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd)
147 STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd)
148 RECLASS_CLUSTER_DIR=$(cd "${REPO_ROOT_PATH}/mcp/reclass/classes/cluster"; pwd)
149 DEPLOY_TYPE='baremetal'
150 OPNFV_BRIDGES=('pxebr' 'mgmt' 'internal' 'public')
151 URI_REGEXP='(file|https?|ftp)://.*'
152 BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/config"
154 # Customize deploy workflow
155 DRY_RUN=${DRY_RUN:-0}
156 USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
157 INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
158 NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
159 ERASE_ENV=${ERASE_ENV:-0}
161 source "${DEPLOY_DIR}/globals.sh"
162 source "${DEPLOY_DIR}/lib.sh"
165 # END of variables to customize
166 ##############################################################################
168 ##############################################################################
173 while getopts "b:B:dDfEFl:L:p:s:S:he" OPTION
177 BASE_CONFIG_URI=${OPTARG}
178 if [[ ! $BASE_CONFIG_URI =~ ${URI_REGEXP} ]]; then
179 notify "[ERROR] -b $BASE_CONFIG_URI - invalid URI\n"
187 OPT_BRIDGES=($OPTARG)
188 for bridge in "${OPT_BRIDGES[@]}"; do
189 if [ -n "${bridge}" ]; then
190 OPNFV_BRIDGES[${OPNFV_BRIDGE_IDX}]="${bridge}"
192 ((OPNFV_BRIDGE_IDX+=1))
203 ((USE_EXISTING_INFRA+=1))
206 INFRA_CREATION_ONLY=1
209 NO_DEPLOY_ENVIRONMENT=1
218 DEPLOY_LOG="${OPTARG}"
222 if [[ "${TARGET_POD}" =~ "virtual" ]]; then
223 DEPLOY_TYPE='virtual'
227 DEPLOY_SCENARIO=${OPTARG}
230 if [[ ${OPTARG} ]]; then
231 STORAGE_DIR="${OPTARG}"
239 notify "[ERROR] Arguments not according to new argument style\n"
245 if [[ "$(sudo whoami)" != 'root' ]]; then
246 notify "[ERROR] This script requires sudo rights\n" 1>&2
250 # Validate mandatory arguments are set
251 if [ -z "${TARGET_LAB}" ] || [ -z "${TARGET_POD}" ] || \
252 [ -z "${DEPLOY_SCENARIO}" ]; then
253 notify "[ERROR] At least one of the mandatory args is missing!\n" 1>&2
258 [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
260 # Enable the automatic exit trap
261 trap do_exit SIGINT SIGTERM EXIT
263 # Set no restrictive umask so that Jenkins can remove any residuals
268 pushd "${DEPLOY_DIR}" > /dev/null
269 # Prepare the deploy config files based on lab/pod information, deployment
272 # Install required packages
273 [ -n "$(command -v apt-get)" ] && sudo apt-get install -y \
274 git make rsync mkisofs curl virtinst cpu-checker qemu-kvm uuid-runtime \
275 libvirt-bin cloud-guest-utils e2fsprogs
276 [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \
277 git make rsync genisoimage curl virt-install qemu-kvm util-linux \
278 libvirt cloud-utils-growpart e2fsprogs
280 # For baremetal, python is indirectly required for PDF parsing
281 if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
282 [ -n "$(command -v apt-get)" ] && sudo apt-get install -y \
283 python python-ipaddress python-jinja2 python-yaml
284 [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \
285 python python-ipaddress python-jinja2 python-yaml
288 # AArch64 VMs use AAVMF (guest UEFI)
289 if [ "$(uname -m)" = 'aarch64' ]; then
290 [ -n "$(command -v apt-get)" ] && sudo apt-get install -y qemu-efi
291 [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken AAVMF
294 if ! virsh list >/dev/null 2>&1; then
295 notify "[ERROR] This script requires hypervisor access\n" 1>&2
299 # Clone git submodules and apply our patches
300 make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
302 # Convert Pharos-compatible POD Descriptor File (PDF) to reclass model input
303 PHAROS_GEN_CONFIG_SCRIPT="./pharos/config/utils/generate_config.py"
304 PHAROS_INSTALLER_ADAPTER="./pharos/config/installers/fuel/pod_config.yml.j2"
305 BASE_CONFIG_PDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}.yaml"
306 BASE_CONFIG_IDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/idf-${TARGET_POD}.yaml"
307 LOCAL_PDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_PDF}")"
308 LOCAL_IDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_IDF}")"
309 LOCAL_PDF_RECLASS="${STORAGE_DIR}/pod_config.yml"
310 if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
311 if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
312 notify "[ERROR] Could not retrieve PDF (Pod Descriptor File)!\n" 1>&2
315 notify "[WARN] Could not retrieve PDF (Pod Descriptor File)!\n" 3
317 elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then
318 notify "[WARN] POD has no IDF (Installer Descriptor File)!\n" 3
319 elif ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
320 -j "${PHAROS_INSTALLER_ADAPTER}" > "${LOCAL_PDF_RECLASS}"; then
321 notify "[ERROR] Could not convert PDF to reclass model input!\n" 1>&2
325 # Check scenario file existence
326 SCENARIO_DIR="../config/scenario"
327 if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then
328 notify "[WARN] ${DEPLOY_SCENARIO}.yaml not found!\n" 3
329 notify "[WARN] Setting simplest scenario (os-nosdn-nofeature-noha)\n" 3
330 DEPLOY_SCENARIO='os-nosdn-nofeature-noha'
331 if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then
332 notify "[ERROR] Scenario definition file is missing!\n" 1>&2
337 # Check defaults file existence
338 if [ ! -f "${SCENARIO_DIR}/defaults-$(uname -i).yaml" ]; then
339 notify "[ERROR] Scenario defaults file is missing!\n" 1>&2
343 # Get required infra deployment data
345 eval "$(parse_yaml "${SCENARIO_DIR}/defaults-$(uname -i).yaml")"
346 eval "$(parse_yaml "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml")"
347 eval "$(parse_yaml "${LOCAL_PDF_RECLASS}")"
348 [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
350 export CLUSTER_DOMAIN=${cluster_domain}
352 # Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
353 for node in "${virtual_nodes[@]}"; do
354 virtual_custom_ram="virtual_${node}_ram"
355 virtual_custom_vcpus="virtual_${node}_vcpus"
356 virtual_nodes_data+="${node},"
357 virtual_nodes_data+="${!virtual_custom_ram:-$virtual_default_ram},"
358 virtual_nodes_data+="${!virtual_custom_vcpus:-$virtual_default_vcpus}|"
360 virtual_nodes_data=${virtual_nodes_data%|}
362 # Serialize repos, packages to (pre-)install/remove for:
363 # - foundation node VM base image (virtual: all VMs, baremetal: cfg01|mas01)
364 # - virtualized control plane VM base image (only when VCP is used)
365 base_image_flavors=common
366 if [[ "${cluster_states[*]}" =~ virtual_control ]]; then
367 base_image_flavors+=" control"
369 for sc in ${base_image_flavors}; do
370 for va in apt_keys apt_repos pkg_install pkg_remove; do
371 key=virtual_${sc}_${va}
372 eval "${key}=\${${key}[@]// /|}"
373 eval "${key}=\${${key}// /,}"
374 virtual_repos_pkgs+="${!key}^"
377 virtual_repos_pkgs=${virtual_repos_pkgs%^}
379 # Expand reclass and virsh network templates
380 for tp in "${RECLASS_CLUSTER_DIR}/all-mcp-arch-common/opnfv/"*.template \
384 EOF" 2> /dev/null > "${tp%.template}"
387 # Convert Pharos-compatible PDF to reclass network definitions
388 if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
389 find "${RECLASS_CLUSTER_DIR}/${CLUSTER_DOMAIN%.local}" \
390 "${RECLASS_CLUSTER_DIR}/${DEPLOY_TYPE}-mcp-ocata-common" \
391 -name '*.j2' | while read -r tp
393 if ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
394 -j "${tp}" > "${tp%.j2}"; then
395 notify "[ERROR] Could not convert PDF to reclass network defs!\n"
401 # Map PDF networks 'admin', 'mgmt', 'private' and 'public' to bridge names
402 BR_NAMES=('admin' 'mgmt' 'private' 'public')
404 "${parameters__param_opnfv_maas_pxe_address}" \
405 "${parameters__param_opnfv_infra_config_address}" \
406 "${parameters__param_opnfv_openstack_compute_node01_tenant_address}" \
407 "${parameters__param_opnfv_openstack_compute_node01_external_address}" \
409 for ((i = 0; i < ${#BR_NETS[@]}; i++)); do
410 br_jump=$(eval echo "\$parameters__param_opnfv_jump_bridge_${BR_NAMES[i]}")
411 if [ -n "${br_jump}" ] && [ "${br_jump}" != 'None' ] && \
412 [ -d "/sys/class/net/${br_jump}/bridge" ]; then
413 notify "[OK] Bridge found for '${BR_NAMES[i]}': ${br_jump}\n" 2
414 OPNFV_BRIDGES[${i}]="${br_jump}"
415 elif [ -n "${BR_NETS[i]}" ]; then
416 bridge=$(ip addr | awk "/${BR_NETS[i]%.*}./ {print \$NF; exit}")
417 if [ -n "${bridge}" ] && [ -d "/sys/class/net/${bridge}/bridge" ]; then
418 notify "[OK] Bridge found for net ${BR_NETS[i]%.*}.0: ${bridge}\n" 2
419 OPNFV_BRIDGES[${i}]="${bridge}"
423 notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}\n" 2
426 if [ ${DRY_RUN} -eq 1 ]; then
427 notify "[NOTE] Dry run, skipping all deployment tasks\n" 2 1>&2
429 elif [ ${USE_EXISTING_INFRA} -gt 0 ]; then
430 notify "[NOTE] Use existing infra\n" 2 1>&2
434 prepare_vms "${base_image}" "${STORAGE_DIR}" "${virtual_repos_pkgs}" \
435 "${virtual_nodes[@]}"
436 create_networks "${OPNFV_BRIDGES[@]}"
437 create_vms "${STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}"
438 update_mcpcontrol_network
439 start_vms "${virtual_nodes[@]}"
442 if [ ${USE_EXISTING_INFRA} -lt 2 ]; then
443 wait_for 5 "./salt.sh ${LOCAL_PDF_RECLASS}"
446 # Openstack cluster setup
448 if [ ${INFRA_CREATION_ONLY} -eq 1 ] || [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then
449 notify "[NOTE] Skip openstack cluster setup\n" 2
451 for state in "${cluster_states[@]}"; do
452 notify "[STATE] Applying state: ${state}\n" 2
453 # shellcheck disable=SC2086,2029
454 wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
455 CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
456 /root/fuel/mcp/config/states/${state}"
460 ./log.sh "${DEPLOY_LOG}"
466 ##############################################################################