2 # shellcheck disable=SC2034,SC2154,SC1090,SC1091
3 ##############################################################################
4 # Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
5 # jonas.bjurel@ericsson.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 ##############################################################################
12 ##############################################################################
13 # BEGIN of Exit handlers
# NOTE(review): fragment — the enclosing handler definition (do_exit, see the
# 'trap do_exit' below), the RC assignment and the 'else'/'fi' lines are not
# visible in this chunk. RC presumably holds the exit status — confirm.
# notify()'s 2nd arg is a tput color index (2 = success color); the error path
# omits it, falling back to notify()'s default.
18 if [ ${RC} -eq 0 ]; then
19 notify "\n[OK] MCP: Openstack installation finished successfully!\n\n" 2
21 notify "\n[ERROR] MCP: Openstack installation threw a fatal error!\n\n"
25 # End of Exit handlers
26 ##############################################################################
28 ##############################################################################
29 # BEGIN of usage description
34 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
35 $(notify "$(basename "$0"): Deploy the Fuel@OPNFV MCP stack" 3)
38 $(basename "$0") -b base-uri -l lab-name -p pod-name -s deploy-scenario \\
39 [-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]] \\
40 [-S storage-dir] [-L /path/to/log/file.tar.gz] \\
41 [-f [-f]] [-F] [-e] [-d] [-D]
43 $(notify "OPTIONS:" 2)
44 -b Base-uri for the stack-configuration structure
45 -B Bridge(s): 1st usage = PXE, 2nd = Mgmt, 3rd = Internal, 4th = Public
48 -e Do not launch environment deployment
49 -f Deploy on existing Salt master (use twice to also skip config sync)
50 -F Do only create a Salt master
51 -h Print this message and exit
54 -s Deploy-scenario short-name
55 -S Storage dir for VM images
56 -L Deployment log path and file name
58 $(notify "Description:" 2)
59 Deploys the Fuel@OPNFV stack on the indicated lab resource.
61 This script provides the Fuel@OPNFV deployment abstraction.
62 It depends on the OPNFV official configuration directory/file structure
63 and provides a fairly simple mechanism to execute a deployment.
65 $(notify "Input parameters to the build script are:" 2)
66 -b Base URI to the configuration directory (needs to be provided in URI style,
67 it can be a local resource: file:// or a remote resource http(s)://).
68 A POD Descriptor File (PDF) should be available at:
69 <base-uri>/labs/<lab-name>/<pod-name>.yaml
70 The default is './mcp/config'.
71 -B Bridges to be used by deploy script. It can be specified several times,
72 or as a comma separated list of bridges, or both: -B br1 -B br2,br3
73 First occurrence sets PXE Bridge, next Mgmt, then Internal and Public.
74 For an empty value, the deploy script will use virsh to create the default
75 expected network (e.g. -B pxe,,,public will use existing "pxe" and "public"
76 bridges, respectively create "mgmt" and "internal").
77 Note that a virtual network "mcpcontrol" is always created. For virtual
78 deploys, "mcpcontrol" is also used for PXE, leaving the PXE bridge unused.
79 For baremetal deploys, PXE bridge is used for baremetal node provisioning,
80 while "mcpcontrol" is used to provision the infrastructure VMs only.
81 The default is 'pxebr'.
82 -d Dry-run - Produce deploy config files, but do not execute deploy
83 -D Debug logging - Enable extra logging in sh deploy scripts (set -x)
84 -e Do not launch environment deployment
85 -f Deploy on existing Salt master. It will skip infrastructure VM creation,
86 but it will still sync reclass configuration from current repo to Salt
87 Master node. If specified twice (e.g. -f -f), config sync will also be
89 -F Do only create a Salt master
90 -h Print this message and exit
91 -L Deployment log path and name, eg. -L /home/jenkins/job.log.tar.gz
92 -l Lab name as defined in the configuration directory, e.g. lf
93 -p POD name as defined in the configuration directory, e.g. pod2
94 -s Deployment-scenario, this points to a short deployment scenario name, which
95 has to be defined in config directory (e.g. os-odl-nofeature-ha).
96 -S Storage dir for VM images, default is mcp/deploy/images
98 $(notify "[NOTE] sudo & virsh privileges are needed for this script to run" 3)
102 $(notify "sudo $(basename "$0") \\
103 -b file:///home/jenkins/securedlab \\
105 -s os-odl-nofeature-ha" 2)
110 # END of usage description
111 ##############################################################################
113 ##############################################################################
114 # BEGIN of colored notification wrapper
# notify(): print message $1 in the tput color given by $2 (defaults to 1).
# NOTE(review): fragment — the 'notify() {' opener, the color reset and the
# closing brace are outside this chunk.
# '|| true' keeps a failing tput (e.g. no color-capable terminal) from
# aborting the script.
117 tput setaf "${2:-1}" || true
118 echo -en "${1:-"[WARN] Unsupported opt arg: $3\\n"}"
122 # END of colored notification wrapper
123 ##############################################################################
125 ##############################################################################
126 # BEGIN of deployment clean-up
# NOTE(review): fragment — the cleanup function wrapper and the commands that
# actually remove the tmp directories are not visible in this chunk.
129 echo "Cleaning up deploy tmp directories"
132 # END of deployment clean-up
133 ##############################################################################
135 ##############################################################################
136 # BEGIN of variables to customize
# Enable shell tracing unless CI_DEBUG is "false" or "0".
138 CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
# Resolve the repo root from this script's own location, then derive all
# working directories from it (cd+pwd normalizes them to absolute paths).
139 REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
140 DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd)
141 STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd)
142 RECLASS_CLUSTER_DIR=$(cd "${REPO_ROOT_PATH}/mcp/reclass/classes/cluster"; pwd)
143 DEPLOY_TYPE='baremetal'
# Default bridge names; overridden via -B and/or PDF-based detection below.
144 OPNFV_BRIDGES=('pxebr' 'mgmt' 'internal' 'public')
145 URI_REGEXP='(file|https?|ftp)://.*'
146 BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/config"
148 # Customize deploy workflow
# All workflow toggles default to 0 (off) and are flipped by CLI options.
149 DRY_RUN=${DRY_RUN:-0}
150 USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
151 INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
152 NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
# Presumably provides helpers used later (parse_yaml, wait_for, prepare_vms,
# create_networks, ...) — confirm against mcp/scripts/globals.sh.
154 source "${DEPLOY_DIR}/globals.sh"
157 # END of variables to customize
158 ##############################################################################
160 ##############################################################################
# NOTE(review): heavily truncated getopts block — the 'do', 'case', several
# option arms, the ';;' terminators, 'esac' and 'done' are not visible here.
165 while getopts "b:B:dDfFl:L:p:s:S:he" OPTION
# -b: base config URI; must match URI_REGEXP (file:// | http(s):// | ftp://).
169 BASE_CONFIG_URI=${OPTARG}
170 if [[ ! $BASE_CONFIG_URI =~ ${URI_REGEXP} ]]; then
171 notify "[ERROR] -b $BASE_CONFIG_URI - invalid URI\n"
# -B: bridge list; empty elements keep the corresponding default bridge.
# NOTE(review): unquoted $OPTARG relies on word-splitting — IFS handling
# (presumably set to ',' nearby) is not visible in this chunk.
179 OPT_BRIDGES=($OPTARG)
180 for bridge in "${OPT_BRIDGES[@]}"; do
181 if [ -n "${bridge}" ]; then
182 OPNFV_BRIDGES[${OPNFV_BRIDGE_IDX}]="${bridge}"
184 ((OPNFV_BRIDGE_IDX+=1))
# -f: each occurrence bumps the counter (>=2 also skips config sync below).
195 ((USE_EXISTING_INFRA+=1))
198 INFRA_CREATION_ONLY=1
201 NO_DEPLOY_ENVIRONMENT=1
207 DEPLOY_LOG="${OPTARG}"
# POD names containing "virtual" switch the deployment type.
211 if [[ "${TARGET_POD}" =~ "virtual" ]]; then
212 DEPLOY_TYPE='virtual'
216 DEPLOY_SCENARIO=${OPTARG}
# -S: only override the default storage dir when a non-empty value was given.
219 if [[ ${OPTARG} ]]; then
220 STORAGE_DIR="${OPTARG}"
228 notify "[ERROR] Arguments not according to new argument style\n"
# Verify we can escalate to root; later steps shell out through sudo.
234 if [[ "$(sudo whoami)" != 'root' ]]; then
235 notify "[ERROR] This script requires sudo rights\n" 1>&2
239 # Validate mandatory arguments are set
240 if [ -z "${TARGET_LAB}" ] || [ -z "${TARGET_POD}" ] || \
241 [ -z "${DEPLOY_SCENARIO}" ]; then
242 notify "[ERROR] At least one of the mandatory args is missing!\n" 1>&2
# Re-apply the tracing choice in case CI_DEBUG changed during option parsing.
247 [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
249 # Enable the automatic exit trap
250 trap do_exit SIGINT SIGTERM EXIT
252 # Set no restrictive umask so that Jenkins can remove any residuals
# Work from the scripts dir; relative paths below (./pharos/..., ./salt.sh,
# ./log.sh, ../config/scenario) depend on this cwd.
257 pushd "${DEPLOY_DIR}" > /dev/null
258 # Prepare the deploy config files based on lab/pod information, deployment
261 # Install required packages
# Debian/Ubuntu path; the package list continues on a line not visible here.
262 [ -n "$(command -v apt-get)" ] && sudo apt-get install -y \
263 git make rsync mkisofs curl virtinst cpu-checker qemu-kvm uuid-runtime \
# RHEL/CentOS path; --skip-broken tolerates packages absent from some repos.
265 [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \
266 git make rsync genisoimage curl virt-install qemu-kvm util-linux \
269 # For baremetal, python is indirectly required for PDF parsing
270 if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
271 [ -n "$(command -v apt-get)" ] && sudo apt-get install -y \
272 python python-ipaddress python-jinja2
273 [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken \
274 python python-ipaddress python-jinja2
277 # AArch64 VMs use AAVMF (guest UEFI)
278 if [ "$(uname -m)" = 'aarch64' ]; then
279 [ -n "$(command -v apt-get)" ] && sudo apt-get install -y qemu-efi
280 [ -n "$(command -v yum)" ] && sudo yum install -y --skip-broken AAVMF
# Bail out early if libvirt is not reachable for the current user.
283 if ! virsh list >/dev/null 2>&1; then
284 notify "[ERROR] This script requires hypervisor access\n" 1>&2
288 # Clone git submodules and apply our patches
289 make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
291 # Convert Pharos-compatible POD Descriptor File (PDF) to reclass model input
# Relative paths resolve against DEPLOY_DIR (pushd earlier); the pharos tree
# is presumably brought in by the patches/submodule step above — confirm.
292 PHAROS_GEN_CONFIG_SCRIPT="./pharos/config/utils/generate_config.py"
293 PHAROS_INSTALLER_ADAPTER="./pharos/config/installers/fuel/pod_config.yml.j2"
294 BASE_CONFIG_PDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}.yaml"
295 BASE_CONFIG_IDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/idf-${TARGET_POD}.yaml"
296 LOCAL_PDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_PDF}")"
297 LOCAL_IDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_IDF}")"
298 LOCAL_PDF_RECLASS="${STORAGE_DIR}/pod_config.yml"
# A missing PDF is fatal for baremetal but only a warning for virtual deploys.
299 if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
300 if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
301 notify "[ERROR] Could not retrieve PDF (Pod Descriptor File)!\n" 1>&2
304 notify "[WARN] Could not retrieve PDF (Pod Descriptor File)!\n" 3
# A missing IDF is only a warning.
306 elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then
307 notify "[WARN] POD has no IDF (Installer Descriptor File)!\n" 3
# Render the installer adapter template from the PDF into reclass input.
308 elif ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
309 -j "${PHAROS_INSTALLER_ADAPTER}" > "${LOCAL_PDF_RECLASS}"; then
310 notify "[ERROR] Could not convert PDF to reclass model input!\n" 1>&2
314 # Check scenario file existence
315 SCENARIO_DIR="../config/scenario"
# Unknown scenarios fall back to the simplest one instead of aborting outright.
316 if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then
317 notify "[WARN] ${DEPLOY_SCENARIO}.yaml not found!\n" 3
318 notify "[WARN] Setting simplest scenario (os-nosdn-nofeature-noha)\n" 3
319 DEPLOY_SCENARIO='os-nosdn-nofeature-noha'
320 if [ ! -f "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml" ]; then
321 notify "[ERROR] Scenario definition file is missing!\n" 1>&2
326 # Check defaults file existence
# Arch-specific defaults file, keyed on $(uname -i) — e.g. defaults-x86_64.yaml.
327 if [ ! -f "${SCENARIO_DIR}/defaults-$(uname -i).yaml" ]; then
328 notify "[ERROR] Scenario defaults file is missing!\n" 1>&2
332 # Get required infra deployment data
# parse_yaml (from the sourced globals.sh, presumably) flattens YAML into
# shell assignments which are eval'd here: later code reads e.g.
# cluster_domain, virtual_nodes, cluster_states and parameters__* vars.
335 eval "$(parse_yaml "${SCENARIO_DIR}/defaults-$(uname -i).yaml")"
336 eval "$(parse_yaml "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml")"
337 eval "$(parse_yaml "${LOCAL_PDF_RECLASS}")"
# Re-apply the CI_DEBUG tracing choice after the evals.
338 [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
340 export CLUSTER_DOMAIN=${cluster_domain}
342 # Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
343 for node in "${virtual_nodes[@]}"; do
# Per-node overrides looked up via indirect expansion (${!var}); fall back
# to the scenario-wide virtual_default_ram / virtual_default_vcpus values.
344 virtual_custom_ram="virtual_${node}_ram"
345 virtual_custom_vcpus="virtual_${node}_vcpus"
346 virtual_nodes_data+="${node},"
347 virtual_nodes_data+="${!virtual_custom_ram:-$virtual_default_ram},"
348 virtual_nodes_data+="${!virtual_custom_vcpus:-$virtual_default_vcpus}|"
# Strip the trailing '|' separator left over from the loop above.
350 virtual_nodes_data=${virtual_nodes_data%|}
352 # Expand reclass and virsh network templates
# NOTE(review): fragment — the loop body expanding each *.template into its
# target file (an eval'd here-doc, judging by the stray EOF below) is mostly
# outside this chunk.
353 for tp in "${RECLASS_CLUSTER_DIR}/all-mcp-ocata-common/opnfv/"*.template \
357 EOF" 2> /dev/null > "${tp%.template}"
360 # Convert Pharos-compatible PDF to reclass network definitions
361 if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
# Render every Jinja2 template in the cluster model using the PDF as input;
# each <name>.j2 is written out as <name>.
362 find "${RECLASS_CLUSTER_DIR}/${CLUSTER_DOMAIN%.local}" \
363 "${RECLASS_CLUSTER_DIR}/${DEPLOY_TYPE}-mcp-ocata-common" \
364 -name '*.j2' | while read -r tp
366 if ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
367 -j "${tp}" > "${tp%.j2}"; then
368 notify "[ERROR] Could not convert PDF to reclass network defs!\n"
374 # Map PDF networks 'admin', 'mgmt', 'private' and 'public' to bridge names
375 BR_NAMES=('admin' 'mgmt' 'private' 'public')
# NOTE(review): fragment — the 'BR_NETS=(' opener for the array literal below
# is not visible in this chunk.
377 "${parameters__param_opnfv_maas_pxe_address}" \
378 "${parameters__param_opnfv_infra_config_address}" \
379 "${parameters__param_opnfv_openstack_compute_node01_tenant_address}" \
380 "${parameters__param_opnfv_openstack_compute_node01_external_address}" \
# For each network: prefer an explicitly configured jump-host bridge from the
# PDF; otherwise try to auto-detect a host bridge whose address shares the
# network prefix, via 'ip addr'. A real bridge must expose /sys/.../bridge.
382 for ((i = 0; i < ${#BR_NETS[@]}; i++)); do
383 br_jump=$(eval echo "\$parameters__param_opnfv_jump_bridge_${BR_NAMES[i]}")
384 if [ -n "${br_jump}" ] && [ "${br_jump}" != 'None' ] && \
385 [ -d "/sys/class/net/${br_jump}/bridge" ]; then
386 notify "[OK] Bridge found for '${BR_NAMES[i]}': ${br_jump}\n" 2
387 OPNFV_BRIDGES[${i}]="${br_jump}"
388 elif [ -n "${BR_NETS[i]}" ]; then
# ${BR_NETS[i]%.*}. drops the last address octet, so the awk pattern matches
# any interface line carrying an address in that subnet; $NF is the ifname.
389 bridge=$(ip addr | awk "/${BR_NETS[i]%.*}./ {print \$NF; exit}")
390 if [ -n "${bridge}" ] && [ -d "/sys/class/net/${bridge}/bridge" ]; then
391 notify "[OK] Bridge found for net ${BR_NETS[i]%.*}.0: ${bridge}\n" 2
392 OPNFV_BRIDGES[${i}]="${bridge}"
396 notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}\n" 2
# Main deployment dispatch: dry-run < reuse-infra < full infra bring-up.
399 if [ ${DRY_RUN} -eq 1 ]; then
400 notify "[NOTE] Dry run, skipping all deployment tasks\n" 2 1>&2
402 elif [ ${USE_EXISTING_INFRA} -gt 0 ]; then
403 notify "[NOTE] Use existing infra\n" 2 1>&2
# Full infra bring-up. These helpers are not defined in this chunk —
# presumably provided by the sourced globals.sh; confirm.
407 prepare_vms "${base_image}" "${STORAGE_DIR}" "${virtual_nodes[@]}"
408 create_networks "${OPNFV_BRIDGES[@]}"
409 create_vms "${STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}"
410 update_mcpcontrol_network
411 start_vms "${virtual_nodes[@]}"
# Provision the Salt master unless -f was given twice or more.
414 if [ ${USE_EXISTING_INFRA} -lt 2 ]; then
415 wait_for 5 "./salt.sh ${LOCAL_PDF_RECLASS}"
418 # Openstack cluster setup
420 if [ ${INFRA_CREATION_ONLY} -eq 1 ] || [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then
421 notify "[NOTE] Skip openstack cluster setup\n" 2
# Apply each scenario-defined state on the Salt master over ssh, with retry
# (wait_for); SSH_OPTS/SSH_SALT presumably come from globals.sh — confirm.
423 for state in "${cluster_states[@]}"; do
424 notify "[STATE] Applying state: ${state}\n" 2
425 # shellcheck disable=SC2086,2029
426 wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} \
427 sudo /root/fuel/mcp/config/states/${state}"
# Collect deployment logs into the archive path requested via -L.
431 ./log.sh "${DEPLOY_LOG}"
437 ##############################################################################