X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=ci%2Fdeploy.sh;h=41ac60237d9add9313862f6792f8c2020e378104;hb=dae5c2c5e677e4af8e56370c10316219a539321e;hp=a5fec4d449f845c43b1c31e407e2807423e3415f;hpb=a21457cc96a640700736189612261e9bcae3d825;p=fuel.git

diff --git a/ci/deploy.sh b/ci/deploy.sh
index a5fec4d44..41ac60237 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -38,7 +38,7 @@ $(notify "USAGE:" 2)
 $(basename "$0") -l lab-name -p pod-name -s deploy-scenario \\
   [-b Lab Config Base URI] \\
   [-S storage-dir] [-L /path/to/log/file.tar.gz] \\
-  [-f[f]] [-F] [-e | -E[E]] [-d] [-D] [-N]
+  [-f] [-F] [-e | -E[E]] [-d] [-D] [-N] [-m]
 
 $(notify "OPTIONS:" 2)
   -b  Base-uri for the stack-configuration structure
@@ -46,15 +46,16 @@ $(notify "OPTIONS:" 2)
   -D  Debug logging
   -e  Do not launch environment deployment
   -E  Remove existing VCP VMs (use twice to redeploy baremetal nodes)
-  -f  Deploy on existing Salt master (use twice to also skip config sync)
-  -F  Do only create a Salt master
+  -f  Deploy on existing Salt master (use twice or more to skip states)
+  -F  Same as -e, do not launch environment deployment (legacy option)
   -h  Print this message and exit
   -l  Lab-name
   -p  Pod-name
   -P  Skip installation of package dependencies
   -s  Deploy-scenario short-name
-  -S  Storage dir for VM images
+  -S  Storage dir for VM images and other deploy artifacts
   -L  Deployment log path and file name
+  -m  Use single socket CPU compute nodes (only affects virtual computes)
   -N  Experimental: Do not virtualize control plane (novcp)
 
 $(notify_i "Description:" 2)
@@ -84,13 +85,20 @@ $(notify_i "Input parameters to the build script are:" 2)
     Only applicable for baremetal deploys.
 -f  Deploy on existing Salt master. It will skip infrastructure VM creation,
     but it will still sync reclass configuration from current repo to Salt
-    Master node. If specified twice (e.g. -f -f), config sync will also be
-    skipped.
--F  Do only create a Salt master
+    Master node.
+    Each additional use skips one more state file. For example, -fff would
+    skip the first 3 state files (e.g. virtual_init, maas, baremetal_init).
+-F  Same as -e, do not launch environment deployment (legacy option)
 -h  Print this message and exit
 -L  Deployment log path and name, eg. -L /home/jenkins/job.log.tar.gz
 -l  Lab name as defined in the configuration directory, e.g. lf
+    For the sample configuration in <./mcp/config>, lab name is 'local'.
 -p  POD name as defined in the configuration directory, e.g. pod2
+    For the sample configuration in <./mcp/config>, POD name is 'virtual1'
+    for virtual deployments or 'pod1' for baremetal (based on lf-pod2).
+-m  Use single socket compute nodes. Instead of using default NUMA-enabled
+    topology for virtual compute nodes created via libvirt, configure a
+    single guest CPU socket.
 -N  Experimental: Instead of virtualizing the control plane (VCP), deploy
     control plane directly on baremetal nodes
 -P  Skip installing dependency distro packages on current host
@@ -99,7 +107,9 @@ $(notify_i "Input parameters to the build script are:" 2)
     Note that without the required packages, deploy will fail.
 -s  Deployment-scenario, this points to a short deployment scenario name,
     which has to be defined in config directory (e.g. os-odl-nofeature-ha).
--S  Storage dir for VM images, default is mcp/deploy/images
+-S  Storage dir for VM images, default is /var/lib/opnfv/tmpdir
+    It is recommended to store the deploy artifacts on a fast disk, outside of
+    the current git repository (so clean operations won't erase it).
 
$(notify_i "[NOTE] sudo & virsh priviledges are needed for this script to run" 3) @@ -108,7 +118,8 @@ Example: $(notify_i "sudo $(basename "$0") \\ -b file:///home/jenkins/securedlab \\ -l lf -p pod2 \\ - -s os-odl-nofeature-ha" 2) + -s os-odl-nofeature-ha \\ + -S /home/jenkins/tmpdir" 2) EOF } @@ -120,20 +131,21 @@ EOF # BEGIN of variables to customize # CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x -REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..") -DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd) -STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd) +MCP_REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..") +DEPLOY_DIR=$(cd "${MCP_REPO_ROOT_PATH}/mcp/scripts"; pwd) +MCP_STORAGE_DIR='/var/lib/opnfv/tmpdir' URI_REGEXP='(file|https?|ftp)://.*' -BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/scripts/pharos" +BASE_CONFIG_URI="file://${MCP_REPO_ROOT_PATH}/mcp/scripts/pharos" # Customize deploy workflow DRY_RUN=${DRY_RUN:-0} USE_EXISTING_PKGS=${USE_EXISTING_PKGS:-0} USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0} -INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0} NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0} ERASE_ENV=${ERASE_ENV:-0} MCP_VCP=${MCP_VCP:-1} +MCP_DOCKER_TAG=${MCP_DOCKER_TAG:-latest} +MCP_CMP_SS=${MCP_CMP_SS:-0} source "${DEPLOY_DIR}/globals.sh" source "${DEPLOY_DIR}/lib.sh" @@ -167,10 +179,7 @@ do f) ((USE_EXISTING_INFRA+=1)) ;; - F) - INFRA_CREATION_ONLY=1 - ;; - e) + F|e) NO_DEPLOY_ENVIRONMENT=1 ;; E) @@ -182,16 +191,14 @@ do L) DEPLOY_LOG="${OPTARG}" ;; + m) + MCP_CMP_SS=1 + ;; N) MCP_VCP=0 ;; p) TARGET_POD=${OPTARG} - if [[ "${TARGET_POD}" =~ virtual ]]; then - # All vPODs will use 'local-virtual1' PDF/IDF for now - TARGET_LAB='local' - TARGET_POD='virtual1' - fi ;; P) USE_EXISTING_PKGS=1 @@ -201,7 +208,7 @@ do ;; S) if [[ ${OPTARG} ]]; then - STORAGE_DIR="${OPTARG}" + MCP_STORAGE_DIR="${OPTARG}" fi ;; h) @@ -238,11 +245,15 @@ pushd "${DEPLOY_DIR}" > /dev/null # scenario, etc. # Install required packages on jump server +sudo mkdir -p "${MCP_STORAGE_DIR}" +sudo chown "${USER}:${USER}" "${MCP_STORAGE_DIR}" if [ ${USE_EXISTING_PKGS} -eq 1 ]; then notify "[NOTE] Skipping distro pkg installation" 2 else notify "[NOTE] Installing required distro pkgs" 2 - jumpserver_pkg_install + jumpserver_pkg_install 'deploy' + docker_install "${MCP_STORAGE_DIR}" + virtinst_install "${MCP_STORAGE_DIR}" fi if ! 
virsh list >/dev/null 2>&1; then @@ -253,7 +264,7 @@ fi ./sysinfo_print.sh # Clone git submodules and apply our patches -make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import +make -C "${MCP_REPO_ROOT_PATH}/mcp/patches" deepclean patches-import # Check scenario file existence SCENARIO_DIR="$(readlink -f "../config/scenario")" @@ -267,13 +278,14 @@ generate_ssh_key export MAAS_SSH_KEY="$(cat "$(basename "${SSH_KEY}").pub")" # Expand jinja2 templates based on PDF data and env vars -export MCP_VCP MCP_JUMP_ARCH=$(uname -i) -do_templates_scenario "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \ - "${BASE_CONFIG_URI}" "${SCENARIO_DIR}" -do_templates_cluster "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \ - "${REPO_ROOT_PATH}" \ - "${SCENARIO_DIR}/defaults.yaml" \ +export MCP_REPO_ROOT_PATH MCP_VCP MCP_STORAGE_DIR MCP_DOCKER_TAG MCP_CMP_SS \ + MCP_JUMP_ARCH=$(uname -i) MCP_DEPLOY_SCENARIO="${DEPLOY_SCENARIO}" +do_templates_scenario "${MCP_STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \ + "${BASE_CONFIG_URI}" "${SCENARIO_DIR}" \ "${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml" +do_templates_cluster "${MCP_STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \ + "${MCP_REPO_ROOT_PATH}" \ + "${SCENARIO_DIR}/defaults.yaml" # Determine additional data (e.g. jump bridge names) based on XDF source "${DEPLOY_DIR}/xdf_data.sh" @@ -287,33 +299,43 @@ if [ ${DRY_RUN} -eq 1 ]; then notify "[NOTE] Dry run, skipping all deployment tasks" 2 exit 0 elif [ ${USE_EXISTING_INFRA} -gt 0 ]; then - notify "[NOTE] Use existing infra" 2 - check_connection + notify "[NOTE] Use existing infra: skip first ${USE_EXISTING_INFRA} states" 2 + notify "[STATE] Skipping: ${cluster_states[*]::${USE_EXISTING_INFRA}}" 2 else - prepare_vms "${base_image}" "${STORAGE_DIR}" "${virtual_repos_pkgs}" \ + prepare_vms "${base_image}" "${MCP_STORAGE_DIR}" "${virtual_repos_pkgs}" \ "${virtual_nodes[@]}" create_networks "${OPNFV_BRIDGES[@]}" do_sysctl_cfg - create_vms "${STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}" + do_udev_cfg + create_vms "${MCP_STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}" update_mcpcontrol_network start_vms "${virtual_nodes[@]}" - check_connection -fi -if [ ${USE_EXISTING_INFRA} -lt 2 ]; then - wait_for 5 "./salt.sh ${STORAGE_DIR}/pod_config.yml ${virtual_nodes[*]}" + + # https://github.com/docker/libnetwork/issues/1743 + # rm -f /var/lib/docker/network/files/local-kv.db + sudo systemctl restart docker + prepare_containers "${MCP_STORAGE_DIR}" fi +start_containers "${MCP_STORAGE_DIR}" +check_connection + # Openstack cluster setup set +x -if [ ${INFRA_CREATION_ONLY} -eq 1 ] || [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then +if [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then notify "[NOTE] Skip openstack cluster setup" 2 else - for state in "${cluster_states[@]}"; do + for state in "${cluster_states[@]:${USE_EXISTING_INFRA}}"; do notify "[STATE] Applying state: ${state}" 2 # shellcheck disable=SC2086,2029 wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \ CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \ /root/fuel/mcp/config/states/${state}" + if [ "${state}" = 'maas' ]; then + # For hybrid PODs (virtual + baremetal nodes), the virtual nodes + # should be reset to force a DHCP request from MaaS DHCP + reset_vms "${virtual_nodes[@]}" + fi done fi
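
Illustration only, not part of the patch: the repeated '-f' option introduced above drives which state files are skipped through plain bash array slicing on cluster_states. The minimal standalone sketch below mimics that mechanism under the assumption of a hard-coded example state list (names borrowed from the updated help text); in ci/deploy.sh the real cluster_states list is populated outside this file (the script sources mcp/scripts/xdf_data.sh before using it).

#!/bin/bash
# Standalone sketch of the '-f' state-skipping mechanism (illustration only).
# The state names are examples taken from the help text above, not the list
# actually used by ci/deploy.sh.
USE_EXISTING_INFRA=0
while getopts "f" OPTION; do
  case ${OPTION} in
    f) ((USE_EXISTING_INFRA+=1)) ;;   # each '-f' skips one more state file
    *) exit 1 ;;
  esac
done

cluster_states=('virtual_init' 'maas' 'baremetal_init')

# e.g. running this sketch with '-ff' sets USE_EXISTING_INFRA=2, so
# 'virtual_init' and 'maas' are skipped and only 'baremetal_init' is applied
echo "Skipping: ${cluster_states[*]::${USE_EXISTING_INFRA}}"
for state in "${cluster_states[@]:${USE_EXISTING_INFRA}}"; do
  echo "Applying state: ${state}"
done

By comparison, after this change a deploy that reuses already prepared infrastructure and skips only the first state file could presumably be started with something like 'sudo ci/deploy.sh -l lf -p pod2 -s os-odl-nofeature-ha -S /home/jenkins/tmpdir -f' (lab, POD, scenario and storage values taken from the script's own usage example).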