X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=mcp%2Fscripts%2Flib.sh;h=5f1275dc5fd7474a326922cd1d96eac74d08576f;hb=f870328e38c59375b2b19b1667a4baba0ce31bdb;hp=20d466faadc179d46385450308106d0e2842d170;hpb=6771512ac2a9402e61976140d1f0cef6d8944cf9;p=fuel.git

diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
index 20d466faa..5f1275dc5 100644
--- a/mcp/scripts/lib.sh
+++ b/mcp/scripts/lib.sh
@@ -1,7 +1,7 @@
 #!/bin/bash -e
-# shellcheck disable=SC2155,SC1001,SC2015
+# shellcheck disable=SC2155,SC1001,SC2015,SC2128
 ##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
@@ -38,10 +38,11 @@ function get_base_image {
 function __kernel_modules {
   # Load mandatory kernel modules: loop, nbd
   local image_dir=$1
-  sudo modprobe loop
+  test -e /dev/loop-control || sudo modprobe loop
   if sudo modprobe nbd max_part=8 || sudo modprobe -f nbd max_part=8; then
     return 0
   fi
+  if [ -e /dev/nbd0 ]; then return 0; fi  # nbd might be inbuilt
   # CentOS (or RHEL family in general) do not provide 'nbd' out of the box
   echo "[WARN] 'nbd' kernel module cannot be loaded!"
   if [ ! -e /etc/redhat-release ]; then
@@ -120,7 +121,7 @@ function mount_image {
       break
     fi
   done
-  OPNFV_LOOP_DEV=$(losetup -f)
+  OPNFV_LOOP_DEV=$(sudo losetup -f)
   OPNFV_MAP_DEV=/dev/mapper/$(basename "${OPNFV_NBD_DEV}")p1
   export OPNFV_MNT_DIR OPNFV_LOOP_DEV
   [ -n "${OPNFV_NBD_DEV}" ] && [ -n "${OPNFV_LOOP_DEV}" ] || exit 1
@@ -213,7 +214,7 @@ function cleanup_mounts {
     fi
   fi
   if [ -n "${OPNFV_LOOP_DEV}" ] && \
-    losetup "${OPNFV_LOOP_DEV}" 1>&2 > /dev/null; then
+    sudo losetup "${OPNFV_LOOP_DEV}" 1>&2 > /dev/null; then
     sudo losetup -d "${OPNFV_LOOP_DEV}"
   fi
   if [ -n "${OPNFV_NBD_DEV}" ]; then
@@ -225,6 +226,7 @@
 function cleanup_uefi {
   # Clean up Ubuntu boot entry if cfg01, kvm nodes online from previous deploy
   local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
+  ping -c 1 -w 1 "${SALT_MASTER}" || return 0
   [ ! "$(hostname)" = 'cfg01' ] || cmd_str='eval'
"$(hostname)" = 'cfg01' ] || cmd_str='eval' ${cmd_str} "sudo salt -C 'kvm* or cmp*' cmd.run \ \"which efibootmgr > /dev/null 2>&1 && \ @@ -241,7 +243,8 @@ function cleanup_vms { for node in $(virsh list --name --all | grep -P '\w{3}\d{2}'); do virsh domblklist "${node}" | awk '/^.da/ {print $2}' | \ xargs --no-run-if-empty -I{} sudo rm -f {} - virsh undefine "${node}" --remove-all-storage --nvram + virsh undefine "${node}" --remove-all-storage --nvram || \ + virsh undefine "${node}" --remove-all-storage done } @@ -293,6 +296,10 @@ function prepare_vms { -u "${user_data}" -h "${node}" "${image_dir}/mcp_${node}.iso" cp "${image_dir}/${image}" "${image_dir}/mcp_${node}.qcow2" qemu-img resize "${image_dir}/mcp_${node}.qcow2" 100G + # Prepare dedicated drive for cinder on cmp nodes + if [[ "${node}" =~ ^(cmp) ]]; then + qemu-img create "${image_dir}/mcp_${node}_storage.qcow2" 100G + fi done # VCP VMs base image specific changes @@ -313,7 +320,24 @@ function prepare_vms { fi } +function jumpserver_pkg_install { + local req_type=$1 + if [ -n "$(command -v apt-get)" ]; then + pkg_type='deb'; pkg_cmd='sudo apt-get install -y' + else + pkg_type='rpm'; pkg_cmd='sudo yum install -y --skip-broken' + fi + eval "$(parse_yaml "./requirements_${pkg_type}.yaml")" + for section in 'common' "$(uname -i)"; do + section_var="${req_type}_${section}[*]" + pkg_list+=" ${!section_var}" + done + # shellcheck disable=SC2086 + ${pkg_cmd} ${pkg_list} +} + function jumpserver_check_requirements { + # shellcheck disable=SC2178 local vnodes=$1; shift local br=("$@") local err_br_not_found='Linux bridge not found!' @@ -384,6 +408,7 @@ function create_vms { # create vms with specified options for serialized_vnode_data in "${vnodes[@]}"; do + if [ -z "${serialized_vnode_data}" ]; then continue; fi IFS=',' read -r -a vnode_data <<< "${serialized_vnode_data}" # prepare network args @@ -399,11 +424,18 @@ function create_vms { net_args="${net_args} --network bridge=${net},model=virtio" done + # dedicated storage drive for cinder on cmp nodes + virt_extra_storage= + if [[ "${vnode_data[0]}" =~ ^(cmp) ]]; then + virt_extra_storage="--disk path=${image_dir}/mcp_${vnode_data[0]}_storage.qcow2,format=qcow2,bus=virtio,cache=none,io=native" + fi + # shellcheck disable=SC2086 virt-install --name "${vnode_data[0]}" \ --ram "${vnode_data[1]}" --vcpus "${vnode_data[2]}" \ --cpu host-passthrough --accelerate ${net_args} \ --disk path="${image_dir}/mcp_${vnode_data[0]}.qcow2",format=qcow2,bus=virtio,cache=none,io=native \ + ${virt_extra_storage} \ --os-type linux --os-variant none \ --boot hd --vnc --console pty --autostart --noreboot \ --disk path="${image_dir}/mcp_${vnode_data[0]}.iso",device=cdrom \ @@ -422,6 +454,23 @@ function update_mcpcontrol_network { "" --live --config } +function reset_vms { + local vnodes=("$@") + local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}" + + # reset non-infrastructure vms, wait for them to come back online + for node in "${vnodes[@]}"; do + if [[ ! "${node}" =~ (cfg01|mas01) ]]; then + virsh reset "${node}" + fi + done + for node in "${vnodes[@]}"; do + if [[ ! 
"${node}" =~ (cfg01|mas01) ]]; then + wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all" + fi + done +} + function start_vms { local vnodes=("$@") @@ -498,6 +547,13 @@ function wait_for { ) } +function do_udev_cfg { + local _conf='/etc/udev/rules.d/99-opnfv-fuel-vnet-mtu.rules' + # http://linuxaleph.blogspot.com/2013/01/how-to-network-jumbo-frames-to-kvm-guest.html + echo 'SUBSYSTEM=="net", ACTION=="add", KERNEL=="vnet*", RUN+="/sbin/ip link set mtu 9000 dev '"'"%k"'"'"' |& sudo tee "${_conf}" + sudo udevadm control --reload || true +} + function do_sysctl_cfg { local _conf='/etc/sysctl.d/99-opnfv-fuel-bridge.conf' # https://wiki.libvirt.org/page/Net.bridge.bridge-nf-call_and_sysctl.conf @@ -517,53 +573,13 @@ function get_nova_compute_pillar_data { fi } -function do_templates() { - local git_repo_root=$1; shift - local image_dir=$1; shift - local target_lab=$1; shift - local target_pod=$1; shift - local lab_config_uri=$1; shift - local scenario_dir=${1:-} - - RECLASS_CLUSTER_DIR=$(cd "${git_repo_root}/mcp/reclass/classes/cluster"; pwd) - PHAROS_GEN_CFG="./pharos/config/utils/generate_config.py" - PHAROS_INSTALLER_ADAPTER="./pharos/config/installers/fuel/pod_config.yml.j2" - PHAROS_INSTALLER_NETMAP="$(dirname "${PHAROS_INSTALLER_ADAPTER}")/net_map.j2" - BASE_CONFIG_PDF="${lab_config_uri}/labs/${target_lab}/${target_pod}.yaml" - BASE_CONFIG_IDF="${lab_config_uri}/labs/${target_lab}/idf-${target_pod}.yaml" - LOCAL_PDF="${image_dir}/$(basename "${BASE_CONFIG_PDF}")" - LOCAL_IDF="${image_dir}/$(basename "${BASE_CONFIG_IDF}")" - - # Two-stage expansion, first stage handles pod_config and scenarios only - if [ -n "${scenario_dir}" ]; then - # Make sample PDF/IDF available via default lab-config (pharos submodule) - ln -sf "$(readlink -f "../config/labs/local")" "./pharos/labs/" - - # Expand scenario file and main reclass input (pod_config.yaml) based on PDF - if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then - notify_e "[ERROR] Could not retrieve PDF (Pod Descriptor File)!" - elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then - notify_e "[ERROR] Could not retrieve IDF (Installer Descriptor File)!" - elif ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" \ - -j "${PHAROS_INSTALLER_ADAPTER}" > "${image_dir}/pod_config.yml"; then - notify_e "[ERROR] Could not convert PDF+IDF to reclass model input!" - fi - template_dirs="${scenario_dir}" - template_err_str='Could not convert j2 scenario definitions!' - else - # Expand reclass and virsh network templates based on PDF + IDF - printenv | \ - awk '/^(SALT|MCP|MAAS|CLUSTER).*=/ { gsub(/=/,": "); print }' >> "${LOCAL_PDF}" - template_dirs="${RECLASS_CLUSTER_DIR} virsh_net ./*j2" - template_err_str='Could not convert PDF to network definitions!' +function docker_install { + # Mininum effort attempt at installing Docker if missing + if ! which docker; then + curl -fsSL https://get.docker.com -o get-docker.sh + sudo sh get-docker.sh + rm get-docker.sh + # On RHEL distros, the Docker service should be explicitly started + sudo systemctl start docker fi - # shellcheck disable=SC2086 - find ${template_dirs} -name '*.j2' | while read -r tp; do - # Jinja2 import does not allow '..' directory traversal - ln -sf "$(readlink -f "${PHAROS_INSTALLER_NETMAP}")" "$(dirname "${tp}")" - if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" -j "${tp}" > "${tp%.j2}"; then - notify_e "[ERROR] ${template_err_str}" - fi - rm -f "$(dirname "${tp}")/$(basename "${PHAROS_INSTALLER_NETMAP}")" - done }