X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=mcp%2Fscripts%2Flib.sh;h=34229df0826012e9d526e7622bc29ad7eb65d371;hb=b666bc50b2b8b1bb8cde5cdab280f0409bde4958;hp=8e9ba97a9f491a905950e612a84b684aadaf4723;hpb=20dac8e3fd7e3a4161b0ee0ea4f5eb4833a99dda;p=fuel.git

diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
index 8e9ba97a9..34229df08 100644
--- a/mcp/scripts/lib.sh
+++ b/mcp/scripts/lib.sh
@@ -1,7 +1,7 @@
 #!/bin/bash -e
-# shellcheck disable=SC2155,SC1001,SC2015
+# shellcheck disable=SC2155,SC1001,SC2015,SC2128
 ##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
@@ -38,10 +38,11 @@ function get_base_image {
 function __kernel_modules {
   # Load mandatory kernel modules: loop, nbd
   local image_dir=$1
-  sudo modprobe loop
+  test -e /dev/loop-control || sudo modprobe loop
   if sudo modprobe nbd max_part=8 || sudo modprobe -f nbd max_part=8; then
     return 0
   fi
+  if [ -e /dev/nbd0 ]; then return 0; fi  # nbd might be inbuilt
   # CentOS (or RHEL family in general) do not provide 'nbd' out of the box
   echo "[WARN] 'nbd' kernel module cannot be loaded!"
   if [ ! -e /etc/redhat-release ]; then
@@ -120,7 +121,7 @@ function mount_image {
       break
     fi
   done
-  OPNFV_LOOP_DEV=$(losetup -f)
+  OPNFV_LOOP_DEV=$(sudo losetup -f)
   OPNFV_MAP_DEV=/dev/mapper/$(basename "${OPNFV_NBD_DEV}")p1
   export OPNFV_MNT_DIR OPNFV_LOOP_DEV
   [ -n "${OPNFV_NBD_DEV}" ] && [ -n "${OPNFV_LOOP_DEV}" ] || exit 1
@@ -213,7 +214,7 @@ function cleanup_mounts {
     fi
   fi
   if [ -n "${OPNFV_LOOP_DEV}" ] && \
-    losetup "${OPNFV_LOOP_DEV}" 1>&2 > /dev/null; then
+    sudo losetup "${OPNFV_LOOP_DEV}" 1>&2 > /dev/null; then
     sudo losetup -d "${OPNFV_LOOP_DEV}"
   fi
   if [ -n "${OPNFV_NBD_DEV}" ]; then
@@ -225,6 +226,7 @@ function cleanup_mounts {
 function cleanup_uefi {
   # Clean up Ubuntu boot entry if cfg01, kvm nodes online from previous deploy
   local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
+  ping -c 1 -w 1 "${SALT_MASTER}" || return 0
   [ ! "$(hostname)" = 'cfg01' ] || cmd_str='eval'
   ${cmd_str} "sudo salt -C 'kvm* or cmp*' cmd.run \
     \"which efibootmgr > /dev/null 2>&1 && \
@@ -241,7 +243,8 @@ function cleanup_vms {
   for node in $(virsh list --name --all | grep -P '\w{3}\d{2}'); do
     virsh domblklist "${node}" | awk '/^.da/ {print $2}' | \
       xargs --no-run-if-empty -I{} sudo rm -f {}
-    virsh undefine "${node}" --remove-all-storage --nvram
+    virsh undefine "${node}" --remove-all-storage --nvram || \
+      virsh undefine "${node}" --remove-all-storage
   done
 }
 
@@ -284,15 +287,14 @@ function prepare_vms {
 
   # Create config ISO and resize OS disk image for each foundation node VM
   for node in "${vnodes[@]}"; do
-    if [[ "${node}" =~ ^(cfg01|mas01) ]]; then
-      user_data='user-data.mcp.sh'
-    else
-      user_data='user-data.admin.sh'
-    fi
     ./create-config-drive.sh -k "$(basename "${SSH_KEY}").pub" \
-      -u "${user_data}" -h "${node}" "${image_dir}/mcp_${node}.iso"
+      -u 'user-data.sh' -h "${node}" "${image_dir}/mcp_${node}.iso"
     cp "${image_dir}/${image}" "${image_dir}/mcp_${node}.qcow2"
     qemu-img resize "${image_dir}/mcp_${node}.qcow2" 100G
+    # Prepare dedicated drive for cinder on cmp nodes
+    if [[ "${node}" =~ ^(cmp) ]]; then
+      qemu-img create "${image_dir}/mcp_${node}_storage.qcow2" 100G
+    fi
   done
 
   # VCP VMs base image specific changes
@@ -313,12 +315,65 @@ function prepare_vms {
   fi
 }
 
+function jumpserver_pkg_install {
+  local req_type=$1
+  if [ -n "$(command -v apt-get)" ]; then
+    pkg_type='deb'; pkg_cmd='sudo apt-get install -y'
+  else
+    pkg_type='rpm'; pkg_cmd='sudo yum install -y --skip-broken'
+  fi
+  eval "$(parse_yaml "./requirements_${pkg_type}.yaml")"
+  for section in 'common' "$(uname -i)"; do
+    section_var="${req_type}_${section}[*]"
+    pkg_list+=" ${!section_var}"
+  done
+  # shellcheck disable=SC2086
+  ${pkg_cmd} ${pkg_list}
+}
+
+function jumpserver_check_requirements {
+  # shellcheck disable=SC2178
+  local vnodes=$1; shift
+  local br=("$@")
+  local err_br_not_found='Linux bridge not found!'
+  local err_br_virsh_net='is a virtual network, Linux bridge expected!'
+  local warn_br_endpoint="Endpoints might be inaccessible from external hosts!"
+  # MaaS requires a Linux bridge for PXE/admin
+  if [[ "${vnodes}" =~ mas01 ]]; then
+    if ! brctl showmacs "${br[0]}" >/dev/null 2>&1; then
+      notify_e "[ERROR] PXE/admin (${br[0]}) ${err_br_not_found}"
+    fi
+    # Assume virsh network name matches bridge name (true if created by us)
+    if virsh net-info "${br[0]}" >/dev/null 2>&1; then
+      notify_e "[ERROR] ${br[0]} ${err_br_virsh_net}"
+    fi
+  fi
+  # If virtual nodes are present, public should be a Linux bridge
+  if [ "$(echo "${vnodes}" | wc -w)" -gt 2 ]; then
+    if ! brctl showmacs "${br[3]}" >/dev/null 2>&1; then
+      if [[ "${vnodes}" =~ mas01 ]]; then
+        # Baremetal nodes *require* a proper public network
+        notify_e "[ERROR] Public (${br[3]}) ${err_br_not_found}"
+      else
+        notify_n "[WARN] Public (${br[3]}) ${err_br_not_found}" 3
+        notify_n "[WARN] ${warn_br_endpoint}" 3
+      fi
+    fi
+    if virsh net-info "${br[3]}" >/dev/null 2>&1; then
+      if [[ "${vnodes}" =~ mas01 ]]; then
+        notify_e "[ERROR] ${br[3]} ${err_br_virsh_net}"
+      else
+        notify_n "[WARN] ${br[3]} ${err_br_virsh_net}" 3
+        notify_n "[WARN] ${warn_br_endpoint}" 3
+      fi
+    fi
+  fi
+}
+
 function create_networks {
-  local vnode_networks=("$@")
+  local all_vnode_networks=("mcpcontrol" "$@")
   # create required networks, including constant "mcpcontrol"
-  # FIXME(alav): since we renamed "pxe" to "mcpcontrol", we need to make sure
-  # we delete the old "pxe" virtual network, or it would cause IP conflicts.
-  for net in "pxe" "mcpcontrol" "${vnode_networks[@]}"; do
+  for net in "${all_vnode_networks[@]}"; do
     if virsh net-info "${net}" >/dev/null 2>&1; then
       virsh net-destroy "${net}" || true
      virsh net-undefine "${net}"
@@ -331,12 +386,21 @@ function create_networks {
       virsh net-start "${net}"
     fi
   done
+  # create veth pairs for relevant networks (mcpcontrol, pxebr, mgmt)
+  for i in $(seq 0 2 4); do
+    sudo ip link del "veth_mcp$i" || true
+    sudo ip link add "veth_mcp$i" type veth peer name "veth_mcp$((i+1))"
+    sudo ip link set "veth_mcp$i" up mtu 9000
+    sudo ip link set "veth_mcp$((i+1))" up mtu 9000
+    sudo brctl addif "${all_vnode_networks[$((i/2))]}" "veth_mcp$i"
+  done
 }
 
 function create_vms {
   local image_dir=$1; shift
   # vnode data should be serialized with the following format:
-  #   ',,|,,[...]'
+  #   ',,[,,,[,,,
+  #    ,,,]]|,...'
   IFS='|' read -r -a vnodes <<< "$1"; shift
 
   # AArch64: prepare arch specific arguments
@@ -348,8 +412,22 @@ function create_vms {
 
   # create vms with specified options
   for serialized_vnode_data in "${vnodes[@]}"; do
+    if [ -z "${serialized_vnode_data}" ]; then continue; fi
     IFS=',' read -r -a vnode_data <<< "${serialized_vnode_data}"
 
+    # prepare VM CPU model, count, topology (optional), NUMA cells (optional, requires topo)
+    local virt_cpu_args=' --cpu host-passthrough'
+    local idx=6  # cell0.name index in serialized data
+    while [ -n "${vnode_data[${idx}]}" ]; do
+      virt_cpu_args+=",${vnode_data[${idx}]}.memory=${vnode_data[$((idx + 1))]}"
+      virt_cpu_args+=",${vnode_data[${idx}]}.cpus=${vnode_data[$((idx + 2))]}"
+      idx=$((idx + 3))
+    done
+    virt_cpu_args+=" --vcpus vcpus=${vnode_data[2]}"
+    if [ -n "${vnode_data[5]}" ]; then
+      virt_cpu_args+=",sockets=${vnode_data[3]},cores=${vnode_data[4]},threads=${vnode_data[5]}"
+    fi
+
     # prepare network args
     local vnode_networks=("$@")
     if [[ "${vnode_data[0]}" =~ ^(cfg01|mas01) ]]; then
@@ -363,11 +441,20 @@ function create_vms {
       net_args="${net_args} --network bridge=${net},model=virtio"
     done
 
+    # dedicated storage drive for cinder on cmp nodes
+    virt_extra_storage=
+    if [[ "${vnode_data[0]}" =~ ^(cmp) ]]; then
+      virt_extra_storage="--disk path=${image_dir}/mcp_${vnode_data[0]}_storage.qcow2,format=qcow2,bus=virtio,cache=none,io=native"
+    fi
+
+    [ ! -e "${image_dir}/virt-manager" ] || VIRT_PREFIX="${image_dir}/virt-manager/"
     # shellcheck disable=SC2086
-    virt-install --name "${vnode_data[0]}" \
-      --ram "${vnode_data[1]}" --vcpus "${vnode_data[2]}" \
-      --cpu host-passthrough --accelerate ${net_args} \
+    ${VIRT_PREFIX}virt-install --name "${vnode_data[0]}" \
+      ${virt_cpu_args} --accelerate \
+      ${net_args} \
+      --ram "${vnode_data[1]}" \
       --disk path="${image_dir}/mcp_${vnode_data[0]}.qcow2",format=qcow2,bus=virtio,cache=none,io=native \
+      ${virt_extra_storage} \
       --os-type linux --os-variant none \
       --boot hd --vnc --console pty --autostart --noreboot \
       --disk path="${image_dir}/mcp_${vnode_data[0]}.iso",device=cdrom \
@@ -376,14 +463,21 @@ function create_vms {
   done
 }
 
-function update_mcpcontrol_network {
-  # set static ip address for salt master node, MaaS node
-  local cmac=$(virsh domiflist cfg01 2>&1| awk '/mcpcontrol/ {print $5; exit}')
-  local amac=$(virsh domiflist mas01 2>&1| awk '/mcpcontrol/ {print $5; exit}')
-  virsh net-update "mcpcontrol" add ip-dhcp-host \
-    "" --live --config
-  [ -z "${amac}" ] || virsh net-update "mcpcontrol" add ip-dhcp-host \
-    "" --live --config
+function reset_vms {
+  local vnodes=("$@")
+  local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
+
+  # reset non-infrastructure vms, wait for them to come back online
+  for node in "${vnodes[@]}"; do
+    if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
+      virsh reset "${node}"
+    fi
+  done
+  for node in "${vnodes[@]}"; do
+    if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
+      wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all"
+    fi
+  done
 }
 
 function start_vms {
@@ -396,6 +490,28 @@ function start_vms {
   done
 }
 
+function prepare_containers {
+  local image_dir=$1
+  [ -n "${image_dir}" ] || exit 1
+  [ -n "${MCP_REPO_ROOT_PATH}" ] || exit 1
+  docker-compose --version > /dev/null 2>&1 || COMPOSE_PREFIX="${image_dir}/"
+
+  "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml down
+  if [ ! "${MCP_DOCKER_TAG}" = 'verify' ]; then
+    "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml pull
+  fi
+  sudo rm -rf "${image_dir}/"{salt,hosts,pki} "${image_dir}/nodes/"*
+  mkdir -p "${image_dir}/salt/"{master.d,minion.d}
+  touch "${image_dir}/hosts"
+}
+
+function start_containers {
+  local image_dir=$1
+  [ -n "${image_dir}" ] || exit 1
+  docker-compose --version > /dev/null 2>&1 || COMPOSE_PREFIX="${image_dir}/"
+  "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml up -d
+}
+
 function check_connection {
   local total_attempts=60
   local sleep_time=5
@@ -462,6 +578,15 @@ function wait_for {
   )
 }
 
+function do_udev_cfg {
+  local _conf='/etc/udev/rules.d/99-opnfv-fuel-vnet-mtu.rules'
+  # http://linuxaleph.blogspot.com/2013/01/how-to-network-jumbo-frames-to-kvm-guest.html
+  echo 'SUBSYSTEM=="net", ACTION=="add|change", KERNEL=="vnet*", RUN+="/bin/sh -c '"'/bin/sleep 1; /sbin/ip link set %k mtu 9000'\"" |& sudo tee "${_conf}"
+  echo 'SUBSYSTEM=="net", ACTION=="add|change", KERNEL=="*-nic", RUN+="/bin/sh -c '"'/bin/sleep 1; /sbin/ip link set %k mtu 9000'\"" |& sudo tee -a "${_conf}"
+  sudo udevadm control --reload
+  sudo udevadm trigger
+}
+
 function do_sysctl_cfg {
   local _conf='/etc/sysctl.d/99-opnfv-fuel-bridge.conf'
   # https://wiki.libvirt.org/page/Net.bridge.bridge-nf-call_and_sysctl.conf
@@ -481,53 +606,46 @@ function get_nova_compute_pillar_data {
   fi
 }
 
-function do_templates() {
-  local git_repo_root=$1; shift
-  local image_dir=$1; shift
-  local target_lab=$1; shift
-  local target_pod=$1; shift
-  local lab_config_uri=$1; shift
-  local scenario_dir=${1:-}
-
-  RECLASS_CLUSTER_DIR=$(cd "${git_repo_root}/mcp/reclass/classes/cluster"; pwd)
-  PHAROS_GEN_CFG="./pharos/config/utils/generate_config.py"
-  PHAROS_INSTALLER_ADAPTER="./pharos/config/installers/fuel/pod_config.yml.j2"
-  PHAROS_INSTALLER_NETMAP="$(dirname "${PHAROS_INSTALLER_ADAPTER}")/net_map.j2"
-  BASE_CONFIG_PDF="${lab_config_uri}/labs/${target_lab}/${target_pod}.yaml"
-  BASE_CONFIG_IDF="${lab_config_uri}/labs/${target_lab}/idf-${target_pod}.yaml"
-  LOCAL_PDF="${image_dir}/$(basename "${BASE_CONFIG_PDF}")"
-  LOCAL_IDF="${image_dir}/$(basename "${BASE_CONFIG_IDF}")"
-
-  # Two-stage expansion, first stage handles pod_config and scenarios only
-  if [ -n "${scenario_dir}" ]; then
-    # Make sample PDF/IDF available via default lab-config (pharos submodule)
-    ln -sf "$(readlink -f "../config/labs/local")" "./pharos/labs/"
-
-    # Expand scenario file and main reclass input (pod_config.yaml) based on PDF
-    if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
-      notify_e "[ERROR] Could not retrieve PDF (Pod Descriptor File)!"
-    elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then
-      notify_e "[ERROR] Could not retrieve IDF (Installer Descriptor File)!"
-    elif ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" \
-      -j "${PHAROS_INSTALLER_ADAPTER}" > "${image_dir}/pod_config.yml"; then
-      notify_e "[ERROR] Could not convert PDF+IDF to reclass model input!"
-    fi
-    template_dirs="${scenario_dir}"
-    template_err_str='Could not convert j2 scenario definitions!'
+function docker_install {
+  local image_dir=$1
+  # Mininum effort attempt at installing Docker if missing
+  if ! docker --version; then
+    curl -fsSL https://get.docker.com -o get-docker.sh
+    sudo sh get-docker.sh
+    rm get-docker.sh
+    # On RHEL distros, the Docker service should be explicitly started
+    sudo systemctl start docker
   else
-    # Expand reclass and virsh network templates based on PDF + IDF
-    printenv | \
-      awk '/^(SALT|MCP|MAAS|CLUSTER).*=/ { gsub(/=/,": "); print }' >> "${LOCAL_PDF}"
-    template_dirs="${RECLASS_CLUSTER_DIR} virsh_net ./*j2"
-    template_err_str='Could not convert PDF to network definitions!'
+    DOCKER_VER=$(docker version --format '{{.Server.Version}}')
+    if [ "${DOCKER_VER%%.*}" -lt 2 ]; then
+      notify_e "[ERROR] Docker version ${DOCKER_VER} is too old, please upgrade it."
+    fi
   fi
-  # shellcheck disable=SC2086
-  find ${template_dirs} -name '*.j2' | while read -r tp; do
-    # Jinja2 import does not allow '..' directory traversal
-    ln -sf "$(readlink -f "${PHAROS_INSTALLER_NETMAP}")" "$(dirname "${tp}")"
-    if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" -j "${tp}" > "${tp%.j2}"; then
-      notify_e "[ERROR] ${template_err_str}"
+  # Distro-provided docker-compose might be simply broken (Ubuntu 16.04, CentOS 7)
+  if ! docker-compose --version > /dev/null 2>&1; then
+    COMPOSE_BIN="${image_dir}/docker-compose"
+    COMPOSE_VERSION='1.22.0'
+    notify_n "[WARN] Using docker-compose ${COMPOSE_VERSION} in ${COMPOSE_BIN}" 3
+    if [ ! -e "${COMPOSE_BIN}" ]; then
+      COMPOSE_URL="https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}"
+      sudo curl -L "${COMPOSE_URL}/docker-compose-$(uname -s)-$(uname -m)" -o "${COMPOSE_BIN}"
+      sudo chmod +x "${COMPOSE_BIN}"
     fi
-    rm -f "$(dirname "${tp}")/$(basename "${PHAROS_INSTALLER_NETMAP}")"
-  done
+  fi
+}
+
+function virtinst_install {
+  local image_dir=$1
+  VIRT_VER=$(virt-install --version 2>&1)
+  if [ "${VIRT_VER//./}" -lt 140 ]; then
+    VIRT_TGZ="${image_dir}/virt-manager.tar.gz"
+    VIRT_VER='1.4.3'
+    VIRT_URL="https://github.com/virt-manager/virt-manager/archive/v${VIRT_VER}.tar.gz"
+    notify_n "[WARN] Using virt-install ${VIRT_VER} from ${VIRT_TGZ}" 3
+    if [ ! -e "${VIRT_TGZ}" ]; then
+      curl -L "${VIRT_URL}" -o "${VIRT_TGZ}"
+      mkdir -p "${image_dir}/virt-manager"
+      tar xzf "${VIRT_TGZ}" -C "${image_dir}/virt-manager" --strip-components=1
+    fi
+  fi
 }