X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=mcp%2Fscripts%2Flib_jump_deploy.sh;h=ade1a7487784b3cbaa2df37827da6b38e21aafaf;hb=36e16f10055c74b46bbc25b85f9adf60b546b136;hp=3c3256aa05a67b9d008150aa4e735916a1ff8dc2;hpb=25bf7306d1d6f66a034c1a60037c0f9b7342c0ac;p=fuel.git

diff --git a/mcp/scripts/lib_jump_deploy.sh b/mcp/scripts/lib_jump_deploy.sh
index 3c3256aa0..ade1a7487 100644
--- a/mcp/scripts/lib_jump_deploy.sh
+++ b/mcp/scripts/lib_jump_deploy.sh
@@ -48,17 +48,13 @@ function __kernel_modules {
     fi
 
     # Best-effort attempt at building a non-maintained kernel module
-    local __baseurl
-    local __subdir
+    local __baseurl='http://vault.centos.org/centos'
+    local __subdir='Source/SPackages'
     local __uname_r=$(uname -r)
     local __uname_m=$(uname -m)
     if [ "${__uname_m}" = 'x86_64' ]; then
-        __baseurl='http://vault.centos.org/centos'
-        __subdir='Source/SPackages'
         __srpm="kernel-${__uname_r%.${__uname_m}}.src.rpm"
     else
-        __baseurl='http://vault.centos.org/altarch'
-        __subdir="Source/${__uname_m}/Source/SPackages"
         # NOTE: fmt varies across releases (e.g. kernel-alt-4.11.0-44.el7a.src.rpm)
         __srpm="kernel-alt-${__uname_r%.${__uname_m}}.src.rpm"
     fi
@@ -125,13 +121,16 @@ function __mount_image {
     sudo qemu-nbd --connect="${OPNFV_NBD_DEV}" --aio=native --cache=none \
         "${image_dir}/${image}"
     sudo kpartx -av "${OPNFV_NBD_DEV}"
-    sleep 5 # /dev/nbdNp1 takes some time to come up
     # Hardcode partition index to 1, unlikely to change for Ubuntu UCA image
+    sudo partx -uvn 1:1 "${OPNFV_NBD_DEV}"
     if sudo growpart "${OPNFV_NBD_DEV}" 1; then
         sudo kpartx -u "${OPNFV_NBD_DEV}"
         sudo e2fsck -pf "${OPNFV_MAP_DEV}"
         sudo resize2fs "${OPNFV_MAP_DEV}"
+    else
+        sleep 5 # /dev/nbdNp1 takes some time to come up
     fi
+    sudo partx -d "${OPNFV_NBD_DEV}"
     # update-grub does not like /dev/nbd*, so use a loop device to work around it
     sudo losetup "${OPNFV_LOOP_DEV}" "${OPNFV_MAP_DEV}"
     mkdir -p "${OPNFV_MNT_DIR}"
@@ -198,7 +197,7 @@ function __apt_repos_pkgs_image {
 function __cleanup_vms {
     # clean up existing nodes
     for node in $(${VIRSH} list --name | grep -P '\w{3}\d{2}'); do
-        ${VIRSH} destroy "${node}"
+        ${VIRSH} destroy "${node}" 2>/dev/null || true
     done
     for node in $(${VIRSH} list --name --all | grep -P '\w{3}\d{2}'); do
         ${VIRSH} domblklist "${node}" | awk '/^.da/ {print $2}' | \
@@ -220,9 +219,6 @@ function prepare_vms {
     local image=base_image_opnfv_fuel.img
     local vcp_image=${image%.*}_vcp.img
     local _o=${base_image/*\/}
-    local _h=$(echo "${repos_pkgs_str}.$(md5sum "${image_dir}/${_o}")" | \
-        md5sum | cut -c -8)
-    local _tmp
 
     [ -n "${image_dir}" ] || exit 1
     cleanup_uefi
@@ -230,8 +226,10 @@ function prepare_vms {
     __get_base_image "${base_image}" "${image_dir}"
     IFS='^' read -r -a repos_pkgs <<< "${repos_pkgs_str}"
 
+    local _h=$(echo "${repos_pkgs_str}.$(md5sum "${image_dir}/${_o}")" | \
+        md5sum | cut -c -8)
+    local _tmp="${image%.*}.${_h}.img"
     echo "[INFO] Lookup cache / build patched base image for fingerprint: ${_h}"
-    _tmp="${image%.*}.${_h}.img"
     if [ "${image_dir}/${_tmp}" -ef "${image_dir}/${image}" ]; then
         echo "[INFO] Patched base image found"
     else
@@ -282,9 +280,9 @@ function create_networks {
-    local all_vnode_networks=("mcpcontrol" "$@")
-    # create required networks, including constant "mcpcontrol"
-    for net in "${all_vnode_networks[@]}"; do
+    local all_vnode_networks=("$@")
+    # create required networks
+    for net in "mcpcontrol" "${all_vnode_networks[@]}"; do
         if ${VIRSH} net-info "${net}" >/dev/null 2>&1; then
             ${VIRSH} net-destroy "${net}" || true
             ${VIRSH} net-undefine "${net}"
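The `__mount_image` hunk above replaces the fixed `sleep 5` with an explicit `partx` rescan of partition 1, sleeping only as a fallback when `growpart` fails. A minimal standalone sketch of the resulting mount flow, with `/dev/nbd0`, `/dev/mapper/nbd0p1`, `/dev/loop8`, `base.img` and `/mnt/image` standing in for the script's `OPNFV_*` variables:

    # Sketch only: assumes the nbd module is loaded and cloud-utils (growpart) is installed.
    sudo qemu-nbd --connect=/dev/nbd0 --aio=native --cache=none base.img
    sudo kpartx -av /dev/nbd0           # map partitions to /dev/mapper/nbd0p*
    sudo partx -uvn 1:1 /dev/nbd0       # rescan partition 1 instead of sleeping
    if sudo growpart /dev/nbd0 1; then  # grow partition 1 to fill the image
        sudo kpartx -u /dev/nbd0
        sudo e2fsck -pf /dev/mapper/nbd0p1
        sudo resize2fs /dev/mapper/nbd0p1
    else
        sleep 5                         # fallback: wait for the mapping to settle
    fi
    sudo partx -d /dev/nbd0             # drop kernel-side partitions; kpartx maps remain
    sudo losetup /dev/loop8 /dev/mapper/nbd0p1  # update-grub rejects /dev/nbd*
    sudo mkdir -p /mnt/image
    sudo mount /dev/loop8 /mnt/image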
@@ -297,14 +295,46 @@ function create_networks {
             ${VIRSH} net-start "${net}"
         fi
     done
-    # create veth pairs for relevant networks (mcpcontrol, pxebr, mgmt)
-    for i in $(seq 0 2 4); do
-        sudo ip link del "veth_mcp$i" || true
-        sudo ip link add "veth_mcp$i" type veth peer name "veth_mcp$((i+1))"
-        sudo ip link set "veth_mcp$i" up mtu 9000
-        sudo ip link set "veth_mcp$((i+1))" up mtu 9000
-        sudo brctl addif "${all_vnode_networks[$((i/2))]}" "veth_mcp$i"
-    done
+
+    sudo ip link del veth_mcp0 || true
+    sudo ip link del veth_mcp2 || true
+    # Create systemd service for veth creation after reboot
+    FUEL_VETHC_SERVICE="/etc/systemd/system/opnfv-fuel-vethc.service"
+    FUEL_VETHA_SERVICE="/etc/systemd/system/opnfv-fuel-vetha.service"
+    test -f /usr/sbin/ip && PREFIX=/usr/sbin || PREFIX=/sbin
+    cat <<-EOF | sudo tee "${FUEL_VETHC_SERVICE}"
+	[Unit]
+	After=libvirtd.service
+	Before=docker.service
+	[Service]
+	ExecStart=/bin/sh -ec '\
+	  ${PREFIX}/ip link add veth_mcp0 type veth peer name veth_mcp1 && \
+	  ${PREFIX}/ip link add veth_mcp2 type veth peer name veth_mcp3 && \
+	  ${PREFIX}/ip link set veth_mcp0 up mtu 9000 && \
+	  ${PREFIX}/ip link set veth_mcp1 up mtu 9000 && \
+	  ${PREFIX}/ip link set veth_mcp2 up mtu 9000 && \
+	  ${PREFIX}/ip link set veth_mcp3 up mtu 9000'
+	EOF
+    cat <<-EOF | sudo tee "${FUEL_VETHA_SERVICE}"
+	[Unit]
+	StartLimitInterval=200
+	StartLimitBurst=10
+	After=opnfv-fuel-vethc.service
+	[Service]
+	Restart=on-failure
+	RestartSec=10
+	ExecStartPre=/bin/sh -ec '\
+	  ${PREFIX}/brctl showstp ${all_vnode_networks[0]} > /dev/null 2>&1 && \
+	  ${PREFIX}/brctl showstp ${all_vnode_networks[1]} > /dev/null 2>&1'
+	ExecStart=/bin/sh -ec '\
+	  ${PREFIX}/brctl addif ${all_vnode_networks[0]} veth_mcp0 && \
+	  ${PREFIX}/brctl addif ${all_vnode_networks[1]} veth_mcp2'
+	EOF
+    sudo ln -sf "${FUEL_VETHC_SERVICE}" "/etc/systemd/system/multi-user.target.wants/"
+    sudo ln -sf "${FUEL_VETHA_SERVICE}" "/etc/systemd/system/multi-user.target.wants/"
+    sudo systemctl daemon-reload
+    sudo systemctl restart opnfv-fuel-vethc
+    sudo systemctl restart opnfv-fuel-vetha
 }
 
 function create_vms {
@@ -341,14 +371,8 @@ function create_vms {
 
     # prepare network args
     local vnode_networks=("$@")
-    if [[ "${vnode_data[0]}" =~ ^(cfg01|mas01) ]]; then
-        net_args=" --network network=mcpcontrol,model=virtio"
-        # 3rd interface gets connected to PXE/Admin Bridge (cfg01, mas01)
-        vnode_networks[2]="${vnode_networks[0]}"
-    else
-        net_args=" --network bridge=${vnode_networks[0]},model=virtio"
-    fi
-    for net in "${vnode_networks[@]:1}"; do
+    local net_args=
+    for net in "${vnode_networks[@]}"; do
         net_args="${net_args} --network bridge=${net},model=virtio"
     done
 
@@ -360,7 +384,7 @@ function create_vms {
 
     [ ! -e "${image_dir}/virt-manager" ] || VIRT_PREFIX="${image_dir}/virt-manager/"
     # shellcheck disable=SC2086
-    ${VIRT_PREFIX}virt-install --name "${vnode_data[0]}" \
+    ${VIRT_PREFIX}${VIRSH/virsh/virt-install} --name "${vnode_data[0]}" \
         ${virt_cpu_args} --accelerate \
         ${net_args} \
         --ram "${vnode_data[1]}" \
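The veth pairs wired up in `create_networks` above are now owned by generated systemd units, so they survive jump-host reboots: `opnfv-fuel-vethc` recreates the pairs at boot, and `opnfv-fuel-vetha` keeps retrying bridge attachment until libvirt has the bridges up. A sketch of a manual sanity check on the jump host (the bridge names come from the POD configuration, so `brctl show` output will vary):

    # Sketch only: verify the generated units and the resulting links.
    sudo systemctl --no-pager status opnfv-fuel-vethc opnfv-fuel-vetha
    ip -brief link show type veth   # expect veth_mcp0..veth_mcp3 UP, mtu 9000
    sudo brctl show                 # veth_mcp0/veth_mcp2 attached to PXE/mgmt bridges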
"${node}" =~ (cfg01|mas01) ]]; then - ${VIRSH} reset "${node}" - fi + ${VIRSH} reset "${node}" done for node in "${vnodes[@]}"; do - if [[ ! "${node}" =~ (cfg01|mas01) ]]; then - wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all" - fi + wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all" done } @@ -412,21 +425,38 @@ function prepare_containers { local image_dir=$1 [ -n "${image_dir}" ] || exit 1 [ -n "${MCP_REPO_ROOT_PATH}" ] || exit 1 - docker-compose --version > /dev/null 2>&1 || COMPOSE_PREFIX="${image_dir}/" + [ ! -e "${image_dir}/docker-compose" ] || COMPOSE_PREFIX="${image_dir}/" "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml down if [[ ! "${MCP_DOCKER_TAG}" =~ 'verify' ]]; then "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml pull fi - sudo rm -rf "${image_dir}/"{salt,hosts,pki} "${image_dir}/nodes/"* - mkdir -p "${image_dir}/salt/"{master.d,minion.d} - touch "${image_dir}/hosts" + # overwrite hosts only on first container up, to preserve cluster nodes + sudo cp docker-compose/files/hosts "${image_dir}/hosts" + sudo rm -rf "${image_dir}/"{salt,pki,mas01/etc} "${image_dir}/nodes/"* + find "${image_dir}/mas01/var/lib/" \ + -mindepth 2 -maxdepth 2 -not -name boot-resources \ + -exec sudo rm -rf {} \; || true + mkdir -p "${image_dir}/"{salt/master.d,salt/minion.d} + + if grep -q -e 'maas' 'docker-compose/docker-compose.yaml'; then + chmod +x docker-compose/files/entrypoint*.sh + # Apparmor workaround for bind9 inside Docker containers using AUFS + for profile in 'usr.sbin.ntpd' 'usr.sbin.named' \ + 'usr.sbin.dhcpd' 'usr.bin.tcpdump'; do + if [ -e "/etc/apparmor.d/${profile}" ] && \ + [ ! -e "/etc/apparmor.d/disable/${profile}" ]; then + sudo ln -sf "/etc/apparmor.d/${profile}" "/etc/apparmor.d/disable/" + sudo apparmor_parser -R "/etc/apparmor.d/${profile}" || true + fi + done + fi } function start_containers { local image_dir=$1 [ -n "${image_dir}" ] || exit 1 - docker-compose --version > /dev/null 2>&1 || COMPOSE_PREFIX="${image_dir}/" + [ ! -e "${image_dir}/docker-compose" ] || COMPOSE_PREFIX="${image_dir}/" "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml up -d } @@ -470,6 +500,7 @@ function cleanup_mounts { sudo losetup -d "${OPNFV_LOOP_DEV}" fi if [ -n "${OPNFV_NBD_DEV}" ]; then + sudo partx -d "${OPNFV_NBD_DEV}" || true sudo kpartx -d "${OPNFV_NBD_DEV}" || true sudo qemu-nbd -d "${OPNFV_NBD_DEV}" || true fi