set -o nounset
set -o pipefail
-APEX_PKGS="common undercloud" # removed onos for danube
IPV6_FLAG=False
# log info to console
-echo "Starting the Apex virtual deployment."
+echo "Starting the Apex deployment."
echo "--------------------------------------------------------"
echo
-if ! rpm -q wget > /dev/null; then
- sudo yum -y install wget
-fi
-
-if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
- # Build is from a verify, use local build artifacts (not RPMs)
- cd $WORKSPACE/../${BUILD_DIRECTORY}
- WORKSPACE=$(pwd)
- echo "WORKSPACE modified to $WORKSPACE"
- cd $WORKSPACE/ci
-elif [[ ! "$ARTIFACT_NAME" == "latest" ]]; then
- # if artifact name is passed the pull a
- # specific artifact from artifacts.opnfv.org
- # artifact specified should be opnfv-apex-<version>.noarch.rpm
- RPM_INSTALL_PATH=$GS_URL
- RPM_LIST=$RPM_INSTALL_PATH/$ARTIFACT_NAME
-else
- # Use latest RPMS
- if [[ $BUILD_DIRECTORY == *apex-build* ]]; then
- # Triggered from a daily so RPMS should be in local directory
- BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
- echo "BUILD DIRECTORY modified to $BUILD_DIRECTORY"
-
- if [[ -f ${BUILD_DIRECTORY}/../opnfv.properties ]]; then
- # if opnfv.properties exists then use the
- # local build. Source the file so we get local OPNFV vars
- source ${BUILD_DIRECTORY}/../opnfv.properties
- RPM_INSTALL_PATH=${BUILD_DIRECTORY}/noarch
- RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
- else
- echo "BUILD_DIRECTORY is from a daily job, so will not use latest from URL"
- echo "Check that the slave has opnfv.properties in $BUILD_DIRECTORY"
- exit 1
- fi
- else
- # use the latest from artifacts.opnfv.org
- # get the latest.properties to get the link to the latest artifact
- if ! wget -O $WORKSPACE/opnfv.properties http://$GS_URL/latest.properties; then
- echo "ERROR: Unable to find latest.properties at ${GS_URL}...exiting"
- exit 1
- fi
- # source the file so we get OPNFV vars
- source opnfv.properties
- RPM_INSTALL_PATH=$(echo "http://"$OPNFV_RPM_URL | sed 's/\/'"$(basename $OPNFV_RPM_URL)"'//')
- RPM_LIST=${RPM_INSTALL_PATH}/$(basename $OPNFV_RPM_URL)
- fi
-fi
-
-# rename odl_l3 to odl only for master
-# this can be removed once all the odl_l3 references
-# are updated to odl after the danube jobs are removed
-if [[ "$BUILD_DIRECTORY" == *master* ]]; then
- DEPLOY_SCENARIO=${DEPLOY_SCENARIO/odl_l3/odl}
-fi
if [ -z "$DEPLOY_SCENARIO" ]; then
echo "Deploy scenario not set!"
exit 1
fi
-# use local build for verify and promote
-if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
- if [ ! -e "${WORKSPACE}/build/lib" ]; then
- ln -s ${WORKSPACE}/lib ${WORKSPACE}/build/lib
- fi
- DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
- NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
- DEPLOY_CMD="$(pwd)/deploy.sh"
- IMAGES="${WORKSPACE}/.build/"
- BASE="${WORKSPACE}/build"
- LIB="${WORKSPACE}/lib"
- # Make sure python34 deps are installed
- for dep_pkg in epel-release python34 python34-PyYAML python34-setuptools; do
- if ! rpm -q ${dep_pkg} > /dev/null; then
- if ! sudo yum install -y ${dep_pkg}; then
- echo "Failed to install ${dep_pkg}"
- exit 1
- fi
- fi
- done
-
- # Make sure jinja2 is installed
- for python_pkg in jinja2; do
- if ! python3.4 -c "import $python_pkg"; then
- echo "$python_pkg package not found for python3.4, attempting to install..."
- if ! sudo easy_install-3.4 $python_pkg; then
- echo -e "Failed to install $python_pkg package for python3.4"
- exit 1
- fi
- fi
- done
-
- # Make sure ipxe-roms-qemu package is updated to latest.
- # This package is needed for multi virtio nic PXE boot in virtual environment.
- sudo yum update -y ipxe-roms-qemu
-
- if [ -z ${PYTHONPATH:-} ]; then
- export PYTHONPATH=${WORKSPACE}/lib/python
- else
- export PYTHONPATH=$PYTHONPATH:${WORKSPACE}/lib/python
- fi
-# use RPMs
+# Dev or RPM/ISO build
+# For upstream deployments we currently only use git repo and not RPM
+# Need to decide after Fraser if we want to use RPM or not for upstream
+if [[ "$ARTIFACT_VERSION" =~ dev || "$DEPLOY_SCENARIO" =~ "upstream" ]]; then
+ # Settings for deploying from git workspace
+ DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
+ NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
+ DEPLOY_CMD="opnfv-deploy --image-dir ${WORKSPACE}/.build"
+ CLEAN_CMD="opnfv-clean"
+ RESOURCES="${WORKSPACE}/.build/"
+ CONFIG="${WORKSPACE}/build"
+ BASE=$CONFIG
+ IMAGES=$RESOURCES
+ LIB="${WORKSPACE}/lib"
+
+ # Ensure artifacts were downloaded and extracted correctly
+ # TODO(trozet) add verification here
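+ # A minimal sketch of such a check (the exact artifact layout here is an
+ # assumption, so this is illustrative only):
+ #   test -d "${WORKSPACE}/.build" || { echo "ERROR: .build artifacts missing"; exit 1; }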
+
+ # Install dev build
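+ # .build is moved aside first, presumably so pip does not copy the large
+ # image artifacts into its install sandbox, and is restored afterwards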
+ sudo rm -rf /tmp/.build
+ mv -f .build /tmp/
+ sudo pip3 install --upgrade --force-reinstall .
+ mv -f /tmp/.build ${WORKSPACE}/
else
- # find version of RPM
- VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-([0-9]{8}|[a-z]+-[0-9]\.[0-9]+)')
- # build RPM List which already includes base Apex RPM
- for pkg in ${APEX_PKGS}; do
- RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}.noarch.rpm"
- done
+ DEPLOY_SETTINGS_DIR="/etc/opnfv-apex/"
+ NETWORK_SETTINGS_DIR="/etc/opnfv-apex/"
+ DEPLOY_CMD="opnfv-deploy"
+ CLEAN_CMD="opnfv-clean"
+ RESOURCES="/var/opt/opnfv/images"
+ CONFIG="/var/opt/opnfv"
+ BASE=$CONFIG
+ IMAGES=$RESOURCES
+ LIB="/var/opt/opnfv/lib"
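+ # Deploy logs land under /var/log/apex; world-writable (presumably so the
+ # non-root CI user can write there)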
+ sudo mkdir -p /var/log/apex
+ sudo chmod 777 /var/log/apex
+ cd /var/log/apex
+fi
- # remove old / install new RPMs
- if rpm -q opnfv-apex > /dev/null; then
- INSTALLED_RPMS=$(rpm -qa | grep apex)
- if [ -n "$INSTALLED_RPMS" ]; then
- sudo yum remove -y ${INSTALLED_RPMS}
- fi
- fi
+# Install Dependencies
+# Make sure python34 and other deploy dependencies are installed
+dependencies="epel-release python34 python34-devel libvirt-devel python34-pip \
+ansible python34-PyYAML python34-jinja2 python34-setuptools python-tox"
- if ! sudo yum install -y $RPM_LIST; then
- echo "Unable to install new RPMs: $RPM_LIST"
+for dep_pkg in $dependencies; do
+ if ! rpm -q ${dep_pkg} > /dev/null; then
+ if ! sudo yum install -y ${dep_pkg}; then
+ echo "Failed to install ${dep_pkg}"
exit 1
fi
+ fi
+done
- DEPLOY_CMD=opnfv-deploy
- DEPLOY_SETTINGS_DIR="/etc/opnfv-apex/"
- NETWORK_SETTINGS_DIR="/etc/opnfv-apex/"
- IMAGES="/var/opt/opnfv/images"
- BASE="/var/opt/opnfv"
- LIB="/var/opt/opnfv/lib"
+if [[ "$JOB_NAME" =~ "virtual" ]]; then
+ # Make sure ipxe-roms-qemu package is updated to latest.
+ # This package is needed for multi virtio nic PXE boot in virtual environment.
+ sudo yum update -y ipxe-roms-qemu
fi
-# set env vars to deploy cmd
-DEPLOY_CMD="BASE=${BASE} IMAGES=${IMAGES} LIB=${LIB} ${DEPLOY_CMD}"
-
if [ "$OPNFV_CLEAN" == 'yes' ]; then
if sudo test -e '/root/inventory/pod_settings.yaml'; then
clean_opts='-i /root/inventory/pod_settings.yaml'
else
clean_opts=''
fi
- if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *promote* ]]; then
- sudo BASE=${BASE} LIB=${LIB} ./clean.sh ${clean_opts}
- else
- sudo BASE=${BASE} LIB=${LIB} opnfv-clean ${clean_opts}
- fi
+
+ sudo ${CLEAN_CMD} ${clean_opts}
fi
if echo ${DEPLOY_SCENARIO} | grep ipv6; then
  IPV6_FLAG=True
  DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} | sed 's/-ipv6//')
  echo "INFO: IPv6 deployment detected"
fi
-if [[ "$JOB_NAME" == *virtual* ]]; then
+if [[ "$JOB_NAME" =~ "virtual" ]]; then
# settings for virtual deployment
DEPLOY_CMD="${DEPLOY_CMD} -v"
if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
- DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 12 --virtual-compute-ram 4"
- TMP_DEPLOY_FILE="${WORKSPACE}/${DEPLOY_SCENARIO}.yaml"
- cp -f ${DEPLOY_FILE} ${TMP_DEPLOY_FILE}
- sed -i 's/^\(\s*hugepages:\).*$/\1 1024/g' ${TMP_DEPLOY_FILE}
- DEPLOY_FILE=${TMP_DEPLOY_FILE}
+ DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 12 --virtual-compute-ram 7"
fi
if [[ "$JOB_NAME" == *csit* ]]; then
DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
fi
- if [[ "$JOB_NAME" == *promote* ]]; then
+ if [[ "$PROMOTE" == "True" ]]; then
DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2"
fi
else
NETWORK_SETTINGS_DIR="/root/network"
INVENTORY_FILE="/root/inventory/pod_settings.yaml"
-# (trozet) According to FDS folks uio_pci_generic works with UCS-B
-# and there appears to be a bug with vfio-pci
- # if fdio on baremetal, then we are using UCS enic and
- # need to use vfio-pci instead of uio generic
-# if [[ "$DEPLOY_SCENARIO" == *fdio* ]]; then
-# TMP_DEPLOY_FILE="${WORKSPACE}/${DEPLOY_SCENARIO}.yaml"
-# cp -f ${DEPLOY_FILE} ${TMP_DEPLOY_FILE}
-# sed -i 's/^\(\s*uio-driver:\).*$/\1 vfio-pci/g' ${TMP_DEPLOY_FILE}
-# DEPLOY_FILE=${TMP_DEPLOY_FILE}
-# fi
-
  if ! sudo test -e "$INVENTORY_FILE"; then
    echo "ERROR: Required settings file missing: Inventory settings file ${INVENTORY_FILE}"
    exit 1
  fi
  DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}"
fi
+if [[ "$DEPLOY_SCENARIO" =~ "upstream" ]]; then
+ echo "Upstream deployment detected"
+ DEPLOY_CMD="${DEPLOY_CMD} --upstream"
+fi
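+# Illustrative only: by this point DEPLOY_CMD has accumulated the job's flags,
+# e.g. a virtual upstream promote run would look roughly like:
+#   opnfv-deploy --image-dir ${WORKSPACE}/.build -v --virtual-computes 2 --upstream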
+
if [ "$IPV6_FLAG" == "True" ]; then
NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
elif echo ${DEPLOY_SCENARIO} | grep fdio; then