Apex: More CSIT promotion fixes
diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh
index ce9544b..dfaf2a8 100755
--- a/jjb/apex/apex-deploy.sh
+++ b/jjb/apex/apex-deploy.sh
@@ -10,26 +10,11 @@ echo "Starting the Apex deployment."
 echo "--------------------------------------------------------"
 echo
 
-sudo rm -rf /tmp/tmp*
-
 if [ -z "$DEPLOY_SCENARIO" ]; then
   echo "Deploy scenario not set!"
   exit 1
-elif [[ "$DEPLOY_SCENARIO" == *gate* ]]; then
-  echo "Detecting Gating scenario..."
-  if [ -z "$GERRIT_EVENT_COMMENT_TEXT" ]; then
-    echo "ERROR: Gate job triggered without comment!"
-    exit 1
-  else
-    DEPLOY_SCENARIO=$(echo ${GERRIT_EVENT_COMMENT_TEXT} | grep start-gate-scenario | grep -Eo 'os-.*$')
-    if [ -z "$DEPLOY_SCENARIO" ]; then
-      echo "ERROR: Unable to detect scenario in Gerrit Comment!"
-      echo "Format of comment to trigger gate should be 'start-gate-scenario: <scenario>'"
-      exit 1
-    else
-      echo "Gate scenario detected: ${DEPLOY_SCENARIO}"
-    fi
-  fi
+else
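+  # e.g. DEPLOY_SCENARIO="os-odl-nofeature-ha" (an illustrative scenario name)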
+  echo "Deploy scenario: ${DEPLOY_SCENARIO}"
 fi
 
 # Dev or RPM/ISO build
@@ -37,23 +22,44 @@ if [[ "$ARTIFACT_VERSION" =~ dev ]]; then
   # Settings for deploying from git workspace
   DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
   NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
-  DEPLOY_CMD="${WORKSPACE}/ci/deploy.sh"
-  CLEAN_CMD="${WORKSPACE}/ci/clean.sh"
-  RESOURCES="${WORKSPACE}/.build/"
+  CLEAN_CMD="opnfv-clean"
+  # if we are on master we download/cache the upstream images, and we want
+  # to use that built-in mechanism to avoid re-downloading on every job, so
+  # we keep the upstream cache in a dedicated folder
+  UPSTREAM_CACHE=$HOME/upstream_cache
+  if [ "$BRANCH" == 'master' ]; then
+    mkdir -p ${UPSTREAM_CACHE}
+    RESOURCES=$UPSTREAM_CACHE
+  else
+    RESOURCES="${WORKSPACE}/.build/"
+  fi
   CONFIG="${WORKSPACE}/build"
   BASE=$CONFIG
   IMAGES=$RESOURCES
   LIB="${WORKSPACE}/lib"
-
+  DEPLOY_CMD="opnfv-deploy --image-dir ${RESOURCES}"
   # Ensure artifacts were downloaded and extracted correctly
   # TODO(trozet) add verification here
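+  # a possible shape for that check (hypothetical sketch, left disabled):
+  # if ! ls ${RESOURCES}/*.qcow2 > /dev/null 2>&1; then
+  #   echo "ERROR: no qcow2 images found in ${RESOURCES}"
+  #   exit 1
+  # fi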
 
+  # Install dev build
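+  # .build holds the built images (IMAGES points at it), so stash it in /tmp
+  # first; this likely keeps "pip3 install ." from copying that large tree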
+  sudo rm -rf /tmp/.build
+  mv -f ${WORKSPACE}/.build /tmp/
+  sudo pip3 install --upgrade --force-reinstall .
+  mv -f /tmp/.build ${WORKSPACE}/
 else
   DEPLOY_SETTINGS_DIR="/etc/opnfv-apex/"
   NETWORK_SETTINGS_DIR="/etc/opnfv-apex/"
-  DEPLOY_CMD="opnfv-deploy"
   CLEAN_CMD="opnfv-clean"
-  RESOURCES="/var/opt/opnfv/images"
+  # use a different directory here, because the RPM-owned image directory
+  # is wiped when the RPM is removed in the daily jobs
+  UPSTREAM_CACHE=$HOME/upstream_cache
+  if [ "$BRANCH" == 'master' ]; then
+    mkdir -p ${UPSTREAM_CACHE}
+    RESOURCES=$UPSTREAM_CACHE
+  else
+    RESOURCES="/var/opt/opnfv/images"
+  fi
+  DEPLOY_CMD="opnfv-deploy --image-dir ${RESOURCES}"
   CONFIG="/var/opt/opnfv"
   BASE=$CONFIG
   IMAGES=$RESOURCES
@@ -65,7 +71,10 @@ fi
 
 # Install Dependencies
 # Make sure python34 dependencies are installed
-for dep_pkg in epel-release python34 python34-PyYAML python34-setuptools; do
+dependencies="epel-release python34 python34-devel libvirt-devel python34-pip \
+ansible python34-PyYAML python34-jinja2 python34-setuptools python-tox ansible"
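+# note: epel-release is first in the list so the EPEL repo is enabled before
+# the python34 packages (which live in EPEL) are installed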
+
+for dep_pkg in $dependencies; do
   if ! rpm -q ${dep_pkg} > /dev/null; then
     if ! sudo yum install -y ${dep_pkg}; then
       echo "Failed to install ${dep_pkg}"
@@ -74,31 +83,12 @@ for dep_pkg in epel-release python34 python34-PyYAML python34-setuptools; do
   fi
 done
 
-# Make sure jinja2 is installed
-for python_pkg in jinja2; do
-  if ! python3.4 -c "import $python_pkg"; then
-    echo "$python_pkg package not found for python3.4, attempting to install..."
-    if ! sudo easy_install-3.4 $python_pkg; then
-      echo -e "Failed to install $python_pkg package for python3.4"
-      exit 1
-    fi
-  fi
-done
-
 if [[ "$JOB_NAME" =~ "virtual" ]]; then
   # Make sure ipxe-roms-qemu package is updated to latest.
   # This package is needed for multi virtio nic PXE boot in virtual environment.
   sudo yum update -y ipxe-roms-qemu
-  if [ -z ${PYTHONPATH:-} ]; then
-    export PYTHONPATH=${WORKSPACE}/lib/python
-  else
-    export PYTHONPATH=$PYTHONPATH:${WORKSPACE}/lib/python
-  fi
 fi
 
-# set env vars to deploy cmd
-DEPLOY_CMD="BASE=${BASE} IMAGES=${IMAGES} LIB=${LIB} ${DEPLOY_CMD}"
-
 if [ "$OPNFV_CLEAN" == 'yes' ]; then
   if sudo test -e '/root/inventory/pod_settings.yaml'; then
     clean_opts='-i /root/inventory/pod_settings.yaml'
@@ -106,7 +96,7 @@ if [ "$OPNFV_CLEAN" == 'yes' ]; then
     clean_opts=''
   fi
 
-  sudo BASE=${BASE} LIB=${LIB} ${CLEAN_CMD} ${clean_opts}
+  sudo ${CLEAN_CMD} ${clean_opts}
 fi
 
 if echo ${DEPLOY_SCENARIO} | grep ipv6; then
@@ -128,11 +118,8 @@ if [[ "$JOB_NAME" =~ "virtual" ]]; then
   if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
     DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 12 --virtual-compute-ram 7"
   fi
-  if [[ "$JOB_NAME" == *csit* ]]; then
-    DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
-  fi
   if [[ "$PROMOTE" == "True" ]]; then
-    DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2"
+    DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2 -e csit-environment.yaml"
   fi
 else
   # settings for bare metal deployment
@@ -147,10 +134,15 @@ else
   DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}"
 fi
 
+if [[ "$BRANCH" == "master" ]]; then
+  echo "Upstream deployment detected"
+  DEPLOY_CMD="${DEPLOY_CMD} --upstream"
+fi
+
 if [ "$IPV6_FLAG" == "True" ]; then
   NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
-elif echo ${DEPLOY_SCENARIO} | grep fdio; then
-  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml"
+elif [[ "$PROMOTE" == "True" ]]; then
+  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_csit.yaml"
 else
   NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
 fi
@@ -164,7 +156,7 @@ fi
 # start deployment
 sudo ${DEPLOY_CMD} -d ${DEPLOY_FILE} -n ${NETWORK_FILE} --debug
 
-if [[ "$JOB_NAME" == *csit* ]]; then
+if [[ "$PROMOTE" == 'True' ]]; then
   echo "CSIT job: setting host route for floating ip routing"
   # csit route to allow docker container to reach floating ips
  UNDERCLOUD=$(sudo virsh domifaddr undercloud | grep -Eo "([0-9]+\.){3}[0-9]+")