Add Jerma to blacklist logic
[releng.git] / jjb / apex / apex-deploy.sh
index 4244f44..09d6ca6 100755 (executable)
@@ -4,30 +4,20 @@ set -o nounset
 set -o pipefail
 
 IPV6_FLAG=False
+ALLINONE_FLAG=False
+CSIT_ENV_FLAG=False
+FUNCTEST_ENV_FLAG=False
 
 # log info to console
 echo "Starting the Apex deployment."
 echo "--------------------------------------------------------"
 echo
 
-if [ -z "$DEPLOY_SCENARIO" ]; then
+if [ -z "${DEPLOY_SCENARIO+x}" ]; then
   echo "Deploy scenario not set!"
   exit 1
-elif [[ "$DEPLOY_SCENARIO" == *gate* ]]; then
-  echo "Detecting Gating scenario..."
-  if [ -z "$GERRIT_EVENT_COMMENT_TEXT" ]; then
-    echo "ERROR: Gate job triggered without comment!"
-    exit 1
-  else
-    DEPLOY_SCENARIO=$(echo ${GERRIT_EVENT_COMMENT_TEXT} | grep start-gate-scenario | grep -Eo 'os-.*$')
-    if [ -z "$DEPLOY_SCENARIO" ]; then
-      echo "ERROR: Unable to detect scenario in Gerrit Comment!"
-      echo "Format of comment to trigger gate should be 'start-gate-scenario: <scenario>'"
-      exit 1
-    else
-      echo "Gate scenario detected: ${DEPLOY_SCENARIO}"
-    fi
-  fi
+else
+  echo "Deploy scenario: ${DEPLOY_SCENARIO}"
 fi
 
 # Dev or RPM/ISO build
@@ -35,23 +25,44 @@ if [[ "$ARTIFACT_VERSION" =~ dev ]]; then
   # Settings for deploying from git workspace
   DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
   NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
-  DEPLOY_CMD="${WORKSPACE}/ci/deploy.sh"
-  CLEAN_CMD="${WORKSPACE}/ci/clean.sh"
-  RESOURCES="${WORKSPACE}/.build/"
+  CLEAN_CMD="opnfv-clean"
+  # On master we download/cache the upstream images; reuse that built-in
+  # caching mechanism to avoid re-downloading the images on every job by
+  # pointing RESOURCES at a dedicated folder that holds the upstream cache.
+  UPSTREAM_CACHE=$HOME/upstream_cache
+  if [[ "$BRANCH" != 'stable/fraser' ]]; then
+    mkdir -p ${UPSTREAM_CACHE}
+    RESOURCES=$UPSTREAM_CACHE
+  else
+    RESOURCES="${WORKSPACE}/.build/"
+  fi
   CONFIG="${WORKSPACE}/build"
   BASE=$CONFIG
   IMAGES=$RESOURCES
   LIB="${WORKSPACE}/lib"
-
+  DEPLOY_CMD="opnfv-deploy --image-dir ${RESOURCES}"
   # Ensure artifacts were downloaded and extracted correctly
   # TODO(trozet) add verification here
 
+  # Install dev build
+  sudo rm -rf /tmp/.build
+  mv -f .build /tmp/
+  sudo pip3 install --upgrade --force-reinstall .
+  mv -f /tmp/.build ${WORKSPACE}/
else
   DEPLOY_SETTINGS_DIR="/etc/opnfv-apex/"
   NETWORK_SETTINGS_DIR="/etc/opnfv-apex/"
-  DEPLOY_CMD="opnfv-deploy"
   CLEAN_CMD="opnfv-clean"
-  RESOURCES="/var/opt/opnfv/images"
+  # Use a different directory here because the default image directory
+  # would be wiped upon RPM removal in the daily jobs.
+  UPSTREAM_CACHE=$HOME/upstream_cache
+  if [[ "$BRANCH" != 'stable/fraser' ]]; then
+    mkdir -p ${UPSTREAM_CACHE}
+    RESOURCES=$UPSTREAM_CACHE
+  else
+    RESOURCES="/var/opt/opnfv/images"
+  fi
+  DEPLOY_CMD="opnfv-deploy --image-dir ${RESOURCES}"
   CONFIG="/var/opt/opnfv"
   BASE=$CONFIG
   IMAGES=$RESOURCES
@@ -63,7 +74,10 @@ fi
 
 # Install Dependencies
 # Make sure python34 dependencies are installed
-for dep_pkg in epel-release python34 python34-PyYAML python34-setuptools; do
+dependencies="epel-release python34 python34-devel libvirt-devel python34-pip \
+ansible python34-PyYAML python34-jinja2 python34-setuptools python-tox"
+
+for dep_pkg in $dependencies; do
   if ! rpm -q ${dep_pkg} > /dev/null; then
     if ! sudo yum install -y ${dep_pkg}; then
       echo "Failed to install ${dep_pkg}"
@@ -72,31 +86,12 @@ for dep_pkg in epel-release python34 python34-PyYAML python34-setuptools; do
   fi
 done
 
-# Make sure jinja2 is installed
-for python_pkg in jinja2; do
-  if ! python3.4 -c "import $python_pkg"; then
-    echo "$python_pkg package not found for python3.4, attempting to install..."
-    if ! sudo easy_install-3.4 $python_pkg; then
-      echo -e "Failed to install $python_pkg package for python3.4"
-      exit 1
-    fi
-  fi
-done
-
 if [[ "$JOB_NAME" =~ "virtual" ]]; then
   # Make sure ipxe-roms-qemu package is updated to latest.
   # This package is needed for multi virtio nic PXE boot in virtual environment.
   sudo yum update -y ipxe-roms-qemu
-  if [ -z ${PYTHONPATH:-} ]; then
-    export PYTHONPATH=${WORKSPACE}/lib/python
-  else
-    export PYTHONPATH=$PYTHONPATH:${WORKSPACE}/lib/python
-  fi
 fi
 
-# set env vars to deploy cmd
-DEPLOY_CMD="BASE=${BASE} IMAGES=${IMAGES} LIB=${LIB} ${DEPLOY_CMD}"
-
 if [ "$OPNFV_CLEAN" == 'yes' ]; then
   if sudo test -e '/root/inventory/pod_settings.yaml'; then
     clean_opts='-i /root/inventory/pod_settings.yaml'
@@ -104,15 +99,33 @@ if [ "$OPNFV_CLEAN" == 'yes' ]; then
     clean_opts=''
   fi
 
-  sudo BASE=${BASE} LIB=${LIB} ${CLEAN_CMD} ${clean_opts}
+  sudo ${CLEAN_CMD} ${clean_opts}
 fi
 
+# These suffixes are add-ons to the regular scenario names, e.g.
+# os-nosdn-nofeature-noha-ipv6 or os-nosdn-nofeature-noha-allinone.
 if echo ${DEPLOY_SCENARIO} | grep ipv6; then
   IPV6_FLAG=True
   DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} |  sed 's/-ipv6//')
   echo "INFO: IPV6 Enabled"
 fi
 
+if echo ${DEPLOY_SCENARIO} | grep allinone; then
+  ALLINONE_FLAG=True
+  DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} |  sed 's/-allinone//')
+  echo "INFO: All in one deployment detected"
+fi
+
+if echo ${DEPLOY_SCENARIO} | grep csit; then
+  CSIT_ENV_FLAG=True
+  DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} |  sed 's/-csit//')
+  echo "INFO: CSIT env requested in deploy scenario"
+elif echo ${DEPLOY_SCENARIO} | grep functest; then
+  FUNCTEST_ENV_FLAG=True
+  DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} |  sed 's/-functest//')
+  echo "INFO: Functest env requested in deploy scenario"
+fi
+
 echo "Deploy Scenario set to ${DEPLOY_SCENARIO}"
 DEPLOY_FILE="${DEPLOY_SETTINGS_DIR}/${DEPLOY_SCENARIO}.yaml"
 
@@ -126,12 +139,44 @@ if [[ "$JOB_NAME" =~ "virtual" ]]; then
   if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
     DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 12 --virtual-compute-ram 7"
   fi
-  if [[ "$JOB_NAME" == *csit* ]]; then
-    DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
-  fi
-  if [[ "$PROMOTE" == "True" ]]; then
+  if [[ "$ALLINONE_FLAG" == "True" ]]; then
+    DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 0"
+  elif [[ "$PROMOTE" == "True" ]]; then
     DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2"
   fi
+
+  if [[ "$FUNCTEST_ENV_FLAG" == "True"  || "$CSIT_ENV_FLAG" == "True" ]]; then
+    if [[ "$CSIT_ENV_FLAG" == "True" ]]; then
+      ENV_TYPE="csit"
+    else
+      ENV_TYPE="functest"
+    fi
+    if [ -z ${OS_VERSION+x} ]; then
+      echo "INFO: OS_VERSION not passed to deploy, detecting based on branch and scenario"
+      case $BRANCH in
+        master)
+          if [[ "$DEPLOY_SCENARIO" =~ "rocky" ]]; then
+            OS_VERSION=rocky
+          else
+            OS_VERSION=master
+          fi
+          ;;
+        *gambia)
+          OS_VERSION=queens
+          ;;
+        *)
+          echo "Unable to detect OS_VERSION, aborting"
+          exit 1
+          ;;
+      esac
+    fi
+    if [[ "$OS_VERSION" != "master" ]]; then
+      SNAP_ENV="${ENV_TYPE}-${OS_VERSION}-environment.yaml"
+    else
+      SNAP_ENV="${ENV_TYPE}-environment.yaml"
+    fi
+    DEPLOY_CMD="${DEPLOY_CMD} -e ${SNAP_ENV}"
+  fi
else
   # settings for bare metal deployment
   NETWORK_SETTINGS_DIR="/root/network"
@@ -147,8 +192,9 @@ fi
 
 if [ "$IPV6_FLAG" == "True" ]; then
   NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
-elif echo ${DEPLOY_SCENARIO} | grep fdio; then
-  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml"
+elif [[ "$CSIT_ENV_FLAG" == "True"  || "$FUNCTEST_ENV_FLAG" == "True" ]]; then
+  # Use the csit network settings, which define a single network, for snapshots.
+  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_csit.yaml"
else
   NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
fi
@@ -162,16 +208,6 @@ fi
 # start deployment
 sudo ${DEPLOY_CMD} -d ${DEPLOY_FILE} -n ${NETWORK_FILE} --debug
 
-if [[ "$JOB_NAME" == *csit* ]]; then
-  echo "CSIT job: setting host route for floating ip routing"
-  # csit route to allow docker container to reach floating ips
-  UNDERCLOUD=$(sudo virsh domifaddr undercloud | grep -Eo "[0-9\.]+{3}[0-9]+")
-  if sudo route | grep 192.168.37.128 > /dev/null; then
-    sudo route del -net 192.168.37.128 netmask 255.255.255.128
-  fi
-  sudo route add -net 192.168.37.128 netmask 255.255.255.128 gw ${UNDERCLOUD}
-fi
-
 echo
 echo "--------------------------------------------------------"
 echo "Done!"