set -o nounset
set -o pipefail
-APEX_PKGS="common undercloud" # removed onos for danube
IPV6_FLAG=False
+ALLINONE_FLAG=False
# log info to console
echo "Starting the Apex deployment."
if [ -z "$DEPLOY_SCENARIO" ]; then
echo "Deploy scenario not set!"
exit 1
-elif [[ "$DEPLOY_SCENARIO" == *gate* ]]; then
- echo "Detecting Gating scenario..."
- if [ -z "$GERRIT_EVENT_COMMENT_TEXT" ]; then
- echo "ERROR: Gate job triggered without comment!"
- exit 1
- else
- DEPLOY_SCENARIO=$(echo ${GERRIT_EVENT_COMMENT_TEXT} | grep start-gate-scenario | grep -Eo 'os-.*$')
- if [ -z "$DEPLOY_SCENARIO" ]; then
- echo "ERROR: Unable to detect scenario in Gerrit Comment!"
- echo "Format of comment to trigger gate should be 'start-gate-scenario: <scenario>'"
- exit 1
- else
- echo "Gate scenario detected: ${DEPLOY_SCENARIO}"
- fi
- fi
+else
+ echo "Deploy scenario: ${DEPLOY_SCENARIO}"
fi
# Dev or RPM/ISO build
if [[ "$ARTIFACT_VERSION" =~ dev ]]; then
  # Settings for deploying from git workspace
DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
- DEPLOY_CMD="${WORKSPACE}/ci/deploy.sh"
- CLEAN_CMD="${WORKSPACE}/ci/clean.sh"
- RESOURCES="${WORKSPACE}/.build/"
+ CLEAN_CMD="opnfv-clean"
+ # if we are using master, then we are downloading/caching upstream images
+ # we want to use that built in mechanism to avoid re-downloading every job
+ # so we use a dedicated folder to hold the upstream cache
+ UPSTREAM_CACHE=$HOME/upstream_cache
+ if [ "$BRANCH" == 'master' ]; then
+ mkdir -p ${UPSTREAM_CACHE}
+ RESOURCES=$UPSTREAM_CACHE
+ else
+ RESOURCES="${WORKSPACE}/.build/"
+ fi
CONFIG="${WORKSPACE}/build"
BASE=$CONFIG
IMAGES=$RESOURCES
LIB="${WORKSPACE}/lib"
-
+ DEPLOY_CMD="opnfv-deploy --image-dir ${RESOURCES}"
# Ensure artifacts were downloaded and extracted correctly
# TODO(trozet) add verification here
+ # Install dev build
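+ # .build holds the locally built images; stash it in /tmp so the pip reinstall below leaves it untouched, then restore it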
+ sudo rm -rf /tmp/.build
+ mv -f .build /tmp/
+ sudo pip3 install --upgrade --force-reinstall .
+ mv -f /tmp/.build ${WORKSPACE}/
else
DEPLOY_SETTINGS_DIR="/etc/opnfv-apex/"
NETWORK_SETTINGS_DIR="/etc/opnfv-apex/"
- DEPLOY_CMD="opnfv-deploy"
CLEAN_CMD="opnfv-clean"
- RESOURCES="/var/opt/opnfv/images"
+ # set to use different directory here because upon RPM removal this
+ # directory will be wiped in daily
+ UPSTREAM_CACHE=$HOME/upstream_cache
+ if [ "$BRANCH" == 'master' ]; then
+ mkdir -p ${UPSTREAM_CACHE}
+ RESOURCES=$UPSTREAM_CACHE
+ else
+ RESOURCES="/var/opt/opnfv/images"
+ fi
+ DEPLOY_CMD="opnfv-deploy --image-dir ${RESOURCES}"
CONFIG="/var/opt/opnfv"
BASE=$CONFIG
IMAGES=$RESOURCES
LIB="/var/opt/opnfv/lib"
-
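+ # make sure the Apex log directory exists and is world-writable, then run from it so anything written to the working directory lands alongside the deploy logs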
+ sudo mkdir -p /var/log/apex
+ sudo chmod 777 /var/log/apex
+ cd /var/log/apex
fi
# Install Dependencies
# Make sure python34 dependencies are installed
-for dep_pkg in epel-release python34 python34-PyYAML python34-setuptools; do
+dependencies="epel-release python34 python34-devel libvirt-devel python34-pip \
+python34-PyYAML python34-jinja2 python34-setuptools python-tox ansible"
+
+for dep_pkg in $dependencies; do
if ! rpm -q ${dep_pkg} > /dev/null; then
if ! sudo yum install -y ${dep_pkg}; then
echo "Failed to install ${dep_pkg}"
    fi
  fi
done
-# Make sure jinja2 is installed
-for python_pkg in jinja2; do
- if ! python3.4 -c "import $python_pkg"; then
- echo "$python_pkg package not found for python3.4, attempting to install..."
- if ! sudo easy_install-3.4 $python_pkg; then
- echo -e "Failed to install $python_pkg package for python3.4"
- exit 1
- fi
- fi
-done
-
if [[ "$JOB_NAME" =~ "virtual" ]]; then
# Make sure ipxe-roms-qemu package is updated to latest.
# This package is needed for multi virtio nic PXE boot in virtual environment.
sudo yum update -y ipxe-roms-qemu
- if [ -z ${PYTHONPATH:-} ]; then
- export PYTHONPATH=${WORKSPACE}/lib/python
- else
- export PYTHONPATH=$PYTHONPATH:${WORKSPACE}/lib/python
- fi
fi
-# set env vars to deploy cmd
-DEPLOY_CMD="BASE=${BASE} IMAGES=${IMAGES} LIB=${LIB} ${DEPLOY_CMD}"
-
if [ "$OPNFV_CLEAN" == 'yes' ]; then
if sudo test -e '/root/inventory/pod_settings.yaml'; then
    clean_opts='-i /root/inventory/pod_settings.yaml'
  else
    clean_opts=''
fi
- sudo BASE=${BASE} LIB=${LIB} ${CLEAN_CMD} ${clean_opts}
+ sudo ${CLEAN_CMD} ${clean_opts}
fi
+# These are add-ons to regular scenarios where you can do like
+# os-nosdn-nofeature-noha-ipv6, or os-nosdn-nofeature-noha-allinone
if echo ${DEPLOY_SCENARIO} | grep ipv6; then
IPV6_FLAG=True
DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} | sed 's/-ipv6//')
echo "INFO: IPV6 Enabled"
+elif echo ${DEPLOY_SCENARIO} | grep allinone; then
+ ALLINONE_FLAG=True
+ DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} | sed 's/-allinone//')
+ echo "INFO: All in one deployment detected"
fi
echo "Deploy Scenario set to ${DEPLOY_SCENARIO}"
if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 12 --virtual-compute-ram 7"
fi
- if [[ "$JOB_NAME" == *csit* ]]; then
- DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml"
+ if [[ "$ALLINONE_FLAG" == "True" ]]; then
+ DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 0"
+ elif [[ "$PROMOTE" == "True" ]]; then
+ DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2"
fi
+
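+  # promotion (CSIT) jobs deploy with a CSIT environment overlay; queens scenarios use the queens variant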
if [[ "$PROMOTE" == "True" ]]; then
- DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2"
+ if [[ "$DEPLOY_SCENARIO" =~ "queens" ]]; then
+ CSIT_ENV="csit-queens-environment.yaml"
+ else
+ CSIT_ENV="csit-environment.yaml"
+ fi
+ DEPLOY_CMD="${DEPLOY_CMD} -e ${CSIT_ENV}"
fi
else
# settings for bare metal deployment
DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}"
fi
+if [[ "$BRANCH" == "master" ]]; then
+ echo "Upstream deployment detected"
+ DEPLOY_CMD="${DEPLOY_CMD} --upstream"
+fi
+
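+# pick the network settings file: IPv6 deployments and promotion/CSIT runs use dedicated variants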
if [ "$IPV6_FLAG" == "True" ]; then
NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
-elif echo ${DEPLOY_SCENARIO} | grep fdio; then
- NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml"
+elif [[ "$PROMOTE" == "True" ]]; then
+ NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_csit.yaml"
else
NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
fi
# start deployment
sudo ${DEPLOY_CMD} -d ${DEPLOY_FILE} -n ${NETWORK_FILE} --debug
-if [[ "$JOB_NAME" == *csit* ]]; then
- echo "CSIT job: setting host route for floating ip routing"
- # csit route to allow docker container to reach floating ips
- UNDERCLOUD=$(sudo virsh domifaddr undercloud | grep -Eo "[0-9\.]+{3}[0-9]+")
- if sudo route | grep 192.168.37.128 > /dev/null; then
- sudo route del -net 192.168.37.128 netmask 255.255.255.128
- fi
- sudo route add -net 192.168.37.128 netmask 255.255.255.128 gw ${UNDERCLOUD}
-fi
-
echo
echo "--------------------------------------------------------"
echo "Done!"