diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh
index a27c92d29..7840919cd 100755
--- a/jjb/apex/apex-deploy.sh
+++ b/jjb/apex/apex-deploy.sh
@@ -3,158 +3,113 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-APEX_PKGS="common undercloud onos"
 IPV6_FLAG=False
+ALLINONE_FLAG=False
 
 # log info to console
-echo "Starting the Apex virtual deployment."
+echo "Starting the Apex deployment."
 echo "--------------------------------------------------------"
 echo
 
-if ! rpm -q wget > /dev/null; then
-    sudo yum -y install wget
-fi
-
-if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then
-    # Build is from a verify, use local build artifacts (not RPMs)
-    cd $WORKSPACE/../${BUILD_DIRECTORY}
-    WORKSPACE=$(pwd)
-    echo "WORKSPACE modified to $WORKSPACE"
-    cd $WORKSPACE/ci
-elif [[ ! "$ARTIFACT_NAME" == "latest" ]]; then
-    # if artifact name is passed the pull a
-    # specific artifact from artifacts.opnfv.org
-    # artifact specified should be opnfv-apex-<version>.noarch.rpm
-    RPM_INSTALL_PATH=$GS_URL
-    RPM_LIST=$RPM_INSTALL_PATH/$ARTIFACT_NAME
-else
-    # Use latest RPMS
-    if [[ $BUILD_DIRECTORY == *apex-build* ]]; then
-        # Triggered from a daily so RPMS should be in local directory
-        BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
-        echo "BUILD DIRECTORY modified to $BUILD_DIRECTORY"
-
-        if [[ -f ${BUILD_DIRECTORY}/../opnfv.properties ]]; then
-            # if opnfv.properties exists then use the
-            # local build. Source the file so we get local OPNFV vars
-            source ${BUILD_DIRECTORY}/../opnfv.properties
-            RPM_INSTALL_PATH=${BUILD_DIRECTORY}/noarch
-            RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
-        else
-            echo "BUILD_DIRECTORY is from a daily job, so will not use latest from URL"
-            echo "Check that the slave has opnfv.properties in $BUILD_DIRECTORY"
-            exit 1
-        fi
-    else
-        # use the latest from artifacts.opnfv.org
-        # get the latest.properties to get the link to the latest artifact
-        if ! wget -O $WORKSPACE/opnfv.properties http://$GS_URL/latest.properties; then
-            echo "ERROR: Unable to find latest.properties at ${GS_URL}...exiting"
-            exit 1
-        fi
-        # source the file so we get OPNFV vars
-        source opnfv.properties
-        RPM_INSTALL_PATH=$(echo "http://"$OPNFV_RPM_URL | sed 's/\/'"$(basename $OPNFV_RPM_URL)"'//')
-        RPM_LIST=${RPM_INSTALL_PATH}/$(basename $OPNFV_RPM_URL)
-    fi
-fi
-
 if [ -z "$DEPLOY_SCENARIO" ]; then
   echo "Deploy scenario not set!"
   exit 1
+else
+  echo "Deploy scenario: ${DEPLOY_SCENARIO}"
 fi
 
-# use local build for verify and csit promote
-if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then
-    if [ ! -e "${WORKSPACE}/build/lib" ]; then
-        ln -s ${WORKSPACE}/lib ${WORKSPACE}/build/lib
-    fi
-    DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
-    NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
-    DEPLOY_CMD="$(pwd)/deploy.sh"
+# Dev or RPM/ISO build
+if [[ "$ARTIFACT_VERSION" =~ dev ]]; then
+  # Settings for deploying from git workspace
+  DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
+  NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
+  CLEAN_CMD="opnfv-clean"
+  # if we are using master, then we are downloading/caching upstream images;
+  # we want to use that built-in mechanism to avoid re-downloading on every
+  # job, so we use a dedicated folder to hold the upstream cache
+  UPSTREAM_CACHE=$HOME/upstream_cache
+  if [ "$BRANCH" == 'master' ]; then
+    mkdir -p ${UPSTREAM_CACHE}
+    RESOURCES=$UPSTREAM_CACHE
+  else
     RESOURCES="${WORKSPACE}/.build/"
-    CONFIG="${WORKSPACE}/build"
-    LIB="${WORKSPACE}/lib"
-    # Make sure python34 deps are installed
-    for dep_pkg in epel-release python34 python34-PyYAML python34-setuptools; do
-        if ! rpm -q ${dep_pkg} > /dev/null; then
-            if ! sudo yum install -y ${dep_pkg}; then
-                echo "Failed to install ${dep_pkg}"
-                exit 1
-            fi
-        fi
-    done
-
-    # Make sure jinja2 is installed
-    for python_pkg in jinja2; do
-        if ! python3.4 -c "import $python_pkg"; then
-            echo "$python_pkg package not found for python3.4, attempting to install..."
-            if ! sudo easy_install-3.4 $python_pkg; then
-                echo -e "Failed to install $python_pkg package for python3.4"
-                exit 1
-            fi
-        fi
-    done
-
-    # Make sure ipxe-roms-qemu package is updated to latest.
-    # This package is needed for multi virtio nic PXE boot in virtual environment.
-    sudo yum update -y ipxe-roms-qemu
-
-    if [ -z ${PYTHONPATH:-} ]; then
-        export PYTHONPATH=${WORKSPACE}/lib/python
-    else
-        export PYTHONPATH=$PYTHONPATH:${WORKSPACE}/lib/python
-    fi
-# use RPMs
+  fi
+  CONFIG="${WORKSPACE}/build"
+  BASE=$CONFIG
+  IMAGES=$RESOURCES
+  LIB="${WORKSPACE}/lib"
+  DEPLOY_CMD="opnfv-deploy --image-dir ${RESOURCES}"
+  # Ensure artifacts were downloaded and extracted correctly
+  # TODO(trozet) add verification here
+
+  # Install dev build
+  sudo rm -rf /tmp/.build
+  mv -f .build /tmp/
+  sudo pip3 install --upgrade --force-reinstall .
+  mv -f /tmp/.build ${WORKSPACE}/
 else
-  # find version of RPM
-  VERSION_EXTENSION=$(echo $(basename $RPM_LIST) | grep -Eo '[0-9]+\.[0-9]+-[0-9]{8}')
-  # build RPM List which already includes base Apex RPM
-  for pkg in ${APEX_PKGS}; do
-    RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}.noarch.rpm"
-  done
-
-  # remove old / install new RPMs
-  if rpm -q opnfv-apex > /dev/null; then
-    INSTALLED_RPMS=$(rpm -qa | grep apex)
-    if [ -n "$INSTALLED_RPMS" ]; then
-      sudo yum remove -y ${INSTALLED_RPMS}
-    fi
-  fi
+  DEPLOY_SETTINGS_DIR="/etc/opnfv-apex/"
+  NETWORK_SETTINGS_DIR="/etc/opnfv-apex/"
+  CLEAN_CMD="opnfv-clean"
+  # use a separate cache directory here because the default image directory
+  # will be wiped upon RPM removal in the daily jobs
+  UPSTREAM_CACHE=$HOME/upstream_cache
+  if [ "$BRANCH" == 'master' ]; then
+    mkdir -p ${UPSTREAM_CACHE}
+    RESOURCES=$UPSTREAM_CACHE
+  else
+    RESOURCES="/var/opt/opnfv/images"
+  fi
+  DEPLOY_CMD="opnfv-deploy --image-dir ${RESOURCES}"
+  CONFIG="/var/opt/opnfv"
+  BASE=$CONFIG
+  IMAGES=$RESOURCES
+  LIB="/var/opt/opnfv/lib"
+  sudo mkdir -p /var/log/apex
+  sudo chmod 777 /var/log/apex
+  cd /var/log/apex
+fi
+
+# Install Dependencies
+# Make sure python34 dependencies are installed
+dependencies="epel-release python34 python34-devel libvirt-devel python34-pip \
+ansible python34-PyYAML python34-jinja2 python34-setuptools python-tox"
 
-  if ! sudo yum install -y $RPM_LIST; then
-    echo "Unable to install new RPMs: $RPM_LIST"
+for dep_pkg in $dependencies; do
+  if ! rpm -q ${dep_pkg} > /dev/null; then
+    if ! sudo yum install -y ${dep_pkg}; then
+      echo "Failed to install ${dep_pkg}"
       exit 1
     fi
+  fi
+done
 
-  DEPLOY_CMD=opnfv-deploy
-  DEPLOY_SETTINGS_DIR="/etc/opnfv-apex/"
-  NETWORK_SETTINGS_DIR="/etc/opnfv-apex/"
-  RESOURCES="/var/opt/opnfv/images"
-  CONFIG="/var/opt/opnfv"
-  LIB="/var/opt/opnfv/lib"
+if [[ "$JOB_NAME" =~ "virtual" ]]; then
+  # Make sure the ipxe-roms-qemu package is updated to latest.
+  # This package is needed for multi virtio-nic PXE boot in a virtual environment.
+  sudo yum update -y ipxe-roms-qemu
 fi
 
-# set env vars to deploy cmd
-DEPLOY_CMD="CONFIG=${CONFIG} RESOURCES=${RESOURCES} LIB=${LIB} ${DEPLOY_CMD}"
-
 if [ "$OPNFV_CLEAN" == 'yes' ]; then
   if sudo test -e '/root/inventory/pod_settings.yaml'; then
     clean_opts='-i /root/inventory/pod_settings.yaml'
   else
     clean_opts=''
   fi
-  if [[ "$BUILD_DIRECTORY" == *verify* || "$BUILD_DIRECTORY" == *csit* ]]; then
-    sudo CONFIG=${CONFIG} LIB=${LIB} ./clean.sh ${clean_opts}
-  else
-    sudo CONFIG=${CONFIG} LIB=${LIB} opnfv-clean ${clean_opts}
-  fi
+
+  sudo ${CLEAN_CMD} ${clean_opts}
 fi
 
+# These are add-ons to regular scenarios, appended as suffixes,
+# e.g. os-nosdn-nofeature-noha-ipv6 or os-nosdn-nofeature-noha-allinone
 if echo ${DEPLOY_SCENARIO} | grep ipv6; then
   IPV6_FLAG=True
   DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} | sed 's/-ipv6//')
   echo "INFO: IPV6 Enabled"
+elif echo ${DEPLOY_SCENARIO} | grep allinone; then
+  ALLINONE_FLAG=True
+  DEPLOY_SCENARIO=$(echo ${DEPLOY_SCENARIO} | sed 's/-allinone//')
+  echo "INFO: All-in-one deployment detected"
 fi
 
 echo "Deploy Scenario set to ${DEPLOY_SCENARIO}"
@@ -164,26 +119,29 @@ if [ ! -e "$DEPLOY_FILE" ]; then
   echo "ERROR: Required settings file missing: Deploy settings file ${DEPLOY_FILE}"
 fi
 
-if [[ "$JOB_NAME" == *virtual* ]]; then
+if [[ "$JOB_NAME" =~ "virtual" ]]; then
   # settings for virtual deployment
-  if [ "$IPV6_FLAG" == "True" ]; then
-    NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
-  else
-    NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
-  fi
   DEPLOY_CMD="${DEPLOY_CMD} -v"
-  if [[ "$JOB_NAME" == *csit* ]]; then
-    DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml --virtual-computes 2"
+  if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then
+    DEPLOY_CMD="${DEPLOY_CMD} --virtual-default-ram 12 --virtual-compute-ram 7"
+  fi
+  if [[ "$ALLINONE_FLAG" == "True" ]]; then
+    DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 0"
+  elif [[ "$PROMOTE" == "True" ]]; then
+    DEPLOY_CMD="${DEPLOY_CMD} --virtual-computes 2"
+  fi
+
+  if [[ "$PROMOTE" == "True" ]]; then
+    if [[ "$DEPLOY_SCENARIO" =~ "queens" ]]; then
+      CSIT_ENV="csit-queens-environment.yaml"
+    else
+      CSIT_ENV="csit-environment.yaml"
+    fi
+    DEPLOY_CMD="${DEPLOY_CMD} -e ${CSIT_ENV}"
   fi
 else
   # settings for bare metal deployment
-  if [ "$IPV6_FLAG" == "True" ]; then
-    NETWORK_FILE="/root/network/network_settings_v6.yaml"
-  elif [[ "$JOB_NAME" == *master* ]]; then
-    NETWORK_FILE="/root/network/network_settings-master.yaml"
-  else
-    NETWORK_FILE="/root/network/network_settings.yaml"
-  fi
+  NETWORK_SETTINGS_DIR="/root/network"
   INVENTORY_FILE="/root/inventory/pod_settings.yaml"
 
   if ! sudo test -e "$INVENTORY_FILE"; then
@@ -194,6 +152,19 @@ else
   DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}"
 fi
 
+if [[ "$BRANCH" == "master" ]]; then
+  echo "Upstream deployment detected"
+  DEPLOY_CMD="${DEPLOY_CMD} --upstream"
+fi
+
+if [ "$IPV6_FLAG" == "True" ]; then
+  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
+elif [[ "$PROMOTE" == "True" ]]; then
+  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_csit.yaml"
+else
+  NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml"
+fi
+
 # Check that network settings file exists
 if ! sudo test -e "$NETWORK_FILE"; then
   echo "ERROR: Required settings file missing: Network Settings file ${NETWORK_FILE}"
@@ -203,16 +174,6 @@ fi
 # start deployment
 sudo ${DEPLOY_CMD} -d ${DEPLOY_FILE} -n ${NETWORK_FILE} --debug
 
-if [[ "$JOB_NAME" == *csit* ]]; then
-  echo "CSIT job: setting host route for floating ip routing"
-  # csit route to allow docker container to reach floating ips
-  UNDERCLOUD=$(sudo virsh domifaddr undercloud | grep -Eo "[0-9\.]+{3}[0-9]+")
-  if route | grep 192.168.37.128; then
-    sudo route del -net 192.168.37.128 netmask 255.255.255.128
-  fi
-  sudo route add -net 192.168.37.128 netmask 255.255.255.128 gw ${UNDERCLOUD}
-fi
-
 echo
 echo "--------------------------------------------------------"
 echo "Done!"