Merge "Added Builder to push results to DB for Qtip JJB"
author Fatih Degirmenci <fatih.degirmenci@ericsson.com>
Mon, 18 Jan 2016 14:38:02 +0000 (14:38 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
Mon, 18 Jan 2016 14:38:02 +0000 (14:38 +0000)
15 files changed:
jjb/apex/apex.yml
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-deploy-bare.sh
jjb/compass4nfv/compass-deploy-virtual.sh
jjb/compass4nfv/compass-project-jobs.yml
jjb/fuel/fuel-ci-jobs.yml
jjb/fuel/fuel-deploy-virtual.sh [deleted file]
jjb/fuel/fuel-deploy.sh
jjb/fuel/fuel-project-jobs.yml
jjb/functest/functest-ci-jobs.yml
jjb/joid/joid-deploy.sh
jjb/opnfv/installer-params.yml
jjb/opnfv/opnfv-docker.sh
jjb/opnfv/slave-params.yml
jjb/yardstick/yardstick-ci-jobs.yml

diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index 57825ce..afa4697 100644
@@ -4,8 +4,8 @@
         - 'apex-verify-{stream}'
         - 'apex-merge-{stream}'
         - 'apex-build-{stream}'
-        - 'apex-deploy-virtual-{stream}'
-        - 'apex-deploy-baremetal-{stream}'
+        - 'apex-deploy-virtual-{scenario}-{stream}'
+        - 'apex-deploy-baremetal-{scenario}-{stream}'
         - 'apex-daily-{stream}'
 
     # stream:    branch with - in place of / (eg. stable-arno)
         - brahmaputra:
             branch: 'stable/brahmaputra'
             gs-pathname: '/brahmaputra'
+            disabled: true
 
     project: 'apex'
 
+    scenario:
+         - 'os-odl_l2-nofeature-ha'
+         - 'os-odl_l2-sfc-ha'
+         - 'os-odl_l3-nofeature-ha'
+         - 'os-onos-nofeature-ha'
+         - 'os-opencontrail-nofeature-ha'
+
 - job-template:
     name: 'apex-verify-{stream}'
 
@@ -28,7 +36,6 @@
     parameters:
         - apex-parameter:
             gs-pathname: '{gs-pathname}'
-            ARTIFACT_VERSION: 'dev'
         - project-parameter:
             project: '{project}'
         - gerrit-parameter:
 
     builders:
         - 'apex-build'
-        - 'apex-deploy-virtual'
+        - trigger-builds:
+          - project: 'apex-deploy-virtual-os-odl_l2-nofeature-ha-{stream}'
+            predefined-parameters:
+              BUILD_DIRECTORY=apex-verify-master/build_output
+            git-revision: false
+            block: true
+        - trigger-builds:
+          - project: 'apex-deploy-virtual-os-onos-nofeature-ha-{stream}'
+            predefined-parameters:
+              BUILD_DIRECTORY=apex-verify-master/build_output
+            git-revision: false
+            block: true
         - 'apex-workspace-cleanup'
 
 - job-template:
 
     builders:
         - 'apex-build'
-        - 'apex-deploy-virtual'
+        - trigger-builds:
+          - project: 'apex-deploy-virtual-os-odl_l2-nofeature-ha-{stream}'
+            predefined-parameters:
+              BUILD_DIRECTORY=apex-build-master/build_output
+            git-revision: false
+            block: true
         - 'apex-upload-artifact'
-        - 'apex-workspace-cleanup'
 
 - job-template:
-    name: 'apex-deploy-virtual-{stream}'
+    name: 'apex-deploy-virtual-{scenario}-{stream}'
 
     # Job template for virtual deployment
     #
             project: '{project}'
         - apex-parameter:
             gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: '{scenario}'
+            description: "Scenario to deploy with."
 
     properties:
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - "apex-verify.*"
                 - "apex-deploy.*"
-                - "apex-build.*"
 
     builders:
         - 'apex-deploy-virtual'
         - 'apex-workspace-cleanup'
 
 - job-template:
-    name: 'apex-deploy-baremetal-{stream}'
+    name: 'apex-deploy-baremetal-{scenario}-{stream}'
 
     # Job template for baremetal deployment
     #
             project: '{project}'
         - apex-parameter:
             gs-pathname: '{gs-pathname}'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: '{scenario}'
+            description: "Scenario to deploy with."
 
     properties:
         - build-blocker:
                 - "apex-build.*"
 
     triggers:
-        - 'apex-master'
+        - 'apex-{stream}'
 
     builders:
         - trigger-builds:
             current-parameters: true
             block: true
         - trigger-builds:
-          - project: 'apex-deploy-baremetal-{stream}'
+          - project: 'apex-deploy-baremetal-os-odl_l2-nofeature-ha-{stream}'
+            predefined-parameters:
+              BUILD_DIRECTORY=apex-build-master/build_output
+            git-revision: true
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-opnfv-jump-1-daily-{stream}'
+            block: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-opnfv-jump-1-daily-{stream}'
+            block: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-onos-nofeature-ha-{stream}'
+            predefined-parameters:
+              BUILD_DIRECTORY=apex-build-master/build_output
             git-revision: true
             block: true
         - trigger-builds:
 
             # upload artifact and additional files to google storage
             gsutil cp $BUILD_DIRECTORY/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log 2>&1
-            gsutil cp $BUILD_DIRECTORY/$(basename $OPNFV_RPM_URL) gs://$GS_URL/$(basename $OPNFV_RPM_URL) > gsutil.iso.log 2>&1
-            gsutil cp $BUILD_DIRECTORY/$(basename $OPNFV_SRPM_URL) gs://$GS_URL/$(basename $OPNFV_SRPM_URL) > gsutil.iso.log 2>&1
+            RPM_INSTALL_PATH=$BUILD_DIRECTORY/$(basename $OPNFV_RPM_URL)
+            RPM_LIST=$RPM_INSTALL_PATH
+            for pkg in common undercloud; do
+                RPM_LIST+=" ${RPM_INSTALL_PATH/opnfv-apex/opnfv-apex-${pkg}}"
+            done
+            SRPM_INSTALL_PATH=$BUILD_DIRECTORY/$(basename $OPNFV_SRPM_URL)
+            SRPM_LIST=$SRPM_INSTALL_PATH
+            for pkg in common undercloud; do
+                SRPM_LIST+=" ${SRPM_INSTALL_PATH/opnfv-apex/opnfv-apex-${pkg}}"
+            done
+            for artifact in $RPM_LIST $SRPM_LIST; do
+              gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log 2>&1
+            done
             gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log 2>&1
             gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log 2>&1
 
                 # specific artifact from artifacts.opnfv.org
                 RPM_INSTALL_PATH=$GS_URL/$ARTIFACT_NAME
             else
-                if [[ -f opnfv.properties ]]; then
+                if [[ $BUILD_DIRECTORY == *verify* ]]; then
+                  BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
+                  echo "BUILD DIRECTORY modified to $BUILD_DIRECTORY"
+                elif [[ $BUILD_DIRECTORY == *apex-build* ]]; then
+                  BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
+                  echo "BUILD DIRECTORY modified to $BUILD_DIRECTORY"
+                fi
+
+                if [[ -f ${BUILD_DIRECTORY}/../opnfv.properties ]]; then
                     # if opnfv.properties exists then use the
                     # local build. Source the file so we get local OPNFV vars
-                    source opnfv.properties
-                    RPM_INSTALL_PATH=build_output/$(basename $OPNFV_RPM_URL)
+                    source ${BUILD_DIRECTORY}/../opnfv.properties
+                    RPM_INSTALL_PATH=${BUILD_DIRECTORY}/$(basename $OPNFV_RPM_URL)
                 else
+                    if [[ $BUILD_DIRECTORY == *verify* ]]; then
+                      echo "BUILD_DIRECTORY is from a verify job, so will not use latest from URL"
+                      echo "Check that the slave has opnfv.properties in $BUILD_DIRECTORY"
+                      exit 1
+                    elif [[ $BUILD_DIRECTORY == *apex-build* ]]; then
+                      echo "BUILD_DIRECTORY is from a daily job, so will not use latest from URL"
+                      echo "Check that the slave has opnfv.properties in $BUILD_DIRECTORY"
+                      exit 1
+                    fi
                     # no opnfv.properties means use the latest from artifacts.opnfv.org
                     # get the latest.properties to get the link to the latest artifact
                     curl -s -o $WORKSPACE/opnfv.properties http://$GS_URL/latest.properties
                 fi
             fi
 
-            source opnfv.properties
-            RPM_INSTALL_PATH=build_output/$(basename $OPNFV_RPM_URL)
-            if [ ! -e "$RPM_INSTALL_PATH" ]; then
-               RPM_INSTALL_PATH=http://${OPNFV_RPM_URL}
-            fi
-
             RPM_LIST=$RPM_INSTALL_PATH
             for pkg in common undercloud; do
                 RPM_LIST+=" ${RPM_INSTALL_PATH/opnfv-apex/opnfv-apex-${pkg}}"
                  echo "RPM is already installed"
                elif sudo yum update -y $RPM_LIST | grep "does not update installed package"; then
                    if ! sudo yum downgrade -y $RPM_LIST; then
-                     yum remove -y opnfv-undercloud opnfv-common
+                     sudo yum remove -y opnfv-undercloud opnfv-common
                      sudo yum downgrade -y $RPM_INSTALL_PATH
                    fi
                fi
             # cleanup virtual machines before we start
             sudo opnfv-clean
             # initiate virtual deployment
-            if [ -e /usr/share/doc/opnfv/network_settings.yaml.example ]; then
-              sudo opnfv-deploy -v -d /usr/share/doc/opnfv/deploy_settings.yaml.example -n /usr/share/doc/opnfv/network_settings.yaml.example
+            if [ -e /etc/opnfv-apex/network_settings.yaml ]; then
+              if [ -n "$DEPLOY_SCENARIO" ]; then
+                echo "Deploy Scenario set to ${DEPLOY_SCENARIO}"
+                if [ -e /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml ]; then
+                  sudo opnfv-deploy -v -d /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml -n /etc/opnfv-apex/network_settings.yaml
+                else
+                  echo "File does not exist /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml"
+                  exit 1
+                fi
+              else
+                echo "Deploy scenario not set!"
+                exit 1
+              fi
             else
               sudo opnfv-deploy -v
             fi
                 # specific artifact from artifacts.opnfv.org
                 RPM_INSTALL_PATH=$GS_URL/$ARTIFACT_NAME
             else
-                if [[ -f opnfv.properties ]]; then
+                if [[ $BUILD_DIRECTORY == *apex-build* ]]; then
+                  BUILD_DIRECTORY=$WORKSPACE/../$BUILD_DIRECTORY
+                  echo "BUILD DIRECTORY modified to $BUILD_DIRECTORY"
+                fi
+                if [[ -f ${BUILD_DIRECTORY}/../opnfv.properties ]]; then
                     # if opnfv.properties exists then use the
                     # local build. Source the file so we get local OPNFV vars
-                    source opnfv.properties
-                    RPM_INSTALL_PATH=build_output/$(basename $OPNFV_RPM_URL)
+                    source ${BUILD_DIRECTORY}/../opnfv.properties
+                    RPM_INSTALL_PATH=${BUILD_DIRECTORY}/$(basename $OPNFV_RPM_URL)
                 else
                     # no opnfv.properties means use the latest from artifacts.opnfv.org
                     # get the latest.properties to get the link to the latest artifact
                 fi
             fi
 
-            source opnfv.properties
-            RPM_INSTALL_PATH=build_output/$(basename $OPNFV_RPM_URL)
             if [ ! -e "$RPM_INSTALL_PATH" ]; then
                RPM_INSTALL_PATH=http://${OPNFV_RPM_URL}
             fi
                  echo "RPM is already installed"
                elif sudo yum update -y $RPM_LIST | grep "does not update installed package"; then
                    if ! sudo yum downgrade -y $RPM_LIST; then
-                     yum remove -y opnfv-undercloud opnfv-common
+                     sudo yum remove -y opnfv-undercloud opnfv-common
                      sudo yum downgrade -y $RPM_INSTALL_PATH
                    fi
                fi
             # cleanup environment before we start
             sudo opnfv-clean
             # initiate baremetal deployment
-            sudo opnfv-deploy -i  /root/inventory/pod_settings.yaml \
-            -d /usr/share/doc/opnfv/deploy_settings.yaml.example \
-            -n /root/network/network_settings.yaml
+            if [ -e /etc/opnfv-apex/network_settings.yaml ]; then
+              if [ -n "$DEPLOY_SCENARIO" ]; then
+                echo "Deploy Scenario set to ${DEPLOY_SCENARIO}"
+                if [ -e /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml ]; then
+                  sudo opnfv-deploy -i  /root/inventory/pod_settings.yaml \
+                  -d /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml \
+                  -n /root/network/network_settings.yaml
+                else
+                  echo "File does not exist /etc/opnfv-apex/${DEPLOY_SCENARIO}.yaml"
+                  exit 1
+                fi
+              else
+                echo "Deploy scenario not set!"
+                exit 1
+              fi
+            else
+              echo "File /etc/opnfv-apex/network_settings.yaml does not exist!"
+              exit 1
+            fi
 
             echo
             echo "--------------------------------------------------------"
     name: 'apex-master'
     triggers:
         - timed: '0 3 * * *'
+- trigger:
+    name: 'apex-brahmaputra'
+    triggers:
+        - timed: '0 6 * * 2050'
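
The apex upload builder above expands the base RPM path into the opnfv-apex-common and opnfv-apex-undercloud package paths with bash pattern substitution. A minimal, runnable sketch of that expansion, using a made-up artifact name and build directory rather than values from this change:

    #!/bin/bash
    # Illustrative inputs; in the job these come from opnfv.properties / $BUILD_DIRECTORY.
    BUILD_DIRECTORY=build_output
    OPNFV_RPM_URL="artifacts.opnfv.org/apex/opnfv-apex-2.0-20160118.noarch.rpm"

    RPM_INSTALL_PATH=$BUILD_DIRECTORY/$(basename $OPNFV_RPM_URL)

    # ${var/pattern/replacement} rewrites "opnfv-apex" into "opnfv-apex-common"
    # and "opnfv-apex-undercloud", building one space-separated list.
    RPM_LIST=$RPM_INSTALL_PATH
    for pkg in common undercloud; do
        RPM_LIST+=" ${RPM_INSTALL_PATH/opnfv-apex/opnfv-apex-${pkg}}"
    done

    printf '%s\n' $RPM_LIST
    # build_output/opnfv-apex-2.0-20160118.noarch.rpm
    # build_output/opnfv-apex-common-2.0-20160118.noarch.rpm
    # build_output/opnfv-apex-undercloud-2.0-20160118.noarch.rpm

The same list is what gsutil copies artifact by artifact on upload, and what yum install/update/downgrade receives on the deploy side.
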
diff --git a/jjb/compass4nfv/compass-ci-jobs.yml b/jjb/compass4nfv/compass-ci-jobs.yml
index 535fb3b..0f760c4 100644
@@ -42,7 +42,7 @@
             auto-trigger-name: 'compass-{scenario}-{pod}-trigger'
 #        - 'os-ocl-nofeature-ha':
 #            disabled: true
-#            auto-trigger-name: 'joid-{scenario}-{pod}-trigger'
+#            auto-trigger-name: 'compass-{scenario}-{pod}-trigger'
 
     jobs:
         - 'compass-{scenario}-{pod}-daily-{stream}'
@@ -82,7 +82,7 @@
             gs-pathname: '{gs-pathname}'
         - string:
             name: DEPLOY_SCENARIO
-            default: 'none'
+            default: '{scenario}'
         - 'huawei-build-defaults'
         - '{installer}-defaults'
 
             blocking-jobs:
                 - 'compass-deploy-{pod}-daily-{stream}'
 
+    wrappers:
+        - build-name:
+            name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+
     parameters:
         - project-parameter:
             project: '{project}'
diff --git a/jjb/compass4nfv/compass-deploy-bare.sh b/jjb/compass4nfv/compass-deploy-bare.sh
index 12b9b33..dc0aeb2 100644
@@ -6,7 +6,7 @@ echo "Starting the deployment on baremetal environment using $INSTALLER_TYPE. Th
 echo "--------------------------------------------------------"
 echo
 
-export CONFDIR=$WORKSPACE/deploy/conf/hardware_environment/huawei_us_lab/pod1
+export CONFDIR=$WORKSPACE/deploy/conf/hardware_environment/$NODE_NAME
 export ISO_URL=file://$BUILD_DIRECTORY/compass.iso
 export INSTALL_NIC=eth0
 
@@ -14,7 +14,7 @@ cd $WORKSPACE
 
 export OS_VERSION=${{COMPASS_OS_VERSION}}
 export OPENSTACK_VERSION=${{COMPASS_OPENSTACK_VERSION}}
-./deploy.sh --dha $CONFDIR/dha.yml --network $CONFDIR/network.yml
+./deploy.sh --dha $CONFDIR/${{DEPLOY_SCENARIO}}.yml --network $CONFDIR/network.yml
 if [ $? -ne 0 ]; then
     echo "depolyment failed!"
     deploy_ret=1
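
With the hard-coded huawei_us_lab/pod1 path replaced by $NODE_NAME and dha.yml by ${DEPLOY_SCENARIO}.yml, one script now serves any pod/scenario pair. A rough sketch of the resulting resolution (the slave and scenario values below are examples only; the doubled braces such as ${{COMPASS_OS_VERSION}} are just JJB escaping and collapse to single braces in the generated script):

    #!/bin/bash
    # Example values; on a real slave Jenkins supplies NODE_NAME and the job supplies DEPLOY_SCENARIO.
    WORKSPACE=$PWD
    NODE_NAME="huawei-us-deploy-bare-1"
    DEPLOY_SCENARIO="os-odl_l2-nofeature-ha"

    # per-pod hardware config is keyed off the slave name ...
    CONFDIR=$WORKSPACE/deploy/conf/hardware_environment/$NODE_NAME
    # ... and the DHA file off the scenario instead of a fixed dha.yml
    echo "./deploy.sh --dha $CONFDIR/${DEPLOY_SCENARIO}.yml --network $CONFDIR/network.yml"
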
diff --git a/jjb/compass4nfv/compass-deploy-virtual.sh b/jjb/compass4nfv/compass-deploy-virtual.sh
index b35658b..4991350 100644
@@ -10,7 +10,7 @@ export ISO_URL=file://$BUILD_DIRECTORY/compass.iso
 export OS_VERSION=${{COMPASS_OS_VERSION}}
 export OPENSTACK_VERSION=${{COMPASS_OPENSTACK_VERSION}}
 export CONFDIR=$WORKSPACE/deploy/conf/vm_environment
-./deploy.sh --dha $CONFDIR/$SDN_CONTROLLER.yml --network $CONFDIR/$NODE_NAME/network.yml
+./deploy.sh --dha $CONFDIR/$DEPLOY_SCENARIO.yml --network $CONFDIR/$NODE_NAME/network.yml
 if [ $? -ne 0 ]; then
     echo "depolyment failed!"
     deploy_ret=1
diff --git a/jjb/compass4nfv/compass-project-jobs.yml b/jjb/compass4nfv/compass-project-jobs.yml
index 345c4c3..565aaa3 100644
             gs-pathname: '/{stream}'
 
 
-    sdn-controller:
-        - 'nosdn':
+    scenario:
+        - 'os-nosdn-nofeature-ha':
             disabled: false
             node: huawei-deploy-vm
-        - 'odl':
+        - 'os-odl_l2-nofeature-ha':
             disabled: false
             node: huawei-deploy-vm
-        - 'onos':
+        - 'os-onos-nofeature-ha':
             disabled: false
             node: huawei-deploy-vm
-#        - 'opencontrail':
+#        - 'os-ocl-nofeature-ha':
 #            disabled: true
 #            node: huawei-deploy-vm
 
@@ -33,7 +33,7 @@
         - 'compass-verify-{stream}'
         - 'compass-build-iso-{stream}'
         - 'compass-build-ppa-{stream}'
-        - 'compass-deploy-virtual-{sdn-controller}-{stream}'
+        - 'compass-deploy-virtual-{scenario}-{stream}'
         - 'compass-virtual-daily-{stream}'
 
 ########################
@@ -65,8 +65,8 @@
         - '{node}-defaults'
         - '{installer}-defaults'
         - string:
-            name: SDN_CONTROLLER
-            default: 'nosdn'
+            name: DEPLOY_SCENARIO
+            default: 'os-nosdn-nofeature-ha'
 
     scm:
         - gerrit-trigger-scm:
             branch: '{branch}'
 
     triggers:
-        - timed: 'H 16 * * *'
+        - timed: 'H 14 * * *'
 
     builders:
         - trigger-builds:
             git-revision: true
             block: true
         - trigger-builds:
-          - project: 'compass-deploy-virtual-nosdn-{stream}'
+          - project: 'compass-deploy-virtual-os-nosdn-nofeature-ha-{stream}'
             git-revision: true
             block: false
         - trigger-builds:
-          - project: 'compass-deploy-virtual-odl-{stream}'
+          - project: 'compass-deploy-virtual-os-odl_l2-nofeature-ha-{stream}'
             git-revision: true
             block: false
         - trigger-builds:
-          - project: 'compass-deploy-virtual-onos-{stream}'
+          - project: 'compass-deploy-virtual-os-onos-nofeature-ha-{stream}'
             git-revision: true
             block: false
 
             !include-raw: ./compass-makeppa.sh
 
 - job-template:
-    name: 'compass-deploy-virtual-{sdn-controller}-{stream}'
+    name: 'compass-deploy-virtual-{scenario}-{stream}'
 
     disabled: false
 
         - compass-project-parameter:
             installer: '{installer}'
             gs-pathname: '{gs-pathname}'
-        - string:
-            name: SDN_CONTROLLER
-            default: '{sdn-controller}'
         - string:
             name: DEPLOY_SCENARIO
-            default: 'none'
+            default: '{scenario}'
         - '{node}-defaults'
         - '{installer}-defaults'
 
diff --git a/jjb/fuel/fuel-ci-jobs.yml b/jjb/fuel/fuel-ci-jobs.yml
index 9caf3d1..8c06679 100644
 
 # new scenario descriptions
     scenario:
-        - 'os-nosdn-kvm-ha':
+        # HA scenarios
+        - 'os-odl_l2-nofeature-ha':
+            auto-trigger-name: 'fuel-{scenario}-{pod}-trigger'
+        - 'os-odl_l3-nofeature-ha':
             auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
-        - 'os-nosdn-kvm_ovs-ha':
+        - 'os-onos-nofeature-ha':
             auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
         - 'os-nosdn-nofeature-ha':
             auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
+        - 'os-nosdn-kvm-ha':
+            auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
+        - 'os-nosdn-kvm_ovs-ha':
+            auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
         - 'os-nosdn-ovs-ha':
             auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
         - 'os-nosdn-vlan-ha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-trigger'
-        - 'os-odl_l2-nofeature-ha':
-            auto-trigger-name: 'fuel-{scenario}-{pod}-trigger'
-        - 'os-odl_l3-nofeature-ha':
+        # NOHA scenarios
+        - 'os-odl_l2-nofeature-noha':
             auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
-        - 'os-onos-nofeature-ha':
+        - 'os-odl_l3-nofeature-noha':
+            auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
+        - 'os-onos-nofeature-noha':
+            auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
+        - 'os-nosdn-nofeature-noha':
+            auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
+        - 'os-nosdn-kvm-noha':
+            auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
+        - 'os-nosdn-kvm_ovs-noha':
+            auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
+        - 'os-nosdn-ovs-noha':
+            auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
+        - 'os-odl_l2-bgpvpn-noha':
             auto-trigger-name: 'brahmaputra-trigger-daily-disabled'
 
     jobs:
         - 'fuel-deploy-{pod}-daily-{stream}'
 
 ########################
-# job templates - old type of defining the scenario
+# job templates
 ########################
 - job-template:
     name: 'fuel-{scenario}-{pod}-daily-{stream}'
 
     project-type: multijob
 
+    node: intel-build
+
     concurrent: false
 
     properties:
     parameters:
         - project-parameter:
             project: '{project}'
-        - '{pod}-defaults'
         - '{installer}-defaults'
         - string:
             name: DEPLOY_SCENARIO
             condition: SUCCESSFUL
             projects:
                 - name: 'fuel-deploy-{pod}-daily-{stream}'
-                  current-parameters: true
+                  current-parameters: false
+                  predefined-parameters: 'DEPLOY_SCENARIO={scenario}'
                   kill-phase-on: FAILURE
         - multijob:
             name: functest
             condition: COMPLETED
             projects:
                 - name: 'functest-fuel-{pod}-daily-{stream}'
-                  current-parameters: true
+                  current-parameters: false
+                  predefined-parameters: 'DEPLOY_SCENARIO={scenario}'
                   kill-phase-on: NEVER
         - multijob:
             name: yardstick
             condition: COMPLETED
             projects:
                 - name: 'yardstick-fuel-{pod}-daily-{stream}'
-                  current-parameters: true
+                  current-parameters: false
+                  predefined-parameters: 'DEPLOY_SCENARIO={scenario}'
                   kill-phase-on: NEVER
 
 - job-template:
diff --git a/jjb/fuel/fuel-deploy-virtual.sh b/jjb/fuel/fuel-deploy-virtual.sh
deleted file mode 100755
index e7c83b7..0000000
--- a/jjb/fuel/fuel-deploy-virtual.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-set -o errexit
-set -o nounset
-set -o pipefail
-
-# source the file so we get OPNFV vars
-source latest.properties
-
-# echo the info about artifact that is used during the deployment
-echo "Using $(echo $OPNFV_ARTIFACT_URL | cut -d'/' -f3) for deployment"
-
-# checkout the commit that was used for building the downloaded artifact
-# to make sure the ISO and deployment mechanism uses same versions
-echo "Checking out $OPNFV_GIT_SHA1"
-git checkout $OPNFV_GIT_SHA1 --quiet
-
-# create TMPDIR if it doesn't exist
-export TMPDIR=$HOME/tmpdir
-mkdir -p $TMPDIR
-
-# change permissions down to TMPDIR
-chmod a+x $HOME
-chmod a+x $TMPDIR
-
-# get the lab name from SLAVE_NAME
-# we currently support ericsson and intel labs
-LAB_NAME=${NODE_NAME%%-*}
-if [[ ! "$LAB_NAME" =~ (ericsson|intel) ]]; then
-    echo "Unsupported/unidentified lab $LAB_NAME. Cannot continue!"
-    exit 1
-else
-    echo "Using configuration for $LAB_NAME"
-fi
-
-# set CONFDIR, BRIDGE
-CONFDIR=$WORKSPACE/deploy/templates/$LAB_NAME/virtual_environment/noha/conf
-BRIDGE=pxebr
-
-# log info to console
-echo "Starting the deployment for a merged change using $INSTALLER_TYPE. This could take some time..."
-echo "--------------------------------------------------------"
-echo
-
-# start the deployment
-echo "Issuing command"
-echo "sudo $WORKSPACE/ci/deploy.sh -iso $WORKSPACE/opnfv.iso -dea $CONFDIR/dea.yaml -dha $CONFDIR/dha.yaml -s $TMPDIR -b $BRIDGE -nh"
-
-sudo $WORKSPACE/ci/deploy.sh -iso $WORKSPACE/opnfv.iso -dea $CONFDIR/dea.yaml -dha $CONFDIR/dha.yaml -s $TMPDIR -b $BRIDGE -nh
-
-echo
-echo "--------------------------------------------------------"
-echo "Virtual deployment is done! Removing the intermediate files from artifact repo"
-
-PROPERTIES_FILE=$(echo $OPNFV_ARTIFACT_URL | sed 's/iso/properties/')
-gsutil rm gs://$OPNFV_ARTIFACT_URL
-gsutil rm gs://$PROPERTIES_FILE
diff --git a/jjb/fuel/fuel-deploy.sh b/jjb/fuel/fuel-deploy.sh
index 6875a09..b0a1c78 100755
@@ -9,24 +9,38 @@ source latest.properties
 # echo the info about artifact that is used during the deployment
 echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
 
-# checkout the commit that was used for building the downloaded artifact
-# to make sure the ISO and deployment mechanism uses same versions
-echo "Checking out $OPNFV_GIT_SHA1"
-git checkout $OPNFV_GIT_SHA1 --quiet
+if [[ "$JOB_NAME" =~ "merge" ]]; then
+    # set simplest scenario for virtual deploys to run for merges
+    DEPLOY_SCENARIO="os-nosdn-nofeature-ha"
+else
+    # for none-merge deployments
+    # checkout the commit that was used for building the downloaded artifact
+    # to make sure the ISO and deployment mechanism uses same versions
+    echo "Checking out $OPNFV_GIT_SHA1"
+    git checkout $OPNFV_GIT_SHA1 --quiet
+fi
 
 # set deployment parameters
-BRIDGE=pxebr
 export TMPDIR=$HOME/tmpdir
+BRIDGE=pxebr
 LAB_NAME=${NODE_NAME/-*}
 POD_NAME=${NODE_NAME/*-}
 
+if [[ "$NODE_NAME" == "opnfv-jump-2" ]]; then
+    LAB_NAME="lf"
+    POD_NAME="pod2"
+fi
+
 if [[ "$NODE_NAME" =~ "virtual" ]]; then
     POD_NAME="virtual_kvm"
 fi
 
-if [[ "$NODE_NAME" == "opnfv-jump-2" ]]; then
-    LAB_NAME="lf"
-    POD_NAME="pod2"
+# we currently support ericsson, intel, and lf labs
+if [[ ! "$LAB_NAME" =~ (ericsson|intel|lf) ]]; then
+    echo "Unsupported/unidentified lab $LAB_NAME. Cannot continue!"
+    exit 1
+else
+    echo "Using configuration for $LAB_NAME"
 fi
 
 # create TMPDIR if it doesn't exist
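
The lab/pod detection in fuel-deploy.sh relies on bash pattern deletion: ${NODE_NAME/-*} keeps the text before the first '-', ${NODE_NAME/*-} the text after the last '-', with opnfv-jump-2 and virtual slaves special-cased afterwards. A small sketch with hypothetical slave names:

    #!/bin/bash
    for NODE_NAME in ericsson-virtual1 intel-pod5 opnfv-jump-2; do
        LAB_NAME=${NODE_NAME/-*}    # drop everything from the first '-'
        POD_NAME=${NODE_NAME/*-}    # drop everything up to the last '-'

        # opnfv-jump-2 is really LF POD2, so it is mapped explicitly
        if [[ "$NODE_NAME" == "opnfv-jump-2" ]]; then
            LAB_NAME="lf"
            POD_NAME="pod2"
        fi
        # all virtual slaves share the virtual_kvm pod definition
        if [[ "$NODE_NAME" =~ "virtual" ]]; then
            POD_NAME="virtual_kvm"
        fi

        echo "$NODE_NAME -> lab=$LAB_NAME pod=$POD_NAME"
    done
    # ericsson-virtual1 -> lab=ericsson pod=virtual_kvm
    # intel-pod5        -> lab=intel pod=pod5
    # opnfv-jump-2      -> lab=lf pod=pod2
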
diff --git a/jjb/fuel/fuel-project-jobs.yml b/jjb/fuel/fuel-project-jobs.yml
index 95bc3a6..cedb4ee 100644
             enabled: true
             max-total: 2
             max-per-node: 1
+        - build-blocker:
+            use-build-blocker: true
+            blocking-jobs:
+                - 'fuel-deploy-virtual-daily-.*'
 
     parameters:
         - project-parameter:
         - ssh-agent-credentials:
             users: '{ssh-credentials}'
 
-#    triggers:
-#        - gerrit:
-#            trigger-on:
-#                - change-merged-event
-#                - comment-added-contains-event:
-#                    comment-contains-value: 'remerge'
-#            projects:
-#              - project-compare-type: 'ANT'
-#                project-pattern: '{project}'
-#                branches:
-#                    - branch-compare-type: 'ANT'
-#                      branch-pattern: '**/{branch}'
-#            dependency-jobs: 'fuel-merge-build-{stream}'
+    triggers:
+        - gerrit:
+            trigger-on:
+                - change-merged-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'remerge'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                    - branch-compare-type: 'ANT'
+                      branch-pattern: '**/{branch}'
+            dependency-jobs: 'fuel-merge-build-{stream}'
 
     builders:
         - shell:
             !include-raw-escape: ./fuel-download-artifact.sh
         - shell:
-            !include-raw-escape: ./fuel-deploy-virtual.sh
+            !include-raw-escape: ./fuel-deploy.sh
         - shell:
             !include-raw-escape: ./fuel-workspace-cleanup.sh
 
diff --git a/jjb/functest/functest-ci-jobs.yml b/jjb/functest/functest-ci-jobs.yml
index ee6e7d6..49d29cb 100644
         - string:
             name: CI_DEBUG
             default: 'false'
-            description: "Show debut output information"
+            description: "Show debug output information"
 ########################
 # trigger macros
 ########################
 
             dir_result="${HOME}/opnfv/functest/reports"
             mkdir -p ${dir_result}
-            rm -rf ${dir_result}/*
+            sudo rm -rf ${dir_result}/*
             res_volume="-v ${dir_result}:/home/opnfv/functest/results"
 
             docker pull opnfv/functest:latest_stable >$redirect
diff --git a/jjb/joid/joid-deploy.sh b/jjb/joid/joid-deploy.sh
index 5ed33de..51ddb31 100644
@@ -91,33 +91,15 @@ NFV_FEATURES=${DEPLOY_OPTIONS[2]}
 HA_MODE=${DEPLOY_OPTIONS[3]}
 EXTRA=${DEPLOY_OPTIONS[4]}
 
-# Get the juju config path with those options, later we will directly use
-# scenario name
-case $SDN_CONTROLLER in
-    odl_l2)
-        SRCBUNDLE="ovs-odl"
-        SDN_CONTROLLER="odl"
-        ;;
-    onos)
-        SRCBUNDLE="onos"
-        ;;
-    ocl)
-        SRCBUNDLE="contrail"
-        SDN_CONTROLLER="opencontrail"
-        ;;
-    *)
-        SRCBUNDLE="ovs"
-        echo "${SDN_CONTROLLER} not in SDN controllers list, using 'nosdn' setting"
-        SDN_CONTROLLER="nosdn"
-        ;;
-    esac
-SRCBUNDLE="${WORKSPACE}/ci/${SDN_CONTROLLER}/juju-deployer/${SRCBUNDLE}"
+if [ "$SDN_CONTROLLER" == 'odl_l2' ] || [ "$SDN_CONTROLLER" == 'odl_l3' ]; then
+    SDN_CONTROLLER='odl'
+fi
 if [ "$HA_MODE" == 'noha' ]; then
-    SRCBUNDLE="${SRCBUNDLE}.yaml"
-    HA_MODE == 'nonha'
-else
-    SRCBUNDLE="${SRCBUNDLE}-${HA_MODE}.yaml"
+    HA_MODE='nonha'
 fi
+SRCBUNDLE="${WORKSPACE}/ci/${SDN_CONTROLLER}/juju-deployer/"
+SRCBUNDLE="${SRCBUNDLE}/ovs-${SDN_CONTROLLER}-${HA_MODE}.yaml"
+
 
 # Modify files
 
@@ -217,6 +199,8 @@ else
       --allocation-pool start=$EXTNET_FIP,end=$EXTNET_LIP \
       --disable-dhcp --gateway $EXTNET_GW $EXTNET_NET
     exit_on_error $? "External subnet creation failed"
+    neutron net-update $EXTNET_NAME --shared
+    exit_on_error $? "External network sharing failed"
 fi
 
 ##
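
The removed case statement means the juju bundle path is now computed directly from the normalized controller name and HA mode instead of a lookup table. A condensed sketch of the new mapping (the input values are examples; in the script they are parsed out of DEPLOY_OPTIONS):

    #!/bin/bash
    WORKSPACE=$PWD
    SDN_CONTROLLER="odl_l2"
    HA_MODE="noha"

    # odl_l2 and odl_l3 share the single 'odl' bundle directory
    if [ "$SDN_CONTROLLER" == 'odl_l2' ] || [ "$SDN_CONTROLLER" == 'odl_l3' ]; then
        SDN_CONTROLLER='odl'
    fi
    # the bundle files are named 'nonha' where the scenario strings say 'noha'
    if [ "$HA_MODE" == 'noha' ]; then
        HA_MODE='nonha'
    fi

    SRCBUNDLE="${WORKSPACE}/ci/${SDN_CONTROLLER}/juju-deployer/ovs-${SDN_CONTROLLER}-${HA_MODE}.yaml"
    echo "$SRCBUNDLE"   # .../ci/odl/juju-deployer/ovs-odl-nonha.yaml
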
diff --git a/jjb/opnfv/installer-params.yml b/jjb/opnfv/installer-params.yml
index 0ed4865..986ecc0 100644
             name: INSTALLER_TYPE
             default: compass
             description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: DEPLOY_SCENARIO
-            default: 'none'
-            description: 'Scenario to deploy and test'
         - string:
             name: EXTERNAL_NETWORK
             default: 'ext-net'
             name: INSTALLER_TYPE
             default: fuel
             description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: DEPLOY_SCENARIO
-            default: 'none'
-            description: 'Scenario to deploy and test'
         - string:
             name: EXTERNAL_NETWORK
             default: 'net04_ext'
diff --git a/jjb/opnfv/opnfv-docker.sh b/jjb/opnfv/opnfv-docker.sh
index 108485d..bf35597 100644
@@ -10,17 +10,12 @@ echo
 
 
 # Remove previous running containers if exist
-if [[ ! -z $(docker ps -a | grep $DOCKER_REPO_NAME) ]]; then
+if [[ -n "$(docker ps -a | grep $DOCKER_REPO_NAME)" ]]; then
     echo "Removing existing $DOCKER_REPO_NAME containers..."
-    #docker ps | grep $DOCKER_REPO_NAME | awk '{print $1}' | xargs docker stop
     docker ps -a | grep $DOCKER_REPO_NAME | awk '{print $1}' | xargs docker rm -f
     t=60
     # Wait max 60 sec for containers to be removed
-    while [[ $t -gt 0 ]]; do
-        ids=$(docker ps | grep $DOCKER_REPO_NAME |awk '{print $1}')
-        if [[ -z $ids ]]; then
-            break
-        fi
+    while [[ $t -gt 0 ]] && [[ -n "$(docker ps| grep $DOCKER_REPO_NAME)" ]]; do
         sleep 1
         let t=t-1
     done
@@ -28,13 +23,15 @@ fi
 
 
 # Remove existing images if exist
-if [[ ! -z $(docker images | grep $DOCKER_REPO_NAME) ]]; then
+if [[ -n "$(docker images | grep $DOCKER_REPO_NAME)" ]]; then
     echo "Docker images to remove:"
     docker images | head -1 && docker images | grep $DOCKER_REPO_NAME
     image_tags=($(docker images | grep $DOCKER_REPO_NAME | awk '{print $2}'))
     for tag in "${image_tags[@]}"; do
-        echo "Removing docker image $DOCKER_REPO_NAME:$tag..."
-        docker rmi -f $DOCKER_REPO_NAME:$tag
+        if [[ -n "$(docker images|grep $DOCKER_REPO_NAME|grep $tag)" ]]; then
+            echo "Removing docker image $DOCKER_REPO_NAME:$tag..."
+            docker rmi -f $DOCKER_REPO_NAME:$tag
+        fi
     done
 fi
 
diff --git a/jjb/opnfv/slave-params.yml b/jjb/opnfv/slave-params.yml
index f173bd9..c6078c6 100644
             name: GIT_BASE
             default: ssh://gerrit.opnfv.org:29418/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: POD_CONF_DIR
-            default: $WORKSPACE/deploy/templates/hardware_environment/conf/linux_foundation_lab/pod2
-            description: 'Directory where POD configuration files are located.'
 
 - parameter:
     name: 'ericsson-pod1-defaults'
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: POD_CONF_DIR
-            default: $WORKSPACE/deploy/templates/hardware_environment/conf/ericsson_montreal_lab/pod2
-            description: 'Directory where POD configuration files are located.'
 
 - parameter:
     name: 'intelpod2-jumphost-defaults'
diff --git a/jjb/yardstick/yardstick-ci-jobs.yml b/jjb/yardstick/yardstick-ci-jobs.yml
index 56582c9..6283e5d 100644
                               grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
                 INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
                 sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
-                sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-                sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
+                if [[ -n $(sudo iptables -L FORWARD |grep "REJECT"|grep "reject-with icmp-port-unreachable") ]]; then
+                    #note: this happens only in opnfv-lf-pod1
+                    sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
+                    sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
+                fi
             elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
                 # If production lab then creds may be retrieved dynamically
                 # creds are on the jumphost, always in the same folder
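
The iptables change in the last hunk deletes the libvirt REJECT rules only when they are actually present (the in-line comment notes this happens only on opnfv-lf-pod1), because iptables -D on a missing rule returns non-zero and can fail the step. The guard on its own, assuming sudo rights and the default virbr0 bridge:

    #!/bin/bash
    if [[ -n $(sudo iptables -L FORWARD | grep "REJECT" | grep "reject-with icmp-port-unreachable") ]]; then
        # rules exist on this jumphost, so deleting them is safe
        sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
        sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
    fi
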