Merge "jjb: infra: prototypes: bifrost: Ignore mysql and ironic failures"
authorMarkos Chandras <mchandras@suse.de>
Wed, 1 Feb 2017 11:36:50 +0000 (11:36 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Wed, 1 Feb 2017 11:36:50 +0000 (11:36 +0000)
19 files changed:
jjb/3rd_party_ci/odl-netvirt.yml
jjb/apex/apex.yml
jjb/armband/build.sh
jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
jjb/doctor/doctor.yml
jjb/escalator/escalator.yml
jjb/fuel/fuel-daily-jobs.yml
jjb/global/installer-params.yml
jjb/global/releng-macros.yml
jjb/global/slave-params.yml
jjb/infra/bifrost-verify-jobs.yml
jjb/infra/bifrost-verify.sh
jjb/kvmfornfv/kvmfornfv.yml
jjb/multisite/fuel-deploy-for-multisite.sh [new file with mode: 0755]
jjb/multisite/multisite-daily-jobs.yml
jjb/opnfvdocs/opnfvdocs.yml
jjb/releng/opnfv-docs.yml
prototypes/bifrost/scripts/test-bifrost-deployment.sh
utils/jenkins-jnlp-connect.sh

index 0e479cc..a57bf17 100644 (file)
             branch: '{branch}'
         - '{slave-label}-defaults'
         - '{installer}-defaults'
+        - string:
+            name: DEPLOY_SCENARIO
+            default: 'os-odl_l2-bgpvpn-noha'
+            description: 'Scenario to deploy and test'
 
     builders:
         - description-setter:
index 0560fd5..9733c59 100644 (file)
@@ -37,6 +37,7 @@
          - 'os-odl_l2-sfc-noha'
          - 'os-odl_l3-nofeature-ha'
          - 'os-odl-bgpvpn-ha'
+         - 'os-odl-gluon-ha'
          - 'os-odl_l3-fdio-noha'
          - 'os-odl_l3-fdio-ha'
          - 'os-odl_l3-fdio_dvr-noha'
index a058ca1..a71cf11 100755 (executable)
@@ -96,6 +96,7 @@ ls -al $BUILD_DIRECTORY
     echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
     echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
     echo "OPNFV_ARTIFACT_URL=$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
+    echo "OPNFV_ARTIFACT_SHA512SUM=$(sha512sum $BUILD_DIRECTORY/opnfv-$OPNFV_ARTIFACT_VERSION.iso | cut -d' ' -f1)"
     echo "OPNFV_BUILD_URL=$BUILD_URL"
 ) > $WORKSPACE/opnfv.properties
 
index d84e46f..1e7bf90 100644 (file)
@@ -50,7 +50,7 @@
             option: 'project'
 
     scm:
-        - git-scm-gerrit
+        - git-scm
 
     wrappers:
         - ssh-agent-wrapper
             block-level: 'NODE'
 
     scm:
-        - git-scm-gerrit
+        - git-scm
 
     wrappers:
         - ssh-agent-wrapper
index 9b31cc2..0d06fb7 100644 (file)
@@ -7,7 +7,7 @@
         - master:
             branch: '{stream}'
             gs-pathname: ''
-            docker-tag: 'master'
+            docker-tag: 'latest'
             disabled: false
         - danube:
             branch: 'stable/{stream}'
             project: '{project}'
         - gerrit-parameter:
             branch: '{branch}'
+        - string:
+            name: GIT_BRANCH
+            default: 'origin/{branch}'
+            description: "Override GIT_BRANCH parameter as we need stream name here"
+            #TODO(r-mibu): remove this workaround by cleaning up *_BRANCH params
         - string:
             name: OS_CREDS
             default: /home/jenkins/openstack.creds
             description: 'OpenStack credentials'
         - '{slave-label}-defaults'
-        - string:
-            name: INSTALLER_TYPE
-            default: '{installer}'
-            description: 'Installer used for deploying OPNFV on this POD'
+        - '{installer}-defaults'
         - string:
             name: DOCKER_TAG
             default: '{docker-tag}'
index 31e0c00..103a696 100644 (file)
     concurrent: true
 
     scm:
-        - git-scm-gerrit
+        - git-scm
 
     wrappers:
         - ssh-agent-wrapper
index b0dee73..d65d170 100644 (file)
@@ -81,6 +81,8 @@
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         - 'os-nosdn-kvm_ovs-ha':
             auto-trigger-name: 'daily-trigger-disabled'
+        - 'os-nosdn-kvm_ovs_dpdk-ha':
+            auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         # NOHA scenarios
         - 'os-nosdn-nofeature-noha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
         - 'os-nosdn-ovs-noha':
             auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+        - 'os-nosdn-kvm_ovs_dpdk-noha':
+            auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
 
     jobs:
         - 'fuel-{scenario}-{pod}-daily-{stream}'
 - trigger:
     name: 'fuel-os-odl_l2-bgpvpn-ha-baremetal-daily-master-trigger'
     triggers:
-        - timed: '' # '5 14 * * *'
+        - timed: '5 14 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm-ha-baremetal-daily-master-trigger'
     triggers:
     name: 'fuel-os-nosdn-ovs-ha-baremetal-daily-master-trigger'
     triggers:
         - timed: '5 20 * * *'
-
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-baremetal-daily-master-trigger'
+    triggers:
+        - timed: '30 12 * * *'
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-baremetal-daily-master-trigger'
     name: 'fuel-os-nosdn-ovs-noha-baremetal-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-master-trigger'
+    triggers:
+        - timed: '30 16 * * *'
 #-----------------------------------------------
 # Triggers for job running on fuel-baremetal against danube branch
 #-----------------------------------------------
     name: 'fuel-os-nosdn-ovs-ha-baremetal-daily-danube-trigger'
     triggers:
         - timed: '0 20 * * *'
-
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-baremetal-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-baremetal-daily-danube-trigger'
     name: 'fuel-os-nosdn-ovs-noha-baremetal-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-danube-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # Triggers for job running on fuel-virtual against master branch
 #-----------------------------------------------
     name: 'fuel-os-nosdn-ovs-ha-virtual-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-virtual-daily-master-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-virtual-daily-master-trigger'
     name: 'fuel-os-nosdn-ovs-noha-virtual-daily-master-trigger'
     triggers:
         - timed: '5 9 * * *'
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-master-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # Triggers for job running on fuel-virtual against danube branch
 #-----------------------------------------------
     name: 'fuel-os-nosdn-ovs-ha-virtual-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-virtual-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-virtual-daily-danube-trigger'
     name: 'fuel-os-nosdn-ovs-noha-virtual-daily-danube-trigger'
     triggers:
         - timed: '0 9 * * *'
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-danube-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD1 Triggers running against master branch
 #-----------------------------------------------
     name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-master-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod1-daily-master-trigger'
     name: 'fuel-os-nosdn-ovs-noha-zte-pod1-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-master-trigger'
+    triggers:
+        - timed: ''
 
 #-----------------------------------------------
 # ZTE POD2 Triggers running against master branch
     name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-master-trigger'
     name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-master-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD3 Triggers running against master branch
 #-----------------------------------------------
     name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod3-daily-master-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod3-daily-master-trigger'
     name: 'fuel-os-nosdn-ovs-noha-zte-pod3-daily-master-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-master-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD1 Triggers running against danube branch
 #-----------------------------------------------
     name: 'fuel-os-nosdn-ovs-ha-zte-pod1-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod1-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod1-daily-danube-trigger'
     name: 'fuel-os-nosdn-ovs-noha-zte-pod1-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-danube-trigger'
+    triggers:
+        - timed: ''
 
 #-----------------------------------------------
 # ZTE POD2 Triggers running against danube branch
     name: 'fuel-os-nosdn-ovs-ha-zte-pod2-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod2-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod2-daily-danube-trigger'
     name: 'fuel-os-nosdn-ovs-noha-zte-pod2-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-danube-trigger'
+    triggers:
+        - timed: ''
 #-----------------------------------------------
 # ZTE POD3 Triggers running against danube branch
 #-----------------------------------------------
     name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-zte-pod3-daily-danube-trigger'
+    triggers:
+        - timed: ''
 # NOHA Scenarios
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-zte-pod3-daily-danube-trigger'
     name: 'fuel-os-nosdn-ovs-noha-zte-pod3-daily-danube-trigger'
     triggers:
         - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-danube-trigger'
+    triggers:
+        - timed: ''
index 8e957eb..6e965a9 100644 (file)
@@ -9,10 +9,6 @@
             name: INSTALLER_TYPE
             default: apex
             description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: DEPLOY_SCENARIO
-            default: 'none'
-            description: 'Scenario to deploy and test'
         - string:
             name: EXTERNAL_NETWORK
             default: 'external'
             name: INSTALLER_TYPE
             default: netvirt
             description: 'Installer used for deploying OPNFV on this POD'
-        - string:
-            name: DEPLOY_SCENARIO
-            default: 'os-odl_l2-bgpvpn-noha'
-            description: 'Scenario to deploy and test'
         - string:
             name: EXTERNAL_NETWORK
             default: 'external'
index 89909b0..60fa22d 100644 (file)
                 failed: true
                 unstable: true
                 notbuilt: true
-            silent-start: true
 
 - wrapper:
     name: ssh-agent-wrapper
index 57bb8bd..c87fc28 100644 (file)
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
+- parameter:
+    name: 'intel-pod10-defaults'
+    parameters:
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - intel-pod10
+            default-slaves:
+                - intel-pod10
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
 - parameter:
     name: 'intel-pod3-defaults'
     parameters:
index 28e36aa..2a8d6e9 100644 (file)
@@ -38,9 +38,6 @@
             dib-os-element: 'opensuse-minimal'
             dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
             extra-dib-elements: 'openssh-server'
-            vm-disk: '30'
-            vm-memory: '4096'
-            vm-cpu: '2'
 #--------------------------------
 # type
 #--------------------------------
 #--------------------------------
 - defaults:
     name: vm_defaults
-    vm-disk: '100'
-    vm-memory: '8192'
-    vm-cpu: '4'
+    vm-disk: '30'
+    vm-disk-cache: 'unsafe'
+    vm-memory: '4096'
+    vm-cpu: '2'
 
 #--------------------------------
 # job templates
         - string:
             name: VM_DISK
             default: '{vm-disk}'
+        - string:
+            name: VM_DISK_CACHE
+            default: '{vm-disk-cache}'
         - string:
             name: VM_MEMORY
             default: '{vm-memory}'
                 file-paths:
                   - compare-type: ANT
                     pattern: 'prototypes/bifrost/**'
-                  - compare-type: ANT
-                    pattern: 'jjb/infra/**'
             readable-message: true
 
 #---------------------------
index a7ef9c4..94c7dac 100755 (executable)
@@ -17,14 +17,15 @@ function upload_logs() {
     BIFROST_CONSOLE_LOG="${BUILD_URL}/consoleText"
     BIFROST_GS_URL=${BIFROST_LOG_URL/http:/gs:}
 
-    echo "Uploading build logs to ${BIFROST_LOG_URL}"
-
-    echo "Uploading console output"
-    curl -s -L ${BIFROST_CONSOLE_LOG} > ${WORKSPACE}/build_log.txt
-    gsutil -q cp -Z ${WORKSPACE}/build_log.txt ${BIFROST_GS_URL}/build_log.txt
-    rm ${WORKSPACE}/build_log.txt
+    # Make sure the old landing page is gone in case
+    # we break later on. We don't want to publish
+    # stale information.
+    # TODO: Maybe cleanup the entire $BIFROST_GS_URL directory
+    # before we upload the new data.
+    gsutil -q rm ${BIFROST_GS_URL}/index.html || true
 
     if [[ -d ${WORKSPACE}/logs ]]; then
+        echo "Uploading collected bifrost logs to ${BIFROST_LOG_URL}"
         pushd ${WORKSPACE}/logs &> /dev/null
         for x in *.log; do
             echo "Compressing and uploading $x"
@@ -37,7 +38,7 @@ function upload_logs() {
     cat > ${WORKSPACE}/index.html <<EOF
 <html>
 <h1>Build results for <a href=https://$GERRIT_NAME/#/c/$GERRIT_CHANGE_NUMBER/$GERRIT_PATCHSET_NUMBER>$GERRIT_NAME/$GERRIT_CHANGE_NUMBER/$GERRIT_PATCHSET_NUMBER</a></h1>
-<h2>Job: $JOB_NAME</h2>
+<h2>Job: <a href=${BUILD_URL}>$JOB_NAME</a></h2>
 <ul>
 <li><a href=${BIFROST_LOG_URL}/build_log.txt>build_log.txt</a></li>
 EOF
@@ -55,8 +56,15 @@ EOF
 </html>
 EOF
 
-    gsutil -q cp ${WORKSPACE}/index.html ${BIFROST_GS_URL}/index.html
+    # Finally, download and upload the entire build log so we can retain
+    # as much build information as possible
+    echo "Uploading console output"
+    curl -s -L ${BIFROST_CONSOLE_LOG} > ${WORKSPACE}/build_log.txt
+    gsutil -q cp -Z ${WORKSPACE}/build_log.txt ${BIFROST_GS_URL}/build_log.txt
+    rm ${WORKSPACE}/build_log.txt
 
+    # Upload landing page
+    gsutil -q cp ${WORKSPACE}/index.html ${BIFROST_GS_URL}/index.html
     rm ${WORKSPACE}/index.html
 }
 
index fdce301..2d1ab57 100644 (file)
@@ -19,7 +19,7 @@
         - 'build':
             slave-label: 'opnfv-build-ubuntu'
         - 'test':
-            slave-label: 'intel-pod1'
+            slave-label: 'intel-pod10'
 #####################################
 # patch verification phases
 #####################################
                   node-parameters: false
                   kill-phase-on: FAILURE
                   abort-all-job: true
-#        - multijob:
-#            name: test
-#            condition: SUCCESSFUL
-#            projects:
-#                - name: 'kvmfornfv-verify-test-{stream}'
-#                  current-parameters: false
-#                  predefined-parameters: |
-#                    GERRIT_BRANCH=$GERRIT_BRANCH
-#                    GERRIT_REFSPEC=$GERRIT_REFSPEC
-#                    GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
-#                  node-parameters: false
-#                  kill-phase-on: FAILURE
-#                  abort-all-job: true
+        - multijob:
+            name: test
+            condition: SUCCESSFUL
+            projects:
+                - name: 'kvmfornfv-verify-test-{stream}'
+                  current-parameters: false
+                  predefined-parameters: |
+                    GERRIT_BRANCH=$GERRIT_BRANCH
+                    GERRIT_REFSPEC=$GERRIT_REFSPEC
+                    GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+                  node-parameters: false
+                  kill-phase-on: FAILURE
+                  abort-all-job: true
 - job-template:
     name: 'kvmfornfv-verify-{phase}-{stream}'
 
     scm:
         - git-scm
 
-#    triggers:
-#        - timed: '@midnight'
+    triggers:
+        - timed: '@midnight'
 
     builders:
         - description-setter:
diff --git a/jjb/multisite/fuel-deploy-for-multisite.sh b/jjb/multisite/fuel-deploy-for-multisite.sh
new file mode 100755 (executable)
index 0000000..d8b4051
--- /dev/null
@@ -0,0 +1,121 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -o nounset
+set -o pipefail
+
+# do not continue with the deployment if FRESH_INSTALL is not requested
+if [[ "$FRESH_INSTALL" == "true" ]]; then
+    echo "Fresh install requested. Proceeding with the installation."
+else
+    echo "Fresh install is not requested. Skipping the installation."
+    exit 0
+fi
+
+export TERM="vt220"
+
+# get the latest successful job console log and extract the properties filename
+FUEL_DEPLOY_BUILD_URL="https://build.opnfv.org/ci/job/fuel-deploy-virtual-daily-master/lastSuccessfulBuild/consoleText"
+FUEL_PROPERTIES_FILE=$(curl -s -L ${FUEL_DEPLOY_BUILD_URL} | grep 'ISO:' | awk '{print $2}' | sed 's/iso/properties/g')
+if [[ -z "$FUEL_PROPERTIES_FILE" ]]; then
+    echo "Unable to extract the url to Fuel ISO properties from ${FUEL_DEPLOY_BUILD_URL}"
+    exit 1
+fi
+curl -L -s -o $WORKSPACE/latest.properties http://artifacts.opnfv.org/fuel/$FUEL_PROPERTIES_FILE
+
+# source the file so we get OPNFV vars
+source latest.properties
+
+# echo the info about artifact that is used during the deployment
+echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+
+# download the iso
+echo "Downloading the ISO using the link http://$OPNFV_ARTIFACT_URL"
+curl -L -s -o $WORKSPACE/opnfv.iso http://$OPNFV_ARTIFACT_URL > gsutil.iso.log 2>&1
+
+
+# set deployment parameters
+DEPLOY_SCENARIO="os-nosdn-nofeature-noha"
+export TMPDIR=$HOME/tmpdir
+BRIDGE=${BRIDGE:-pxebr}
+LAB_NAME=${NODE_NAME/-*}
+POD_NAME=${NODE_NAME/*-}
+
+if [[ "$NODE_NAME" =~ "virtual" ]]; then
+    POD_NAME="virtual_kvm"
+fi
+
+# we currently support ericsson, intel, lf and zte labs
+if [[ ! "$LAB_NAME" =~ (ericsson|intel|lf|zte) ]]; then
+    echo "Unsupported/unidentified lab $LAB_NAME. Cannot continue!"
+    exit 1
+else
+    echo "Using configuration for $LAB_NAME"
+fi
+
+# create TMPDIR if it doesn't exist
+export TMPDIR=$HOME/tmpdir
+mkdir -p $TMPDIR
+
+# change permissions down to TMPDIR
+chmod a+x $HOME
+chmod a+x $TMPDIR
+
+# clone fuel repo and checkout the sha1 that corresponds to the ISO
+echo "Cloning fuel repo"
+git clone https://gerrit.opnfv.org/gerrit/p/fuel.git fuel
+cd $WORKSPACE/fuel
+echo "Checking out $OPNFV_GIT_SHA1"
+git checkout $OPNFV_GIT_SHA1 --quiet
+
+# clone the securedlab repo
+cd $WORKSPACE
+echo "Cloning securedlab repo ${GIT_BRANCH##origin/}"
+git clone ssh://jenkins-ericsson@gerrit.opnfv.org:29418/securedlab --quiet \
+    --branch ${GIT_BRANCH##origin/}
+
+# log file name
+FUEL_LOG_FILENAME="${JOB_NAME}_${BUILD_NUMBER}.log.tar.gz"
+
+# construct the command
+DEPLOY_COMMAND="sudo $WORKSPACE/fuel/ci/deploy.sh -b file://$WORKSPACE/securedlab \
+    -l $LAB_NAME -p $POD_NAME -s $DEPLOY_SCENARIO -i file://$WORKSPACE/opnfv.iso \
+    -H -B $BRIDGE -S $TMPDIR -L $WORKSPACE/$FUEL_LOG_FILENAME"
+
+# log info to console
+echo "Deployment parameters"
+echo "--------------------------------------------------------"
+echo "Scenario: $DEPLOY_SCENARIO"
+echo "Lab: $LAB_NAME"
+echo "POD: $POD_NAME"
+echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
+echo
+echo "Starting the deployment using $INSTALLER_TYPE. This could take some time..."
+echo "--------------------------------------------------------"
+echo
+
+# start the deployment
+echo "Issuing command"
+echo "$DEPLOY_COMMAND"
+echo
+
+$DEPLOY_COMMAND
+exit_code=$?
+
+echo
+echo "--------------------------------------------------------"
+echo "Deployment is done!"
+
+if [[ $exit_code -ne 0 ]]; then
+    echo "Deployment failed!"
+    exit $exit_code
+else
+    echo "Deployment is successful!"
+    exit 0
+fi
index 437dde9..f2b2871 100644 (file)
@@ -9,17 +9,15 @@
 
     phase:
         - 'fuel-deploy-regionone-virtual':
-            slave-label: 'ericsson-virtual12'
+            slave-label: ericsson-virtual12
         - 'fuel-deploy-regiontwo-virtual':
-            slave-label: 'ericsson-virtual13'
+            slave-label: ericsson-virtual13
         - 'register-endpoints':
-            slave-label: 'ericsson-virtual12'
+            slave-label: ericsson-virtual12
         - 'update-auth':
-            slave-label: 'ericsson-virtual13'
+            slave-label: ericsson-virtual13
         - 'kingbird-deploy-virtual':
-            slave-label: 'ericsson-virtual12'
-        - 'kingbird-functest':
-            slave-label: 'ericsson-virtual12'
+            slave-label: ericsson-virtual12
 
     stream:
         - master:
     parameters:
         - project-parameter:
             project: '{project}'
+        - choice:
+            name: FRESH_INSTALL
+            choices:
+                - 'true'
+                - 'false'
         - string:
             name: KINGBIRD_LOG_FILE
             default: $WORKSPACE/kingbird.log
@@ -63,6 +66,7 @@
                     OS_REGION=RegionOne
                     REGIONONE_IP=100.64.209.10
                     REGIONTWO_IP=100.64.209.11
+                    FRESH_INSTALL=$FRESH_INSTALL
                   node-parameters: false
                   node-label-name: SLAVE_LABEL
                   node-label: ericsson-virtual12
@@ -76,6 +80,7 @@
                     OS_REGION=RegionTwo
                     REGIONONE_IP=100.64.209.10
                     REGIONTWO_IP=100.64.209.11
+                    FRESH_INSTALL=$FRESH_INSTALL
                   node-parameters: false
                   node-label-name: SLAVE_LABEL
                   node-label: ericsson-virtual13
@@ -91,6 +96,7 @@
                     OS_REGION=RegionOne
                     REGIONONE_IP=100.64.209.10
                     REGIONTWO_IP=100.64.209.11
+                    FRESH_INSTALL=$FRESH_INSTALL
                   node-parameters: false
                   node-label-name: SLAVE_LABEL
                   node-label: ericsson-virtual12
                     OS_REGION=RegionTwo
                     REGIONONE_IP=100.64.209.10
                     REGIONTWO_IP=100.64.209.11
+                    FRESH_INSTALL=$FRESH_INSTALL
                   node-parameters: false
                   node-label-name: SLAVE_LABEL
                   node-label: ericsson-virtual13
                     OS_REGION=RegionOne
                     REGIONONE_IP=100.64.209.10
                     REGIONTWO_IP=100.64.209.11
+                    FRESH_INSTALL=$FRESH_INSTALL
                   node-parameters: false
                   node-label-name: SLAVE_LABEL
                   node-label: ericsson-virtual12
             name: kingbird-functest
             condition: SUCCESSFUL
             projects:
-                - name: 'multisite-kingbird-functest-{stream}'
+                - name: 'functest-fuel-virtual-suite-{stream}'
                   current-parameters: false
                   predefined-parameters: |
                     DEPLOY_SCENARIO='os-nosdn-multisite-noha'
+                    FUNCTEST_SUITE_NAME='multisite'
                     OS_REGION=RegionOne
                     REGIONONE_IP=100.64.209.10
                     REGIONTWO_IP=100.64.209.11
+                    FRESH_INSTALL=$FRESH_INSTALL
                   node-parameters: false
                   node-label-name: SLAVE_LABEL
                   node-label: ericsson-virtual12
         - string:
             name: KINGBIRD_LOG_FILE
             default: $WORKSPACE/kingbird.log
+        - gerrit-parameter:
+            branch: '{branch}'
+        - 'fuel-defaults'
         - '{slave-label}-defaults'
+        - choice:
+            name: FRESH_INSTALL
+            choices:
+                - 'true'
+                - 'false'
+
+    scm:
+        - git-scm
 
     builders:
         - description-setter:
 - builder:
     name: 'multisite-fuel-deploy-regionone-virtual-builder'
     builders:
+        - shell:
+            !include-raw-escape: ./fuel-deploy-for-multisite.sh
         - shell: |
             #!/bin/bash
 
             echo "This is where we deploy fuel, extract passwords and save into file"
+
+            cd $WORKSPACE/tools/keystone/
+            ./run.sh -t controller -r fetchpass.sh -o servicepass.ini
+
 - builder:
     name: 'multisite-fuel-deploy-regiontwo-virtual-builder'
     builders:
+        - shell:
+            !include-raw-escape: ./fuel-deploy-for-multisite.sh
         - shell: |
             #!/bin/bash
 
             echo "This is where we deploy fuel, extract publicUrl, privateUrl, and adminUrl and save into file"
+
+            cd $WORKSPACE/tools/keystone/
+            ./run.sh -t controller -r endpoint.sh -o endpoints.ini
 - builder:
     name: 'multisite-register-endpoints-builder'
     builders:
         - copyartifact:
             project: 'multisite-fuel-deploy-regiontwo-virtual-{stream}'
             which-build: multijob-build
-            filter: "RegionTwo-Endpoints.txt"
+            filter: "endpoints.ini"
         - shell: |
             #!/bin/bash
 
-            echo "This is where we register RegionTwo in RegionOne keystone"
+            echo "This is where we register RegionTwo in RegionOne keystone using endpoints.ini"
+
+            cd $WORKSPACE/tools/keystone/
+            ./run.sh -t controller -r region.sh -d $WORKSPACE/endpoints.ini
 - builder:
     name: 'multisite-update-auth-builder'
     builders:
         - copyartifact:
             project: 'multisite-fuel-deploy-regionone-virtual-{stream}'
             which-build: multijob-build
-            filter: "RegionOne-Passwords.txt"
+            filter: "servicepass.ini"
         - shell: |
             #!/bin/bash
 
-            echo "This is where we read passwords from RegionOne-passwords.txt and replace passwords in RegionTwo"
+            echo "This is where we read passwords from servicepass.ini and replace passwords in RegionTwo"
+
+            cd $WORKSPACE/tools/keystone/
+            ./run.sh -t controller -r writepass.sh -d $WORKSPACE/servicepass.ini
+            ./run.sh -t compute -r writepass.sh -d $WORKSPACE/servicepass.ini
 - builder:
     name: 'multisite-kingbird-deploy-virtual-builder'
     builders:
             #!/bin/bash
 
             echo "This is where we install kingbird"
-- builder:
-    name: 'multisite-kingbird-functest-builder'
-    builders:
-        - shell: |
-            #!/bin/bash
-
-            echo "This is where we run kingbird-functest"
+            $WORKSPACE/tools/kingbird/deploy.sh
 ########################
 # publisher macros
 ########################
     name: 'multisite-fuel-deploy-regionone-virtual-publisher'
     publishers:
         - archive:
-            artifacts: '/root/servicepass.ini'
+            artifacts: 'servicepass.ini'
             allow-empty: false
             only-if-success: true
             fingerprint: true
     name: 'multisite-fuel-deploy-regiontwo-virtual-publisher'
     publishers:
         - archive:
-            artifacts: '/root/endpoints.ini'
+            artifacts: 'endpoints.ini'
             allow-empty: false
             only-if-success: true
             fingerprint: true
index 0d4c461..cf7bae5 100644 (file)
@@ -87,7 +87,7 @@
             description: "Directory where the build artifact will be located upon the completion of the build."
 
     scm:
-        - git-scm-gerrit
+        - git-scm
 
     triggers:
         - gerrit:
index f6092ee..6224ca9 100644 (file)
@@ -89,7 +89,7 @@
             description: "JJB configured GERRIT_REFSPEC parameter"
 
     scm:
-        - git-scm-gerrit
+        - git-scm
 
     triggers:
         - gerrit:
index 90f014c..914a906 100755 (executable)
@@ -36,6 +36,7 @@ export TEST_VM_NODE_NAMES="jumphost.opnfvlocal controller00.opnfvlocal compute00
 export VM_DOMAIN_TYPE="kvm"
 export VM_CPU=${VM_CPU:-4}
 export VM_DISK=${VM_DISK:-100}
+export VM_DISK_CACHE=${VM_DISK_CACHE:-unsafe}
 TEST_PLAYBOOK="test-bifrost-infracloud.yaml"
 USE_INSPECTOR=true
 USE_CIRROS=false
index be9fe18..8fce2e0 100755 (executable)
@@ -92,13 +92,16 @@ main () {
             exit 1
         fi
 
+        chown=$(type -p chown)
+        mkdir=$(type -p mkdir)
+
         makemonit () {
             echo "Writing the following as monit config:"
         cat << EOF | tee $monitconfdir/jenkins
 check directory jenkins_piddir path /var/run/$jenkinsuser
-if does not exist then exec "/usr/bin/mkdir -p /var/run/$jenkinsuser"
-if failed uid $jenkinsuser then exec "/usr/bin/chown $jenkinsuser /var/run/$jenkinsuser"
-if failed gid $jenkinsuser then exec "/usr/bin/chown :$jenkinsuser /var/run/$jenkinsuser"
+if does not exist then exec "$mkdir -p /var/run/$jenkinsuser"
+if failed uid $jenkinsuser then exec "$chown $jenkinsuser /var/run/$jenkinsuser"
+if failed gid $jenkinsuser then exec "$chown :$jenkinsuser /var/run/$jenkinsuser"
 
 check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
 start program = "/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $jenkinshome; export started_monit=true; $0 $@' with timeout 60 seconds"
@@ -111,9 +114,9 @@ EOF
             #test for diff
             if [[ "$(diff $monitconfdir/jenkins <(echo "\
 check directory jenkins_piddir path /var/run/$jenkinsuser
-if does not exist then exec \"/usr/bin/mkdir -p /var/run/$jenkinsuser\"
-if failed uid $jenkinsuser then exec \"/usr/bin/chown $jenkinsuser /var/run/$jenkinsuser\"
-if failed gid $jenkinsuser then exec \"/usr/bin/chown :$jenkinsuser /var/run/$jenkinsuser\"
+if does not exist then exec \"$mkdir -p /var/run/$jenkinsuser\"
+if failed uid $jenkinsuser then exec \"$chown $jenkinsuser /var/run/$jenkinsuser\"
+if failed gid $jenkinsuser then exec \"$chown :$jenkinsuser /var/run/$jenkinsuser\"
 
 check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
 start program = \"/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $jenkinshome; export started_monit=true; $0 $@' with timeout 60 seconds\"