Merge "Doctor: pass project parameter to other job"
author     Ryota Mibu <r-mibu@cq.jp.nec.com>    Mon, 12 Feb 2018 10:57:27 +0000 (10:57 +0000)
committer  Gerrit Code Review <gerrit@opnfv.org>    Mon, 12 Feb 2018 10:57:27 +0000 (10:57 +0000)
15 files changed:
jjb/apex/apex-verify-jobs.yml
jjb/apex/apex.yml
jjb/apex/apex.yml.j2
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-verify-jobs.yml
jjb/container4nfv/arm64/yardstick-arm64.sh [new file with mode: 0755]
jjb/container4nfv/yardstick-arm64.yml [new file with mode: 0644]
jjb/dovetail/dovetail-ci-jobs.yml
jjb/dovetail/dovetail-run.sh
jjb/functest/functest-alpine.sh
jjb/global/releng-macros.yml
jjb/global/slave-params.yml
jjb/releng/opnfv-utils.yml
jjb/vswitchperf/vswitchperf.yml
jjb/xci/bifrost-verify.sh

diff --git a/jjb/apex/apex-verify-jobs.yml b/jjb/apex/apex-verify-jobs.yml
index 111d0c1..421a3fa 100644 (file)
 - job-template:
     name: 'apex-verify-{stream}'
 
-    node: 'apex-virtual-master'
-
     concurrent: true
 
     disabled: '{obj:disabled}'
     project-type: 'multijob'
 
     parameters:
+      - '{project}-virtual-{stream}-defaults'
       - apex-parameter:
           gs-pathname: '{gs-pathname}/dev'
       - project-parameter:
 - job-template:
     name: 'apex-verify-gate-{stream}'
 
-    node: 'apex-build-{stream}'
-
     concurrent: true
 
     disabled: '{obj:disabled}'
     project-type: 'multijob'
 
     parameters:
+      - '{project}-virtual-{stream}-defaults'
       - apex-parameter:
           gs-pathname: '{gs-pathname}/dev'
       - project-parameter:
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index 6714d6a..b07ccd6 100644 (file)
 - job-template:
     name: 'apex-deploy-{platform}-{stream}'
 
-    node: 'apex-{platform}-{stream}'
-
     concurrent: true
 
     disabled: false
 - job-template:
     name: 'apex-virtual-{stream}'
 
-    node: 'apex-virtual-master'
-
     project-type: 'multijob'
 
     disabled: false
     #     branch:    branch (eg. stable)
     project-type: 'multijob'
 
-    node: '{baremetal-slave}'
-
     disabled: '{obj:disable_daily}'
 
     scm:
diff --git a/jjb/apex/apex.yml.j2 b/jjb/apex/apex.yml.j2
index 27a854d..b9cbd02 100644 (file)
 - job-template:
     name: 'apex-deploy-{platform}-{stream}'
 
-    node: 'apex-{platform}-{stream}'
-
     concurrent: true
 
     disabled: false
 - job-template:
     name: 'apex-virtual-{stream}'
 
-    node: 'apex-virtual-master'
-
     project-type: 'multijob'
 
     disabled: false
     #     branch:    branch (eg. stable)
     project-type: 'multijob'
 
-    node: '{baremetal-slave}'
-
     disabled: '{obj:disable_daily}'
 
     scm:
diff --git a/jjb/compass4nfv/compass-ci-jobs.yml b/jjb/compass4nfv/compass-ci-jobs.yml
index 70296bd..81d76d5 100644 (file)
           use-build-blocker: true
           blocking-jobs:
             - 'compass-os-.*?-{pod}-daily-.*?'
+            - 'compass-k8-.*?-{pod}-daily-.*?'
             - 'compass-os-.*?-baremetal-daily-.*?'
+            - 'compass-k8-.*?-baremetal-daily-.*?'
             - 'compass-verify-[^-]*-[^-]*'
           block-level: 'NODE'
 
               build-step-failure-threshold: 'never'
               failure-threshold: 'never'
               unstable-threshold: 'FAILURE'
-      # dovetail only master by now, not sync with A/B/C branches
       # here the stream means the SUT stream, dovetail stream is defined in its own job
-      # only run on os-(nosdn|odl_l2|odl_l3)-nofeature-ha scenario
-      # run against SUT master/euphrates branch, dovetail docker image with latest tag(Monday, Tuesday)
-      # run against SUT master/euphrates branch, dovetail docker image with cvp.X.X.X tag(Thursday, Friday)
-      # run against SUT danube branch, dovetail docker image with cvp.X.X.X tag on huawei-pod7
+      # only run on os-(nosdn|odl_l3)-nofeature-ha scenarios
+      # run with testsuite default, dovetail docker image with latest tag (Monday, Tuesday)
+      # run with testsuite proposed_tests, dovetail docker image with latest tag (Thursday, Friday)
       - conditional-step:
           condition-kind: and
           condition-operands:
               use-build-time: true
           steps:
             - trigger-builds:
-                - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
+                - project: 'dovetail-compass-{pod}-default-{stream}'
                   current-parameters: false
                   predefined-parameters: |
                     DOCKER_TAG=latest
diff --git a/jjb/compass4nfv/compass-verify-jobs.yml b/jjb/compass4nfv/compass-verify-jobs.yml
index f215b78..444b173 100644 (file)
@@ -70,6 +70,7 @@
           blocking-jobs:
             - 'compass-verify-[^-]*-[^-]*'
             - 'compass-os-.*?-virtual-daily-.*?'
+            - 'compass-k8-.*?-virtual-daily-.*?'
           block-level: 'NODE'
 
     wrappers:
diff --git a/jjb/container4nfv/arm64/yardstick-arm64.sh b/jjb/container4nfv/arm64/yardstick-arm64.sh
new file mode 100755 (executable)
index 0000000..26c6fdc
--- /dev/null
@@ -0,0 +1,89 @@
+#!/bin/bash
+set -e
+
+sshpass -p root ssh root@10.1.0.50 \
+  "mkdir -p /etc/yardstick; rm -rf /etc/yardstick/admin.conf"
+
+sshpass -p root ssh root@10.1.0.50 \
+  kubectl config set-cluster yardstick --server=127.0.0.1:8080 --insecure-skip-tls-verify=true --kubeconfig=/etc/yardstick/admin.conf
+sshpass -p root ssh root@10.1.0.50 \
+  kubectl config set-context yardstick --cluster=yardstick --kubeconfig=/etc/yardstick/admin.conf
+sshpass -p root ssh root@10.1.0.50 \
+  kubectl config use-context yardstick --kubeconfig=/etc/yardstick/admin.conf
+
+if [ -z "$redirect" ]; then
+  redirect="/dev/stdout"
+fi
+
+if [ -z "$DOCKER_TAG" ]; then
+  DOCKER_TAG='latest'
+fi
+
+if [ -z "$NODE_NAME" ]; then
+  NODE_NAME='arm-virtual03'
+fi
+
+if [ -z "$DEPLOY_SCENARIO" ]; then
+  DEPLOY_SCENARIO='k8-nosdn-lb-noha_daily'
+fi
+
+if [ -z "$YARDSTICK_DB_BACKEND" ]; then
+  YARDSTICK_DB_BACKEND='-i 104.197.68.199:8086'
+fi
+
+# Pull the image with correct tag
+DOCKER_REPO='opnfv/yardstick'
+if [ "$(uname -m)" = 'aarch64' ]; then
+    DOCKER_REPO="${DOCKER_REPO}_$(uname -m)"
+fi
+echo "Yardstick: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
+sshpass -p root ssh root@10.1.0.50 \
+  docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect
+
+if [ -z "$BRANCH" ]; then
+  BRANCH=master
+fi
+
+opts="--name=yardstick --privileged=true --net=host -d -it "
+envs="-e YARDSTICK_BRANCH=${BRANCH} -e BRANCH=${BRANCH} \
+  -e NODE_NAME=${NODE_NAME} \
+  -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
+rc_file_vol="-v /etc/yardstick/admin.conf:/etc/yardstick/admin.conf"
+cacert_file_vol=""
+map_log_dir=""
+sshkey=""
+YARDSTICK_SCENARIO_SUITE_NAME="opnfv_k8-nosdn-lb-noha_daily.yaml"
+
+# map log directory
+branch=${BRANCH##*/}
+dir_result="${HOME}/opnfv/yardstick/results/${branch}"
+mkdir -p ${dir_result}
+sudo rm -rf ${dir_result}/*
+map_log_dir="-v ${dir_result}:/tmp/yardstick"
+
+# Run docker
+cmd="docker rm -f yardstick || true"
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} ${map_log_dir} ${sshkey} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
+echo "Yardstick: Running docker cmd: ${cmd}"
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+
+cmd='sudo docker exec yardstick sed -i.bak "/# execute tests/i\sed -i.bak \"s/openretriever\\\/yardstick/openretriever\\\/yardstick_aarch64/g\" \
+    $\{YARDSTICK_REPO_DIR\}/tests/opnfv/test_cases/opnfv_yardstick_tc080.yaml" /usr/local/bin/exec_tests.sh'
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+echo "Yardstick: run tests: ${YARDSTICK_SCENARIO_SUITE_NAME}"
+cmd="sudo docker exec yardstick exec_tests.sh ${YARDSTICK_DB_BACKEND} ${YARDSTICK_SCENARIO_SUITE_NAME}"
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+cmd="docker rm -f yardstick"
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+echo "Yardstick: done!"
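
Every remote step in the script above repeats the same sshpass invocation against the arm64 jump host. A minimal refactoring sketch, assuming the same fixed host and root password used in the script; the remote() helper is hypothetical and not part of the change:

    #!/bin/bash
    ARM_HOST="root@10.1.0.50"
    ARM_PASS="root"

    # Run a command line on the jump host, mirroring the pattern above.
    remote() {
        sshpass -p "${ARM_PASS}" ssh "${ARM_HOST}" "$@"
    }

    remote "mkdir -p /etc/yardstick; rm -rf /etc/yardstick/admin.conf"
    remote kubectl config use-context yardstick --kubeconfig=/etc/yardstick/admin.conf
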
diff --git a/jjb/container4nfv/yardstick-arm64.yml b/jjb/container4nfv/yardstick-arm64.yml
new file mode 100644 (file)
index 0000000..bd1d8aa
--- /dev/null
@@ -0,0 +1,121 @@
+---
+###################################
+# job configuration for yardstick
+###################################
+- project:
+    name: yardstick-arm64
+
+    project: '{name}'
+
+    # -------------------------------
+    # BRANCH ANCHORS
+    # -------------------------------
+    master: &master
+      stream: master
+      branch: '{stream}'
+      gs-pathname: ''
+      docker-tag: 'latest'
+    # -------------------------------
+    # POD, INSTALLER, AND BRANCH MAPPING
+    # -------------------------------
+    #    Installers using labels
+    #            CI PODs
+    # This section should only contain the installers
+    # that have been switched using labels for slaves
+    # -------------------------------
+    pod:
+      # apex CI PODs
+      - arm-virtual03:
+          slave-label: arm-packet01
+          installer: compass
+          auto-trigger-name: 'daily-trigger-disabled'
+          <<: *master
+    # -------------------------------
+    testsuite:
+      - 'daily'
+
+    jobs:
+      - 'yardstick-arm64-{installer}-{pod}-{testsuite}-{stream}'
+
+################################
+# job templates
+################################
+- job-template:
+    name: 'yardstick-arm64-{installer}-{pod}-{testsuite}-{stream}'
+    disabled: false
+
+    concurrent: true
+
+    properties:
+      - logrotate-default
+      - throttle:
+          enabled: true
+          max-per-node: 1
+          option: 'project'
+
+    wrappers:
+      - build-name:
+          name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+      - timeout:
+          timeout: 60
+          abort: true
+
+    triggers:
+      - '{auto-trigger-name}'
+
+    parameters:
+      - project-parameter:
+          project: '{project}'
+          branch: '{branch}'
+      - '{installer}-defaults'
+      - 'yardstick-params-{slave-label}'
+      - string:
+          name: DEPLOY_SCENARIO
+          default: 'k8-nosdn-lb-noha_daily'
+      - string:
+          name: DOCKER_TAG
+          default: '{docker-tag}'
+          description: 'Tag to pull docker image'
+      - string:
+          name: YARDSTICK_SCENARIO_SUITE_NAME
+          default: opnfv_${{DEPLOY_SCENARIO}}_{testsuite}.yaml
+          description: 'Path to test scenario suite'
+      - string:
+          name: CI_DEBUG
+          default: 'false'
+          description: "Show debug output information"
+
+    scm:
+      - git-scm
+
+    builders:
+      - description-setter:
+          description: "POD: $NODE_NAME"
+      - 'yardstick-arm64'
+
+    publishers:
+      - email:
+          recipients: trevor.tao@arm.com yibo.cai@arm.com
+      - email-jenkins-admins-on-failure
+
+########################
+# builder macros
+########################
+- builder:
+    name: yardstick-arm64
+    builders:
+      - shell:
+          !include-raw: arm64/yardstick-arm64.sh
+
+########################
+# parameter macros
+########################
+
+
+- parameter:
+    name: 'yardstick-params-arm-packet01'
+    parameters:
+      - string:
+          name: YARDSTICK_DB_BACKEND
+          default: '-i 104.197.68.199:8086'
+          description: 'Arguments to use in order to choose the backend DB'
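
Note that the shell builder (arm64/yardstick-arm64.sh, above) hardcodes YARDSTICK_SCENARIO_SUITE_NAME, so the job parameter of the same name is effectively unused. For reference, a sketch of how the template default would expand for this project, where testsuite is 'daily' and the values are the defaults declared above:

    #!/bin/bash
    DEPLOY_SCENARIO='k8-nosdn-lb-noha_daily'
    # JJB renders opnfv_${{DEPLOY_SCENARIO}}_{testsuite}.yaml as:
    YARDSTICK_SCENARIO_SUITE_NAME="opnfv_${DEPLOY_SCENARIO}_daily.yaml"
    echo "${YARDSTICK_SCENARIO_SUITE_NAME}"
    # -> opnfv_k8-nosdn-lb-noha_daily_daily.yaml (note the doubled _daily
    #    suffix; the script pins the suite name instead)
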
diff --git a/jjb/dovetail/dovetail-ci-jobs.yml b/jjb/dovetail/dovetail-ci-jobs.yml
index 99867c3..5e5b6e1 100644 (file)
       dovetail-branch: '{stream}'
       gs-pathname: ''
       docker-tag: 'latest'
-    danube: &danube
-      stream: danube
-      branch: 'stable/{stream}'
-      dovetail-branch: master
-      gs-pathname: '/{stream}'
-      docker-tag: 'cvp.0.9.0'
     euphrates: &euphrates
       stream: euphrates
       branch: 'stable/{stream}'
       dovetail-branch: master
       gs-pathname: '/{stream}'
-      docker-tag: 'cvp.0.9.0'
+      docker-tag: 'latest'
 
     # ----------------------------------
     # POD, PLATFORM, AND BRANCH MAPPING
           SUT: fuel
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - baremetal:
-          slave-label: fuel-baremetal
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - virtual:
-          slave-label: fuel-virtual
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - baremetal:
           slave-label: fuel-baremetal
           SUT: fuel
           SUT: compass
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - baremetal:
-          slave-label: compass-baremetal
-          SUT: compass
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - virtual:
-          slave-label: compass-virtual
-          SUT: compass
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - baremetal:
           slave-label: compass-baremetal
           SUT: compass
           SUT: apex
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - virtual:
-          slave-label: apex-virtual-danube
-          SUT: apex
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - baremetal:
-          slave-label: apex-baremetal-danube
-          SUT: apex
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - virtual:
           slave-label: apex-virtual-master
           SUT: apex
           SUT: fuel
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - armband-baremetal:
-          slave-label: armband-baremetal
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - armband-virtual:
-          slave-label: armband-virtual
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - armband-baremetal:
           slave-label: armband-baremetal
           SUT: fuel
           SUT: fuel
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - zte-pod1:
-          slave-label: zte-pod1
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - zte-pod3:
-          slave-label: zte-pod3
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - huawei-pod4:
-          slave-label: huawei-pod4
-          SUT: apex
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - huawei-pod7:
-          slave-label: huawei-pod7
-          SUT: compass
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - zte-pod1:
           slave-label: zte-pod1
           SUT: fuel
 
     # -------------------------------
     testsuite:
+      - 'default'
       - 'proposed_tests'
 
     jobs:
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index e50242b..451662a 100755 (executable)
@@ -246,8 +246,6 @@ if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_T
     sed -i 's/internal/public/g' ${OPENRC}
     if [[ ${public_url} =~ 'v2' ]]; then
         sed -i "s/OS_IDENTITY_API_VERSION=3/OS_IDENTITY_API_VERSION=2.0/g" ${OPENRC}
-        sed -i '/OS_PROJECT_DOMAIN_NAME/d' ${OPENRC}
-        sed -i '/OS_USER_DOMAIN_NAME/d' ${OPENRC}
     fi
     cat ${OPENRC}
 fi
@@ -275,10 +273,13 @@ cp_tempest_cmd="docker cp ${DOVETAIL_CONFIG}/tempest_conf.yaml $container_id:/ho
 echo "exec command: ${cp_tempest_cmd}"
 $cp_tempest_cmd
 
-list_cmd="dovetail list ${TESTSUITE}"
-run_cmd="dovetail run --testsuite ${TESTSUITE} -d"
-echo "Container exec command: ${list_cmd}"
-docker exec $container_id ${list_cmd}
+if [[ ${TESTSUITE} == 'default' ]]; then
+    testsuite=''
+else
+    testsuite="--testsuite ${TESTSUITE}"
+fi
+
+run_cmd="dovetail run ${testsuite} -d"
 echo "Container exec command: ${run_cmd}"
 docker exec $container_id ${run_cmd}
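
The effect of the new branch, shown for both values of TESTSUITE (a small illustration, not part of the change):

    #!/bin/bash
    for TESTSUITE in default proposed_tests; do
        if [[ ${TESTSUITE} == 'default' ]]; then
            testsuite=''   # let dovetail fall back to its built-in suite
        else
            testsuite="--testsuite ${TESTSUITE}"
        fi
        echo "dovetail run ${testsuite} -d"
    done
    # -> dovetail run  -d
    # -> dovetail run --testsuite proposed_tests -d
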
 
diff --git a/jjb/functest/functest-alpine.sh b/jjb/functest/functest-alpine.sh
index e7171f0..432bbbb 100755 (executable)
@@ -4,7 +4,9 @@ set -e
 set +u
 set +o pipefail
 
+CI_LOOP=${CI_LOOP:-daily}
 TEST_DB_URL=http://testresults.opnfv.org/test/api/v1/results
+ENERGY_RECORDER_API_URL=http://energy.opnfv.fr/resources
 
 check_os_deployment() {
     FUNCTEST_IMAGE=opnfv/functest-healthcheck:${DOCKER_TAG}
@@ -147,8 +149,8 @@ test -f ${HOME}/opnfv/functest/custom/params_${DOCKER_TAG} && custom_params=$(ca
 
 envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
     -e NODE_NAME=${NODE_NAME} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO} \
-    -e BUILD_TAG=${BUILD_TAG} -e DEPLOY_TYPE=${DEPLOY_TYPE} \
-    -e TEST_DB_URL=${TEST_DB_URL}"
+    -e BUILD_TAG=${BUILD_TAG} -e DEPLOY_TYPE=${DEPLOY_TYPE} -e CI_LOOP=${CI_LOOP} \
+    -e TEST_DB_URL=${TEST_DB_URL} -e ENERGY_RECORDER_API_URL=${ENERGY_RECORDER_API_URL}"
 
 ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
 
diff --git a/jjb/global/releng-macros.yml b/jjb/global/releng-macros.yml
index bd9f97d..a7d947f 100644 (file)
           if [[ -s violation.log ]]; then
               echo "Reporting lint result..."
               msg="Found syntax error and/or coding style violation(s) in the files modified by your patchset."
-              sed -i -e '1s/^//$msg\n\n/' violation.log
+              sed -i -e "1s#^#${msg}\n\n#" violation.log
               cmd="gerrit review -p $GERRIT_PROJECT -m \"$(cat violation.log)\" $GERRIT_PATCHSET_REVISION --notify NONE"
               ssh -p 29418 gerrit.opnfv.org "$cmd"
 
     builders:
       - shell: |
           #!/bin/bash
-          # Install python package 
+          # Install python package
           sudo pip install "flake8==2.6.2"
 
           echo "Checking python code..."
diff --git a/jjb/global/slave-params.yml b/jjb/global/slave-params.yml
index 16de0bc..86b369b 100644 (file)
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-pod1
-          default-slaves:
-            - lf-pod1
 
 - parameter:
     name: 'apex-baremetal-euphrates-defaults'
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-pod1
-          default-slaves:
-            - lf-pod1
 
 - parameter:
     name: 'apex-baremetal-danube-defaults'
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-pod1
-          default-slaves:
-            - lf-pod1
 
 - parameter:
     name: 'apex-virtual-master-defaults'
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-virtual2
-            - lf-virtual3
-          default-slaves:
-            - lf-virtual2
-            - lf-virtual3
 
 - parameter:
     name: 'apex-virtual-danube-defaults'
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-pod3
-          default-slaves:
-            - lf-pod3
 
 - parameter:
     name: 'lf-pod1-defaults'
diff --git a/jjb/releng/opnfv-utils.yml b/jjb/releng/opnfv-utils.yml
index 6f77cd2..b12f663 100644 (file)
@@ -7,6 +7,7 @@
       - 'prune-docker-images'
       - 'archive-repositories'
       - 'check-status-of-slaves'
+      - 'ansible-build-server'
 
 ########################
 # job templates
           name: SLAVE_NAME
           description: Slaves to prune docker images
           default-slaves:
-            - arm-build2
+            - arm-build3
+            - arm-build4
+            - arm-build5
+            - arm-build6
             - ericsson-build3
             - ericsson-build4
             - lf-build2
     builders:
       - description-setter:
           description: "Built on $NODE_NAME"
+      # yamllint disable rule:line-length
       - shell: |
           #!/bin/bash
-
           (docker ps -q; docker ps -aq) | sort | uniq -u | xargs --no-run-if-empty docker rm
           docker images -f dangling=true -q | xargs --no-run-if-empty docker rmi
 
+      # yamllint enable rule:line-length
     triggers:
       - timed: '@midnight'
 
@@ -88,7 +94,7 @@
     parameters:
       - node:
           name: SLAVE_NAME
-          description: We don't want workspace wiped. so I just threw the script on the master
+          description: 'script lives on master node'
           default-slaves:
             - master
           allowed-multiselect: false
       - shell: |
           cd /opt/jenkins-ci/slavemonitor
           bash slave-monitor-0.1.sh | sort
+
+- job-template:
+    name: 'ansible-build-server'
+
+    project-type: freestyle
+
+    disabled: false
+    concurrent: true
+
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: Build Servers
+          default-slaves:
+            - lf-build1
+            - lf-build2
+          allowed-multiselect: true
+          ignore-offline-nodes: true
+      - project-parameter:
+          project: releng
+          branch: master
+
+    scm:
+      - git-scm
+
+    triggers:
+      - timed: '@midnight'
+
+    builders:
+      - install-ansible
+      - run-ansible-build-server-playbook
+
+
+- builder:
+    name: install-ansible
+    builders:
+      - shell: |
+          # Install ansible here
+          if [ -f /etc/centos-release ] \
+            || [ -f /etc/redhat-release ] \
+            || [ -f /etc/system-release ]; then
+              sudo yum -y install ansible
+          fi
+          if [ -f /etc/debian_version ] \
+            || grep -qi ubuntu /etc/lsb-release \
+            || grep -qi ubuntu /etc/os-release; then
+              sudo apt-get -y install ansible
+          fi
+
+- builder:
+    name: run-ansible-build-server-playbook
+    builders:
+      - shell: |
+          # run playbook
+          sudo ansible-playbook -C -D -i \
+          $WORKSPACE/utils/build-server-ansible/inventory.ini \
+          $WORKSPACE/utils/build-server-ansible/main.yml
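
For reference, ansible-playbook's -C (--check) flag makes the nightly run a dry run and -D (--diff) prints the would-be file changes, so this job only reports drift on the build servers. A sketch of the corresponding "apply" invocation (hypothetical; the job itself stays in check mode):

    #!/bin/bash
    sudo ansible-playbook -D \
        -i "$WORKSPACE/utils/build-server-ansible/inventory.ini" \
        "$WORKSPACE/utils/build-server-ansible/main.yml"
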
diff --git a/jjb/vswitchperf/vswitchperf.yml b/jjb/vswitchperf/vswitchperf.yml
index 16ceb2e..7fd875b 100644 (file)
 
     disabled: '{obj:disabled}'
 
+    properties:
+      - logrotate-default
+      - build-blocker:
+          use-build-blocker: true
+          blocking-jobs:
+            - 'vswitchperf-verify-.*'
+            - 'vswitchperf-merge-.*'
+            - 'vswitchperf-daily-.*'
+          block-level: 'NODE'
+
     parameters:
       - project-parameter:
           project: '{project}'
           cd src
           make clobber
           make MORE_MAKE_FLAGS="-j 10"
-          # run basic sanity test
-          make sanity
           cd ../ci
-          scl enable python33 "source ~/vsperfenv/bin/activate ; ./build-vsperf.sh daily"
+          scl enable rh-python34 "source ~/vsperfenv/bin/activate ; ./build-vsperf.sh daily"
 
 - job-template:
     name: 'vswitchperf-verify-{stream}'
@@ -66,6 +74,7 @@
           blocking-jobs:
             - 'vswitchperf-verify-.*'
             - 'vswitchperf-merge-.*'
+            - 'vswitchperf-daily-.*'
           block-level: 'NODE'
 
     parameters:
           name: SLAVE_NAME
           description: 'Slave name on Jenkins'
           allowed-slaves:
+            - intel-pod12
             - ericsson-build4
           default-slaves:
+            - intel-pod12
             - ericsson-build4
 
     scm:
           cd src
           make clobber
           make MORE_MAKE_FLAGS="-j 5"
-          # run basic sanity test
-          make sanity
           cd ../ci
           ./build-vsperf.sh verify
 
           blocking-jobs:
             - 'vswitchperf-verify-.*'
             - 'vswitchperf-merge-.*'
+            - 'vswitchperf-daily-.*'
           block-level: 'NODE'
 
     parameters:
           name: SLAVE_NAME
           description: 'Slave name on Jenkins'
           allowed-slaves:
+            - intel-pod12
             - ericsson-build4
           default-slaves:
+            - intel-pod12
             - ericsson-build4
 
     scm:
diff --git a/jjb/xci/bifrost-verify.sh b/jjb/xci/bifrost-verify.sh
index 198f2e1..451f33b 100755 (executable)
@@ -21,6 +21,11 @@ git fetch $PROJECT_REPO $GERRIT_REFSPEC && sudo git checkout FETCH_HEAD
 
 cd $WORKSPACE/releng-xci
 cat > bifrost_test.sh<<EOF
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
 cd ~/bifrost
 # provision 3 VMs; xcimaster, controller, and compute
 ./scripts/bifrost-provision.sh