Merge "Update the postscriptbuild usage"
authorSerena Feng <feng.xiaowei@zte.com.cn>
Sat, 24 Feb 2018 08:46:03 +0000 (08:46 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Sat, 24 Feb 2018 08:46:03 +0000 (08:46 +0000)
45 files changed:
jjb/apex/apex-verify-jobs.yml
jjb/apex/apex.yml
jjb/apex/apex.yml.j2
jjb/apex/scenarios.yaml.hidden
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-logs.sh
jjb/compass4nfv/compass-verify-jobs.yml
jjb/container4nfv/arm64/compass-deploy.sh
jjb/container4nfv/arm64/yardstick-arm64.sh [new file with mode: 0755]
jjb/container4nfv/container4nfv-arm64.yml
jjb/container4nfv/yardstick-arm64.yml [new file with mode: 0644]
jjb/daisy4nfv/daisy-daily-jobs.yml
jjb/doctor/doctor.yml
jjb/dovetail/dovetail-ci-jobs.yml
jjb/dovetail/dovetail-run.sh
jjb/fuel/fuel-daily-jobs.yml
jjb/functest/functest-alpine.sh
jjb/functest/functest-daily-jobs.yml [changed mode: 0755->0644]
jjb/functest/functest-env-presetup.sh
jjb/functest/functest-k8.sh
jjb/global/releng-macros.yml
jjb/global/slave-params.yml
jjb/pharos/pharos.yml
jjb/releng/opnfv-lint.yml
jjb/releng/opnfv-utils.yml
jjb/releng/releng-ci-jobs.yml
jjb/vswitchperf/vswitchperf.yml
jjb/xci/bifrost-periodic-jobs.yml
jjb/xci/bifrost-verify-jobs.yml
jjb/xci/bifrost-verify.sh
jjb/xci/xci-daily-jobs.yml
jjb/xci/xci-deploy.sh
jjb/xci/xci-merge-jobs.yml
jjb/xci/xci-start-new-vm.sh
jjb/xci/xci-verify-jobs.yml
utils/artifacts.opnfv.org.sh [new file with mode: 0755]
utils/build-server-ansible/inventory.ini [new file with mode: 0644]
utils/build-server-ansible/main.yml [new file with mode: 0644]
utils/build-server-ansible/vars/CentOS.yml [new file with mode: 0644]
utils/build-server-ansible/vars/Ubuntu.yml [new file with mode: 0644]
utils/build-server-ansible/vars/defaults.yml [new file with mode: 0644]
utils/build-server-ansible/vars/docker-compose-CentOS.yml [new file with mode: 0644]
utils/build-server-ansible/vars/docker-compose-Ubuntu.yml [new file with mode: 0644]
utils/jenkins-jnlp-connect.sh
utils/slave-monitor-0.1.sh [changed mode: 0644->0755]

index 88c1b17..3029322 100644 (file)
 - job-template:
     name: 'apex-verify-{stream}'
 
-    node: 'apex-virtual-master'
-
     concurrent: true
 
     disabled: '{obj:disabled}'
     project-type: 'multijob'
 
     parameters:
+      - '{project}-virtual-{stream}-defaults'
       - apex-parameter:
           gs-pathname: '{gs-pathname}/dev'
       - project-parameter:
 - job-template:
     name: 'apex-verify-gate-{stream}'
 
-    node: 'apex-build-{stream}'
-
     concurrent: true
 
     disabled: '{obj:disabled}'
     project-type: 'multijob'
 
     parameters:
+      - '{project}-virtual-{stream}-defaults'
       - apex-parameter:
           gs-pathname: '{gs-pathname}/dev'
       - project-parameter:
index 6714d6a..cf29b92 100644 (file)
           <<: *master
       - 'os-odl-bgpvpn-noha':
           <<: *master
+      - 'os-odl-sriov-noha':
+          <<: *master
+      - 'os-odl-ovs_offload-noha':
+          <<: *master
       - 'os-ovn-nofeature-noha':
           <<: *master
       - 'os-nosdn-fdio-noha':
           <<: *master
       - 'os-nosdn-fdio-ha':
           <<: *master
-      - 'os-odl-fdio-noha':
-          <<: *master
-      - 'os-odl-fdio-ha':
-          <<: *master
       - 'os-nosdn-bar-ha':
           <<: *master
       - 'os-nosdn-bar-noha':
           <<: *master
-      - 'os-nosdn-nofeature-ha-ipv6':
-          <<: *master
       - 'os-nosdn-ovs_dpdk-noha':
           <<: *master
       - 'os-nosdn-ovs_dpdk-ha':
           <<: *master
-      - 'os-nosdn-kvm_ovs_dpdk-noha':
+      - 'os-odl-ovs_dpdk-noha':
           <<: *master
-      - 'os-nosdn-kvm_ovs_dpdk-ha':
+      - 'os-odl-ovs_dpdk-ha':
           <<: *master
       - 'os-odl-sfc-noha':
           <<: *master
       - 'os-odl-sfc-ha':
           <<: *master
-      - 'os-odl-fdio_dvr-noha':
-          <<: *master
-      - 'os-odl-fdio_dvr-ha':
+      - 'os-nosdn-calipso-noha':
           <<: *master
       - 'os-nosdn-nofeature-noha':
           <<: *euphrates
 - job-template:
     name: 'apex-deploy-{platform}-{stream}'
 
-    node: 'apex-{platform}-{stream}'
-
     concurrent: true
 
     disabled: false
 - job-template:
     name: 'apex-virtual-{stream}'
 
-    node: 'apex-virtual-master'
-
     project-type: 'multijob'
 
+    concurrent: true
+
     disabled: false
 
     scm:
     #     branch:    branch (eg. stable)
     project-type: 'multijob'
 
-    node: '{baremetal-slave}'
-
     disabled: '{obj:disable_daily}'
 
     scm:
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
-            - name: 'apex-os-ovn-nofeature-noha-baremetal-master'
+            - name: 'apex-os-odl-sriov-noha-baremetal-master'
               node-parameters: false
               current-parameters: false
               predefined-parameters: |
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
-            - name: 'apex-os-nosdn-fdio-noha-baremetal-master'
+            - name: 'apex-os-odl-ovs_offload-noha-baremetal-master'
               node-parameters: false
               current-parameters: false
               predefined-parameters: |
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
-            - name: 'apex-os-nosdn-fdio-ha-baremetal-master'
+            - name: 'apex-os-ovn-nofeature-noha-baremetal-master'
               node-parameters: false
               current-parameters: false
               predefined-parameters: |
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
-            - name: 'apex-os-odl-fdio-noha-baremetal-master'
+            - name: 'apex-os-nosdn-fdio-noha-baremetal-master'
               node-parameters: false
               current-parameters: false
               predefined-parameters: |
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
-            - name: 'apex-os-odl-fdio-ha-baremetal-master'
+            - name: 'apex-os-nosdn-fdio-ha-baremetal-master'
               node-parameters: false
               current-parameters: false
               predefined-parameters: |
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
-            - name: 'apex-os-nosdn-nofeature-ha-ipv6-baremetal-master'
-              node-parameters: false
-              current-parameters: false
-              predefined-parameters: |
-                OPNFV_CLEAN=yes
-              kill-phase-on: NEVER
-              abort-all-job: true
-              git-revision: false
             - name: 'apex-os-nosdn-ovs_dpdk-noha-baremetal-master'
               node-parameters: false
               current-parameters: false
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
-            - name: 'apex-os-nosdn-kvm_ovs_dpdk-noha-baremetal-master'
+            - name: 'apex-os-odl-ovs_dpdk-noha-baremetal-master'
               node-parameters: false
               current-parameters: false
               predefined-parameters: |
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
-            - name: 'apex-os-nosdn-kvm_ovs_dpdk-ha-baremetal-master'
+            - name: 'apex-os-odl-ovs_dpdk-ha-baremetal-master'
               node-parameters: false
               current-parameters: false
               predefined-parameters: |
               kill-phase-on: NEVER
               abort-all-job: true
               git-revision: false
-            - name: 'apex-os-odl-fdio_dvr-noha-baremetal-master'
-              node-parameters: false
-              current-parameters: false
-              predefined-parameters: |
-                OPNFV_CLEAN=yes
-              kill-phase-on: NEVER
-              abort-all-job: true
-              git-revision: false
-            - name: 'apex-os-odl-fdio_dvr-ha-baremetal-master'
+            - name: 'apex-os-nosdn-calipso-noha-baremetal-master'
               node-parameters: false
               current-parameters: false
               predefined-parameters: |
index 27a854d..ab65c4e 100644 (file)
 - job-template:
     name: 'apex-deploy-{platform}-{stream}'
 
-    node: 'apex-{platform}-{stream}'
-
     concurrent: true
 
     disabled: false
 - job-template:
     name: 'apex-virtual-{stream}'
 
-    node: 'apex-virtual-master'
-
     project-type: 'multijob'
 
+    concurrent: true
+
     disabled: false
 
     scm:
     #     branch:    branch (eg. stable)
     project-type: 'multijob'
 
-    node: '{baremetal-slave}'
-
     disabled: '{obj:disable_daily}'
 
     scm:
index 789ca7f..93ce931 100644 (file)
@@ -5,22 +5,20 @@ master:
   - 'os-odl-nofeature-noha'
   - 'os-odl-bgpvpn-ha'
   - 'os-odl-bgpvpn-noha'
+  - 'os-odl-sriov-noha'
+  - 'os-odl-ovs_offload-noha'
   - 'os-ovn-nofeature-noha'
   - 'os-nosdn-fdio-noha'
   - 'os-nosdn-fdio-ha'
-  - 'os-odl-fdio-noha'
-  - 'os-odl-fdio-ha'
   - 'os-nosdn-bar-ha'
   - 'os-nosdn-bar-noha'
-  - 'os-nosdn-nofeature-ha-ipv6'
   - 'os-nosdn-ovs_dpdk-noha'
   - 'os-nosdn-ovs_dpdk-ha'
-  - 'os-nosdn-kvm_ovs_dpdk-noha'
-  - 'os-nosdn-kvm_ovs_dpdk-ha'
+  - 'os-odl-ovs_dpdk-noha'
+  - 'os-odl-ovs_dpdk-ha'
   - 'os-odl-sfc-noha'
   - 'os-odl-sfc-ha'
-  - 'os-odl-fdio_dvr-noha'
-  - 'os-odl-fdio_dvr-ha'
+  - 'os-nosdn-calipso-noha'
 euphrates:
   - 'os-nosdn-nofeature-noha'
   - 'os-nosdn-nofeature-ha'
index 70296bd..81d76d5 100644 (file)
           use-build-blocker: true
           blocking-jobs:
             - 'compass-os-.*?-{pod}-daily-.*?'
+            - 'compass-k8-.*?-{pod}-daily-.*?'
             - 'compass-os-.*?-baremetal-daily-.*?'
+            - 'compass-k8-.*?-baremetal-daily-.*?'
             - 'compass-verify-[^-]*-[^-]*'
           block-level: 'NODE'
 
               build-step-failure-threshold: 'never'
               failure-threshold: 'never'
               unstable-threshold: 'FAILURE'
-      # dovetail only master by now, not sync with A/B/C branches
       # here the stream means the SUT stream, dovetail stream is defined in its own job
-      # only run on os-(nosdn|odl_l2|odl_l3)-nofeature-ha scenario
-      # run against SUT master/euphrates branch, dovetail docker image with latest tag(Monday, Tuesday)
-      # run against SUT master/euphrates branch, dovetail docker image with cvp.X.X.X tag(Thursday, Friday)
-      # run against SUT danube branch, dovetail docker image with cvp.X.X.X tag on huawei-pod7
+      # only run on os-(nosdn|odl_l3)-nofeature-ha scenario
+      # run with testsuite default, dovetail docker image with latest tag(Monday, Tuesday)
+      # run with testsuite proposed_tests, dovetail docker image with latest tag(Thursday, Friday)
       - conditional-step:
           condition-kind: and
           condition-operands:
               use-build-time: true
           steps:
             - trigger-builds:
-                - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
+                - project: 'dovetail-compass-{pod}-default-{stream}'
                   current-parameters: false
                   predefined-parameters: |
                     DOCKER_TAG=latest
index 03c8b20..c028194 100644 (file)
@@ -14,7 +14,7 @@ OPNFV_ARTIFACT_VERSION=$(date -u +"%Y-%m-%d_%H-%M-%S")
 COMPASS_LOG_FILENAME="${JOB_NAME}_${BUILD_NUMBER}_${OPNFV_ARTIFACT_VERSION}.log.tar.gz"
 
 
-sudo docker exec -it compass-tasks /bin/bash /opt/collect-log.sh
+sudo docker exec compass-tasks /bin/bash /opt/collect-log.sh
 sudo docker cp compass-tasks:/opt/log.tar.gz ${LOG_DIRECTORY}/${COMPASS_LOG_FILENAME}
 
 sudo chown $(whoami):$(whoami) ${LOG_DIRECTORY}/${COMPASS_LOG_FILENAME}
index f215b78..444b173 100644 (file)
@@ -70,6 +70,7 @@
           blocking-jobs:
             - 'compass-verify-[^-]*-[^-]*'
             - 'compass-os-.*?-virtual-daily-.*?'
+            - 'compass-k8-.*?-virtual-daily-.*?'
           block-level: 'NODE'
 
     wrappers:
index 3c59927..bbbf3f4 100755 (executable)
@@ -8,6 +8,6 @@ export OS_VERSION="centos7"
 export KUBERNETES_VERSION="v1.7.3"
 export DHA="deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml"
 export NETWORK="deploy/conf/vm_environment/network.yml"
-export VIRT_NUMBER=2 VIRT_CPUS=2 VIRT_MEM=4096 VIRT_DISK=50G
+export VIRT_NUMBER=2 VIRT_CPUS=4 VIRT_MEM=8192 VIRT_DISK=50G
 
 ./deploy.sh
diff --git a/jjb/container4nfv/arm64/yardstick-arm64.sh b/jjb/container4nfv/arm64/yardstick-arm64.sh
new file mode 100755 (executable)
index 0000000..26c6fdc
--- /dev/null
@@ -0,0 +1,93 @@
+#!/bin/bash
+set -e
+
+sshpass -p root ssh root@10.1.0.50 \
+  "mkdir -p /etc/yardstick; rm -rf /etc/yardstick/admin.conf"
+
+
+sshpass -p root ssh root@10.1.0.50 \
+  kubectl config set-cluster yardstick --server=127.0.0.1:8080 --insecure-skip-tls-verify=true --kubeconfig=/etc/yardstick/admin.conf
+sshpass -p root ssh root@10.1.0.50 \
+  kubectl config set-context yardstick --cluster=yardstick --kubeconfig=/etc/yardstick/admin.conf
+sshpass -p root ssh root@10.1.0.50 \
+  kubectl config use-context yardstick --kubeconfig=/etc/yardstick/admin.conf 
+
+
+
+if [ ! -n "$redirect" ]; then
+  redirect="/dev/stdout"
+fi
+
+if [ ! -n "$DOCKER_TAG" ]; then
+  DOCKER_TAG='latest'
+fi
+
+if [ ! -n "$NODE_NAME" ]; then
+  NODE_NAME='arm-virutal03'
+fi
+
+if [ ! -n "$DEPLOY_SCENARIO" ]; then
+  DEPLOY_SCENARIO='k8-nosdn-lb-noha_daily'
+fi
+
+if [ ! -n "$YARDSTICK_DB_BACKEND" ]; then
+  YARDSTICK_DB_BACKEND='-i 104.197.68.199:8086'
+fi
+
+# Pull the image with correct tag
+DOCKER_REPO='opnfv/yardstick'
+if [ "$(uname -m)" = 'aarch64' ]; then
+    DOCKER_REPO="${DOCKER_REPO}_$(uname -m)"
+fi
+echo "Yardstick: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
+sshpass -p root ssh root@10.1.0.50 \
+  docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect
+
+if [ ! -n "$BRANCH" ]; then
+  BRANCH=master
+fi
+
+opts="--name=yardstick --privileged=true --net=host -d -it "
+envs="-e YARDSTICK_BRANCH=${BRANCH} -e BRANCH=${BRANCH} \
+  -e NODE_NAME=${NODE_NAME} \
+  -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
+rc_file_vol="-v /etc/yardstick/admin.conf:/etc/yardstick/admin.conf"
+cacert_file_vol=""
+map_log_dir=""
+sshkey=""
+YARDSTICK_SCENARIO_SUITE_NAME="opnfv_k8-nosdn-lb-noha_daily.yaml"
+
+# map log directory
+branch=${BRANCH##*/}
+#branch="master"
+dir_result="${HOME}/opnfv/yardstick/results/${branch}"
+mkdir -p ${dir_result}
+sudo rm -rf ${dir_result}/*
+map_log_dir="-v ${dir_result}:/tmp/yardstick"
+
+# Run docker
+cmd="docker rm -f yardstick || true"
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+cmd="sudo docker run ${opts} ${envs} ${rc_file_vol} ${cacert_file_vol} ${map_log_dir} ${sshkey} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
+echo "Yardstick: Running docker cmd: ${cmd}"
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+
+cmd='sudo docker exec yardstick sed -i.bak "/# execute tests/i\sed -i.bak \"s/openretriever\\\/yardstick/openretriever\\\/yardstick_aarch64/g\" \
+    $\{YARDSTICK_REPO_DIR\}/tests/opnfv/test_cases/opnfv_yardstick_tc080.yaml" /usr/local/bin/exec_tests.sh'
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+echo "Yardstick: run tests: ${YARDSTICK_SCENARIO_SUITE_NAME}"
+cmd="sudo docker exec yardstick exec_tests.sh ${YARDSTICK_DB_BACKEND} ${YARDSTICK_SCENARIO_SUITE_NAME}"
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+cmd="docker rm -f yardstick"
+sshpass -p root ssh root@10.1.0.50 \
+  ${cmd}
+
+echo "Yardstick: done!"
index 16a10ea..b69c214 100644 (file)
@@ -32,7 +32,7 @@
 
     wrappers:
       - timeout:
-          timeout: 120
+          timeout: 150
           fail: true
 
     builders:
           !include-raw: arm64/compass-build.sh
       - shell:
           !include-raw: arm64/compass-deploy.sh
-
+      - trigger-builds:
+          - project: yardstick-arm64-compass-arm-virtual03-daily-master
+            current-parameters: false
+            same-node: true
+            block: false
 
 - trigger:
     name: 'trigger-k8-multus-nofeature-noha-virtual'
diff --git a/jjb/container4nfv/yardstick-arm64.yml b/jjb/container4nfv/yardstick-arm64.yml
new file mode 100644 (file)
index 0000000..bd1d8aa
--- /dev/null
@@ -0,0 +1,121 @@
+---
+###################################
+# job configuration for yardstick
+###################################
+- project:
+    name: yardstick-arm64
+
+    project: '{name}'
+
+    # -------------------------------
+    # BRANCH ANCHORS
+    # -------------------------------
+    master: &master
+      stream: master
+      branch: '{stream}'
+      gs-pathname: ''
+      docker-tag: 'latest'
+    # -------------------------------
+    # POD, INSTALLER, AND BRANCH MAPPING
+    # -------------------------------
+    #    Installers using labels
+    #            CI PODs
+    # This section should only contain the installers
+    # that have been switched using labels for slaves
+    # -------------------------------
+    pod:
+      # apex CI PODs
+      - arm-virtual03:
+          slave-label: arm-packet01
+          installer: compass
+          auto-trigger-name: 'daily-trigger-disabled'
+          <<: *master
+    # -------------------------------
+    testsuite:
+      - 'daily'
+
+    jobs:
+      - 'yardstick-arm64-{installer}-{pod}-{testsuite}-{stream}'
+
+################################
+# job templates
+################################
+- job-template:
+    name: 'yardstick-arm64-{installer}-{pod}-{testsuite}-{stream}'
+    disabled: false
+
+    concurrent: true
+
+    properties:
+      - logrotate-default
+      - throttle:
+          enabled: true
+          max-per-node: 1
+          option: 'project'
+
+    wrappers:
+      - build-name:
+          name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
+      - timeout:
+          timeout: 60
+          abort: true
+
+    triggers:
+      - '{auto-trigger-name}'
+
+    parameters:
+      - project-parameter:
+          project: '{project}'
+          branch: '{branch}'
+      - '{installer}-defaults'
+      - 'yardstick-params-{slave-label}'
+      - string:
+          name: DEPLOY_SCENARIO
+          default: 'k8-nosdn-lb-noha_daily'
+      - string:
+          name: DOCKER_TAG
+          default: '{docker-tag}'
+          description: 'Tag to pull docker image'
+      - string:
+          name: YARDSTICK_SCENARIO_SUITE_NAME
+          default: opnfv_${{DEPLOY_SCENARIO}}_{testsuite}.yaml
+          description: 'Path to test scenario suite'
+      - string:
+          name: CI_DEBUG
+          default: 'false'
+          description: "Show debut output information"
+
+    scm:
+      - git-scm
+
+    builders:
+      - description-setter:
+          description: "POD: $NODE_NAME"
+      - 'yardstick-arm64'
+
+    publishers:
+      - email:
+          recipients: trevor.tao@arm.com yibo.cai@arm.com
+      - email-jenkins-admins-on-failure
+
+########################
+# builder macros
+########################
+- builder:
+    name: yardstick-arm64
+    builders:
+      - shell:
+          !include-raw: arm64/yardstick-arm64.sh
+
+########################
+# parameter macros
+########################
+
+
+- parameter:
+    name: 'yardstick-params-arm-packet01'
+    parameters:
+      - string:
+          name: YARDSTICK_DB_BACKEND
+          default: '-i 104.197.68.199:8086'
+          description: 'Arguments to use in order to choose the backend DB'
index 6f4643a..fbbb086 100644 (file)
     #        CI PODs
     # -------------------------------
     pod:
-      # - baremetal:
-      #     slave-label: daisy-baremetal
-      #     <<: *master
-      - virtual:
-          slave-label: daisy-virtual
-          <<: *master
       - baremetal:
           slave-label: daisy-baremetal
-          <<: *euphrates
+          <<: *master
       - virtual:
           slave-label: daisy-virtual
-          <<: *euphrates
+          <<: *master
+      # - baremetal:
+      #     slave-label: daisy-baremetal
+      #     <<: *euphrates
+      # - virtual:
+      #     slave-label: daisy-virtual
+      #     <<: *euphrates
       # -------------------------------
       #        None-CI PODs
       # -------------------------------
-      - baremetal:
-          slave-label: zte-pod3
-          <<: *master
+      - baremetal:
+          slave-label: zte-pod3
+          <<: *master
       - zte-pod9:
           slave-label: zte-pod9
           <<: *master
index d8f43c9..ba76b8a 100644 (file)
           projects:
             - name: 'doctor-verify-unit-test-{stream}'
               predefined-parameters: |
+                PROJECT=$PROJECT
                 GERRIT_BRANCH=$GERRIT_BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
               kill-phase-on: FAILURE
+              current-parameters: true
+              git-revision: true
 
 - job-template:
     name: 'doctor-verify-unit-test-{stream}'
           projects:
             - name: 'doctor-{task}-apex-{inspector}-x86_64-{stream}'
               predefined-parameters: |
+                PROJECT=$PROJECT
                 GERRIT_BRANCH=$GERRIT_BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
               kill-phase-on: FAILURE
+              current-parameters: true
+              git-revision: true
       - multijob:
           name: 'doctor-verify-fuel-inspector'
           execution-type: PARALLEL
           projects:
             - name: 'doctor-{task}-fuel-{inspector}-x86_64-{stream}'
               predefined-parameters: |
+                PROJECT=$PROJECT
                 GERRIT_BRANCH=$GERRIT_BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
               kill-phase-on: FAILURE
+              current-parameters: true
+              git-revision: true
       - multijob:
           name: 'doctor-verify-fuel-inspector'
           execution-type: PARALLEL
           projects:
             - name: 'doctor-{task}-fuel-{inspector}-aarch64-{stream}'
               predefined-parameters: |
+                PROJECT=$PROJECT
                 GERRIT_BRANCH=$GERRIT_BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
               kill-phase-on: FAILURE
+              current-parameters: true
+              git-revision: true
 
 - job-template:
     name: 'doctor-{task}-{installer}-{inspector}-{arch}-{stream}'
index 99867c3..5e5b6e1 100644 (file)
       dovetail-branch: '{stream}'
       gs-pathname: ''
       docker-tag: 'latest'
-    danube: &danube
-      stream: danube
-      branch: 'stable/{stream}'
-      dovetail-branch: master
-      gs-pathname: '/{stream}'
-      docker-tag: 'cvp.0.9.0'
     euphrates: &euphrates
       stream: euphrates
       branch: 'stable/{stream}'
       dovetail-branch: master
       gs-pathname: '/{stream}'
-      docker-tag: 'cvp.0.9.0'
+      docker-tag: 'latest'
 
     # ----------------------------------
     # POD, PLATFORM, AND BRANCH MAPPING
           SUT: fuel
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - baremetal:
-          slave-label: fuel-baremetal
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - virtual:
-          slave-label: fuel-virtual
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - baremetal:
           slave-label: fuel-baremetal
           SUT: fuel
           SUT: compass
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - baremetal:
-          slave-label: compass-baremetal
-          SUT: compass
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - virtual:
-          slave-label: compass-virtual
-          SUT: compass
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - baremetal:
           slave-label: compass-baremetal
           SUT: compass
           SUT: apex
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - virtual:
-          slave-label: apex-virtual-danube
-          SUT: apex
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - baremetal:
-          slave-label: apex-baremetal-danube
-          SUT: apex
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - virtual:
           slave-label: apex-virtual-master
           SUT: apex
           SUT: fuel
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - armband-baremetal:
-          slave-label: armband-baremetal
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - armband-virtual:
-          slave-label: armband-virtual
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - armband-baremetal:
           slave-label: armband-baremetal
           SUT: fuel
           SUT: fuel
           auto-trigger-name: 'daily-trigger-disabled'
           <<: *master
-      - zte-pod1:
-          slave-label: zte-pod1
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - zte-pod3:
-          slave-label: zte-pod3
-          SUT: fuel
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - huawei-pod4:
-          slave-label: huawei-pod4
-          SUT: apex
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
-      - huawei-pod7:
-          slave-label: huawei-pod7
-          SUT: compass
-          auto-trigger-name: 'daily-trigger-disabled'
-          <<: *danube
       - zte-pod1:
           slave-label: zte-pod1
           SUT: fuel
 
     # -------------------------------
     testsuite:
+      - 'default'
       - 'proposed_tests'
 
     jobs:
index e50242b..451662a 100755 (executable)
@@ -246,8 +246,6 @@ if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_T
     sed -i 's/internal/public/g' ${OPENRC}
     if [[ ${public_url} =~ 'v2' ]]; then
         sed -i "s/OS_IDENTITY_API_VERSION=3/OS_IDENTITY_API_VERSION=2.0/g" ${OPENRC}
-        sed -i '/OS_PROJECT_DOMAIN_NAME/d' ${OPENRC}
-        sed -i '/OS_USER_DOMAIN_NAME/d' ${OPENRC}
     fi
     cat ${OPENRC}
 fi
@@ -275,10 +273,13 @@ cp_tempest_cmd="docker cp ${DOVETAIL_CONFIG}/tempest_conf.yaml $container_id:/ho
 echo "exec command: ${cp_tempest_cmd}"
 $cp_tempest_cmd
 
-list_cmd="dovetail list ${TESTSUITE}"
-run_cmd="dovetail run --testsuite ${TESTSUITE} -d"
-echo "Container exec command: ${list_cmd}"
-docker exec $container_id ${list_cmd}
+if [[ ${TESTSUITE} == 'default' ]]; then
+    testsuite=''
+else
+    testsuite="--testsuite ${TESTSUITE}"
+fi
+
+run_cmd="dovetail run ${testsuite} -d"
 echo "Container exec command: ${run_cmd}"
 docker exec $container_id ${run_cmd}
 
index 1cb29d0..bbc8a84 100644 (file)
 - trigger:
     name: 'fuel-os-nosdn-nofeature-ha-baremetal-daily-euphrates-trigger'
     triggers:
-      - timed: '0 20 * * *'
+      - timed: ''  # '0 20 * * *'
 - trigger:
     name: 'fuel-os-odl-nofeature-ha-baremetal-daily-euphrates-trigger'
     triggers:
-      - timed: '0 2 * * *'
+      - timed: ''  # '0 2 * * *'
 - trigger:
     name: 'fuel-os-onos-sfc-ha-baremetal-daily-euphrates-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-baremetal-daily-euphrates-trigger'
     triggers:
-      - timed: '0 20 * * *'
+      - timed: ''  # '0 20 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-ha-baremetal-daily-euphrates-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-nosdn-nofeature-noha-virtual-daily-euphrates-trigger'
     triggers:
-      - timed: '0 13 * * *'
+      - timed: ''  # '0 13 * * *'
 - trigger:
     name: 'fuel-os-odl-nofeature-noha-virtual-daily-euphrates-trigger'
     triggers:
-      - timed: '0 18 * * *'
+      - timed: ''  # '0 18 * * *'
 - trigger:
     name: 'fuel-os-onos-sfc-noha-virtual-daily-euphrates-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-nosdn-ovs-noha-virtual-daily-euphrates-trigger'
     triggers:
-      - timed: '0 9 * * *'
+      - timed: ''  # '0 9 * * *'
 - trigger:
     name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-euphrates-trigger'
     triggers:
index b5db2ed..432bbbb 100755 (executable)
@@ -4,6 +4,10 @@ set -e
 set +u
 set +o pipefail
 
+CI_LOOP=${CI_LOOP:-daily}
+TEST_DB_URL=http://testresults.opnfv.org/test/api/v1/results
+ENERGY_RECORDER_API_URL=http://energy.opnfv.fr/resources
+
 check_os_deployment() {
     FUNCTEST_IMAGE=opnfv/functest-healthcheck:${DOCKER_TAG}
     echo "Functest: Pulling Functest Docker image ${FUNCTEST_IMAGE} ..."
@@ -145,7 +149,8 @@ test -f ${HOME}/opnfv/functest/custom/params_${DOCKER_TAG} && custom_params=$(ca
 
 envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
     -e NODE_NAME=${NODE_NAME} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO} \
-    -e BUILD_TAG=${BUILD_TAG} -e DEPLOY_TYPE=${DEPLOY_TYPE}"
+    -e BUILD_TAG=${BUILD_TAG} -e DEPLOY_TYPE=${DEPLOY_TYPE} -e CI_LOOP=${CI_LOOP} \
+    -e TEST_DB_URL=${TEST_DB_URL} -e ENERGY_RECORDER_API_URL=${ENERGY_RECORDER_API_URL}"
 
 ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
 
old mode 100755 (executable)
new mode 100644 (file)
index cd15d71..b7d33e7 100755 (executable)
@@ -38,7 +38,7 @@ elif [[ ${INSTALLER_TYPE} == 'daisy' ]]; then
 
         installer_mac=$(sudo virsh domiflist daisy | grep vnet | \
                       grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
-        export INSTALLER_IP=$(/usr/sbin/arp -e -i $bridge_name | grep ${installer_mac} | awk {'print $1'})
+        export INSTALLER_IP=$(/usr/sbin/arp -e -i $bridge_name | grep ${installer_mac} | head -n 1 | awk {'print $1'})
 
         echo "Installer ip is ${INSTALLER_IP}"
     else
index 6df5c53..888aded 100755 (executable)
@@ -11,12 +11,12 @@ rc_file=${HOME}/k8.creds
 sudo rm -rf $rc_file
 
 if [[ ${INSTALLER_TYPE} == 'compass' ]]; then
-    admin_conf_file_vol = "-v ${HOME}/admin.conf:/root/.kube/config"
+    admin_conf_file_vol="-v ${HOME}/admin.conf:/root/.kube/config"
     echo "export KUBECONFIG=/root/.kube/config" >> $rc_file
     echo "export KUBERNETES_PROVIDER=local" >> $rc_file
-    KUBE_MASTER_URL = $(cat ${HOME}/admin.conf|grep server| awk '{print $2}')
+    KUBE_MASTER_URL=$(cat ${HOME}/admin.conf|grep server| awk '{print $2}')
     echo "export KUBE_MASTER_URL=$KUBE_MASTER_URL" >> $rc_file
-    KUBE_MASTER_IP = $(echo $KUBE_MASTER_URL|awk -F'https://|:[0-9]+' '$0=$2')
+    KUBE_MASTER_IP=$(echo $KUBE_MASTER_URL|awk -F'https://|:[0-9]+' '$0=$2')
     echo "export KUBE_MASTER_IP=$KUBE_MASTER_IP" >> $rc_file
 else
     echo "Not supported by other installers yet"
index d3ffb0b..3433cee 100644 (file)
       - shell: |
           #!/bin/bash
           if [[ -s violation.log ]]; then
-              echo "Reporting lint result..."
+              cat violation.log
+              echo "Reporting lint result...."
+              set -x
               msg="Found syntax error and/or coding style violation(s) in the files modified by your patchset."
-              sed -i -e '1s/^//$msg\n\n/' violation.log
+              sed -i -e "1s#^#${msg}\n\n#" violation.log
               cmd="gerrit review -p $GERRIT_PROJECT -m \"$(cat violation.log)\" $GERRIT_PATCHSET_REVISION --notify NONE"
               ssh -p 29418 gerrit.opnfv.org "$cmd"
 
       - email-ext:
           <<: *email_ptl_defaults
           recipients: >
-            hongbo.tianhongbo@huawei.com
+            georg.kunz@ericsson.com
 
 - publisher:
     name: 'email-dpacc-ptl'
       - email-ext:
           <<: *email_ptl_defaults
           recipients: >
-            jack.morgan@intel.com
+            zhang.jun3g@zte.com.cn
 - publisher:
     name: 'email-pharos-tools-ptl'
     <<: *email_pharos_ptl_defaults
       - email-ext:
           <<: *email_ptl_defaults
           recipients: >
-            ManuelBuilmbuil@suse.com
+            mbuil@suse.com
 
 - publisher:
     name: 'email-snaps-ptl'
index 4902dcf..86b369b 100644 (file)
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-pod1
-          default-slaves:
-            - lf-pod1
 
 - parameter:
     name: 'apex-baremetal-euphrates-defaults'
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-pod1
-          default-slaves:
-            - lf-pod1
 
 - parameter:
     name: 'apex-baremetal-danube-defaults'
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-pod1
-          default-slaves:
-            - lf-pod1
+
 
 - parameter:
     name: 'apex-virtual-master-defaults'
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-virtual2
-            - lf-virtual3
-          default-slaves:
-            - lf-virtual2
-            - lf-virtual3
 
 - parameter:
     name: 'apex-virtual-euphrates-defaults'
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-virtual2
-            - lf-virtual3
-          default-slaves:
-            - lf-virtual2
-            - lf-virtual3
 
 - parameter:
     name: 'apex-virtual-danube-defaults'
           name: SSH_KEY
           default: /root/.ssh/id_rsa
           description: 'SSH key to use for Apex'
-      - node:
-          name: SLAVE_NAME
-          description: 'Slave name on Jenkins'
-          allowed-slaves:
-            - lf-pod3
-          default-slaves:
-            - lf-pod3
 
 - parameter:
     name: 'lf-pod1-defaults'
index acf1488..c1bb1ba 100644 (file)
@@ -11,7 +11,8 @@
     jobs:
       - '{project}-verify-basic'
       - 'backup-pharos-dashboard'
-      - 'deploy-pharos-dashboard'
+      - 'deploy-pharos-dashboard':
+          disabled: true
 
 - job-template:
     name: 'backup-pharos-dashboard'
       - 'pharos-dashboard-defaults'
 
     scm:
-      - git-scm-gerrit
+      - git:
+          choosing-strategy: 'gerrit'
+          refspec: '$GERRIT_REFSPEC'
+          branches:
+            - 'origin/$BRANCH'
+          timeout: 15
+          credentials-id: '$SSH_CREDENTIAL_ID'
+          url: '$GIT_BASE'
+          skip-tag: true
+          wipe-workspace: false
 
     triggers:
       - gerrit:
@@ -71,6 +81,8 @@
     builders:
       - shell: |
           cp $HOME/config.env $WORKSPACE/dashboard
+          cp $HOME/rsa.pub $WORKSPACE/dashboard
+          cp $HOME/rsa.pem $WORKSPACE/dashboard
           cd $WORKSPACE/dashboard
           sudo docker-compose build
           sudo docker-compose up -d
index e9e929d..0ac5520 100644 (file)
                 comment-contains-value: 'reverify'
           projects:
             - project-compare-type: 'REG_EXP'
-              project-pattern: 'functest|sdnvpn|qtip|daisy|sfc|escalator'
+              project-pattern: 'functest|functest-kubernetes|sdnvpn|qtip|daisy|sfc|escalator'
               branches:
                 - branch-compare-type: 'ANT'
                   branch-pattern: '**/{branch}'
index 6f77cd2..fb3bab4 100644 (file)
@@ -7,6 +7,8 @@
       - 'prune-docker-images'
       - 'archive-repositories'
       - 'check-status-of-slaves'
+      - 'ansible-build-server'
+      - 'generate-artifacts-index-pages'
 
 ########################
 # job templates
           name: SLAVE_NAME
           description: Slaves to prune docker images
           default-slaves:
-            - arm-build2
+            - arm-build3
+            - arm-build4
+            - arm-build5
+            - arm-build6
             - ericsson-build3
             - ericsson-build4
             - lf-build2
     builders:
       - description-setter:
           description: "Built on $NODE_NAME"
+      # yamllint disable rule:line-length
       - shell: |
           #!/bin/bash
-
           (docker ps -q; docker ps -aq) | sort | uniq -u | xargs --no-run-if-empty docker rm
           docker images -f dangling=true -q | xargs --no-run-if-empty docker rmi
 
+
+    # yamllint enable rule:line-length
     triggers:
       - timed: '@midnight'
 
     name: 'check-status-of-slaves'
 
     disabled: false
-
     concurrent: true
 
     parameters:
       - node:
           name: SLAVE_NAME
-          description: We don't want workspace wiped. so I just threw the script on the master
+          description: 'script lives on master node'
           default-slaves:
-            - master
+            - lf-build1
           allowed-multiselect: false
           ignore-offline-nodes: true
+      - project-parameter:
+          project: releng
+          branch: master
+
+    scm:
+      - git-scm
 
     triggers:
       - timed: '@midnight'
 
     builders:
       - shell: |
-          cd /opt/jenkins-ci/slavemonitor
-          bash slave-monitor-0.1.sh | sort
+          cd $WORKSPACE/utils/
+          bash slave-monitor-0.1.sh
+
+- job-template:
+    name: 'ansible-build-server'
+
+    project-type: freestyle
+
+    disabled: false
+    concurrent: true
+
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: Build Servers
+          default-slaves:
+            - lf-build1
+            - lf-build2
+          allowed-multiselect: true
+          ignore-offline-nodes: true
+      - project-parameter:
+          project: releng
+          branch: master
+
+    scm:
+      - git-scm
+
+    triggers:
+      - timed: '@midnight'
+
+    builders:
+      - install-ansible
+      - run-ansible-build-server-playbook
+
+
+- builder:
+    name: install-ansible
+    builders:
+      - shell: |
+          # Install ansible here
+          if [ -f /etc/centos-release ] \
+          || [ -f /etc/redhat-release ] \
+          || [ -f /etc/system-release ]; then
+          sudo yum -y install ansible
+          fi
+          if [ -f /etc/debian_version ] \
+          || grep -qi ubuntu /etc/lsb-release \
+          || grep -qi ubuntu /etc/os-release; then
+          sudo apt-get -y install ansible
+          fi
+
+- builder:
+    name: run-ansible-build-server-playbook
+    builders:
+      - shell: |
+          # run playbook
+          sudo ansible-playbook -i \
+          $WORKSPACE/utils/build-server-ansible/inventory.ini \
+          $WORKSPACE/utils/build-server-ansible/main.yml
+
+
+- job-template:
+    name: 'generate-artifacts-index-pages'
+
+    project-type: freestyle
+
+    disabled: false
+
+    concurrent: false
+
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: Build Servers
+          default-slaves:
+            - lf-build2
+      - project-parameter:
+          project: releng
+          branch: master
+
+    scm:
+      - git-scm
+
+    triggers:
+      - timed: '@hourly'
+
+    builders:
+      - generate-artifact-html
+
+
+- builder:
+    name: generate-artifact-html
+    builders:
+      - shell: |
+          cd $WORKSPACE/utils/
+          ./artifacts.opnfv.org.sh
index ef99f5a..6fb6804 100644 (file)
     builders:
       - shell: |
               source /opt/virtualenv/jenkins-job-builder/bin/activate
-              cd /opt/jenkins-ci/releng
-              git pull
               jenkins-jobs update -r --delete-old jjb/
index 16ceb2e..7fd875b 100644 (file)
 
     disabled: '{obj:disabled}'
 
+    properties:
+      - logrotate-default
+      - build-blocker:
+          use-build-blocker: true
+          blocking-jobs:
+            - 'vswitchperf-verify-.*'
+            - 'vswitchperf-merge-.*'
+            - 'vswitchperf-daily-.*'
+          block-level: 'NODE'
+
     parameters:
       - project-parameter:
           project: '{project}'
           cd src
           make clobber
           make MORE_MAKE_FLAGS="-j 10"
-          # run basic sanity test
-          make sanity
           cd ../ci
-          scl enable python33 "source ~/vsperfenv/bin/activate ; ./build-vsperf.sh daily"
+          scl enable rh-python34 "source ~/vsperfenv/bin/activate ; ./build-vsperf.sh daily"
 
 - job-template:
     name: 'vswitchperf-verify-{stream}'
@@ -66,6 +74,7 @@
           blocking-jobs:
             - 'vswitchperf-verify-.*'
             - 'vswitchperf-merge-.*'
+            - 'vswitchperf-daily-.*'
           block-level: 'NODE'
 
     parameters:
           name: SLAVE_NAME
           description: 'Slave name on Jenkins'
           allowed-slaves:
+            - intel-pod12
             - ericsson-build4
           default-slaves:
+            - intel-pod12
             - ericsson-build4
 
     scm:
           cd src
           make clobber
           make MORE_MAKE_FLAGS="-j 5"
-          # run basic sanity test
-          make sanity
           cd ../ci
           ./build-vsperf.sh verify
 
           blocking-jobs:
             - 'vswitchperf-verify-.*'
             - 'vswitchperf-merge-.*'
+            - 'vswitchperf-daily-.*'
           block-level: 'NODE'
 
     parameters:
           name: SLAVE_NAME
           description: 'Slave name on Jenkins'
           allowed-slaves:
+            - intel-pod12
             - ericsson-build4
           default-slaves:
+            - intel-pod12
             - ericsson-build4
 
     scm:
index 0bca26a..fbe2e20 100644 (file)
           name: SLAVE_LABEL
           default: '{slave-label}'
       - string:
-          name: XCI_LOOP
+          name: CI_LOOP
           default: 'periodic'
 
     wrappers:
index e4c2d0e..7e01175 100644 (file)
           use-build-blocker: true
           blocking-jobs:
             - 'xci-verify-{distro}-.*'
+            - 'xci-.*-{distro}-merge-.*'
             - '.*-bifrost-verify.*-{type}'
+      - throttle:
+          max-per-node: 2
+          max-total: 10
+          categories:
+            - xci-verify-virtual
+          option: category
 
           block-level: 'NODE'
 
           name: SLAVE_LABEL
           default: 'xci-virtual'
       - string:
-          name: XCI_LOOP
+          name: CI_LOOP
           default: 'verify'
 
     scm:
     wrappers:
       - fix-workspace-permissions
       - build-timeout:
-          timeout: 90
+          timeout: 180
 
     publishers:
       # yamllint disable rule:line-length
index 198f2e1..d3a37ce 100755 (executable)
@@ -21,9 +21,14 @@ git fetch $PROJECT_REPO $GERRIT_REFSPEC && sudo git checkout FETCH_HEAD
 
 cd $WORKSPACE/releng-xci
 cat > bifrost_test.sh<<EOF
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
 cd ~/bifrost
 # provision 3 VMs; xcimaster, controller, and compute
-./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh | ts
 
 sudo -H -E virsh list
 EOF
index a953749..a92e490 100644 (file)
           name: SLAVE_LABEL
           default: '{slave-label}'
       - string:
-          name: XCI_LOOP
+          name: CI_LOOP
           default: 'daily'
 
     triggers:
             predefined-parameters: |
               DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               XCI_FLAVOR=$XCI_FLAVOR
-              XCI_LOOP=$XCI_LOOP
+              CI_LOOP=$CI_LOOP
             same-node: true
             block: true
       - trigger-builds:
             predefined-parameters: |
               DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               XCI_FLAVOR=$XCI_FLAVOR
-              XCI_LOOP=$XCI_LOOP
+              CI_LOOP=$CI_LOOP
             same-node: true
             block: true
             block-thresholds:
           name: SLAVE_LABEL
           default: '{slave-label}'
       - string:
-          name: XCI_LOOP
+          name: CI_LOOP
           default: 'daily'
       - string:
           name: INSTALLER_TYPE
index 211d282..71cf96d 100755 (executable)
@@ -15,14 +15,14 @@ cd $WORKSPACE/xci
 
 # for daily jobs, we want to use working versions
 # for periodic jobs, we will use whatever is set in the job, probably master
-if [[ "$XCI_LOOP" == "daily" ]]; then
+if [[ "$CI_LOOP" == "daily" ]]; then
     # source pinned-vars to get releng version
     source ./config/pinned-versions
 
     # checkout the version
     git checkout -q $OPNFV_RELENG_VERSION
     echo "Info: Using $OPNFV_RELENG_VERSION"
-elif [[ "$XCI_LOOP" == "periodic" ]]; then
+elif [[ "$CI_LOOP" == "periodic" ]]; then
     echo "Info: Using $OPNFV_RELENG_VERSION"
 fi
 
@@ -31,7 +31,7 @@ fi
 # to take this into account while deploying anyways
 # clone openstack-ansible
 # stable/ocata already use pinned versions so this is only valid for master
-if [[ "$XCI_LOOP" == "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
+if [[ "$CI_LOOP" == "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
     cd $WORKSPACE
     # get the url to openstack-ansible git
     source ./config/env-vars
index 351fe22..492348d 100644 (file)
           use-build-blocker: true
           blocking-jobs:
             - 'xci-verify-{distro}-.*'
-            - 'bifrost-verify-{distro}-.*'
-            - 'bifrost-periodic-{distro}-.*'
+            - 'xci-.*-{distro}-merge-.*'
+            - 'openstack-bifrost-verify-{distro}-.*'
             - 'xci-osa-verify-{distro}-.*'
             - 'xci-osa-periodic-{distro}-.*'
             - 'xci-(os|k8s).*?-virtual-{distro}-.*'
           block-level: 'NODE'
+      - throttle:
+          max-per-node: 2
+          max-total: 10
+          categories:
+            - xci-verify-virtual
+          option: category
 
     wrappers:
       - ssh-agent-wrapper
index b72c339..b38ebe5 100755 (executable)
@@ -35,6 +35,7 @@ cd $WORKSPACE
 # yourself.
 cat > xci_test.sh<<EOF
 #!/bin/bash
+set -o pipefail
 export DISTRO=$DISTRO
 export DEPLOY_SCENARIO=$DEPLOY_SCENARIO
 export FUNCTEST_MODE=$FUNCTEST_MODE
@@ -53,7 +54,7 @@ if [[ ! -z ${WORKSPACE+x} && $GERRIT_PROJECT != "releng-xci" ]]; then
 fi
 
 cd xci
-./xci-deploy.sh
+./xci-deploy.sh | ts
 EOF
 chmod a+x xci_test.sh
 
index 28bc317..c6b6bc4 100644 (file)
           notbuilt: false
       - centos:
           disabled: false
-          successful: false
-          failed: false
-          unstable: false
-          notbuilt: false
+          successful: true
+          failed: true
+          unstable: true
+          notbuilt: true
       - opensuse:
           disabled: false
           successful: false
@@ -66,8 +66,8 @@
           use-build-blocker: true
           blocking-jobs:
             - 'xci-verify-{distro}-.*'
-            - 'bifrost-verify-{distro}-.*'
-            - 'bifrost-periodic-{distro}-.*'
+            - 'xci-.*-{distro}-merge-.*'
+            - 'openstack-bifrost-verify-{distro}-.*'
             - 'xci-osa-verify-{distro}-.*'
             - 'xci-osa-periodic-{distro}-.*'
           block-level: 'NODE'
diff --git a/utils/artifacts.opnfv.org.sh b/utils/artifacts.opnfv.org.sh
new file mode 100755 (executable)
index 0000000..1984b49
--- /dev/null
@@ -0,0 +1,162 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+#export PATH=${PATH}:/root/gsutil
+
+#Step Generate index.html
+if [ -f index.html ] ; then
+      rm -f index.html
+fi
+
+OUTPUT="index.html"
+
+for index in $(gsutil ls -l gs://artifacts.opnfv.org | grep -v logs | grep -v review | awk 'NF==1'| sed s,gs://artifacts.opnfv.org/,,)
+do
+echo $index
+  echo "<LI><a href=\"${index%/*}.html\">"$index"</a></LI>" >> $OUTPUT
+done
+
+#functest logs##########################
+
+for project in functest vswitchperf
+do
+
+    for index in $(gsutil ls -l gs://artifacts.opnfv.org/logs/"$project"/ |awk 'NF==1'| sed s,gs://artifacts.opnfv.org/,, )
+    do
+    index="$(echo ${index%/*} | sed s,/,_,g)"
+      echo "<LI><a href=\"http://artifacts.opnfv.org/${index%/*}.html\">"$index"</a></LI>" >> $OUTPUT
+    done
+
+done
+#End step 1
+#####################################
+
+
+#generate html files for all projects except vswitchperf
+for index in $(gsutil ls -l gs://artifacts.opnfv.org | grep -v logs |awk 'NF==1'| sed s,gs://artifacts.opnfv.org/,,)
+do
+OUTPUT=${index%/*}.html
+rm -f $OUTPUT
+
+
+    for filepath in $(gsutil ls -R gs://artifacts.opnfv.org/"$index" | sed s,gs://artifacts.opnfv.org/,, | grep -v "favicon.ico" | grep -v "gsutil" ); do
+    echo $filepath
+
+    if [[ $filepath =~ "/:" ]]; then
+      path=$(echo $filepath| sed s,/:,,g)
+      echo "<UL>" >> $OUTPUT
+      echo "<LI>$path</LI>" >> $OUTPUT
+      echo "</UL>" >> $OUTPUT
+    else
+      echo "<LI><a href=\"http://artifacts.opnfv.org/$filepath\">"$filepath"</a></LI>" >> $OUTPUT
+    fi
+done
+
+gsutil cp $OUTPUT gs://artifacts.opnfv.org/
+
+gsutil -m setmeta \
+     -h "Content-Type:text/html" \
+     -h "Cache-Control:private, max-age=0, no-transform" \
+      gs://artifacts.opnfv.org/$OUTPUT \
+
+done
+
+
+
+#generate file for vswitchperf (I don't know what happened here but there is a weird character in this bucket)
+
+index=vswitchperf
+OUTPUT=${index%/*}.html
+rm -f $OUTPUT
+
+        for filepath in $(gsutil ls -R gs://artifacts.opnfv.org/"$index" | sed s,gs://artifacts.opnfv.org/,, | grep -v "favicon.ico" | grep -v "gsutil" ); do
+        echo $filepath
+
+        if [[ $filepath =~ "/:" ]]; then
+          path=$(echo $filepath| sed s,/:,,g)
+          echo "<UL>" >> $OUTPUT
+          echo "<LI>$path</LI>" >> $OUTPUT
+          echo "</UL>" >> $OUTPUT
+        else
+          echo "<LI><a href=\"http://artifacts.opnfv.org/$filepath\">"$filepath"</a></LI>" >> $OUTPUT
+        fi
+
+done
+
+
+gsutil cp $OUTPUT gs://artifacts.opnfv.org/
+
+gsutil -m setmeta \
+     -h "Content-Type:text/html" \
+     -h "Cache-Control:private, max-age=0, no-transform" \
+      gs://artifacts.opnfv.org/$OUTPUT \
+
+# Generate html for logs
+
+for project in functest vswitchperf
+do
+    for index in $(gsutil ls -l gs://artifacts.opnfv.org/logs/"$project"/ |awk 'NF==1'| sed s,gs://artifacts.opnfv.org/,, )
+    do
+
+        OUTPUT="$(echo ${index%/*}.html | sed s,/,_,g)"
+        echo $OUTPUT
+        rm -f $OUTPUT
+
+
+            for filepath in $(gsutil ls -R gs://artifacts.opnfv.org/"$index" | sed s,gs://artifacts.opnfv.org/,, | grep -v "favicon.ico" | grep -v "gsutil" ); do
+            echo $filepath
+
+            if [[ $filepath =~ "/:" ]]; then
+              path=$(echo $filepath| sed s,/:,,g)
+              echo "<UL>" >> $OUTPUT
+              echo "<LI>$path</LI>" >> $OUTPUT
+              echo "</UL>" >> $OUTPUT
+            else
+              echo "<LI><a href=\"http://artifacts.opnfv.org/$filepath\">"$filepath"</a></LI>" >> $OUTPUT
+            fi
+
+
+            done
+
+
+        gsutil cp $OUTPUT gs://artifacts.opnfv.org/
+
+        gsutil -m setmeta \
+             -h "Content-Type:text/html" \
+             -h "Cache-Control:private, max-age=0, no-transform" \
+              gs://artifacts.opnfv.org/$OUTPUT \
+
+
+    done
+done
+
+
+
+OUTPUT="index.html"
+echo "<p> Generated on $(date) </p>" >> $OUTPUT
+
+cat <<EOF >> $OUTPUT
+<script>
+(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+ga('create', 'UA-831873-26', 'auto');
+ga('send', 'pageview');
+</script>
+EOF
+
+#copy and upload index file generated in first step, last
+gsutil cp $OUTPUT gs://artifacts.opnfv.org/
+
+gsutil -m setmeta \
+     -h "Content-Type:text/html" \
+     -h "Cache-Control:private, max-age=0, no-transform" \
+      gs://artifacts.opnfv.org/$OUTPUT \
diff --git a/utils/build-server-ansible/inventory.ini b/utils/build-server-ansible/inventory.ini
new file mode 100644 (file)
index 0000000..115b130
--- /dev/null
@@ -0,0 +1,8 @@
+#############################################################################
+# Copyright (c) 2016 The Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+localhost              ansible_connection=local
diff --git a/utils/build-server-ansible/main.yml b/utils/build-server-ansible/main.yml
new file mode 100644 (file)
index 0000000..0fcce71
--- /dev/null
@@ -0,0 +1,37 @@
+############################################################################
+# Copyright (c) 2016 The Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+- hosts: "localhost"
+  become: "True"
+  tasks:
+    - debug:
+        msg: "{{ inventory_hostname }} is {{ ansible_distribution }}"
+    - include_vars: vars/defaults.yml
+    - include: vars/CentOS.yml
+      when: ansible_distribution == "CentOS"
+    - include: vars/Ubuntu.yml
+      when: ansible_distribution == "Ubuntu"
+    - name: Install Docker.
+      package: name={{ docker_package }} state={{ docker_package_state }}
+    - name: Ensure Docker is started and enabled at boot.
+      service:
+        name: docker
+        state: started
+        enabled: "yes"
+    - name: install gsutil
+      pip:
+        name: gsutil
+        state: present
+    - name: install tox
+      pip:
+        name: tox
+        state: present
+    - include: vars/docker-compose-CentOS.yml
+      when: ansible_distribution == "CentOS"
+    - include: vars/docker-compose-Ubuntu.yml
+      when: ansible_distribution == "Ubuntu"
diff --git a/utils/build-server-ansible/vars/CentOS.yml b/utils/build-server-ansible/vars/CentOS.yml
new file mode 100644 (file)
index 0000000..0d5a011
--- /dev/null
@@ -0,0 +1,72 @@
+############################################################################
+# Copyright (c) 2016 The Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+############################################################################
+---
+- name: Ensure old versions of Docker are not installed.
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-common
+    - docker-engine
+
+- name: Add Docker GPG key.
+  rpm_key:
+    key: https://download.docker.com/linux/centos/gpg
+    state: present
+
+- name: Ensure epel is installed.
+  yum:
+    name: epel-release
+    state: present
+- name: Ensure dependencies are installed.
+  yum:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - python-pip
+    - rpm-build
+    - kernel-headers
+    - libpcap-devel
+    - zlib-devel
+    - numactl-devel
+    - doxygen
+    - python-sphinx
+    - libvirt-devel
+    - python-devel
+    - openssl-devel
+    - python-six
+    - net-tools
+    - bc
+
+- name: install the 'Development tools' package group
+  yum:
+    name: "@Development tools"
+    state: present
+
+- name: Add Docker repository.
+  get_url:
+    url: "{{ docker_yum_repo_url }}"
+    dest: '/etc/yum.repos.d/docker-ce.repo'
+    owner: root
+    group: root
+    mode: 0644
+
+- name: Configure Docker Edge repo.
+  ini_file:
+    dest: '/etc/yum.repos.d/docker-ce.repo'
+    section: 'docker-ce-edge'
+    option: enabled
+    value: '{{ docker_yum_repo_enable_edge }}'
+
+- name: Configure Docker Test repo.
+  ini_file:
+    dest: '/etc/yum.repos.d/docker-ce.repo'
+    section: 'docker-ce-test'
+    option: enabled
+    value: '{{ docker_yum_repo_enable_test }}'
diff --git a/utils/build-server-ansible/vars/Ubuntu.yml b/utils/build-server-ansible/vars/Ubuntu.yml
new file mode 100644 (file)
index 0000000..609c8d5
--- /dev/null
@@ -0,0 +1,84 @@
+#############################################################################
+# Copyright (c) 2016 The Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+- name: Ensure old versions of Docker are not installed.
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-engine
+
+- name: Ensure dependencies are installed.
+  apt:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - apt-transport-https
+    - ca-certificates
+    - git
+    - build-essential
+    - curl
+    - wget
+    - rpm
+    - fuseiso
+    - createrepo
+    - genisoimage
+    - libfuse-dev
+    - dh-autoreconf
+    - pkg-config
+    - zlib1g-dev
+    - libglib2.0-dev
+    - libpixman-1-dev
+    - python-virtualenv
+    - python-dev
+    - libffi-dev
+    - libssl-dev
+    - libxml2-dev
+    - libxslt1-dev
+    - bc
+    - qemu-kvm
+    - libvirt-bin
+    - ubuntu-vm-builder
+    - bridge-utils
+    - monit
+    - openjdk-8-jre-headless
+    - python-nose
+    - dirmngr
+    - collectd
+    - flex
+    - bison
+    - libnuma-dev
+    - shellcheck
+    - python-pip
+
+- name: Add Docker apt key.
+  apt_key:
+    url: https://download.docker.com/linux/ubuntu/gpg
+    id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
+    state: present
+  register: add_repository_key
+  ignore_errors: true
+
+- name: Ensure curl is present (on older systems without SNI).
+  package: name=curl state=present
+  when: add_repository_key|failed
+
+- name: Add Docker apt key (alternative for older systems without SNI).
+  # yamllint disable rule:line-length
+  shell: "curl -sSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -"
+  # yamllint enable rule:line-length
+  args:
+    warn: "no"
+  when: add_repository_key|failed
+
+- name: Add Docker repository.
+  apt_repository:
+    repo: "{{ docker_apt_repository }}"
+    state: present
+    update_cache: "yes"
diff --git a/utils/build-server-ansible/vars/defaults.yml b/utils/build-server-ansible/vars/defaults.yml
new file mode 100644 (file)
index 0000000..8d83380
--- /dev/null
@@ -0,0 +1,23 @@
+#############################################################################
+# Copyright (c) 2016 The Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+docker_package: "docker-ce"
+docker_package_state: present
+
+# Used only for Debian/Ubuntu. Switch 'stable' to 'edge' if needed.
+docker_apt_release_channel: stable
+# yamllint disable rule:line-length
+docker_apt_repository: "deb https://download.docker.com/linux/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
+# yamllint enable rule:line-length
+
+# Used only for RedHat/CentOS.
+# yamllint disable rule:line-length
+docker_yum_repo_url: https://download.docker.com/linux/centos/docker-ce.repo
+# yamllint enable rule:line-length
+docker_yum_repo_enable_edge: 0
+docker_yum_repo_enable_test: 0
diff --git a/utils/build-server-ansible/vars/docker-compose-CentOS.yml b/utils/build-server-ansible/vars/docker-compose-CentOS.yml
new file mode 100644 (file)
index 0000000..fc4bcba
--- /dev/null
@@ -0,0 +1,12 @@
+#############################################################################
+# Copyright (c) 2016 The Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+- name: Ensure docker compose is installed.
+  yum:
+    name: 'docker-compose'
+    state: present
diff --git a/utils/build-server-ansible/vars/docker-compose-Ubuntu.yml b/utils/build-server-ansible/vars/docker-compose-Ubuntu.yml
new file mode 100644 (file)
index 0000000..f985b6a
--- /dev/null
@@ -0,0 +1,12 @@
+#############################################################################
+# Copyright (c) 2016 The Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+- name: Ensure docker compose is installed
+  apt:
+    name: 'docker-compose'
+    state: present
index cd81f29..f7c6769 100755 (executable)
@@ -103,7 +103,7 @@ if does not exist then exec "$mkdir -p /var/run/$jenkinsuser"
 if failed uid $jenkinsuser then exec "$chown $jenkinsuser /var/run/$jenkinsuser"
 if failed gid $jenkinsuser then exec "$chown :$jenkinsuser /var/run/$jenkinsuser"
 
-check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
+check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid every 2 cycles
 start program = "/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $jenkinshome; export started_monit=true; $0 $@' with timeout 60 seconds"
 stop program = "/bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'"
 depends on jenkins_piddir
@@ -118,7 +118,7 @@ if does not exist then exec \"$mkdir -p /var/run/$jenkinsuser\"
 if failed uid $jenkinsuser then exec \"$chown $jenkinsuser /var/run/$jenkinsuser\"
 if failed gid $jenkinsuser then exec \"$chown :$jenkinsuser /var/run/$jenkinsuser\"
 
-check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
+check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid every 2 cycles
 start program = \"/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $jenkinshome; export started_monit=true; $0 $@' with timeout 60 seconds\"
 stop program = \"/bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'\"
 depends on jenkins_piddir\
old mode 100644 (file)
new mode 100755 (executable)
index 161aaef..5201f93
@@ -8,9 +8,8 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-#This will put a bunch of files in the pwd. you have been warned.
 #Counts how long slaves have been online or offline
-
+#exec 2>/dev/null
 
 #Yes I know about jq
 curlcommand() {
@@ -25,74 +24,66 @@ curl -s "https://build.opnfv.org/ci/computer/api/json?tree=computer\[displayName
     | sed s,\",,g
 }
 
-if [ -f podoutput-current ]; then
-  cp podoutput-current podoutput-lastiteration
-fi
-
-curlcommand > podoutput-current
+curlcommand > /tmp/podoutput-current
 
-declare -A slavescurrent slaveslastiteration
+declare -A slavescurrent
 
 while read -r name status ; do
             slavescurrent["$name"]="$status"
-done < <(cat podoutput-current)
-
-while read -r name status ; do
-            slaveslastiteration["$name"]=$status
-done < <(cat podoutput-lastiteration)
-
+done < <(cat /tmp/podoutput-current)
+
+#haste bin stopped allowing post :(
+#files=(*online)
+#for ((i=0; i<${#files[@]}; i+=9)); do
+#./eplot -d -r [-1:74][-1:30] -m    ${files[i]} ${files[i+1]} ${files[i+2]} ${files[i+3]} ${files[i+4]} ${files[i+5]}  ${files[i+6]} ${files[i+7]} ${files[i+8]} ${files[i+9]}
+#done  | ./haste.bash
+##
 main () {
+
 for slavename in "${!slavescurrent[@]}"; do
-    #Slave is online. Mark it down.
+
+  #Slave is online (offline flag is "false"); record another online iteration.
     if [ "${slavescurrent[$slavename]}" == "false" ]; then
 
-        if  [ -f "$slavename"-offline ]; then
-            echo "removing offline status from $slavename slave was offline for $(cat "$slavename"-offline ) iterations"
-            rm "$slavename"-offline
-        fi
-
-        if  ! [ -f "$slavename"-online ]; then
-            echo "1" > "$slavename"-online
-        elif [ -f "$slavename"-online ]; then
-            #read and increment slavename
-            read -r -d $'\x04' var < "$slavename"-online
-            ((var++))
-            echo -n "ONLINE $slavename "
-            echo "for $var iterations"
-            echo "$var" > "$slavename"-online
-        fi
-    fi
+      if  ! [ -f /tmp/"$slavename"-online ]; then
+        echo "1" > /tmp/"$slavename"-online
+                echo "new online slave file created $slavename ${slavescurrent[$slavename]} up for 1 iterations"
+          fi
 
-    #went offline since last iteration.
-    if [ "${slavescurrent[$slavename]}" == "false" ] && [ "${slaveslastiteration[$slavename]}" == "true" ];  then
-        echo "JUST WENT OFFLINE $slavename "
-        if  [ -f "$slavename"-online ]; then
-            echo "removing online status from $slavename. slave was online for $(cat "$slavename"-online ) iterations"
-            rm "$slavename"-online
-        fi
+                #read and increment slavename
+                var="$(cat /tmp/"$slavename"-online |tail -n 1)"
+                if [[ "$var" == "0" ]]; then
+                    echo "slave $slavename ${slavescurrent[$slavename]} back up for $var iterations"
+                fi
+                ((var++))
+                echo "$var" >> /tmp/"$slavename"-online
+                unset var
+                echo "$slavename up $(cat /tmp/$slavename-online | tail -n 10 | xargs)"
 
     fi
 
-    #slave is offline
+    #slave is offline: append a 0 to reset its online-iteration streak
     if [ "${slavescurrent[$slavename]}" == "true" ]; then
-        if  ! [ -f "$slavename"-offline ]; then
-            echo "1" > "$slavename"-offline
-        fi
-
-        if [ -f "$slavename"-offline ]; then
-            #read and increment slavename
-            read -r -d $'\x04' var < "$slavename"-offline
-            ((var++))
-            echo "$var" > "$slavename"-offline
-                if  [ "$var" -gt "30" ]; then
-                    echo "OFFLINE FOR $var ITERATIONS REMOVE $slavename "
-                else
-                    echo "OFFLINE $slavename FOR $var ITERATIONS "
-                fi
-        fi
+      if  ! [ -f /tmp/"$slavename"-online ]; then
+        echo "0" > /tmp/"$slavename"-online
+                echo "new offline slave file created $slavename ${slavescurrent[$slavename]} up for 0 iterations"
+
+          fi
+          var="$(cat /tmp/"$slavename"-online |tail -n 1)"
+
+            if [[ "$var" != "0" ]]; then
+                    echo "slave $slavename ${slavescurrent[$slavename]} was up for $var iterations"
+                echo "slave $slavename ${slavescurrent[$slavename]} has gone offline, was $var iterations now reset to 0"
+            fi
+
+        echo "0" >> /tmp/"$slavename"-online
+            echo "$slavename down $(cat /tmp/$slavename-online | tail -n 10 | xargs)"
+            unset var
+
     fi
 
+
 done
 }
 
-main
+main | sort | column -t