Merge "Enable verify jobs for external scenarios"
authorFatih Degirmenci <fdegir@gmail.com>
Mon, 26 Feb 2018 10:26:11 +0000 (10:26 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Mon, 26 Feb 2018 10:26:11 +0000 (10:26 +0000)
18 files changed:
jjb/dovetail/dovetail-run.sh
jjb/global/releng-macros.yml
jjb/pharos/pharos.yml
jjb/releng/opnfv-lint.yml
jjb/releng/opnfv-utils.yml
jjb/releng/releng-ci-jobs.yml
jjb/xci/bifrost-periodic-jobs.yml
jjb/xci/bifrost-verify-jobs.yml
jjb/xci/bifrost-verify.sh
jjb/xci/xci-daily-jobs.yml
jjb/xci/xci-deploy.sh
jjb/xci/xci-merge-jobs.yml
jjb/xci/xci-start-new-vm.sh
jjb/xci/xci-verify-jobs.yml
utils/artifacts.opnfv.org.sh [new file with mode: 0755]
utils/build-server-ansible/vars/docker-compose-CentOS.yml [moved from utils/build-server-ansible/vars/docker-compose-Centos.yml with 100% similarity]
utils/jenkins-jnlp-connect.sh
utils/slave-monitor-0.1.sh [changed mode: 0644->0755]

index 451662a..ec879e3 100755 (executable)
@@ -199,6 +199,27 @@ if [[ ! -f ${cirros_image} ]]; then
 fi
 sudo cp ${cirros_image} ${DOVETAIL_CONFIG}
 
+# snaps_smoke test case needs to download this image first before running
+ubuntu14_image=${image_path}/ubuntu-14.04-server-cloudimg-amd64-disk1.img
+if [[ ! -f ${ubuntu14_image} ]]; then
+    echo "Download image ubuntu-14.04-server-cloudimg-amd64-disk1.img ..."
+    wget -q -nc https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P ${image_path}
+fi
+sudo cp ${ubuntu14_image} ${DOVETAIL_CONFIG}
+
+# cloudify_ims test case needs to download these 2 images first before running
+cloudify_image=${image_path}/cloudify-manager-premium-4.0.1.qcow2
+if [[ ! -f ${cloudify_image} ]]; then
+    echo "Download image cloudify-manager-premium-4.0.1.qcow2 ..."
+    wget -q -nc http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2 -P ${image_path}
+fi
+sudo cp ${cloudify_image} ${DOVETAIL_CONFIG}
+trusty_image=${image_path}/trusty-server-cloudimg-amd64-disk1.img
+if [[ ! -f ${trusty_image} ]]; then
+    echo "Download image trusty-server-cloudimg-amd64-disk1.img ..."
+    wget -q -nc http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img -P ${image_path}
+fi
+sudo cp ${trusty_image} ${DOVETAIL_CONFIG}
 
 opts="--privileged=true -id"
 
@@ -238,18 +259,6 @@ if [ $(docker ps | grep "${DOCKER_REPO}:${DOCKER_TAG}" | wc -l) == 0 ]; then
     exit 1
 fi
 
-if [[ ! "${SUT_BRANCH}" =~ "danube" && ${INSTALLER_TYPE} == 'fuel' && ${DEPLOY_TYPE} == 'baremetal' ]]; then
-    source_cmd="source ${OPENRC}"
-    get_public_url_cmd="openstack --insecure endpoint list --service keystone --interface public | sed -n 4p | awk '{print \$14}'"
-    public_url=$(sudo docker exec "$container_id" /bin/bash -c "${source_cmd} && ${get_public_url_cmd}")
-    sed -i 's#OS_AUTH_URL=.*#OS_AUTH_URL='"${public_url}"'#g' ${OPENRC}
-    sed -i 's/internal/public/g' ${OPENRC}
-    if [[ ${public_url} =~ 'v2' ]]; then
-        sed -i "s/OS_IDENTITY_API_VERSION=3/OS_IDENTITY_API_VERSION=2.0/g" ${OPENRC}
-    fi
-    cat ${OPENRC}
-fi
-
 # Modify tempest_conf.yaml file
 tempest_conf_file=${DOVETAIL_CONFIG}/tempest_conf.yaml
 if [[ ${INSTALLER_TYPE} == 'compass' || ${INSTALLER_TYPE} == 'apex' ]]; then
index a12a3c8..3433cee 100644 (file)
       - shell: |
           #!/bin/bash
           if [[ -s violation.log ]]; then
-              echo "Reporting lint result..."
+              cat violation.log
+              echo "Reporting lint result...."
+              set -x
               msg="Found syntax error and/or coding style violation(s) in the files modified by your patchset."
               sed -i -e "1s#^#${msg}\n\n#" violation.log
               cmd="gerrit review -p $GERRIT_PROJECT -m \"$(cat violation.log)\" $GERRIT_PATCHSET_REVISION --notify NONE"
       - email-ext:
           <<: *email_ptl_defaults
           recipients: >
-            ManuelBuilmbuil@suse.com
+            mbuil@suse.com
 
 - publisher:
     name: 'email-snaps-ptl'
             - shell:
                 !include-raw-escape: installer-report.sh
       - postbuildscript:
+          script-only-if-succeeded: false
           script-only-if-failed: true
           builders:
             - shell: |
index acf1488..c1bb1ba 100644 (file)
@@ -11,7 +11,8 @@
     jobs:
       - '{project}-verify-basic'
       - 'backup-pharos-dashboard'
-      - 'deploy-pharos-dashboard'
+      - 'deploy-pharos-dashboard':
+          disabled: true
 
 - job-template:
     name: 'backup-pharos-dashboard'
       - 'pharos-dashboard-defaults'
 
     scm:
-      - git-scm-gerrit
+      - git:
+          choosing-strategy: 'gerrit'
+          refspec: '$GERRIT_REFSPEC'
+          branches:
+            - 'origin/$BRANCH'
+          timeout: 15
+          credentials-id: '$SSH_CREDENTIAL_ID'
+          url: '$GIT_BASE'
+          skip-tag: true
+          wipe-workspace: false
 
     triggers:
       - gerrit:
@@ -71,6 +81,8 @@
     builders:
       - shell: |
           cp $HOME/config.env $WORKSPACE/dashboard
+          cp $HOME/rsa.pub $WORKSPACE/dashboard
+          cp $HOME/rsa.pem $WORKSPACE/dashboard
           cd $WORKSPACE/dashboard
           sudo docker-compose build
           sudo docker-compose up -d
index e9e929d..0ac5520 100644 (file)
                 comment-contains-value: 'reverify'
           projects:
             - project-compare-type: 'REG_EXP'
-              project-pattern: 'functest|sdnvpn|qtip|daisy|sfc|escalator'
+              project-pattern: 'functest|functest-kubernetes|sdnvpn|qtip|daisy|sfc|escalator'
               branches:
                 - branch-compare-type: 'ANT'
                   branch-pattern: '**/{branch}'
index b12f663..fb3bab4 100644 (file)
@@ -8,6 +8,7 @@
       - 'archive-repositories'
       - 'check-status-of-slaves'
       - 'ansible-build-server'
+      - 'generate-artifacts-index-pages'
 
 ########################
 # job templates
@@ -88,7 +89,6 @@
     name: 'check-status-of-slaves'
 
     disabled: false
-
     concurrent: true
 
     parameters:
           name: SLAVE_NAME
           description: 'script lives on master node'
           default-slaves:
-            - master
+            - lf-build1
           allowed-multiselect: false
           ignore-offline-nodes: true
+      - project-parameter:
+          project: releng
+          branch: master
+
+    scm:
+      - git-scm
 
     triggers:
       - timed: '@midnight'
 
     builders:
       - shell: |
-          cd /opt/jenkins-ci/slavemonitor
-          bash slave-monitor-0.1.sh | sort
+          cd $WORKSPACE/utils/
+          bash slave-monitor-0.1.sh
 
 - job-template:
     name: 'ansible-build-server'
     builders:
       - shell: |
           # run playbook
-          sudo ansible-playbook -C -D -i \
+          sudo ansible-playbook -i \
           $WORKSPACE/utils/build-server-ansible/inventory.ini \
           $WORKSPACE/utils/build-server-ansible/main.yml
+
+
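+# Regenerates the index pages for artifacts.opnfv.org hourly via utils/artifacts.opnfv.org.sh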
+- job-template:
+    name: 'generate-artifacts-index-pages'
+
+    project-type: freestyle
+
+    disabled: false
+
+    concurrent: false
+
+    parameters:
+      - node:
+          name: SLAVE_NAME
+          description: Build Servers
+          default-slaves:
+            - lf-build2
+      - project-parameter:
+          project: releng
+          branch: master
+
+    scm:
+      - git-scm
+
+    triggers:
+      - timed: '@hourly'
+
+    builders:
+      - generate-artifact-html
+
+
+- builder:
+    name: generate-artifact-html
+    builders:
+      - shell: |
+          cd $WORKSPACE/utils/
+          ./artifacts.opnfv.org.sh
index ef99f5a..6fb6804 100644 (file)
     builders:
       - shell: |
               source /opt/virtualenv/jenkins-job-builder/bin/activate
-              cd /opt/jenkins-ci/releng
-              git pull
               jenkins-jobs update -r --delete-old jjb/
index 0bca26a..fbe2e20 100644 (file)
           name: SLAVE_LABEL
           default: '{slave-label}'
       - string:
-          name: XCI_LOOP
+          name: CI_LOOP
           default: 'periodic'
 
     wrappers:
index e4c2d0e..7e01175 100644 (file)
           use-build-blocker: true
           blocking-jobs:
             - 'xci-verify-{distro}-.*'
+            - 'xci-.*-{distro}-merge-.*'
             - '.*-bifrost-verify.*-{type}'
+      - throttle:
+          max-per-node: 2
+          max-total: 10
+          categories:
+            - xci-verify-virtual
+          option: category
 
           block-level: 'NODE'
 
           name: SLAVE_LABEL
           default: 'xci-virtual'
       - string:
-          name: XCI_LOOP
+          name: CI_LOOP
           default: 'verify'
 
     scm:
     wrappers:
       - fix-workspace-permissions
       - build-timeout:
-          timeout: 90
+          timeout: 180
 
     publishers:
       # yamllint disable rule:line-length
index 451f33b..d3a37ce 100755 (executable)
@@ -28,7 +28,7 @@ set -o pipefail
 
 cd ~/bifrost
 # provision 3 VMs; xcimaster, controller, and compute
-./scripts/bifrost-provision.sh
+./scripts/bifrost-provision.sh | ts
 
 sudo -H -E virsh list
 EOF
index a953749..a92e490 100644 (file)
           name: SLAVE_LABEL
           default: '{slave-label}'
       - string:
-          name: XCI_LOOP
+          name: CI_LOOP
           default: 'daily'
 
     triggers:
             predefined-parameters: |
               DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               XCI_FLAVOR=$XCI_FLAVOR
-              XCI_LOOP=$XCI_LOOP
+              CI_LOOP=$CI_LOOP
             same-node: true
             block: true
       - trigger-builds:
             predefined-parameters: |
               DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               XCI_FLAVOR=$XCI_FLAVOR
-              XCI_LOOP=$XCI_LOOP
+              CI_LOOP=$CI_LOOP
             same-node: true
             block: true
             block-thresholds:
           name: SLAVE_LABEL
           default: '{slave-label}'
       - string:
-          name: XCI_LOOP
+          name: CI_LOOP
           default: 'daily'
       - string:
           name: INSTALLER_TYPE
index 211d282..71cf96d 100755 (executable)
@@ -15,14 +15,14 @@ cd $WORKSPACE/xci
 
 # for daily jobs, we want to use working versions
 # for periodic jobs, we will use whatever is set in the job, probably master
-if [[ "$XCI_LOOP" == "daily" ]]; then
+if [[ "$CI_LOOP" == "daily" ]]; then
     # source pinned-vars to get releng version
     source ./config/pinned-versions
 
     # checkout the version
     git checkout -q $OPNFV_RELENG_VERSION
     echo "Info: Using $OPNFV_RELENG_VERSION"
-elif [[ "$XCI_LOOP" == "periodic" ]]; then
+elif [[ "$CI_LOOP" == "periodic" ]]; then
     echo "Info: Using $OPNFV_RELENG_VERSION"
 fi
 
@@ -31,7 +31,7 @@ fi
 # to take this into account while deploying anyways
 # clone openstack-ansible
 # stable/ocata already use pinned versions so this is only valid for master
-if [[ "$XCI_LOOP" == "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
+if [[ "$CI_LOOP" == "periodic" && "$OPENSTACK_OSA_VERSION" == "master" ]]; then
     cd $WORKSPACE
     # get the url to openstack-ansible git
     source ./config/env-vars
index 351fe22..492348d 100644 (file)
           use-build-blocker: true
           blocking-jobs:
             - 'xci-verify-{distro}-.*'
-            - 'bifrost-verify-{distro}-.*'
-            - 'bifrost-periodic-{distro}-.*'
+            - 'xci-.*-{distro}-merge-.*'
+            - 'openstack-bifrost-verify-{distro}-.*'
             - 'xci-osa-verify-{distro}-.*'
             - 'xci-osa-periodic-{distro}-.*'
             - 'xci-(os|k8s).*?-virtual-{distro}-.*'
           block-level: 'NODE'
+      - throttle:
+          max-per-node: 2
+          max-total: 10
+          categories:
+            - xci-verify-virtual
+          option: category
 
     wrappers:
       - ssh-agent-wrapper
index b72c339..b38ebe5 100755 (executable)
@@ -35,6 +35,7 @@ cd $WORKSPACE
 # yourself.
 cat > xci_test.sh<<EOF
 #!/bin/bash
+set -o pipefail
 export DISTRO=$DISTRO
 export DEPLOY_SCENARIO=$DEPLOY_SCENARIO
 export FUNCTEST_MODE=$FUNCTEST_MODE
@@ -53,7 +54,7 @@ if [[ ! -z ${WORKSPACE+x} && $GERRIT_PROJECT != "releng-xci" ]]; then
 fi
 
 cd xci
-./xci-deploy.sh
+./xci-deploy.sh | ts
 EOF
 chmod a+x xci_test.sh
 
index 2423f90..3e56ef4 100644 (file)
@@ -66,8 +66,8 @@
           use-build-blocker: true
           blocking-jobs:
             - 'xci-verify-{distro}-.*'
-            - 'bifrost-verify-{distro}-.*'
-            - 'bifrost-periodic-{distro}-.*'
+            - 'xci-.*-{distro}-merge-.*'
+            - 'openstack-bifrost-verify-{distro}-.*'
             - 'xci-osa-verify-{distro}-.*'
             - 'xci-osa-periodic-{distro}-.*'
           block-level: 'NODE'
diff --git a/utils/artifacts.opnfv.org.sh b/utils/artifacts.opnfv.org.sh
new file mode 100755 (executable)
index 0000000..1984b49
--- /dev/null
@@ -0,0 +1,162 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+#export PATH=${PATH}:/root/gsutil
+
+# Step 1: generate index.html
+if [ -f index.html ] ; then
+      rm -f index.html
+fi
+
+OUTPUT="index.html"
+
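+# 'gsutil ls -l' prints a single field for directory prefixes; keep those and link one page per top-level directory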
+for index in $(gsutil ls -l gs://artifacts.opnfv.org | grep -v logs | grep -v review | awk 'NF==1'| sed s,gs://artifacts.opnfv.org/,,)
+do
+echo $index
+  echo "<LI><a href=\"${index%/*}.html\">"$index"</a></LI>" >> $OUTPUT
+done
+
+# functest and vswitchperf logs ####################
+
+for project in functest vswitchperf
+do
+
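+    # Link the per-project log pages, with '/' flattened to '_' in the page name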
+    for index in $(gsutil ls -l gs://artifacts.opnfv.org/logs/"$project"/ |awk 'NF==1'| sed s,gs://artifacts.opnfv.org/,, )
+    do
+    index="$(echo ${index%/*} | sed s,/,_,g)"
+      echo "<LI><a href=\"http://artifacts.opnfv.org/${index%/*}.html\">"$index"</a></LI>" >> $OUTPUT
+    done
+
+done
+#End step 1
+#####################################
+
+
+# Generate html files for all projects except vswitchperf
+for index in $(gsutil ls -l gs://artifacts.opnfv.org | grep -v logs |awk 'NF==1'| sed s,gs://artifacts.opnfv.org/,,)
+do
+OUTPUT=${index%/*}.html
+rm -f $OUTPUT
+
+
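+    # 'gsutil ls -R' prints directory names with a trailing '/:'; emit those as plain list items and everything else as links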
+    for filepath in $(gsutil ls -R gs://artifacts.opnfv.org/"$index" | sed s,gs://artifacts.opnfv.org/,, | grep -v "favicon.ico" | grep -v "gsutil" ); do
+    echo $filepath
+
+    if [[ $filepath =~ "/:" ]]; then
+      path=$(echo $filepath| sed s,/:,,g)
+      echo "<UL>" >> $OUTPUT
+      echo "<LI>$path</LI>" >> $OUTPUT
+      echo "</UL>" >> $OUTPUT
+    else
+      echo "<LI><a href=\"http://artifacts.opnfv.org/$filepath\">"$filepath"</a></LI>" >> $OUTPUT
+    fi
+done
+
+gsutil cp $OUTPUT gs://artifacts.opnfv.org/
+
+gsutil -m setmeta \
+     -h "Content-Type:text/html" \
+     -h "Cache-Control:private, max-age=0, no-transform" \
+      gs://artifacts.opnfv.org/$OUTPUT \
+
+done
+
+
+
+# Generate file for vswitchperf (I don't know what happened here but there is a weird character in this bucket)
+
+index=vswitchperf
+OUTPUT=${index%/*}.html
+rm -f $OUTPUT
+
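+        # Same listing logic as above, run only for the vswitchperf bucket path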
+        for filepath in $(gsutil ls -R gs://artifacts.opnfv.org/"$index" | sed s,gs://artifacts.opnfv.org/,, | grep -v "favicon.ico" | grep -v "gsutil" ); do
+        echo $filepath
+
+        if [[ $filepath =~ "/:" ]]; then
+          path=$(echo $filepath| sed s,/:,,g)
+          echo "<UL>" >> $OUTPUT
+          echo "<LI>$path</LI>" >> $OUTPUT
+          echo "</UL>" >> $OUTPUT
+        else
+          echo "<LI><a href=\"http://artifacts.opnfv.org/$filepath\">"$filepath"</a></LI>" >> $OUTPUT
+        fi
+
+done
+
+
+gsutil cp $OUTPUT gs://artifacts.opnfv.org/
+
+gsutil -m setmeta \
+     -h "Content-Type:text/html" \
+     -h "Cache-Control:private, max-age=0, no-transform" \
+      gs://artifacts.opnfv.org/$OUTPUT \
+
+# Generate html for logs
+
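+# One page per logs/<project>/<pod> directory, named with '/' replaced by '_'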
+for project in functest vswitchperf
+do
+    for index in $(gsutil ls -l gs://artifacts.opnfv.org/logs/"$project"/ |awk 'NF==1'| sed s,gs://artifacts.opnfv.org/,, )
+    do
+
+        OUTPUT="$(echo ${index%/*}.html | sed s,/,_,g)"
+        echo $OUTPUT
+        rm -f $OUTPUT
+
+
+            for filepath in $(gsutil ls -R gs://artifacts.opnfv.org/"$index" | sed s,gs://artifacts.opnfv.org/,, | grep -v "favicon.ico" | grep -v "gsutil" ); do
+            echo $filepath
+
+            if [[ $filepath =~ "/:" ]]; then
+              path=$(echo $filepath| sed s,/:,,g)
+              echo "<UL>" >> $OUTPUT
+              echo "<LI>$path</LI>" >> $OUTPUT
+              echo "</UL>" >> $OUTPUT
+            else
+              echo "<LI><a href=\"http://artifacts.opnfv.org/$filepath\">"$filepath"</a></LI>" >> $OUTPUT
+            fi
+
+
+            done
+
+
+        gsutil cp $OUTPUT gs://artifacts.opnfv.org/
+
+        gsutil -m setmeta \
+             -h "Content-Type:text/html" \
+             -h "Cache-Control:private, max-age=0, no-transform" \
+              gs://artifacts.opnfv.org/$OUTPUT \
+
+
+    done
+done
+
+
+
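+# Finish the top-level index: add a generation timestamp and the Google Analytics snippet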
+OUTPUT="index.html"
+echo "<p> Generated on $(date) </p>" >> $OUTPUT
+
+cat <<EOF >> $OUTPUT
+<script>
+(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+ga('create', 'UA-831873-26', 'auto');
+ga('send', 'pageview');
+</script>
+EOF
+
+# Copy and upload the index file generated in the first step, last
+gsutil cp $OUTPUT gs://artifacts.opnfv.org/
+
+gsutil -m setmeta \
+     -h "Content-Type:text/html" \
+     -h "Cache-Control:private, max-age=0, no-transform" \
+      gs://artifacts.opnfv.org/$OUTPUT \
index cd81f29..f7c6769 100755 (executable)
@@ -103,7 +103,7 @@ if does not exist then exec "$mkdir -p /var/run/$jenkinsuser"
 if failed uid $jenkinsuser then exec "$chown $jenkinsuser /var/run/$jenkinsuser"
 if failed gid $jenkinsuser then exec "$chown :$jenkinsuser /var/run/$jenkinsuser"
 
-check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
+check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid every 2 cycles
 start program = "/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $jenkinshome; export started_monit=true; $0 $@' with timeout 60 seconds"
 stop program = "/bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'"
 depends on jenkins_piddir
@@ -118,7 +118,7 @@ if does not exist then exec \"$mkdir -p /var/run/$jenkinsuser\"
 if failed uid $jenkinsuser then exec \"$chown $jenkinsuser /var/run/$jenkinsuser\"
 if failed gid $jenkinsuser then exec \"$chown :$jenkinsuser /var/run/$jenkinsuser\"
 
-check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid
+check process jenkins with pidfile /var/run/$jenkinsuser/jenkins_jnlp_pid every 2 cycles
 start program = \"/usr/bin/sudo -u $jenkinsuser /bin/bash -c 'cd $jenkinshome; export started_monit=true; $0 $@' with timeout 60 seconds\"
 stop program = \"/bin/bash -c '/bin/kill \$(/bin/cat /var/run/$jenkinsuser/jenkins_jnlp_pid)'\"
 depends on jenkins_piddir\
old mode 100644 (file)
new mode 100755 (executable)
index 161aaef..5201f93
@@ -8,9 +8,8 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-#This will put a bunch of files in the pwd. you have been warned.
 #Counts how long slaves have been online or offline
-
+#exec 2>/dev/null
 
 #Yes I know about jq
 curlcommand() {
@@ -25,74 +24,66 @@ curl -s "https://build.opnfv.org/ci/computer/api/json?tree=computer\[displayName
     | sed s,\",,g
 }
 
-if [ -f podoutput-current ]; then
-  cp podoutput-current podoutput-lastiteration
-fi
-
-curlcommand > podoutput-current
+curlcommand > /tmp/podoutput-current
 
-declare -A slavescurrent slaveslastiteration
+declare -A slavescurrent
 
 while read -r name status ; do
             slavescurrent["$name"]="$status"
-done < <(cat podoutput-current)
-
-while read -r name status ; do
-            slaveslastiteration["$name"]=$status
-done < <(cat podoutput-lastiteration)
-
+done < <(cat /tmp/podoutput-current)
+
+#haste bin stopped allowing post :(
+#files=(*online)
+#for ((i=0; i<${#files[@]}; i+=9)); do
+#./eplot -d -r [-1:74][-1:30] -m    ${files[i]} ${files[i+1]} ${files[i+2]} ${files[i+3]} ${files[i+4]} ${files[i+5]}  ${files[i+6]} ${files[i+7]} ${files[i+8]} ${files[i+9]}
+#done  | ./haste.bash
+##
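+# Each slave gets /tmp/<name>-online holding one counter per run; 0 marks an offline sample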
 main () {
+
 for slavename in "${!slavescurrent[@]}"; do
-    #Slave is online. Mark it down.
+
+    #Slave is online. Record another online iteration.
     if [ "${slavescurrent[$slavename]}" == "false" ]; then
 
-        if  [ -f "$slavename"-offline ]; then
-            echo "removing offline status from $slavename slave was offline for $(cat "$slavename"-offline ) iterations"
-            rm "$slavename"-offline
-        fi
-
-        if  ! [ -f "$slavename"-online ]; then
-            echo "1" > "$slavename"-online
-        elif [ -f "$slavename"-online ]; then
-            #read and increment slavename
-            read -r -d $'\x04' var < "$slavename"-online
-            ((var++))
-            echo -n "ONLINE $slavename "
-            echo "for $var iterations"
-            echo "$var" > "$slavename"-online
-        fi
-    fi
+        if ! [ -f /tmp/"$slavename"-online ]; then
+            echo "1" > /tmp/"$slavename"-online
+            echo "new online slave file created $slavename ${slavescurrent[$slavename]} up for 1 iteration"
+        fi
 
-    #went offline since last iteration.
-    if [ "${slavescurrent[$slavename]}" == "false" ] && [ "${slaveslastiteration[$slavename]}" == "true" ];  then
-        echo "JUST WENT OFFLINE $slavename "
-        if  [ -f "$slavename"-online ]; then
-            echo "removing online status from $slavename. slave was online for $(cat "$slavename"-online ) iterations"
-            rm "$slavename"-online
-        fi
+        #read and increment the online counter for this slave
+        var="$(cat /tmp/"$slavename"-online | tail -n 1)"
+        if [[ "$var" == "0" ]]; then
+            echo "slave $slavename ${slavescurrent[$slavename]} back up for $var iterations"
+        fi
+        ((var++))
+        echo "$var" >> /tmp/"$slavename"-online
+        unset var
+        echo "$slavename up $(cat /tmp/$slavename-online | tail -n 10 | xargs)"
 
     fi
 
-    #slave is offline
+    #Slave is offline. Reset its counter to 0.
     if [ "${slavescurrent[$slavename]}" == "true" ]; then
-        if  ! [ -f "$slavename"-offline ]; then
-            echo "1" > "$slavename"-offline
-        fi
-
-        if [ -f "$slavename"-offline ]; then
-            #read and increment slavename
-            read -r -d $'\x04' var < "$slavename"-offline
-            ((var++))
-            echo "$var" > "$slavename"-offline
-                if  [ "$var" -gt "30" ]; then
-                    echo "OFFLINE FOR $var ITERATIONS REMOVE $slavename "
-                else
-                    echo "OFFLINE $slavename FOR $var ITERATIONS "
-                fi
-        fi
+        if ! [ -f /tmp/"$slavename"-online ]; then
+            echo "0" > /tmp/"$slavename"-online
+            echo "new offline slave file created $slavename ${slavescurrent[$slavename]} up for 0 iterations"
+        fi
+
+        var="$(cat /tmp/"$slavename"-online | tail -n 1)"
+        if [[ "$var" != "0" ]]; then
+            echo "slave $slavename ${slavescurrent[$slavename]} was up for $var iterations"
+            echo "slave $slavename ${slavescurrent[$slavename]} has gone offline, was up for $var iterations, now reset to 0"
+        fi
+
+        echo "0" >> /tmp/"$slavename"-online
+        echo "$slavename down $(cat /tmp/$slavename-online | tail -n 10 | xargs)"
+        unset var
+
     fi
 
+
 done
 }
 
-main
+main | sort | column -t