Merge "KVMFORNFV: Enable artifact download and testing for daily job"
author Fatih Degirmenci <fatih.degirmenci@ericsson.com>
Thu, 8 Sep 2016 10:55:09 +0000 (10:55 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
Thu, 8 Sep 2016 10:55:09 +0000 (10:55 +0000)
32 files changed:
jjb/armband/armband-ci-jobs.yml
jjb/armband/armband-deploy.sh
jjb/fuel/fuel-ci-jobs.yml
jjb/fuel/fuel-deploy.sh
jjb/fuel/fuel-project-jobs.yml
jjb/functest/functest-ci-jobs.yml
jjb/infra/infra-daily-jobs.yml
jjb/infra/infra-provision.sh
jjb/kvmfornfv/kvmfornfv-upload-artifact.sh
jjb/opnfv/opnfv-docker.sh
jjb/opnfv/slave-params.yml
jjb/qtip/qtip-ci-jobs.yml
jjb/qtip/qtip-cleanup.sh [new file with mode: 0644]
jjb/qtip/qtip-daily-ci.sh [new file with mode: 0644]
jjb/releng-macros.yaml
jjb/yardstick/yardstick-ci-jobs.yml
prototypes/bifrost/README.md
prototypes/bifrost/scripts/destroy-env.sh [moved from prototypes/bifrost/scripts/destroy_env.sh with 89% similarity]
prototypes/bifrost/scripts/test-bifrost-deployment.sh
prototypes/puppet-infracloud/README.md
prototypes/puppet-infracloud/hiera/common.yaml
prototypes/puppet-infracloud/manifests/site.pp
prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp
prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp
utils/push-test-logs.sh
utils/test/reporting/functest/index.html
utils/test/reporting/yardstick/index.html
utils/test/reporting/yardstick/reportingConf.py
utils/test/scripts/backup-db.sh
utils/test/scripts/create_kibana_dashboards.py
utils/test/scripts/mongo_to_elasticsearch.py
utils/test/scripts/testcases.yaml [new file with mode: 0644]

index 6ea73e1..f100a46 100644 (file)
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
-#        brahmaputra
+# CI PODs
+#--------------------------------
+#        colorado
 #--------------------------------
     pod:
-        - arm-pod1:
+        - armband-baremetal:
+            slave-label: armband-baremetal
             installer: fuel
             <<: *colorado
-        - arm-pod2:
+        - armband-virtual:
+            slave-label: armband-virtual
             installer: fuel
             <<: *colorado
 #--------------------------------
 #        master
 #--------------------------------
-    pod:
-        - arm-pod1:
+        - armband-baremetal:
+            slave-label: armband-baremetal
             installer: fuel
             <<: *master
+        - armband-virtual:
+            slave-label: armband-virtual
+            installer: fuel
+            <<: *master
+#--------------------------------
+# NON-CI PODs
+#--------------------------------
+#        colorado
+#--------------------------------
+        - arm-pod2:
+            slave-label: arm-pod2
+            installer: fuel
+            <<: *colorado
+#--------------------------------
+#        master
+#--------------------------------
         - arm-pod2:
+            slave-label: arm-pod2
             installer: fuel
             <<: *master
 #--------------------------------
     scenario:
         # HA scenarios
         - 'os-nosdn-nofeature-ha':
-            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
         - 'os-odl_l2-nofeature-ha':
-            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
         - 'os-odl_l3-nofeature-ha':
-            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
         - 'os-odl_l2-bgpvpn-ha':
-            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
 
         # NOHA scenarios
         - 'os-odl_l2-nofeature-noha':
-            auto-trigger-name: 'armband-{installer}-{scenario}-{pod}-{stream}-trigger'
+            auto-trigger-name: '{installer}-{scenario}-{pod}-{stream}-trigger'
 
     jobs:
-        - 'armband-{installer}-{scenario}-{pod}-daily-{stream}'
-        - 'armband-{installer}-deploy-{pod}-daily-{stream}'
+        - '{installer}-{scenario}-{pod}-daily-{stream}'
+        - '{installer}-deploy-{pod}-daily-{stream}'
 
 ########################
 # job templates
 ########################
 - job-template:
-    name: 'armband-{installer}-{scenario}-{pod}-daily-{stream}'
+    name: '{installer}-{scenario}-{pod}-daily-{stream}'
 
     concurrent: false
 
@@ -75,7 +96,7 @@
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'armband-{installer}-os-.*?-{pod}-daily-.*'
+                - '{installer}-os-.*?-{pod}-daily-.*'
             block-level: 'NODE'
 
     wrappers:
         - project-parameter:
             project: '{project}'
         - '{installer}-defaults'
-        - '{pod}-defaults':
+        - '{slave-label}-defaults':
             installer: '{installer}'
         - string:
             name: DEPLOY_SCENARIO
 
     builders:
         - trigger-builds:
-            - project: 'armband-{installer}-deploy-{pod}-daily-{stream}'
+            - project: '{installer}-deploy-{pod}-daily-{stream}'
               current-parameters: false
               predefined-parameters:
                 DEPLOY_SCENARIO={scenario}
                 build-step-failure-threshold: 'never'
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
+        - trigger-builds:
+            - project: 'yardstick-{installer}-{pod}-daily-{stream}'
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO={scenario}
+              block: true
+              same-node: true
+              block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
 
 - job-template:
-    name: 'armband-{installer}-deploy-{pod}-daily-{stream}'
+    name: '{installer}-deploy-{pod}-daily-{stream}'
 
     concurrent: false
 
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'armband-{installer}-deploy-{pod}-daily-{stream}'
-                - 'armband-{installer}-deploy-generic-daily-.*'
+                - '{installer}-deploy-{pod}-daily-{stream}'
+                - '{installer}-deploy-generic-daily-.*'
             block-level: 'NODE'
 
     parameters:
         - project-parameter:
             project: '{project}'
         - '{installer}-defaults'
-        - '{pod}-defaults':
+        - '{slave-label}-defaults':
             installer: '{installer}'
         - string:
             name: DEPLOY_SCENARIO
 # trigger macros
 ########################
 # CI PODs
-#----------------------------------------------------------
-# Enea Armband POD 1 Triggers running against master branch
-#----------------------------------------------------------
+#-----------------------------------------------------------------
+# Enea Armband CI Baremetal Triggers running against master branch
+#-----------------------------------------------------------------
 - trigger:
-    name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod1-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 3 * * 1,4'
+        - timed: '0 3,15 * * 1'
 - trigger:
-    name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod1-master-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 15 * * 1,4'
+        - timed: '0 3,15 * * 2'
 - trigger:
-    name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod1-master-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 3 * * 2,5'
+        - timed: '0 3,15 * * 3'
 - trigger:
-    name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod1-master-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 15 * * 2,5'
+        - timed: '0 3,15 * * 4'
 - trigger:
-    name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod1-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-master-trigger'
     triggers:
-        - timed: '0 3 * * 3,6'
+        - timed: '0 3,15 * * 5'
+#----------------------------------------------------------------------
+# Enea Armband CI Baremetal Triggers running against colorado branch
+#----------------------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-armband-baremetal-colorado-trigger'
+    triggers:
+        - timed: '0 4,16 * * 1'
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-armband-baremetal-colorado-trigger'
+    triggers:
+        - timed: '0 4,16 * * 2'
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-armband-baremetal-colorado-trigger'
+    triggers:
+        - timed: '0 4,16 * * 3'
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-armband-baremetal-colorado-trigger'
+    triggers:
+        - timed: '0 4,16 * * 4'
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-armband-baremetal-colorado-trigger'
+    triggers:
+        - timed: '0 4,16 * * 5'
 #---------------------------------------------------------------
-# Enea Armband POD 1 Triggers running against brahmaputra branch
+# Enea Armband CI Virtual Triggers running against master branch
 #---------------------------------------------------------------
 - trigger:
-    name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod1-colorado-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-master-trigger'
+    triggers:
+        - timed: ''
+#--------------------------------------------------------------------
+# Enea Armband CI Virtual Triggers running against colorado branch
+#--------------------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-armband-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod1-colorado-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-armband-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod1-colorado-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-armband-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod1-colorado-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-armband-virtual-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod1-colorado-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-armband-virtual-colorado-trigger'
     triggers:
         - timed: ''
 #----------------------------------------------------------
 # Enea Armband POD 2 Triggers running against master branch
 #----------------------------------------------------------
-# No triggers for master for now
 - trigger:
-    name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod2-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod2-master-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod2-master-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod2-master-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod2-master-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod2-master-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod2-master-trigger'
     triggers:
         - timed: ''
 #---------------------------------------------------------------
-# Enea Armband POD 2 Triggers running against brahmaputra branch
+# Enea Armband POD 2 Triggers running against colorado branch
 #---------------------------------------------------------------
 - trigger:
-    name: 'armband-fuel-os-odl_l2-nofeature-ha-arm-pod2-colorado-trigger'
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod2-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-nosdn-nofeature-ha-arm-pod2-colorado-trigger'
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod2-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-odl_l3-nofeature-ha-arm-pod2-colorado-trigger'
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod2-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-odl_l2-bgpvpn-ha-arm-pod2-colorado-trigger'
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod2-colorado-trigger'
     triggers:
         - timed: ''
 - trigger:
-    name: 'armband-fuel-os-odl_l2-nofeature-noha-arm-pod2-colorado-trigger'
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod2-colorado-trigger'
     triggers:
         - timed: ''
index 901f845..fb4c1ea 100755 (executable)
@@ -50,6 +50,13 @@ if [[ $LAB_CONFIG_URL =~ ^(git|ssh):// ]]; then
     echo "cloning $LAB_CONFIG_URL"
     git clone --quiet --branch ${GIT_BRANCH##origin/} $LAB_CONFIG_URL lab-config
     LAB_CONFIG_URL=file://${WORKSPACE}/lab-config
+
+    # Source local_env if present, which contains POD-specific config
+    local_env="${WORKSPACE}/lab-config/labs/$LAB_NAME/$POD_NAME/fuel/config/local_env"
+    if [ -e $local_env ]; then
+        echo "-- Sourcing local environment file"
+        source $local_env
+    fi
 fi
 
 # releng wants us to use nothing else but opnfv.iso for now. We comply.
index 0d31c99..056b2cc 100644 (file)
         - zte-pod3:
             slave-label: zte-pod3
             <<: *master
+        - zte-pod1:
+            slave-label: zte-pod1
+            <<: *colorado
+        - zte-pod3:
+            slave-label: zte-pod3
+            <<: *colorado
 #--------------------------------
 #       scenarios
 #--------------------------------
 
     publishers:
         - email:
-            recipients: peter.barabas@ericsson.com
+            recipients: peter.barabas@ericsson.com fzhadaev@mirantis.com
 
 - job-template:
     name: 'fuel-deploy-{pod}-daily-{stream}'
 
     publishers:
         - email:
-            recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com peter.barabas@ericsson.com
+            recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com peter.barabas@ericsson.com fzhadaev@mirantis.com
 
 ########################
 # parameter macros
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-zte-pod1-daily-colorado-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 2 * * *'
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-ha-zte-pod1-daily-colorado-trigger'
     triggers:
 - trigger:
     name: 'fuel-os-nosdn-kvm-ha-zte-pod3-daily-colorado-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 18 * * *'
 - trigger:
     name: 'fuel-os-nosdn-ovs-ha-zte-pod3-daily-colorado-trigger'
     triggers:
index 730f0d1..136aac8 100755 (executable)
@@ -121,7 +121,7 @@ export FUEL_MASTER_IP=10.20.0.2
 export TACKER_SCRIPT_URL="https://git.opnfv.org/cgit/fuel/plain/prototypes/sfc_tacker/poc.tacker-up.sh?h=${GIT_BRANCH#*/}"
 export CONTROLLER_NODE_IP=$(sshpass -pr00tme /usr/bin/ssh -o UserKnownHostsFile=/dev/null \
     -o StrictHostKeyChecking=no root@$FUEL_MASTER_IP 'fuel node list' | \
-    grep opendaylight | cut -d'|' -f5)
+    grep controller | head -1 | cut -d'|' -f5)
 
 # we can't do much if we do not have the controller IP
 if [[ ! "$CONTROLLER_NODE_IP" =~ "10.20.0" ]]; then
@@ -149,8 +149,6 @@ send "/bin/mkdir -p /root/sfc-poc && cd /root/sfc-poc\r"
 expect "# "
 send "git clone https://gerrit.opnfv.org/gerrit/fuel && cd fuel\r"
 expect "# "
-send "git fetch https://gerrit.opnfv.org/gerrit/fuel refs/changes/97/10597/2 && git checkout FETCH_HEAD\r"
-expect "# "
 send "/bin/bash /root/sfc-poc/fuel/prototypes/sfc_tacker/poc.tacker-up.sh\r"
 expect "# "
 send "exit\r"
index cf89383..588ab0c 100644 (file)
@@ -79,7 +79,7 @@
 
     publishers:
         - email:
-            recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com
+            recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com fzhadaev@mirantis.com
 
 - job-template:
     name: 'fuel-merge-build-{stream}'
 
     publishers:
         - email:
-            recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com
+            recipients: jonas.bjurel@ericsson.com stefan.k.berg@ericsson.com fzhadaev@mirantis.com
 
 - job-template:
     name: 'fuel-deploy-generic-daily-{stream}'
index 3070c25..0f0caba 100644 (file)
             slave-label: '{pod}'
             installer: apex
             <<: *colorado
+# armband CI PODs
+        - armband-baremetal:
+            slave-label: armband-baremetal
+            installer: fuel
+            <<: *master
+        - armband-virtual:
+            slave-label: armband-virtual
+            installer: fuel
+            <<: *master
+        - armband-baremetal:
+            slave-label: armband-baremetal
+            installer: fuel
+            <<: *colorado
+        - armband-virtual:
+            slave-label: armband-virtual
+            installer: fuel
+            <<: *colorado
 #--------------------------------
 #        None-CI PODs
 #--------------------------------
             slave-label: '{pod}'
             installer: apex
             <<: *master
-        - arm-pod1:
+        - arm-pod2:
             slave-label: '{pod}'
             installer: fuel
             <<: *master
             slave-label: '{pod}'
             installer: fuel
             <<: *master
+        - zte-pod1:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *colorado
         - zte-pod2:
             slave-label: '{pod}'
             installer: fuel
             slave-label: '{pod}'
             installer: fuel
             <<: *master
-        - arm-pod1:
+        - zte-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *colorado
+        - arm-pod2:
             slave-label: '{pod}'
             installer: fuel
             <<: *colorado
         - 'functest-cleanup'
         - 'set-functest-env'
         - 'functest-suite'
-        - 'functest-exit'
 
 - builder:
     name: functest-daily
     name: functest-exit
     builders:
         - shell:
-            !include-raw: ./functest-exit.sh
\ No newline at end of file
+            !include-raw: ./functest-exit.sh
index 64c2fc0..a066e7d 100644 (file)
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
 
+    publishers:
+        - email:
+            recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn
+
 - job-template:
     name: 'infra-{phase}-{pod}-daily-{stream}'
 
         - string:
             name: DEPLOY_SCENARIO
             default: 'os-nosdn-nofeature-noha'
+        - string:
+            name: CLEAN_DIB_IMAGES
+            default: 'false'
 
     scm:
         - git-scm:
         - shell: |
             #!/bin/bash
 
-            sudo $WORKSPACE/jjb/infra/infra-provision.sh
+            echo "Not activated!"
 - builder:
     name: 'infra-smoketest-daily-builder'
     builders:
         - shell: |
             #!/bin/bash
 
-            sudo $WORKSPACE/jjb/infra/infra-provision.sh
+            echo "Not activated!"
index 5ddbaf9..45ed3b9 100755 (executable)
@@ -21,7 +21,7 @@ cp -R /opt/releng/prototypes/bifrost/* /opt/bifrost/
 
 # cleanup remnants of previous deployment
 cd /opt/bifrost
-./scripts/destroy_env.sh
+./scripts/destroy-env.sh
 
 # provision 3 VMs; jumphost, controller, and compute
 cd /opt/bifrost
index 327ea97..6f8fff3 100755 (executable)
@@ -11,6 +11,7 @@ fi
 
 case "$JOB_TYPE" in
     verify)
+        OPNFV_ARTIFACT_VERSION="gerrit-$GERRIT_CHANGE_NUMBER"
         GS_UPLOAD_LOCATION="gs://artifacts.opnfv.org/$PROJECT/review/$GERRIT_CHANGE_NUMBER"
         echo "Removing outdated artifacts produced for the previous patch for the change $GERRIT_CHANGE_NUMBER"
         gsutil ls $GS_UPLOAD_LOCATION > /dev/null 2>&1 && gsutil rm -r $GS_UPLOAD_LOCATION
@@ -26,11 +27,32 @@ case "$JOB_TYPE" in
         exit 1
 esac
 
+# save information regarding artifacts into file
+(
+    echo "OPNFV_ARTIFACT_VERSION=$OPNFV_ARTIFACT_VERSION"
+    echo "OPNFV_GIT_URL=$(git config --get remote.origin.url)"
+    echo "OPNFV_GIT_SHA1=$(git rev-parse HEAD)"
+    echo "OPNFV_ARTIFACT_URL=$GS_UPLOAD_LOCATION"
+    echo "OPNFV_BUILD_URL=$BUILD_URL"
+) > $WORKSPACE/opnfv.properties
+source $WORKSPACE/opnfv.properties
+
+# upload artifacts
 gsutil cp -r $WORKSPACE/build_output/* $GS_UPLOAD_LOCATION > $WORKSPACE/gsutil.log 2>&1
 gsutil -m setmeta -r \
     -h "Cache-Control:private, max-age=0, no-transform" \
     $GS_UPLOAD_LOCATION > /dev/null 2>&1
 
+# upload metadata file for the artifacts built by daily job
+if [[ "$JOB_TYPE" == "daily" ]]; then
+    gsutil cp $WORKSPACE/opnfv.properties $GS_UPLOAD_LOCATION/opnfv.properties > $WORKSPACE/gsutil.log 2>&1
+    gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > $WORKSPACE/gsutil.log 2>&1
+    gsutil -m setmeta -r \
+        -h "Cache-Control:private, max-age=0, no-transform" \
+        $GS_UPLOAD_LOCATION/opnfv.properties \
+        gs://$GS_URL/latest.properties > /dev/null 2>&1
+fi
+
 gsutil ls $GS_UPLOAD_LOCATION > /dev/null 2>&1
 if [[ $? -ne 0 ]]; then
     echo "Problem while uploading artifacts!"
index ef47384..07198c6 100644 (file)
@@ -110,7 +110,12 @@ echo "Tag version to be build and pushed: $DOCKER_TAG"
 # Start the build
 echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG"
 
-docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG .
+if [[ $DOCKER_REPO_NAME == *"functest"* ]]; then
+    docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG --build-arg BRANCH=$branch .
+else
+    docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG .
+fi
+
 echo "Creating tag '$DOCKER_TAG'..."
 docker tag -f $DOCKER_REPO_NAME:$DOCKER_BRANCH_TAG $DOCKER_REPO_NAME:$DOCKER_TAG
 
index 59348e4..c7ec6aa 100644 (file)
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
+- parameter:
+    name: 'armband-baremetal-defaults'
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'armband-baremetal'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: LAB_CONFIG_URL
+            default: ssh://git@git.enea.com/pharos/lab-config
+            description: 'Base URI to the configuration directory'
 - parameter:
     name: 'joid-baremetal-defaults'
     parameters:
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
+- parameter:
+    name: 'armband-virtual-defaults'
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'armband-virtual'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: LAB_CONFIG_URL
+            default: ssh://git@git.enea.com/pharos/lab-config
+            description: 'Base URI to the configuration directory'
 - parameter:
     name: 'joid-virtual-defaults'
     parameters:
             name: SSH_KEY
             default: /root/.ssh/id_rsa
             description: 'SSH key to use for Apex'
-- parameter:
-    name: 'arm-pod1-defaults'
-    parameters:
-        - node:
-            name: SLAVE_NAME
-            description: 'Slave name on Jenkins'
-            allowed-slaves:
-                - arm-pod1
-            default-slaves:
-                - arm-pod1
-        - string:
-            name: GIT_BASE
-            default: https://gerrit.opnfv.org/gerrit/$PROJECT
-            description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: DEFAULT_BRIDGE
-            default: 'admin6_br0,public6_br0'
-            desciption: 'The bridge to use for Fuel PXE booting. It can be a comma sparated list of bridges, in which case the first is the PXE boot bridge, and all subsequent interfaces that will be added to the VM. If left empty, most deploy scripts will default to pxebr.'
-        - string:
-            name: DEPLOY_TIMEOUT
-            default: '360'
-            description: 'Deployment timeout in minutes'
-        - string:
-            name: LAB_CONFIG_URL
-            default: ssh://git@git.enea.com/pharos/lab-config
-            description: 'Base URI to the configuration directory'
 - parameter:
     name: 'arm-pod2-defaults'
     parameters:
             name: GIT_BASE
             default: https://gerrit.opnfv.org/gerrit/$PROJECT
             description: 'Git URL to use on this Jenkins Slave'
-        - string:
-            name: DEFAULT_BRIDGE
-            default: 'admin_br0,public_br0'
-            desciption: 'The bridge to use for Fuel PXE booting. It can be a comma sparated list of bridges, in which case the first is the PXE boot bridge, and all subsequent interfaces that will be added to the VM. If left empty, most deploy scripts will default to pxebr.'
-        - string:
-            name: DEPLOY_TIMEOUT
-            default: '360'
-            description: 'Deployment timeout in minutes'
         - string:
             name: LAB_CONFIG_URL
             default: ssh://git@git.enea.com/pharos/lab-config
index d454b0f..d0d6b47 100644 (file)
@@ -13,6 +13,7 @@
         stream: master
         branch: '{stream}'
         gs-pathname: ''
+        docker-tag: 'latest'
 #--------------------------------
 # POD, INSTALLER, AND BRANCH MAPPING
 #--------------------------------
         - string:
             name: DEPLOY_SCENARIO
             default: 'os-nosdn-nofeature-ha'
+        - string:
+            name: DOCKER_TAG
+            default: '{docker-tag}'
+            description: 'Tag to pull docker image'
 
     scm:
         - git-scm:
@@ -72,9 +77,7 @@
 
     builders:
         - 'qtip-cleanup'
-        - 'qtip-set-env'
-        - 'qtip-run-suite'
-        - 'qtip-pushtoDB'
+        - 'qtip-daily-ci'
 
     publishers:
         - email:
 #builder macros
 ###########################
 - builder:
-    name: qtip-set-env
-    builders:
-        - shell: |
-            #!/bin/bash
-            echo "Qtip: Start Docker and prepare environment"
-            envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} -e NODE_NAME=${NODE_NAME}"
-            suite="TEST_CASE=all"
-            dir_imgstore="${HOME}/imgstore"
-            img_volume="${dir_imgstore}:/home/opnfv/imgstore"
-            docker pull opnfv/qtip:latest
-            cmd=" docker run -id -e $envs -e $suite -v ${img_volume} opnfv/qtip:latest /bin/bash"
-            echo "Qtip: Running docker run command: ${cmd}"
-            ${cmd}
-            docker ps -a
-            container_id=$(docker ps | grep 'opnfv/qtip:latest' | awk '{print $1}' | head -1)
-             if [ $(docker ps | grep 'opnfv/qtip' | wc -l) == 0 ]; then
-                echo "The container opnfv/qtip with ID=${container_id} has not been properly started. Exiting..."
-                exit 1
-            fi
-- builder:
-    name: qtip-run-suite
-    builders:
-        - shell: |
-            #!/bin/bash
-            container_id=$(docker ps | grep 'opnfv/qtip:latest' | awk '{print $1}' | head -1)
-            if [[ ! -z ${container_id} ]]; then
-                echo "The container ID is: ${container_id}"
-                QTIP_REPO=/home/opnfv/repos/qtip
-                docker exec -t ${container_id} $QTIP_REPO/docker/run_qtip.sh
-            else
-                echo "Container ID not available"
-            fi
-
-- builder:
-    name: qtip-pushtoDB
+    name: qtip-daily-ci
     builders:
-        - shell: |
-            #!/bin/bash
-
-            echo "Pushing available results to DB"
-            echo "The container id is:"
-            container_id=$(docker ps | grep 'opnfv/qtip:latest' | awk '{print $1}' | head -1)
-            if [[ ! -z ${container_id} ]]; then
-                echo "The condiner ID is: ${container_id}"
-                QTIP_REPO=/home/opnfv/repos/qtip
-                docker exec -t ${container_id} $QTIP_REPO/docker/push_db.sh
-            else
-                echo "Container ID not available"
-            fi
+        - shell:
+            !include-raw: ./qtip-daily-ci.sh
 
 - builder:
     name: qtip-cleanup
     builders:
-        - shell: |
-            #!/bin/bash
-
-            echo "Cleaning up QTIP  docker containers/images..."
-            # Remove previous running containers if exist
-            if [[ ! -z $(docker ps -a | grep opnfv/qtip) ]]; then
-                echo "Removing existing opnfv/qtip containers..."
-                running_containers=$(docker ps | grep opnfv/qtip | awk '{print $1}')
-                docker stop ${running_containers}
-                all_containers=$(docker ps -a | grep opnfv/qtip | awk '{print $1}')
-                docker rm ${all_containers}
-            fi
-
-            # Remove existing images if exist
-            if [[ ! -z $(docker images | grep opnfv/qtip) ]]; then
-                echo "Docker images to remove:"
-                docker images | head -1 && docker images | grep opnfv/qtip
-                image_tags=($(docker images | grep opnfv/qtip | awk '{print $2}'))
-                for tag in "${image_tags[@]}"; do
-                    echo "Removing docker image opnfv/qtip:$tag..."
-                    docker rmi opnfv/qtip:$tag
-                done
-            fi
+        - shell:
+            !include-raw: ./qtip-cleanup.sh
 
 #################
 #trigger macros
diff --git a/jjb/qtip/qtip-cleanup.sh b/jjb/qtip/qtip-cleanup.sh
new file mode 100644 (file)
index 0000000..b923aa2
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2016 ZTE and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+echo "Cleaning up QTIP  docker containers/images..."
+
+# Remove previous running containers if exist
+if [[ ! -z $(docker ps -a | grep opnfv/qtip) ]]; then
+    echo "Removing existing opnfv/qtip containers..."
+    running_containers=$(docker ps | grep opnfv/qtip | awk '{print $1}')
+    docker stop ${running_containers}
+    all_containers=$(docker ps -a | grep opnfv/qtip | awk '{print $1}')
+    docker rm ${all_containers}
+fi
+
+# Remove existing images if exist
+if [[ ! -z $(docker images | grep opnfv/qtip) ]]; then
+    echo "Docker images to remove:"
+    docker images | head -1 && docker images | grep opnfv/qtip
+    image_tags=($(docker images | grep opnfv/qtip | awk '{print $2}'))
+    for tag in "${image_tags[@]}"; do
+        echo "Removing docker image opnfv/qtip:$tag..."
+        docker rmi opnfv/qtip:$tag
+    done
+fi
+
diff --git a/jjb/qtip/qtip-daily-ci.sh b/jjb/qtip/qtip-daily-ci.sh
new file mode 100644 (file)
index 0000000..4fdc043
--- /dev/null
@@ -0,0 +1,38 @@
+#!/bin/bash
+##############################################################################
+# Copyright (c) 2016 ZTE and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+set -e
+
+envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} -e NODE_NAME=${NODE_NAME}"
+suite="TEST_CASE=all"
+dir_imgstore="${HOME}/imgstore"
+img_volume="${dir_imgstore}:/home/opnfv/imgstore"
+
+echo "Qtip: Pulling docker image: opnfv/qtip:${DOCKER_TAG}"
+docker pull opnfv/qtip:$DOCKER_TAG
+
+cmd=" docker run -id -e $envs -e $suite -v ${img_volume} opnfv/qtip:${DOCKER_TAG} /bin/bash"
+echo "Qtip: Running docker command: ${cmd}"
+${cmd}
+
+container_id=$(docker ps | grep "opnfv/qtip:${DOCKER_TAG}" | awk '{print $1}' | head -1)
+if [ $(docker ps | grep 'opnfv/qtip' | wc -l) == 0 ]; then
+    echo "The container opnfv/qtip with ID=${container_id} has not been properly started. Exiting..."
+    exit 1
+else
+    echo "The container ID is: ${container_id}"
+    QTIP_REPO=/home/opnfv/repos/qtip
+
+    echo "Run Qtip test"
+    docker exec -t ${container_id} $QTIP_REPO/docker/run_qtip.sh
+
+    echo "Pushing available results to DB"
+    docker exec -t ${container_id} $QTIP_REPO/docker/push_db.sh
+fi
+
+echo "Qtip done!"
index 8328aec..2aa775f 100644 (file)
@@ -47,6 +47,7 @@
                 - 'origin/$GERRIT_BRANCH'
             skip-tag: true
             choosing-strategy: '{choosing-strategy}'
+            timeout: 15
 
 - wrapper:
     name: build-timeout
             mv docs_output "$local_path"
             gsutil -m cp -r "$local_path" "gs://$gs_base"
 
-            if gsutil ls "gs://$gs_path" | grep -e 'html$' > /dev/null 2>&1 ; then
-                gsutil -m setmeta \
-                    -h "Content-Type:text/html" \
-                    -h "Cache-Control:private, max-age=0, no-transform" \
-                    "gs://$gs_path"/**.html
-            fi
+            gsutil -m setmeta \
+                -h "Content-Type:text/html" \
+                -h "Cache-Control:private, max-age=0, no-transform" \
+                "gs://$gs_path"/**.html > /dev/null 2>&1
 
             echo "Document link(s):" >> gerrit_comment.txt
             find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
             mv docs_output "$local_path"
             gsutil -m cp -r "$local_path" "gs://$GS_URL"
 
-            if gsutil ls "gs://$gs_path" | grep -e 'html$' > /dev/null 2>&1 ; then
-                gsutil -m setmeta \
-                    -h "Content-Type:text/html" \
-                    -h "Cache-Control:private, max-age=0, no-transform" \
-                    "gs://$gs_path"/**.html
-            fi
+            gsutil -m setmeta \
+                -h "Content-Type:text/html" \
+                -h "Cache-Control:private, max-age=0, no-transform" \
+                "gs://$gs_path"/**.html > /dev/null 2>&1
 
             echo "Document link(s):" >> gerrit_comment.txt
             find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
index d9fb435..dd88a52 100644 (file)
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *colorado
+# armband CI PODs
+        - armband-baremetal:
+            slave-label: armband-baremetal
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - armband-virtual:
+            slave-label: armband-virtual
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - armband-baremetal:
+            slave-label: armband-baremetal
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *colorado
+        - armband-virtual:
+            slave-label: armband-virtual
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *colorado
 # joid CI PODs
         - baremetal:
             slave-label: joid-baremetal
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
+        - zte-pod1:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *colorado
         - zte-pod2:
             slave-label: '{pod}'
             installer: fuel
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
-        - arm-pod1:
+        - zte-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *colorado
+        - arm-pod2:
             slave-label: '{pod}'
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             name: YARDSTICK_DB_BACKEND
             default: ''
             description: 'Arguments to use in order to choose the backend DB'
+- parameter:
+    name: 'yardstick-params-armband-baremetal'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: '-i 104.197.68.199:8086'
+            description: 'Arguments to use in order to choose the backend DB'
+- parameter:
+    name: 'yardstick-params-armband-virtual'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: ''
+            description: 'Arguments to use in order to choose the backend DB'
 - parameter:
     name: 'yardstick-params-joid-baremetal'
     parameters:
             description: 'Arguments to use in order to choose the backend DB'
 
 - parameter:
-    name: 'yardstick-params-arm-pod1'
+    name: 'yardstick-params-arm-pod2'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
index fffd1de..f50ffb2 100644 (file)
@@ -24,7 +24,7 @@ Please follow that steps:
 5. Run destroy script if you need to cleanup previous environment::
 
     cd /opt/bifrost
-    ./scripts/destroy_env.sh
+    ./scripts/destroy-env.sh
 
 6. Run deployment script to spin up 3 vms with bifrost: jumphost, controller and compute::
 
similarity index 89%
rename from prototypes/bifrost/scripts/destroy_env.sh
rename to prototypes/bifrost/scripts/destroy-env.sh
index 819048b..4dffee6 100755 (executable)
@@ -26,9 +26,13 @@ echo "removing leases"
 echo "removing logs"
 rm -rf /var/log/libvirt/baremetal_logs/*.log
 
-# clean up images
-rm -rf /httpboot/*
-rm -rf /tftpboot/*
+# clean up dib images only if requested explicitly
+if [ $CLEAN_DIB_IMAGES = "true" ]; then
+    rm -rf /httpboot/*
+    rm -rf /tftpboot/*
+fi
+
+# remove VM disk images
 rm -rf /var/lib/libvirt/images/*.qcow2
 
 echo "restarting services"
index 66affe9..d796f35 100755 (executable)
@@ -71,17 +71,17 @@ set -x -o nounset
 cd $BIFROST_HOME/playbooks
 
 # Syntax check of dynamic inventory test path
-${ANSIBLE} -vvvv \
-       -i inventory/localhost \
-       test-bifrost-create-vm.yaml \
-       --syntax-check \
-       --list-tasks
-${ANSIBLE} -vvvv \
-       -i inventory/localhost \
-       ${TEST_PLAYBOOK} \
-       --syntax-check \
-       --list-tasks \
-       -e testing_user=${TESTING_USER}
+for task in syntax-check list-tasks; do
+    ${ANSIBLE} -vvvv \
+           -i inventory/localhost \
+           test-bifrost-create-vm.yaml \
+           --${task}
+    ${ANSIBLE} -vvvv \
+           -i inventory/localhost \
+           ${TEST_PLAYBOOK} \
+           --${task} \
+           -e testing_user=${TESTING_USER}
+done
 
 # Create the test VMS
 ${ANSIBLE} -vvvv \
index f3bd672..37d575c 100644 (file)
@@ -11,7 +11,7 @@ Once all the hardware is provisioned, enter in controller and compute nodes and
 
 2. Copy hiera to the right place::
 
-    cp /opt/releng/prototypes/puppet-infracloud/hiera/common.yaml /var/lib/hiera/    
+    cp /opt/releng/prototypes/puppet-infracloud/hiera/common.yaml /var/lib/hiera
 
 3. Install modules::
 
@@ -21,7 +21,7 @@ Once all the hardware is provisioned, enter in controller and compute nodes and
 4. Apply the infracloud manifest::
 
     cd /opt/releng/prototypes/puppet-infracloud
-    puppet apply --manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+    puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
 
 5. Once you finish this operation on controller and compute nodes, you will have a functional OpenStack cloud.
 
@@ -31,7 +31,7 @@ In jumphost, follow that steps:
 
     git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
 
-2. Create OpenStack clouds config directory:
+2. Create OpenStack clouds config directory::
 
     mkdir -p /root/.config/openstack
 
@@ -39,14 +39,23 @@ In jumphost, follow that steps:
 
     cp /opt/releng/prototypes/puppet-infracloud/creds/clouds.yaml /root/.config/openstack/
 
-4. Install openstack-client:
+4. Install python-dev package as the installation of python-openstackclient depends on it::
+
+    apt-get install -y python-dev
+
+5. Install openstack-client. (version 3.2.0 is known to work)::
 
     pip install python-openstackclient
 
-5. Export the desired cloud::
+6. Update /etc/hosts and add controller00::
+
+    192.168.122.3 controller00
+    192.168.122.3 controller00.opnfvlocal controller00
+
+7. Export the desired cloud::
 
     export OS_CLOUD=opnfv
 
-6. Start using it::
+8. Start using it::
 
-    openstack server list
+    openstack service list
index 6c28f19..7d6b440 100644 (file)
@@ -75,3 +75,6 @@ hosts:
     ip: 192.168.122.3
   compute00.opnfvlocal:
     ip: 192.168.122.4
+
+# br-eth0 for debian, br-ens3 for RHEL
+bridge_name: br-eth0
index e524918..1bbd282 100644 (file)
@@ -30,7 +30,7 @@ node 'controller00.opnfvlocal' {
     keystone_admin_token             => hiera('keystone_admin_token'),
     ssl_key_file_contents            => hiera('ssl_key_file_contents'),
     ssl_cert_file_contents           => hiera('ssl_cert_file_contents'),
-    br_name                          => 'br-eth0',
+    br_name                          => hiera('bridge_name'),
     controller_public_address        => $::fqdn,
     neutron_subnet_cidr              => '192.168.122.0/24',
     neutron_subnet_gateway           => '192.168.122.1',
@@ -55,9 +55,16 @@ node 'compute00.opnfvlocal' {
     neutron_admin_password           => hiera('neutron_admin_password'),
     ssl_cert_file_contents           => hiera('ssl_cert_file_contents'),
     ssl_key_file_contents            => hiera('ssl_key_file_contents'),
-    br_name                          => 'br-eth0',
+    br_name                          => hiera('bridge_name'),
     controller_public_address        => 'controller00.opnfvlocal',
     virt_type                        => 'qemu',
   }
 }
 
+node 'jumphost.opnfvlocal' {
+  class { 'opnfv::server':
+    sysadmins                 => hiera('sysadmins', []),
+    enable_unbound            => false,
+    purge_apt_sources         => false,
+  }
+}
index ca548a5..77908c0 100644 (file)
@@ -8,6 +8,14 @@ class opnfv::compute (
   $controller_public_address,
   $virt_type = 'kvm',
 ) {
+  # disable selinux if needed
+  if $::osfamily == 'RedHat' {
+    class { 'selinux':
+      mode   => 'permissive',
+      before => Class['::infracloud::compute'],
+    }
+  }
+
   class { '::infracloud::compute':
     nova_rabbit_password          => $nova_rabbit_password,
     neutron_rabbit_password       => $neutron_rabbit_password,
index 7522692..4bae42c 100644 (file)
@@ -30,6 +30,14 @@ class opnfv::controller (
   $opnfv_password,
   $opnfv_email = 'opnfvuser@gmail.com',
 ) {
+  # disable selinux if needed
+  if $::osfamily == 'RedHat' {
+    class { 'selinux':
+      mode   => 'permissive',
+      before => Class['::infracloud::controller'],
+    }
+  }
+
   class { '::infracloud::controller':
     keystone_rabbit_password         => $keystone_rabbit_password,
     neutron_rabbit_password          => $neutron_rabbit_password,
index 24b3281..0fa882b 100644 (file)
@@ -54,8 +54,14 @@ if [ -d "$dir_result" ]; then
             if [ $? != 0 ]; then
                 echo "Not possible to push results to artifact: gsutil not installed.";
             else
-                echo "copy result files to artifact $project_artifact"
-                gsutil -m cp -r "$dir_result" gs://artifacts.opnfv.org/"$project_artifact"/ >/dev/null 2>&1
+                echo "Uploading logs to artifact $project_artifact"
+                gsutil -m cp -r "$dir_result"/* gs://artifacts.opnfv.org/"$project_artifact"/ >/dev/null 2>&1
+                echo "Logs can be found in http://artifacts.opnfv.org/logs_${project}_${testbed}.html"
+                cd $dir_result
+                files=($(find . -name \* -print|sed 's/^\.//'|sed '/^\s*$/d'))
+                for f in ${files[@]}; do
+                    echo "http://artifacts.opnfv.org/${project_artifact}${f}"
+                done
             fi
         fi
     else
index af40335..bb1bce2 100644 (file)
         <h3 class="text-muted">Functest reporting page</h3>
         <nav>
           <ul class="nav nav-justified">
-            <li class="active"><a href="#">Home</a></li>
-            <li><a href="./index-status-apex.html">Status</a></li>
-            <li><a href="./index-tempest-apex.html">Tempest</a></li>
-            <li><a href="./index-vims-apex.html">vIMS</a></li>
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+            <li><a href="index-status-apex.html">Apex</a></li>
+            <li><a href="index-status-compass.html">Compass</a></li>
+            <li><a href="index-status-fuel.html">Fuel</a></li>
+            <li><a href="index-status-joid.html">Joid</a></li>
           </ul>
         </nav>
       </div>
index ec64bc8..488f142 100644 (file)
         <h3 class="text-muted">Yardstick reporting page</h3>
         <nav>
           <ul class="nav nav-justified">
-            <li class="active"><a href="#">Home</a></li>
-            <li><a href="./index-status-apex.html">Status</a></li>
+            <li class="active"><a href="http://testresults.opnfv.org/reporting/index.html">Home</a></li>
+            <li><a href="index-status-apex.html">Apex</a></li>
+            <li><a href="index-status-compass.html">Compass</a></li>
+            <li><a href="index-status-fuel.html">Fuel</a></li>
+            <li><a href="index-status-joid.html">Joid</a></li>
           </ul>
         </nav>
       </div>
@@ -45,4 +48,4 @@
         </div>
     </div>
     <div class="col-md-1"></div>
-</div>
\ No newline at end of file
+</div>
index af95cc0..9e34034 100644 (file)
@@ -11,7 +11,7 @@
 # ****************************************************
 installers = ["apex", "compass", "fuel", "joid"]
 
-versions = ["master"]
+versions = ["master", "stable/colorado"]
 
 # get data in the past 7 days
 PERIOD = 7
index aa36aa3..35c3fbe 100644 (file)
@@ -18,16 +18,16 @@ echo "Create Directory for backup"
 mkdir -p $TARGET_DIR
 
 echo "Export results"
-mongoexport -db test_results_collection -c test_results --out $TARGET_DIR/results.json
+mongoexport --db test_results_collection -c results --out $TARGET_DIR/backup-results.json
 echo "Export test cases"
-mongoexport --db test_results_collection -c test_cases --out $TARGET_DIR/backup-cases.json
+mongoexport --db test_results_collection -c testcases --out $TARGET_DIR/backup-cases.json
 echo "Export projects"
-mongoexport --db test_results_collection -c test_projects --out $TARGET_DIR/backup-projects.json
+mongoexport --db test_results_collection -c projects --out $TARGET_DIR/backup-projects.json
 echo "Export pods"
-mongoexport --db test_results_collection -c pod --out $TARGET_DIR/backup-pod.json
+mongoexport --db test_results_collection -c pods --out $TARGET_DIR/backup-pod.json
 
 echo "Create tar.gz"
-tar -cvzf $TEST_RESULT_DB_BACKUP $TARGET_DIR
+#tar -cvzf $TEST_RESULT_DB_BACKUP $TARGET_DIR
 
 echo "Delete temp directory"
-rm -Rf $TARGET_DIR
+#rm -Rf $TARGET_DIR
index 73f4ed9..abb9471 100644 (file)
 #! /usr/bin/env python
+import json
 import logging
+import urlparse
+
 import argparse
+import yaml
+
 import shared_utils
-import json
-import urlparse
 
 logger = logging.getLogger('create_kibana_dashboards')
 logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('/var/log/{}.log'.format('create_kibana_dashboards'))
+file_handler = logging.FileHandler('./{}.log'.format('create_kibana_dashboards'))
 file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
 logger.addHandler(file_handler)
 
 _installers = {'fuel', 'apex', 'compass', 'joid'}
 
-# see class VisualizationState for details on format
-_testcases = [
-    ('functest', 'tempest_smoke_serial',
-     [
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.duration"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "tempest_smoke_serial duration",
-                 "test_family": "VIM"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.tests"
-                     }
-                 },
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.failures"
-                     }
-                 }
-             ],
-             "type": "histogram",
-             "metadata": {
-                 "label": "tempest_smoke_serial nr of tests/failures",
-                 "test_family": "VIM"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.success_percentage"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "tempest_smoke_serial success percentage",
-                 "test_family": "VIM"
-             }
-         }
-     ]
-     ),
-
-    ('functest', 'rally_sanity',
-     [
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.duration"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "rally_sanity duration",
-                 "test_family": "VIM"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.tests"
-                     }
-                 }
-             ],
-             "type": "histogram",
-             "metadata": {
-                 "label": "rally_sanity nr of tests",
-                 "test_family": "VIM"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.success_percentage"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "rally_sanity success percentage",
-                 "test_family": "VIM"
-             }
-         }
-     ]
-     ),
-
-    ('functest', 'vping_ssh',
-     [
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.duration"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "vPing duration",
-                 "test_family": "VIM"
-             }
-         }
-     ]
-     ),
-
-    ('functest', 'vping_userdata',
-     [
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.duration"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "vPing_userdata duration",
-                 "test_family": "VIM"
-             }
-         }
-     ]
-     ),
-
-    ('functest', 'odl',
-     [
-         {
-             "metrics": [
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.tests"
-                     }
-                 },
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.failures"
-                     }
-                 }
-             ],
-             "type": "histogram",
-             "metadata": {
-                 "label": "ODL nr of tests/failures",
-                 "test_family": "Controller"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.success_percentage"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "ODL success percentage",
-                 "test_family": "Controller"
-             }
-         }
-     ]
-     ),
-
-    ('functest', 'onos',
-     [
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.FUNCvirNet.duration"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "ONOS FUNCvirNet duration",
-                 "test_family": "Controller"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.FUNCvirNet.tests"
-                     }
-                 },
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.FUNCvirNet.failures"
-                     }
-                 }
-             ],
-             "type": "histogram",
-             "metadata": {
-                 "label": "ONOS FUNCvirNet nr of tests/failures",
-                 "test_family": "Controller"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.FUNCvirNetL3.duration"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "ONOS FUNCvirNetL3 duration",
-                 "test_family": "Controller"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.FUNCvirNetL3.tests"
-                     }
-                 },
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.FUNCvirNetL3.failures"
-                     }
-                 }
-             ],
-             "type": "histogram",
-             "metadata": {
-                 "label": "ONOS FUNCvirNetL3 nr of tests/failures",
-                 "test_family": "Controller"
-             }
-         }
-     ]
-     ),
-
-    ('functest', 'vims',
-     [
-         {
-             "metrics": [
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.sig_test.tests"
-                     }
-                 },
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.sig_test.failures"
-                     }
-                 },
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.sig_test.passed"
-                     }
-                 },
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.sig_test.skipped"
-                     }
-                 }
-             ],
-             "type": "histogram",
-             "metadata": {
-                 "label": "vIMS nr of tests/failures/passed/skipped",
-                 "test_family": "Features"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.vIMS.duration"
-                     }
-                 },
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.orchestrator.duration"
-                     }
-                 },
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.sig_test.duration"
-                     }
-                 }
-             ],
-             "type": "histogram",
-             "metadata": {
-                 "label": "vIMS/ochestrator/test duration",
-                 "test_family": "Features"
-             }
-         }
-     ]
-     ),
-
-    ('promise', 'promise',
-     [
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.duration"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "promise duration",
-                 "test_family": "Features"
-             }
-         },
-
-         {
-             "metrics": [
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.tests"
-                     }
-                 },
-                 {
-                     "type": "sum",
-                     "params": {
-                         "field": "details.failures"
-                     }
-                 }
-             ],
-             "type": "histogram",
-             "metadata": {
-                 "label": "promise nr of tests/failures",
-                 "test_family": "Features"
-             }
-         }
-     ]
-     ),
-
-    ('doctor', 'doctor-notification',
-     [
-         {
-             "metrics": [
-                 {
-                     "type": "avg",
-                     "params": {
-                         "field": "details.duration"
-                     }
-                 }
-             ],
-             "type": "line",
-             "metadata": {
-                 "label": "doctor-notification duration",
-                 "test_family": "Features"
-             }
-         }
-     ]
-     )
-]
-
 
 class KibanaDashboard(dict):
-    def __init__(self, project_name, case_name, installer, pod, scenarios, visualization_detail):
+    def __init__(self, project_name, case_name, family, installer, pod, scenarios, visualization):
         super(KibanaDashboard, self).__init__()
         self.project_name = project_name
         self.case_name = case_name
+        self.family = family
         self.installer = installer
         self.pod = pod
         self.scenarios = scenarios
-        self.visualization_detail = visualization_detail
+        self.visualization = visualization
         self._visualization_title = None
         self._kibana_visualizations = []
         self._kibana_dashboard = None
@@ -439,7 +40,7 @@ class KibanaDashboard(dict):
                                                                    self.installer,
                                                                    self.pod,
                                                                    scenario,
-                                                                   self.visualization_detail))
+                                                                   self.visualization))
 
         self._visualization_title = self._kibana_visualizations[0].vis_state_title
 
@@ -512,7 +113,15 @@ class KibanaDashboard(dict):
             },
                 separators=(',', ':'))
         }
-        self['metadata'] = self.visualization_detail['metadata']
+
+        label = self.case_name
+        if 'label' in self.visualization:
+            label += " %s" % self.visualization.get('label')
+        label += " %s" % self.visualization.get('name')
+        self['metadata'] = {
+            "label": label,
+            "test_family": self.family
+        }
 
     def _publish(self):
         url = urlparse.urljoin(base_elastic_url, '/.kibana/dashboard/{}'.format(self.id))
@@ -546,58 +155,21 @@ class KibanaSearchSourceJSON(dict):
 
 
 class VisualizationState(dict):
-    def __init__(self, input_dict):
-        """
-        dict structure:
-            {
-            "metrics":
-                [
-                    {
-                        "type": type,           # default sum
-                        "params": {
-                            "field": field      # mandatory, no default
-                    },
-                    {metric2}
-                ],
-            "segments":
-                [
-                    {
-                        "type": type,           # default date_histogram
-                        "params": {
-                            "field": field      # default start_date
-                    },
-                    {segment2}
-                ],
-            "type": type,                       # default area
-            "mode": mode,                       # default grouped for type 'histogram', stacked for other types
-            "metadata": {
-                    "label": "tempest_smoke_serial duration",# mandatory, no default
-                    "test_family": "VIM"        # mandatory, no default
-                }
-            }
-
-        default modes:
-            type histogram: grouped
-            type area: stacked
-
-        :param input_dict:
-        :return:
-        """
+    def __init__(self, visualization):
         super(VisualizationState, self).__init__()
-        metrics = input_dict['metrics']
-        segments = [] if 'segments' not in input_dict else input_dict['segments']
-
-        graph_type = 'area' if 'type' not in input_dict else input_dict['type']
-        self['type'] = graph_type
-
-        if 'mode' not in input_dict:
-            if graph_type == 'histogram':
-                mode = 'grouped'
-            else:
-                # default
-                mode = 'stacked'
+        name = visualization.get('name')
+        fields = visualization.get('fields')
+
+        if name == 'tests_failures':
+            mode = 'grouped'
+            metric_type = 'sum'
+            self['type'] = 'histogram'
         else:
-            mode = input_dict['mode']
+            # duration or success_percentage
+            mode = 'stacked'
+            metric_type = 'avg'
+            self['type'] = 'line'
+
         self['params'] = {
             "shareYAxis": True,
             "addTooltip": True,
@@ -616,35 +188,18 @@ class VisualizationState(dict):
         self['aggs'] = []
 
         i = 1
-        for metric in metrics:
+        for field in fields:
             self['aggs'].append({
                 "id": str(i),
-                "type": 'sum' if 'type' not in metric else metric['type'],
+                "type": metric_type,
                 "schema": "metric",
                 "params": {
-                    "field": metric['params']['field']
+                    "field": field.get('field')
                 }
             })
             i += 1
 
-        if len(segments) > 0:
-            for segment in segments:
-                self['aggs'].append({
-                    "id": str(i),
-                    "type": 'date_histogram' if 'type' not in segment else segment['type'],
-                    "schema": "metric",
-                    "params": {
-                        "field": "start_date" if ('params' not in segment or 'field' not in segment['params'])
-                        else segment['params']['field'],
-                        "interval": "auto",
-                        "customInterval": "2h",
-                        "min_doc_count": 1,
-                        "extended_bounds": {}
-                    }
-                })
-                i += 1
-        else:
-            self['aggs'].append({
+        self['aggs'].append({
                 "id": str(i),
                 "type": 'date_histogram',
                 "schema": "segment",
@@ -663,7 +218,7 @@ class VisualizationState(dict):
 
 
 class KibanaVisualization(dict):
-    def __init__(self, project_name, case_name, installer, pod, scenario, detail):
+    def __init__(self, project_name, case_name, installer, pod, scenario, visualization):
         """
         We need two things
         1. filter created from
@@ -679,7 +234,7 @@ class KibanaVisualization(dict):
         :return:
         """
         super(KibanaVisualization, self).__init__()
-        vis_state = VisualizationState(detail)
+        vis_state = VisualizationState(visualization)
         self.vis_state_title = vis_state['title']
         self['title'] = '{} {} {} {} {} {}'.format(project_name,
                                                    case_name,
@@ -752,13 +307,25 @@ def construct_dashboards():
     :return: list of KibanaDashboards
     """
     kibana_dashboards = []
-    for project_name, case_name, visualization_details in _testcases:
-        for installer in _installers:
-            pods_and_scenarios = _get_pods_and_scenarios(project_name, case_name, installer)
-            for visualization_detail in visualization_details:
-                for pod, scenarios in pods_and_scenarios.iteritems():
-                    kibana_dashboards.append(KibanaDashboard(project_name, case_name, installer, pod, scenarios,
-                                                             visualization_detail))
+    with open('./testcases.yaml') as f:
+        testcases_yaml = yaml.safe_load(f)
+
+    for project, case_dicts in testcases_yaml.items():
+        for case in case_dicts:
+            case_name = case.get('name')
+            visualizations = case.get('visualizations')
+            family = case.get('test_family')
+            for installer in _installers:
+                pods_and_scenarios = _get_pods_and_scenarios(project, case_name, installer)
+                for visualization in visualizations:
+                    for pod, scenarios in pods_and_scenarios.iteritems():
+                        kibana_dashboards.append(KibanaDashboard(project,
+                                                                 case_name,
+                                                                 family,
+                                                                 installer,
+                                                                 pod,
+                                                                 scenarios,
+                                                                 visualization))
     return kibana_dashboards
 
 
@@ -821,4 +388,3 @@ if __name__ == '__main__':
 
     if generate_inputs:
         generate_js_inputs(input_file_path, kibana_url, dashboards)
-
index 2ffbc17..ded58ef 100644 (file)
@@ -1,13 +1,16 @@
 #! /usr/bin/env python
-import logging
-import argparse
-import shared_utils
+import datetime
 import json
-import urlparse
-import uuid
+import logging
 import os
 import subprocess
-import datetime
+import traceback
+import urlparse
+import uuid
+
+import argparse
+
+import shared_utils
 
 logger = logging.getLogger('mongo_to_elasticsearch')
 logger.setLevel(logging.DEBUG)
@@ -370,18 +373,21 @@ def modify_mongo_entry(testcase):
         project = testcase['project_name']
         case_name = testcase['case_name']
         logger.info("Processing mongo test case '{}'".format(case_name))
-        if project == 'functest':
-            if case_name == 'rally_sanity':
-                return modify_functest_rally(testcase)
-            elif case_name.lower() == 'odl':
-                return modify_functest_odl(testcase)
-            elif case_name.lower() == 'onos':
-                return modify_functest_onos(testcase)
-            elif case_name.lower() == 'vims':
-                return modify_functest_vims(testcase)
-            elif case_name == 'tempest_smoke_serial':
-                return modify_functest_tempest(testcase)
-        return modify_default_entry(testcase)
+        try:
+            if project == 'functest':
+                if case_name == 'rally_sanity':
+                    return modify_functest_rally(testcase)
+                elif case_name.lower() == 'odl':
+                    return modify_functest_odl(testcase)
+                elif case_name.lower() == 'onos':
+                    return modify_functest_onos(testcase)
+                elif case_name.lower() == 'vims':
+                    return modify_functest_vims(testcase)
+                elif case_name == 'tempest_smoke_serial':
+                    return modify_functest_tempest(testcase)
+            return modify_default_entry(testcase)
+        except Exception:
+            logger.error("Fail in modify testcase[%s]\nerror message: %s" % (testcase, traceback.format_exc()))
     else:
         return False
 
diff --git a/utils/test/scripts/testcases.yaml b/utils/test/scripts/testcases.yaml
new file mode 100644 (file)
index 0000000..12031ef
--- /dev/null
@@ -0,0 +1,129 @@
+functest:
+    -
+        name: tempest_smoke_serial
+        test_family: VIM
+        visualizations:
+            -
+                name: duration
+                fields:
+                    - field: details.duration
+            -
+                name: tests_failures
+                fields:
+                    - field: details.tests
+                    - field: details.failures
+            -
+                name: success_percentage
+                fields:
+                    - field: details.success_percentage
+    -
+        name: rally_sanity
+        test_family: VIM
+        visualizations:
+            -
+                name: duration
+                fields:
+                    - field: details.duration
+            -
+                name: tests_failures
+                fields:
+                    - field: details.tests
+            -
+                name: success_percentage
+                fields:
+                    - field: details.success_percentage
+    -
+        name: vping_ssh
+        test_family: VIM
+        visualizations:
+            -
+                name: duration
+                fields:
+                    - field: details.duration
+    -
+        name: vping_userdata
+        test_family: VIM
+        visualizations:
+            -
+                name: duration
+                fields:
+                    - field: details.duration
+    -
+        name: odl
+        test_family: Controller
+        visualizations:
+            -
+                name: tests_failures
+                fields:
+                    - field: details.tests
+                    - field: details.failures
+            -
+                name: success_percentage
+                fields:
+                    - field: details.success_percentage
+    -
+        name: onos
+        test_family: Controller
+        visualizations:
+            -
+                name: duration
+                label: FUNCvirNet
+                fields:
+                    - field: details.FUNCvirNet.duration
+            -
+                name: duration
+                label: FUNCvirNetL3
+                fields:
+                    - field: details.FUNCvirNetL3.duration
+            -
+                name: tests_failures
+                label: FUNCvirNet
+                fields:
+                    - field: details.FUNCvirNet.tests
+                    - field: details.FUNCvirNet.failures
+            -
+                name: tests_failures
+                label: FUNCvirNetL3
+                fields:
+                    - field: details.FUNCvirNetL3.tests
+                    - field: details.FUNCvirNetL3.failures
+    -
+        name: vims
+        test_family: Features
+        visualizations:
+            -
+                name: duration
+                fields:
+                    - field: details.vIMS.duration
+                    - field: details.orchestrator.duration
+                    - field: details.sig_test.duration
+            -
+                name: tests_failures
+                fields:
+                    - field: details.sig_test.tests
+                    - field: details.sig_test.failures
+                    - field: details.sig_test.passed
+                    - field: details.sig_test.skipped
+promise:
+    -
+        name: promise
+        test_family: Features
+        visualizations:
+            -
+                name: duration
+                fields:
+                    - field: details.duration
+            -
+                name: tests_failures
+                fields:
+                    - field: details.tests
+                    - field: details.failures
+doctor:
+    -
+        name: doctor-notification
+        test_family: Features
+        visualizations:
+            -
+                name: duration
+                fields:
+                    - field: details.duration