Merge "Add check for Requiretty add timeout"
authorAric Gardner <agardner@linuxfoundation.org>
Thu, 7 Jul 2016 13:38:26 +0000 (13:38 +0000)
committerGerrit Code Review <gerrit@172.30.200.206>
Thu, 7 Jul 2016 13:38:26 +0000 (13:38 +0000)
47 files changed:
jjb/apex/apex-upload-artifact.sh
jjb/apex/apex.yml
jjb/armband/armband-ci-jobs.yml
jjb/armband/armband-project-jobs.yml
jjb/bottlenecks/bottlenecks-project-jobs.yml
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-project-jobs.yml
jjb/cperf/cperf-ci-jobs.yml
jjb/domino/domino.yml [new file with mode: 0644]
jjb/fastpathmetrics/fastpathmetrics.yml
jjb/fuel/fuel-ci-jobs.yml
jjb/fuel/fuel-project-jobs.yml
jjb/functest/functest-ci-jobs.yml
jjb/functest/functest-loop.sh [moved from jjb/functest/functest-daily.sh with 100% similarity]
jjb/functest/set-functest-env.sh
jjb/joid/joid-ci-jobs.yml
jjb/moon/moon-verify.sh [new file with mode: 0755]
jjb/moon/moon.yml [new file with mode: 0644]
jjb/opnfv/artifact-cleanup.yml [new file with mode: 0644]
jjb/opnfv/installer-params.yml
jjb/opnfv/slave-params.yml
jjb/opnfv/test-sign.yml [new file with mode: 0644]
jjb/sandbox/sandbox-verify-jobs.yml [new file with mode: 0644]
jjb/vswitchperf/vswitchperf.yml
jjb/yardstick/yardstick-ci-jobs.yml
utils/gpg_import_key.sh [new file with mode: 0644]
utils/push-test-logs.sh
utils/retention_script.sh [new file with mode: 0644]
utils/test-sign-artifact.sh [new file with mode: 0755]
utils/test/reporting/functest/reporting-status.py
utils/test/reporting/functest/reporting-tempest.py
utils/test/reporting/functest/reporting-vims.py
utils/test/reporting/functest/reportingConf.py
utils/test/reporting/functest/reportingUtils.py
utils/test/reporting/functest/template/index-status-tmpl.html
utils/test/reporting/functest/testCase.py
utils/test/result_collection_api/docker/Dockerfile [new file with mode: 0644]
utils/test/result_collection_api/docker/prepare-env.sh [new file with mode: 0755]
utils/test/result_collection_api/docker/start-server.sh [new file with mode: 0755]
utils/test/result_collection_api/etc/config.ini
utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
utils/test/result_collection_api/opnfv_testapi/resources/result_handlers.py
utils/test/result_collection_api/opnfv_testapi/resources/result_models.py
utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
utils/test/result_collection_api/opnfv_testapi/tests/unit/test_project.py
utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py

index 0598f56..d45c7c0 100755 (executable)
@@ -11,6 +11,38 @@ echo
 # source the opnfv.properties to get ARTIFACT_VERSION
 source $WORKSPACE/opnfv.properties
 
+# clone releng repository
+echo "Cloning releng repository..."
+[ -d releng ] && rm -rf releng
+git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null
+# this is where we import the signing key
+if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then 
+  source $WORKSPACE/releng/utils/gpg_import_key.sh
+fi
+
+signrpm () {
+for artifact in $RPM_LIST $SRPM_LIST; do
+  echo "Signing artifact: ${artifact}"
+  gpg2 -vvv --batch --yes --no-tty \
+    --default-key opnfv-helpdesk@rt.linuxfoundation.org \
+    --passphrase besteffort \
+    --detach-sig $artifact
+    gsutil cp "$artifact".sig gs://$GS_URL/$(basename "$artifact".sig)
+    echo "Upload complete for ${artifact} signature"
+done
+}
+
+signiso () {
+time gpg2 -vvv --batch --yes --no-tty \
+  --default-key opnfv-helpdesk@rt.linuxfoundation.org  \
+  --passphrase notreallysecure \
+  --detach-sig $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso
+
+gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso.sig gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso.sig 
+echo "ISO signature Upload Complete!"
+}
+
+uploadiso () {
 # upload artifact and additional files to google storage
 gsutil cp $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso > gsutil.iso.log
 echo "ISO Upload Complete!"
@@ -26,7 +58,10 @@ VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
 for pkg in common undercloud opendaylight-sfc onos; do
     SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
 done
+}
 
+uploadrpm () {
+#This is where we upload the rpms
 for artifact in $RPM_LIST $SRPM_LIST; do
   echo "Uploading artifact: ${artifact}"
   gsutil cp $artifact gs://$GS_URL/$(basename $artifact) > gsutil.iso.log
@@ -34,6 +69,18 @@ for artifact in $RPM_LIST $SRPM_LIST; do
 done
 gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.properties > gsutil.properties.log
 gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log
+}
+
+if gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
+  echo "Signing Key available"
+  signiso
+  uploadiso
+  signrpm
+  uploadrpm
+else
+  uploadiso
+  uploadrpm
+fi
 
 echo
 echo "--------------------------------------------------------"
index 6c14a81..3ba8842 100644 (file)
         - throttle:
             max-per-node: 1
             max-total: 10
+            option: 'project'
 
     builders:
         - 'apex-unit-test'
             git-revision: false
             block: true
             same-node: true
-#        - trigger-builds:
-#          - project: 'functest-apex-{verify-slave}-suite-{stream1}'
-#            predefined-parameters: |
-#              DEPLOY_SCENARIO=os-nosdn-nofeature-ha
-#              FUNCTEST_SUITE_NAME=vping_userdata
-#            block: true
-#            same-node: true
+        - trigger-builds:
+          - project: 'functest-apex-{verify-slave}-suite-{stream1}'
+            predefined-parameters: |
+              DEPLOY_SCENARIO=os-nosdn-nofeature-ha
+              FUNCTEST_SUITE_NAME=healthcheck
+            block: true
+            same-node: true
         - trigger-builds:
           - project: 'apex-deploy-virtual-os-odl_l2-nofeature-ha-{stream1}'
             predefined-parameters: |
             git-revision: false
             block: true
             same-node: true
+        - trigger-builds:
+          - project: 'functest-apex-{verify-slave}-suite-{stream1}'
+            predefined-parameters: |
+              DEPLOY_SCENARIO=os-odl_l2-nofeature-ha
+              FUNCTEST_SUITE_NAME=healthcheck
+            block: true
+            same-node: true
         - 'apex-workspace-cleanup'
 
 - job-template:
         - throttle:
             max-per-node: 1
             max-total: 10
+            option: 'project'
 
     builders:
         - 'apex-build'
         - throttle:
             max-per-node: 1
             max-total: 10
+            option: 'project'
 
     builders:
         - 'apex-deploy'
                 build-step-failure-threshold: 'never'
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-onos-nofeature-ha-{stream1}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream1}/build
+              OPNFV_CLEAN=yes
+            git-revision: true
+            same-node: true
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-onos-nofeature-ha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-{slave}-daily-{stream1}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-onos-nofeature-ha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
 
 - job-template:
     name: 'apex-gs-clean-{stream}'
index 7d39241..ba02af8 100644 (file)
@@ -23,6 +23,8 @@
     pod:
         - arm-pod1:
             <<: *brahmaputra
+        - arm-pod2:
+            <<: *brahmaputra
 #--------------------------------
 #        master
 #--------------------------------
 #--------------------------------
     scenario:
         # HA scenarios
+        - 'os-nosdn-nofeature-ha':
+            auto-trigger-name: 'daily-trigger-disabled'
         - 'os-odl_l2-nofeature-ha':
             auto-trigger-name: 'armband-{scenario}-{pod}-{stream}-trigger'
+        - 'os-odl_l3-nofeature-ha':
+            auto-trigger-name: 'daily-trigger-disabled'
+        - 'os-odl_l2-bgpvpn-ha':
+            auto-trigger-name: 'daily-trigger-disabled'
 
         # NOHA scenarios
         - 'os-odl_l2-nofeature-noha':
@@ -58,6 +66,7 @@
             enabled: true
             max-total: 1
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
             enabled: true
             max-total: 1
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
     name: 'armband-os-odl_l2-nofeature-ha-arm-pod1-brahmaputra-trigger'
     triggers:
         - timed: '0 18 * * *'
+#---------------------------------------------------------------
+# Enea Armband POD 2 Triggers running against brahmaputra branch
+#---------------------------------------------------------------
+- trigger:
+    name: 'armband-os-odl_l2-nofeature-ha-arm-pod2-brahmaputra-trigger'
+    triggers:
+        - timed: ''
index aa089e0..732a9ea 100644 (file)
@@ -71,6 +71,7 @@
             enabled: true
             max-total: 1
             max-per-node: 1
+            option: 'project'
 
     parameters:
         - project-parameter:
index a28e2a4..ea000d8 100644 (file)
             enabled: true
             max-total: 1
             max-per-node: 1
+            option: 'project'
 
     parameters:
         - project-parameter:
index 2ef25a7..52d6785 100644 (file)
@@ -77,6 +77,7 @@
         - throttle:
             enabled: true
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
         - '{auto-trigger-name}'
 
     builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
         - trigger-builds:
             - project: 'compass-deploy-{pod}-daily-{stream}'
               current-parameters: true
         - throttle:
             enabled: true
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
 
 
     builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
         - shell:
             !include-raw-escape: ./compass-download-artifact.sh
         - shell:
         - choice:
             name: COMPASS_OPENSTACK_VERSION
             choices:
-                - 'liberty'
                 - 'mitaka'
+                - 'liberty'
 
 ########################
 # trigger macros
index cb5b2e5..da28687 100644 (file)
@@ -43,6 +43,7 @@
         - throttle:
             enabled: true
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
             - project: 'functest-{slave-label}-suite-{stream}'
               current-parameters: true
               predefined-parameters:
-                FUNCTEST_SUITE_NAME=vping_userdata
+                FUNCTEST_SUITE_NAME=healthcheck
               same-node: true
               block: true
               block-thresholds:
         - throttle:
             enabled: true
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
             enabled: true
             max-total: 1
             max-per-node: 1
+            option: 'project'
 
     parameters:
         - project-parameter:
             enabled: true
             max-total: 1
             max-per-node: 1
+            option: 'project'
 
     parameters:
         - project-parameter:
         - choice:
             name: COMPASS_OPENSTACK_VERSION
             choices:
-                - 'liberty'
                 - 'mitaka'
+                - 'liberty'
         - choice:
             name: COMPASS_OS_VERSION
             choices:
index f6a8aca..d6c8601 100644 (file)
@@ -45,6 +45,7 @@
         - throttle:
             enabled: true
             max-per-node: 1
+            option: 'project'
 
     wrappers:
         - build-name:
diff --git a/jjb/domino/domino.yml b/jjb/domino/domino.yml
new file mode 100644 (file)
index 0000000..29e171b
--- /dev/null
@@ -0,0 +1,55 @@
+- project:
+    name: domino
+
+    project: '{name}'
+
+    jobs:
+        - 'domino-verify-{stream}'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+
+- job-template:
+    name: 'domino-verify-{stream}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+        - gerrit-parameter:
+            branch: '{branch}'
+        - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+        - gerrit-trigger-scm:
+            credentials-id: '{ssh-credentials}'
+            refspec: '$GERRIT_REFSPEC'
+            choosing-strategy: 'gerrit'
+
+    triggers:
+        - gerrit:
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - draft-published-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'recheck'
+                - comment-added-contains-event:
+                    comment-contains-value: 'reverify'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+                forbidden-file-paths:
+                  - compare-type: ANT
+                    pattern: 'docs/**|.gitignore'
+
+    builders:
+        - shell: |
+            #!/bin/bash
+            ./tests/run.sh
index db06afc..ad1b601 100644 (file)
@@ -83,6 +83,7 @@
             enabled: true
             max-total: 3
             max-per-node: 2
+            option: 'project'
 
     parameters:
         - project-parameter:
             enabled: true
             max-total: 3
             max-per-node: 2
+            option: 'project'
 
     parameters:
         - project-parameter:
index 8c7e72f..de7ca6a 100644 (file)
@@ -86,6 +86,7 @@
             enabled: true
             max-total: 4
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
             gs-pathname: '{gs-pathname}'
 
     builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
         - trigger-builds:
             - project: 'fuel-deploy-{pod}-daily-{stream}'
               current-parameters: false
             enabled: true
             max-total: 4
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
             name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
 
     builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
         - shell:
             !include-raw-escape: ./fuel-download-artifact.sh
         - shell:
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-zte-pod1-daily-master-trigger'
     triggers:
-        - timed: '0 12 * * *'
+        - timed: '15 9 * * *'
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-ha-zte-pod1-daily-master-trigger'
     triggers:
index 4e59c01..67343fb 100644 (file)
@@ -38,6 +38,7 @@
             enabled: true
             max-total: 1
             max-per-node: 1
+            option: 'project'
 
     parameters:
         - project-parameter:
             enabled: true
             max-total: 2
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
         - throttle:
             enabled: true
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
index 27e1ee3..f9cf011 100644 (file)
 
     testsuite:
         - 'daily'
+        - 'weekly'
         - 'suite'
 
     jobs:
         - throttle:
             enabled: true
             max-per-node: 1
+            option: 'project'
 
     wrappers:
         - build-name:
             branch: '{branch}'
 
     builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
         - 'functest-{testsuite}-builder'
 
 ########################
         - string:
             name: FUNCTEST_SUITE_NAME
             default: 'daily'
-            description: "Suite name to run"
+            description: "Daily suite name to run"
+- parameter:
+    name: functest-weekly-parameter
+    parameters:
+        - string:
+            name: FUNCTEST_SUITE_NAME
+            default: 'weekly'
+            description: "Weekly suite name to run"
 - parameter:
     name: functest-suite-parameter
     parameters:
             name: FUNCTEST_SUITE_NAME
             choices:
                 - 'healthcheck'
-                - 'tempest'
-                - 'rally'
+                - 'vping_userdata'
+                - 'vping_ssh'
+                - 'tempest_smoke_serial'
+                - 'rally_sanity'
                 - 'odl'
                 - 'onos'
-                - 'ovno'
                 - 'promise'
                 - 'doctor'
+                - 'bgpvpn'
+                - 'security_scan'
+                - 'tempest_full_parallel'
+                - 'rally_full'
                 - 'vims'
-                - 'vping_userdata'
-                - 'vping_ssh'
 - parameter:
     name: functest-parameter
     parameters:
         - 'functest-daily'
         - 'functest-store-results'
 
+- builder:
+    name: functest-weekly-builder
+    builders:
+        - 'functest-cleanup'
+        - 'set-functest-env'
+        - 'functest-weekly'
+        - 'functest-store-results'
+
 - builder:
     name: functest-suite-builder
     builders:
         - 'functest-suite'
 
 - builder:
-    name: functest-suite
+    name: functest-daily
     builders:
         - shell:
-            !include-raw: ./functest-suite.sh
+            !include-raw: ./functest-loop.sh
 
 - builder:
-    name: functest-daily
+    name: functest-weekly
     builders:
         - shell:
-            !include-raw: ./functest-daily.sh
+            !include-raw: ./functest-loop.sh
+
+- builder:
+    name: functest-suite
+    builders:
+        - shell:
+            !include-raw: ./functest-suite.sh
 
 - builder:
     name: set-functest-env
index 0b8747a..d2e232d 100755 (executable)
@@ -27,6 +27,7 @@ if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
     if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
         sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
     fi
+
 elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
     # If production lab then creds may be retrieved dynamically
     # creds are on the jumphost, always in the same folder
@@ -34,6 +35,12 @@ elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
     # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
     # replace the default one by the customized one provided by jenkins config
 fi
+
+# Set iptables rule to allow forwarding return traffic for container
+if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FORWARD | awk 'NR==3' | grep RETURN 2> ${redirect}; then
+    sudo iptables -I FORWARD -j RETURN
+fi
+
 echo "Functest: Start Docker and prepare environment"
 envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
     -e NODE_NAME=${NODE_NAME} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO} \
index f6f1229..a1e5677 100644 (file)
@@ -91,6 +91,7 @@
             enabled: true
             max-total: 4
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
             default: '{scenario}'
 
     builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
         - trigger-builds:
             - project: 'joid-deploy-{pod}-daily-{stream}'
               current-parameters: true
               same-node: true
               block: true
         - trigger-builds:
-            - project: 'functest-joid-{pod}-daily-{stream}'
+            - project: 'yardstick-joid-{pod}-daily-{stream}'
               current-parameters: false
               predefined-parameters:
                 DEPLOY_SCENARIO={scenario}
-              same-node: true
               block: true
+              same-node: true
               block-thresholds:
                 build-step-failure-threshold: 'never'
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
         - trigger-builds:
-            - project: 'yardstick-joid-{pod}-daily-{stream}'
+            - project: 'functest-joid-{pod}-daily-{stream}'
               current-parameters: false
               predefined-parameters:
                 DEPLOY_SCENARIO={scenario}
             enabled: true
             max-total: 4
             max-per-node: 1
+            option: 'project'
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
             branch: '{branch}'
 
     builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
         - 'builder-macro'
 ########################
 # builder macros
diff --git a/jjb/moon/moon-verify.sh b/jjb/moon/moon-verify.sh
new file mode 100755 (executable)
index 0000000..23bf47c
--- /dev/null
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "Hello World"
diff --git a/jjb/moon/moon.yml b/jjb/moon/moon.yml
new file mode 100644 (file)
index 0000000..0044eb9
--- /dev/null
@@ -0,0 +1,54 @@
+- project:
+    name: moon
+
+    project: '{name}'
+
+    jobs:
+        - 'moon-verify-{stream}'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+
+- job-template:
+    name: 'moon-verify-{stream}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+        - gerrit-parameter:
+            branch: '{branch}'
+        - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+        - gerrit-trigger-scm:
+            credentials-id: '{ssh-credentials}'
+            refspec: '$GERRIT_REFSPEC'
+            choosing-strategy: 'gerrit'
+
+    triggers:
+        - gerrit:
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - draft-published-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'recheck'
+                - comment-added-contains-event:
+                    comment-contains-value: 'reverify'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+                forbidden-file-paths:
+                  - compare-type: ANT
+                    pattern: 'docs/**|.gitignore'
+
+    builders:
+        - shell:
+            !include-raw: ./moon-verify.sh
diff --git a/jjb/opnfv/artifact-cleanup.yml b/jjb/opnfv/artifact-cleanup.yml
new file mode 100644 (file)
index 0000000..b0f8191
--- /dev/null
@@ -0,0 +1,42 @@
+- project:
+    name: artifact-cleanup
+
+    project: 'releng'
+
+    jobs:
+        - 'artifact-cleanup-daily-{stream}'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+
+
+- job-template:
+    name: 'artifact-cleanup-daily-{stream}'
+
+    # Job template for daily builders
+    #
+    # Required Variables:
+    #     stream:    branch with - in place of / (eg. stable)
+    #     branch:    branch (eg. stable)
+    node: master
+
+    disabled: false
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+
+    scm:
+        - git-scm:
+            credentials-id: '{ssh-credentials}'
+            refspec: ''
+            branch: '{branch}'
+
+    triggers:
+        - timed: 'H H * * *'
+
+    builders:
+        - shell: |
+            $WORKSPACE/utils/retention_script.sh
index f95d79f..60fee92 100644 (file)
             name: CPU_ARCHITECTURE
             default: 'amd64'
             description: "CPU Architecture to use for Ubuntu distro "
+
+- parameter:
+    name: 'sandbox-defaults'
+    parameters:
+        - string:
+            name: INSTALLER_IP
+            default: '10.20.0.2'
+            description: 'IP of the installer'
+        - string:
+            name: INSTALLER_TYPE
+            default: sandbox
+            description: 'Installer used for deploying OPNFV on this POD'
+        - string:
+            name: EXTERNAL_NETWORK
+            default: 'admin_floating_net'
+            description: 'external network for test'
index da0808b..e5313c8 100644 (file)
             name: LAB_CONFIG_URL
             default: ssh://git@git.enea.com/pharos/lab-config
             description: 'Base URI to the configuration directory'
+
+- parameter:
+    name: 'arm-pod2-defaults'
+    parameters:
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - arm-pod2
+            default-slaves:
+                - arm-pod2
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: DEFAULT_BRIDGE
+            default: 'admin_br0,public_br0'
+            description: 'The bridge to use for Fuel PXE booting. It can be a comma separated list of bridges, in which case the first is the PXE boot bridge, and all subsequent interfaces that will be added to the VM. If left empty, most deploy scripts will default to pxebr.'
+        - string:
+            name: DEPLOY_TIMEOUT
+            default: '360'
+            description: 'Deployment timeout in minutes'
+        - string:
+            name: LAB_CONFIG_URL
+            default: ssh://git@git.enea.com/pharos/lab-config
+            description: 'Base URI to the configuration directory'
+
 - parameter:
     name: 'opnfv-build-centos-defaults'
     parameters:
diff --git a/jjb/opnfv/test-sign.yml b/jjb/opnfv/test-sign.yml
new file mode 100644 (file)
index 0000000..b27d757
--- /dev/null
@@ -0,0 +1,42 @@
+- project:
+    name: test-sign
+
+    project: 'releng'
+
+    jobs:
+        - 'test-sign-daily-{stream}'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+
+
+- job-template:
+    name: 'test-sign-daily-{stream}'
+
+    # Job template for daily builders
+    #
+    # Required Variables:
+    #     stream:    branch with - in place of / (eg. stable)
+    #     branch:    branch (eg. stable)
+    node: master
+
+    disabled: false
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+
+    scm:
+        - git-scm:
+            credentials-id: '{ssh-credentials}'
+            refspec: ''
+            branch: '{branch}'
+
+    triggers:
+        - timed: 'H H * * *'
+
+    builders:
+        - shell: |
+            $WORKSPACE/utils/test-sign-artifact.sh
diff --git a/jjb/sandbox/sandbox-verify-jobs.yml b/jjb/sandbox/sandbox-verify-jobs.yml
new file mode 100644 (file)
index 0000000..b1e8e93
--- /dev/null
@@ -0,0 +1,280 @@
+- project:
+    name: 'sandbox-verify-jobs'
+
+    project: 'sandbox'
+
+    installer: 'sandbox'
+
+    stream:
+        - master:
+            branch: '{stream}'
+            gs-pathname: ''
+
+# what are the verification activities we do for this installer
+    activity:
+        - 'basic'
+        - 'build'
+        - 'deploy'
+        - 'test'
+
+    jobs:
+        - 'sandbox-verify-{stream}'
+        - 'sandbox-verify-{activity}-{stream}'
+
+- job-template:
+    name: 'sandbox-verify-{stream}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+        - gerrit-parameter:
+            branch: '{branch}'
+        - 'opnfv-build-ubuntu-defaults'
+
+    wrappers:
+        - ssh-agent-credentials:
+            users:
+                - '{ssh-credentials}'
+        - timeout:
+            timeout: 360
+            fail: true
+
+    triggers:
+        - gerrit:
+            trigger-on:
+                - patchset-created-event:
+                    exclude-drafts: 'false'
+                    exclude-trivial-rebase: 'false'
+                    exclude-no-code-change: 'false'
+                - draft-published-event
+                - comment-added-contains-event:
+                    comment-contains-value: 'recheck'
+                - comment-added-contains-event:
+                    comment-contains-value: 'reverify'
+            projects:
+              - project-compare-type: 'ANT'
+                project-pattern: '{project}'
+                branches:
+                  - branch-compare-type: 'ANT'
+                    branch-pattern: '**/{branch}'
+                forbidden-file-paths:
+                  - compare-type: ANT
+                    pattern: 'docs/**|.gitignore'
+            readable-message: true
+
+    builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
+        - '{project}-verify-builder'
+        - trigger-builds:
+            - project: 'sandbox-verify-basic-{stream}'
+              block: true
+        - trigger-builds:
+            - project: 'sandbox-verify-build-{stream}'
+              block: true
+        - trigger-builds:
+            - project: 'sandbox-verify-deploy-{stream}'
+              block: true
+        - trigger-builds:
+            - project: 'sandbox-verify-test-{stream}'
+              block: true
+
+- job-template:
+    name: 'sandbox-verify-{activity}-{stream}'
+
+    scm:
+        - gerrit-trigger-scm:
+            credentials-id: '{ssh-credentials}'
+            refspec: '$GERRIT_REFSPEC'
+            choosing-strategy: 'gerrit'
+
+    wrappers:
+        - ssh-agent-credentials:
+            users:
+                - '{ssh-credentials}'
+        - timeout:
+            timeout: 360
+            fail: true
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+        - gerrit-parameter:
+            branch: '{branch}'
+        - '{installer}-defaults'
+        - '{project}-verify-{activity}-parameter'
+
+    builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
+        - '{project}-verify-{activity}-builder'
+
+#####################################
+# parameter builders
+#####################################
+- parameter:
+    name: 'sandbox-verify-basic-parameter'
+    parameters:
+        - 'opnfv-build-ubuntu-defaults'
+
+- parameter:
+    name: 'sandbox-verify-build-parameter'
+    parameters:
+        - 'opnfv-build-ubuntu-defaults'
+
+- parameter:
+    name: 'sandbox-verify-deploy-parameter'
+    parameters:
+        - 'opnfv-build-centos-defaults'
+
+- parameter:
+    name: 'sandbox-verify-test-parameter'
+    parameters:
+        - 'opnfv-build-centos-defaults'
+#####################################
+# builder builders
+#####################################
+- builder:
+    name: 'sandbox-verify-builder'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            # this is the builder for the parent/upstream job which we do nothing
+            cd $WORKSPACE
+            echo
+            echo "Commit Message is"
+            echo "-------------------------------------"
+            echo $GERRIT_CHANGE_COMMIT_MESSAGE
+            echo "-------------------------------------"
+            echo
+            echo "Repo contents"
+            echo "-------------------------------------"
+            ls -al
+            echo "-------------------------------------"
+            echo
+            echo "Changed files are"
+            echo "-------------------------------------"
+            git diff origin/master --name-only
+            echo "-------------------------------------"
+            echo
+            echo "Change introduced"
+            echo "-------------------------------------"
+            git diff origin/master
+            echo "-------------------------------------"
+
+- builder:
+    name: 'sandbox-verify-basic-builder'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            # this is where we check the commit message, unit test, etc.
+            cd $WORKSPACE
+            echo
+            echo "Commit Message is"
+            echo "-------------------------------------"
+            echo $GERRIT_CHANGE_COMMIT_MESSAGE
+            echo "-------------------------------------"
+            echo
+            echo "Repo contents"
+            echo "-------------------------------------"
+            ls -al
+            echo "-------------------------------------"
+            echo
+            echo "Changed files are"
+            echo "-------------------------------------"
+            git diff origin/master --name-only
+            echo "-------------------------------------"
+            echo
+            echo "Change introduced"
+            echo "-------------------------------------"
+            git diff origin/master
+            echo "-------------------------------------"
+
+- builder:
+    name: 'sandbox-verify-build-builder'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            # this is where we do the build
+            cd $WORKSPACE
+            echo
+            echo "Commit Message is"
+            echo "-------------------------------------"
+            echo $GERRIT_CHANGE_COMMIT_MESSAGE
+            echo "-------------------------------------"
+            echo
+            echo "Repo contents"
+            echo "-------------------------------------"
+            ls -al
+            echo "-------------------------------------"
+            echo
+            echo "Changed files are"
+            echo "-------------------------------------"
+            git diff origin/master --name-only
+            echo "-------------------------------------"
+            echo
+            echo "Change introduced"
+            echo "-------------------------------------"
+            git diff origin/master
+            echo "-------------------------------------"
+
+- builder:
+    name: 'sandbox-verify-deploy-builder'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            # this is where we start the virtual deployment
+            cd $WORKSPACE
+            echo
+            echo "Commit Message is"
+            echo "-------------------------------------"
+            echo $GERRIT_CHANGE_COMMIT_MESSAGE
+            echo "-------------------------------------"
+            echo
+            echo "Repo contents"
+            echo "-------------------------------------"
+            ls -al
+            echo "-------------------------------------"
+            echo
+            echo "Changed files are"
+            echo "-------------------------------------"
+            git diff origin/master --name-only
+            echo "-------------------------------------"
+            echo
+            echo "Change introduced"
+            echo "-------------------------------------"
+            git diff origin/master
+            echo "-------------------------------------"
+
+- builder:
+    name: 'sandbox-verify-test-builder'
+    builders:
+        - shell: |
+            #!/bin/bash
+
+            # this is where we do functest smoketest
+            cd $WORKSPACE
+            echo
+            echo "Commit Message is"
+            echo "-------------------------------------"
+            echo $GERRIT_CHANGE_COMMIT_MESSAGE
+            echo "-------------------------------------"
+            echo
+            echo "Repo contents"
+            echo "-------------------------------------"
+            ls -al
+            echo "-------------------------------------"
+            echo
+            echo "Changed files are"
+            echo "-------------------------------------"
+            git diff origin/master --name-only
+            echo "-------------------------------------"
+            echo
+            echo "Change introduced"
+            echo "-------------------------------------"
+            git diff origin/master
+            echo "-------------------------------------"
index 0cf7f97..bf2fe8f 100644 (file)
@@ -59,6 +59,7 @@
             enabled: true
             max-total: 3
             max-per-node: 2
+            option: 'project'
 
     parameters:
         - project-parameter:
             enabled: true
             max-total: 3
             max-per-node: 2
+            option: 'project'
 
     parameters:
         - project-parameter:
index 48c335a..8b8ced1 100644 (file)
         - throttle:
             enabled: true
             max-per-node: 1
+            option: 'project'
 
     wrappers:
         - build-name:
             branch: '{branch}'
 
     builders:
+        - description-setter:
+            description: "POD: $NODE_NAME"
         - 'yardstick-cleanup'
         #- 'yardstick-fetch-os-creds'
         - 'yardstick-{testsuite}'
diff --git a/utils/gpg_import_key.sh b/utils/gpg_import_key.sh
new file mode 100644 (file)
index 0000000..3afeda8
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/bash -e
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 NEC and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+function isinstalled {
+if rpm -q "$@" >/dev/null 2>&1; then
+  true
+    else
+      echo installing "$1"
+      sudo yum install "$1"
+  false
+fi
+}
+
+if ! isinstalled gnupg2; then
+  echo "error with install"
+  exit 1
+fi
+
+if ! which gsutil;
+  then echo "error gsutil not installed";
+  exit 1
+fi
+
+if gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
+  echo "Key Already available"
+else
+  if [ -z "$NODE_NAME" ];
+    then echo "Cannot find node name"
+      exit 1
+    else echo "Importing key for '$NODE_NAME'";
+     gsutil cp gs://opnfv-signing-keys/"$NODE_NAME"-subkey .
+     gpg2 --import "$NODE_NAME"-subkey
+     rm -f "$NODE_NAME"-subkey
+   fi
+fi
index 7456450..964b419 100644 (file)
@@ -17,13 +17,13 @@ res_build_date=${1:-$(date -u +"%Y-%m-%d_%H-%M-%S")}
 project=$PROJECT
 branch=${GIT_BRANCH##*/}
 testbed=$NODE_NAME
-dir_result="${HOME}/opnfv/$project/results"
+dir_result="${HOME}/opnfv/$project/results/${branch}"
 # src: https://wiki.opnfv.org/display/INF/Hardware+Infrastructure
 # + intel-pod3 (vsperf)
 node_list=(\
 'lf-pod1' 'lf-pod2' 'intel-pod2' 'intel-pod3' \
 'intel-pod5' 'intel-pod6' 'intel-pod7' 'intel-pod8' \
-'ericsson-pod2' 'huawei-pod1')
+'ericsson-pod2' 'huawei-pod1' 'huawei-pod2' 'huawei-virtual1' 'huawei-virtual2' 'huawei-virtual3' 'huawei-virtual4')
 
 if [[ ! " ${node_list[@]} " =~ " ${testbed} " ]]; then
     echo "This is not a CI POD. Aborting pushing the logs to artifacts."
diff --git a/utils/retention_script.sh b/utils/retention_script.sh
new file mode 100644 (file)
index 0000000..9a8986c
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2016 The Linux Foundation and others
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##############################################################################
+
+#These are the only projects that generate artifacts
+for x in armband ovsnfv fuel apex compass4nfv
+do
+
+  echo "Looking at artifacts for project $x"
+
+  while IFS= read -r artifact; do
+
+    artifact_date="$(gsutil ls -L $artifact | grep "Creation time:" | awk '{print $4,$5,$6}')"
+    age=$(($(date +%s)-$(date -d"$artifact_date" +%s)))
+    daysold=$(($age/86400))
+
+    if [[ "$daysold" -gt "10" ]]; then
+      echo "$daysold Days old Deleting: $(basename $artifact)"
+    else
+      echo "$daysold Days old Retaining: $(basename $artifact)"
+    fi
+
+  done < <(gsutil ls gs://artifacts.opnfv.org/"$x" |grep -v "/$")
+done
diff --git a/utils/test-sign-artifact.sh b/utils/test-sign-artifact.sh
new file mode 100755 (executable)
index 0000000..f09b7f4
--- /dev/null
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+export PATH=$PATH:/usr/local/bin/
+
+# clone releng repository
+echo "Cloning releng repository..."
+[ -d releng ] && rm -rf releng
+git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null
+# this is where we import the signing key
+if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then 
+  source $WORKSPACE/releng/utils/gpg_import_key.sh
+fi
+
+artifact="foo"
+echo foo > foo
+
+testsign () {
+  echo "Signing artifact: ${artifact}"
+  gpg2 -vvv --batch \
+    --default-key opnfv-helpdesk@rt.linuxfoundation.org  \
+    --passphrase besteffort \
+    --detach-sig $artifact
+}
+
+testsign
+
index 2ce5efb..622c375 100644 (file)
@@ -8,7 +8,6 @@
 #
 import datetime
 import jinja2
-import os
 import requests
 import sys
 import time
@@ -19,7 +18,11 @@ import reportingConf as conf
 import testCase as tc
 import scenarioResult as sr
 
-testCases4Validation = []
+# Logger
+logger = utils.getLogger("Status")
+
+# Initialization
+testValid = []
 otherTestCases = []
 
 # init just tempest to get the list of scenarios
@@ -28,16 +31,16 @@ tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)
 
 # Retrieve the Functest configuration to detect which tests are relevant
 # according to the installer, scenario
-# cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
-cf = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
+cf = conf.TEST_CONF
 response = requests.get(cf)
+
 functest_yaml_config = yaml.load(response.text)
 
-print "****************************************"
-print "*   Generating reporting.....          *"
-print ("*   Data retention = %s days           *" % conf.PERIOD)
-print "*                                      *"
-print "****************************************"
+logger.info("*******************************************")
+logger.info("*   Generating reporting scenario status  *")
+logger.info("*   Data retention = %s days              *" % conf.PERIOD)
+logger.info("*                                         *")
+logger.info("*******************************************")
 
 # Retrieve test cases of Tier 1 (smoke)
 config_tiers = functest_yaml_config.get("tiers")
@@ -50,19 +53,22 @@ config_tiers = functest_yaml_config.get("tiers")
 for tier in config_tiers:
     if tier['order'] > 0 and tier['order'] < 3:
         for case in tier['testcases']:
-            testCases4Validation.append(tc.TestCase(case['name'],
-                                                    "functest",
-                                                    case['dependencies']))
+            if case['name'] not in conf.blacklist:
+                testValid.append(tc.TestCase(case['name'],
+                                             "functest",
+                                             case['dependencies']))
     elif tier['order'] == 3:
         for case in tier['testcases']:
-            testCases4Validation.append(tc.TestCase(case['name'],
-                                                    case['name'],
-                                                    case['dependencies']))
+            if case['name'] not in conf.blacklist:
+                testValid.append(tc.TestCase(case['name'],
+                                             case['name'],
+                                             case['dependencies']))
     elif tier['order'] > 3:
         for case in tier['testcases']:
-            otherTestCases.append(tc.TestCase(case['name'],
-                                              "functest",
-                                              case['dependencies']))
+            if case['name'] not in conf.blacklist:
+                otherTestCases.append(tc.TestCase(case['name'],
+                                                  "functest",
+                                                  case['dependencies']))
 
 # For all the versions
 for version in conf.versions:
@@ -84,27 +90,32 @@ for version in conf.versions:
             # Check if test case is runnable / installer, scenario
             # for the test case used for Scenario validation
             try:
-                print ("---------------------------------")
-                print ("installer %s, version %s, scenario %s:" %
-                       (installer, version, s))
+                logger.info("---------------------------------")
+                logger.info("installer %s, version %s, scenario %s:" %
+                            (installer, version, s))
 
                 # 1) Manage the test cases for the scenario validation
                 # concretely Tiers 0-3
-                for test_case in testCases4Validation:
+                for test_case in testValid:
                     test_case.checkRunnable(installer, s,
                                             test_case.getConstraints())
-                    print ("testcase %s is %s" % (test_case.getName(),
-                                                  test_case.isRunnable))
+                    logger.debug("testcase %s is %s" %
+                                 (test_case.getDisplayName(),
+                                  test_case.isRunnable))
                     time.sleep(1)
                     if test_case.isRunnable:
                         dbName = test_case.getDbName()
                         name = test_case.getName()
+                        displayName = test_case.getDisplayName()
                         project = test_case.getProject()
                         nb_test_runnable_for_this_scenario += 1
-                        print (" Searching results for case %s " %
-                               (dbName))
+                        logger.info(" Searching results for case %s " %
+                                    (displayName))
                         result = utils.getResult(dbName, installer, s, version)
-                        print " >>>> Test result=" + str(result)
+                        # if no result set the value to 0
+                        if result < 0:
+                            result = 0
+                        logger.info(" >>>> Test score = " + str(result))
                         test_case.setCriteria(result)
                         test_case.setIsRunnable(True)
                         testCases2BeDisplayed.append(tc.TestCase(name,
@@ -120,30 +131,35 @@ for version in conf.versions:
                 for test_case in otherTestCases:
                     test_case.checkRunnable(installer, s,
                                             test_case.getConstraints())
-                    print ("testcase %s is %s" % (test_case.getName(),
-                                                  test_case.isRunnable))
+                    logger.info("testcase %s is %s" %
+                                (test_case.getName(), test_case.isRunnable))
                     time.sleep(1)
                     if test_case.isRunnable:
                         dbName = test_case.getDbName()
                         name = test_case.getName()
+                        displayName = test_case.getDisplayName()
                         project = test_case.getProject()
-                        print (" Searching results for case %s " %
-                               (dbName))
+                        logger.info(" Searching results for case %s " %
+                                    (displayName))
                         result = utils.getResult(dbName, installer, s, version)
-                        test_case.setCriteria(result)
-                        test_case.setIsRunnable(True)
-                        testCases2BeDisplayed.append(tc.TestCase(name,
-                                                                 project,
-                                                                 "",
-                                                                 result,
-                                                                 True,
-                                                                 4))
+                        # at least 1 result for the test
+                        if result > -1:
+                            test_case.setCriteria(result)
+                            test_case.setIsRunnable(True)
+                            testCases2BeDisplayed.append(tc.TestCase(name,
+                                                                     project,
+                                                                     "",
+                                                                     result,
+                                                                     True,
+                                                                     4))
+                        else:
+                            logger.debug("No results found")
 
                     items[s] = testCases2BeDisplayed
             except:
-                print ("Error: installer %s, version %s, scenario %s" %
-                       (installer, version, s))
-                print "No data available , error %s " % (sys.exc_info()[0])
+                logger.error("Error: installer %s, version %s, scenario %s" %
+                             (installer, version, s))
+                logger.error("No data available: %s " % (sys.exc_info()[0]))
 
             # **********************************************
             # Evaluate the results for scenario validation
@@ -158,13 +174,13 @@ for version in conf.versions:
             s_score = str(scenario_score) + "/" + str(scenario_criteria)
             s_status = "KO"
             if scenario_score < scenario_criteria:
-                print (">>>> scenario not OK, score = %s/%s" %
-                       (scenario_score, scenario_criteria))
+                logger.info(">>>> scenario not OK, score = %s/%s" %
+                            (scenario_score, scenario_criteria))
                 s_status = "KO"
             else:
-                print ">>>>> scenario OK, save the information"
+                logger.info(">>>>> scenario OK, save the information")
                 s_status = "OK"
-                path_validation_file = ("./release/" + version +
+                path_validation_file = (conf.REPORTING_PATH + "/release/" + version +
                                         "/validated_scenario_history.txt")
                 with open(path_validation_file, "a") as f:
                     time_format = "%Y-%m-%d %H:%M"
@@ -173,14 +189,12 @@ for version in conf.versions:
                     f.write(info)
 
             scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score)
-            print "--------------------------"
+            logger.info("--------------------------")
 
-        templateLoader = jinja2.FileSystemLoader(os.path.dirname
-                                                 (os.path.abspath
-                                                  (__file__)))
+        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
         templateEnv = jinja2.Environment(loader=templateLoader)
 
-        TEMPLATE_FILE = "./template/index-status-tmpl.html"
+        TEMPLATE_FILE = "/template/index-status-tmpl.html"
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_stats=scenario_stats,
@@ -190,6 +204,6 @@ for version in conf.versions:
                                      period=conf.PERIOD,
                                      version=version)
 
-        with open("./release/" + version +
+        with open(conf.REPORTING_PATH + "/release/" + version +
                   "/index-status-" + installer + ".html", "wb") as fh:
             fh.write(outputText)
index a065ef4..e3f4e33 100644 (file)
@@ -1,28 +1,44 @@
 from urllib2 import Request, urlopen, URLError
 import json
 import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
 
-installers = ["apex", "compass", "fuel", "joid"]
+installers = conf.installers
 items = ["tests", "Success rate", "duration"]
 
-PERIOD = 7
-print "Generate Tempest automatic reporting"
+PERIOD = conf.PERIOD
+criteria_nb_test = 165
+criteria_duration = 1800
+criteria_success_rate = 90
+
+logger = utils.getLogger("Tempest")
+logger.info("************************************************")
+logger.info("*   Generating reporting Tempest_smoke_serial  *")
+logger.info("*   Data retention = %s days                   *" % PERIOD)
+logger.info("*                                              *")
+logger.info("************************************************")
+
+logger.info("Success criteria:")
+logger.info("nb tests executed > %s " % criteria_nb_test)
+logger.info("test duration < %s s " % criteria_duration)
+logger.info("success rate > %s " % criteria_success_rate)
+
 for installer in installers:
     # we consider the Tempest results of the last PERIOD days
-    url = "http://testresults.opnfv.org/test/api/v1/results?case=tempest_smoke_serial"
-    request = Request(url + '&period=' + str(PERIOD)
-                      + '&installer=' + installer + '&version=master')
-
+    url = conf.URL_BASE + "?case=tempest_smoke_serial"
+    request = Request(url + '&period=' + str(PERIOD) +
+                      '&installer=' + installer + '&version=master')
+    logger.info("Search tempest_smoke_serial results for installer %s"
+                % installer)
     try:
         response = urlopen(request)
         k = response.read()
         results = json.loads(k)
     except URLError, e:
-        print 'No kittez. Got an error code:', e
+        logger.error("Error code: %s" % e)
 
     test_results = results['results']
-    test_results.reverse()
 
     scenario_results = {}
     criteria = {}
@@ -48,8 +64,8 @@ for installer in installers:
             nb_tests_run = result['details']['tests']
             nb_tests_failed = result['details']['failures']
             if nb_tests_run != 0:
-                success_rate = 100*(int(nb_tests_run)
-                                    - int(nb_tests_failed))/int(nb_tests_run)
+                success_rate = 100*(int(nb_tests_run) -
+                                    int(nb_tests_failed)) / int(nb_tests_run)
             else:
                 success_rate = 0
 
@@ -63,40 +79,49 @@ for installer in installers:
             crit_time = False
 
             # Expect that at least 165 tests are run
-            if nb_tests_run >= 165:
+            if nb_tests_run >= criteria_nb_test:
                 crit_tests = True
 
             # Expect that at least 90% of success
-            if success_rate >= 90:
+            if success_rate >= criteria_success_rate:
                 crit_rate = True
 
             # Expect that the suite duration is inferior to 30m
-            if result['details']['duration'] < 1800:
+            if result['details']['duration'] < criteria_duration:
                 crit_time = True
 
             result['criteria'] = {'tests': crit_tests,
                                   'Success rate': crit_rate,
                                   'duration': crit_time}
-            # error management
+            try:
+                logger.debug("Scenario %s, Installer %s"
+                             % (s_result[1]['scenario'], installer))
+                logger.debug("Nb Test run: %s" % nb_tests_run)
+                logger.debug("Test duration: %s"
+                             % result['details']['duration'])
+                logger.debug("Success rate: %s" % success_rate)
+            except:
+                logger.error("Data format error")
+
+            # Error management
             # ****************
             try:
                 errors = result['details']['errors']
                 result['errors'] = errors.replace('{0}', '')
             except:
-                print "Error field not present (Brahamputra runs?)"
+                logger.error("Error field not present (Brahmaputra runs?)")
 
-    mypath = os.path.abspath(__file__)
-    tplLoader = jinja2.FileSystemLoader(os.path.dirname(mypath))
-    templateEnv = jinja2.Environment(loader=tplLoader)
+    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+    templateEnv = jinja2.Environment(loader=templateLoader)
 
-    TEMPLATE_FILE = "./template/index-tempest-tmpl.html"
+    TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
     template = templateEnv.get_template(TEMPLATE_FILE)
 
     outputText = template.render(scenario_results=scenario_results,
                                  items=items,
                                  installer=installer)
 
-    with open("./release/master/index-tempest-" +
+    with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
               installer + ".html", "wb") as fh:
         fh.write(outputText)
-print "Tempest automatic reporting Done"
+logger.info("Tempest automatic reporting successfully generated.")
index 4033687..d0436ed 100644 (file)
@@ -1,7 +1,11 @@
 from urllib2 import Request, urlopen, URLError
 import json
 import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
+
+logger = utils.getLogger("vIMS")
+
 
 def sig_test_format(sig_test):
     nbPassed = 0
@@ -9,7 +13,7 @@ def sig_test_format(sig_test):
     nbSkipped = 0
     for data_test in sig_test:
         if data_test['result'] == "Passed":
-            nbPassed+= 1
+            nbPassed += 1
         elif data_test['result'] == "Failed":
             nbFailures += 1
         elif data_test['result'] == "Skipped":
@@ -20,21 +24,29 @@ def sig_test_format(sig_test):
     total_sig_test_result['skipped'] = nbSkipped
     return total_sig_test_result
 
-installers = ["fuel", "compass", "joid", "apex"]
-step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("****************************************")
+logger.info("*   Generating reporting vIMS          *")
+logger.info("*   Data retention = %s days           *" % conf.PERIOD)
+logger.info("*                                      *")
+logger.info("****************************************")
 
+installers = conf.installers
+step_order = ["initialisation", "orchestrator", "vIMS", "sig_test"]
+logger.info("Start processing....")
 for installer in installers:
-    request = Request('http://testresults.opnfv.org/test/api/v1/results?case=vims&installer=' + installer)
+    logger.info("Search vIMS results for installer %s" % installer)
+    request = Request(conf.URL_BASE + '?case=vims&installer=' + installer)
 
     try:
         response = urlopen(request)
         k = response.read()
         results = json.loads(k)
     except URLError, e:
-        print 'No kittez. Got an error code:', e
+        logger.error("Error code: %s" % e)
 
     test_results = results['results']
-    test_results.reverse()
+
+    logger.debug("Results found: %s" % test_results)
 
     scenario_results = {}
     for r in test_results:
@@ -44,6 +56,7 @@ for installer in installers:
 
     for s, s_result in scenario_results.items():
         scenario_results[s] = s_result[0:5]
+        logger.debug("Search for success criteria")
         for result in scenario_results[s]:
             result["start_date"] = result["start_date"].split(".")[0]
             sig_test = result['details']['sig_test']['result']
@@ -67,17 +80,34 @@ for installer in installers:
             result['pr_step_ok'] = 0
             if nb_step != 0:
                 result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100
-
-
-    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
-    templateEnv = jinja2.Environment( loader=templateLoader )
-
-    TEMPLATE_FILE = "./template/index-vims-tmpl.html"
-    template = templateEnv.get_template( TEMPLATE_FILE )
-
-    outputText = template.render( scenario_results = scenario_results, step_order = step_order, installer = installer)
-
-    with open("./release/master/index-vims-" + installer + ".html", "wb") as fh:
+            try:
+                logger.debug("Scenario %s, Installer %s"
+                             % (s_result[1]['scenario'], installer))
+                logger.debug("Orchestrator deployment: %s s"
+                             % result['details']['orchestrator']['duration'])
+                logger.debug("vIMS deployment: %s s"
+                             % result['details']['vIMS']['duration'])
+                logger.debug("Signaling testing: %s s"
+                             % result['details']['sig_test']['duration'])
+                logger.debug("Signaling testing results: %s"
+                             % format_result)
+            except:
+                logger.error("Data badly formatted")
+            logger.debug("------------------------------------------------")
+
+    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+    templateEnv = jinja2.Environment(loader=templateLoader)
+
+    TEMPLATE_FILE = "/template/index-vims-tmpl.html"
+    template = templateEnv.get_template(TEMPLATE_FILE)
+
+    outputText = template.render(scenario_results=scenario_results,
+                                 step_order=step_order,
+                                 installer=installer)
+
+    with open(conf.REPORTING_PATH +
+              "/release/master/index-vims-" +
+              installer + ".html", "wb") as fh:
         fh.write(outputText)
 
-
+logger.info("vIMS report successfully generated")
index 649246d..a58eeec 100644 (file)
 #
 # ****************************************************
 installers = ["apex", "compass", "fuel", "joid"]
-# installers = ["compass"]
+# installers = ["apex"]
+# list of test cases declared in testcases.yaml but that must not be
+# taken into account for the scoring
+blacklist = ["odl", "ovno", "security_scan", "copper", "moon"]
 # versions = ["brahmaputra", "master"]
 versions = ["master"]
 PERIOD = 10
 MAX_SCENARIO_CRITERIA = 18
+# get the last 5 test results to determine the success criteria
+NB_TESTS = 5
+# REPORTING_PATH = "/usr/share/nginx/html/reporting/functest"
+REPORTING_PATH = "."
 URL_BASE = 'http://testresults.opnfv.org/test/api/v1/results'
+TEST_CONF = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
+LOG_LEVEL = "ERROR"
+LOG_FILE = REPORTING_PATH + "/reporting.log"
index 0db570f..5051ffa 100644 (file)
@@ -7,8 +7,26 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 from urllib2 import Request, urlopen, URLError
+import logging
 import json
-import reportingConf
+import reportingConf as conf
+
+
+def getLogger(module):
+    logFormatter = logging.Formatter("%(asctime)s [" +
+                                     module +
+                                     "] [%(levelname)-5.5s]  %(message)s")
+    logger = logging.getLogger()
+
+    fileHandler = logging.FileHandler("{0}/{1}".format('.', conf.LOG_FILE))
+    fileHandler.setFormatter(logFormatter)
+    logger.addHandler(fileHandler)
+
+    consoleHandler = logging.StreamHandler()
+    consoleHandler.setFormatter(logFormatter)
+    logger.addHandler(consoleHandler)
+    logger.setLevel(conf.LOG_LEVEL)
+    return logger
 
 
 def getApiResults(case, installer, scenario, version):
@@ -19,9 +37,10 @@ def getApiResults(case, installer, scenario, version):
     # urllib2.install_opener(opener)
     # url = "http://127.0.0.1:8000/results?case=" + case + \
     #       "&period=30&installer=" + installer
-    url = (reportingConf.URL_BASE + "?case=" + case +
-           "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
-           "&scenario=" + scenario + "&version=" + version)
+    url = (conf.URL_BASE + "?case=" + case +
+           "&period=" + str(conf.PERIOD) + "&installer=" + installer +
+           "&scenario=" + scenario + "&version=" + version +
+           "&last=" + str(conf.NB_TESTS))
     request = Request(url)
 
     try:
@@ -37,9 +56,8 @@ def getApiResults(case, installer, scenario, version):
 def getScenarios(case, installer, version):
 
     case = case.getName()
-    print case
-    url = (reportingConf.URL_BASE + "?case=" + case +
-           "&period=" + str(reportingConf.PERIOD) + "&installer=" + installer +
+    url = (conf.URL_BASE + "?case=" + case +
+           "&period=" + str(conf.PERIOD) + "&installer=" + installer +
            "&version=" + version)
     request = Request(url)
 
@@ -104,7 +122,7 @@ def getResult(testCase, installer, scenario, version):
         # print "nb of results:" + str(len(test_results))
 
         for r in test_results:
-            # print r["creation_date"]
+            # print r["start_date"]
             # print r["criteria"]
             scenario_results.append({r["start_date"]: r["criteria"]})
         # sort results
@@ -114,11 +132,16 @@ def getResult(testCase, installer, scenario, version):
         # 2: <4 successful consecutive runs but passing the criteria
         # 1: close to pass the success criteria
         # 0: 0% success, not passing
+        # -1: no run available
         test_result_indicator = 0
         nbTestOk = getNbtestOk(scenario_results)
-        # print "Nb test OK:"+ str(nbTestOk)
+
+        # print "Nb test OK (last 10 days):"+ str(nbTestOk)
         # check that we have at least 4 runs
-        if nbTestOk < 1:
+        if len(scenario_results) < 1:
+            # No results available
+            test_result_indicator = -1
+        elif nbTestOk < 1:
             test_result_indicator = 0
         elif nbTestOk < 2:
             test_result_indicator = 1
@@ -126,7 +149,9 @@ def getResult(testCase, installer, scenario, version):
             # Test the last 4 run
             if (len(scenario_results) > 3):
                 last4runResults = scenario_results[-4:]
-                if getNbtestOk(last4runResults):
+                nbTestOkLast4 = getNbtestOk(last4runResults)
+                # print "Nb test OK (last 4 run):"+ str(nbTestOkLast4)
+                if nbTestOkLast4 > 3:
                     test_result_indicator = 3
                 else:
                     test_result_indicator = 2
index 89a1d15..0c3fa94 100644 (file)
@@ -76,7 +76,7 @@
                             {% for test in items[scenario] -%}
                             <th>
                             {% if test.getCriteria() > -1 -%}
-                            {{test.getDbName() }}
+                            {{test.getDisplayName() }}
                             {%- endif %}
                                                        {% if test.getTier() > 3 -%}
                             *
index f0e8f59..e19853a 100644 (file)
@@ -19,6 +19,28 @@ class TestCase(object):
         self.criteria = criteria
         self.isRunnable = isRunnable
         self.tier = tier
+        display_name_matrix = {'healthcheck': 'healthcheck',
+                               'vping_ssh': 'vPing (ssh)',
+                               'vping_userdata': 'vPing (userdata)',
+                               'odl': 'ODL',
+                               'onos': 'ONOS',
+                               'ocl': 'OCL',
+                               'tempest_smoke_serial': 'Tempest (smoke)',
+                               'tempest_full_parallel': 'Tempest (full)',
+                               'rally_sanity': 'Rally (smoke)',
+                               'bgpvpn': 'bgpvpn',
+                               'rally_full': 'Rally (full)',
+                               'vims': 'vIMS',
+                               'doctor': 'Doctor',
+                               'promise': 'Promise',
+                               'moon': 'moon',
+                               'copper': 'copper',
+                               'security_scan': 'security'
+                               }
+        try:
+            self.displayName = display_name_matrix[self.name]
+        except:
+            self.displayName = "unknown"
 
     def getName(self):
         return self.name
@@ -74,10 +96,10 @@ class TestCase(object):
         self.isRunnable = is_runnable
 
     def toString(self):
-        testcase = ("Name=" + self.name + ";Criteria=" + str(self.criteria)
-                    + ";Project=" + self.project + ";Constraints="
-                    + str(self.constraints) + ";IsRunnable"
-                    + str(self.isRunnable))
+        testcase = ("Name=" + self.name + ";Criteria=" +
+                    str(self.criteria) + ";Project=" + self.project +
+                    ";Constraints=" + str(self.constraints) +
+                    ";IsRunnable" + str(self.isRunnable))
         return testcase
 
     def getDbName(self):
@@ -98,31 +120,15 @@ class TestCase(object):
                              'rally_full': 'rally_full',
                              'vims': 'vims',
                              'doctor': 'doctor-notification',
-                             'promise': 'promise'
+                             'promise': 'promise',
+                             'moon': 'moon',
+                             'copper': 'copper',
+                             'security_scan': 'security'
                              }
         try:
             return test_match_matrix[self.name]
         except:
             return "unknown"
 
-    def getTestDisplayName(self):
-        # Correspondance name of the test case / name in the DB
-        test_match_matrix = {'healthcheck': 'healthcheck',
-                             'vping_ssh': 'vPing (ssh)',
-                             'vping_userdata': 'vPing (userdata)',
-                             'odl': 'ODL',
-                             'onos': 'ONOS',
-                             'ocl': 'OCL',
-                             'tempest_smoke_serial': 'Tempest (smoke)',
-                             'tempest_full_parallel': 'Tempest (full)',
-                             'rally_sanity': 'Rally (smoke)',
-                             'bgpvpn': 'bgpvpn',
-                             'rally_full': 'Rally (full)',
-                             'vims': 'vIMS',
-                             'doctor': 'Doctor',
-                             'promise': 'Promise'
-                             }
-        try:
-            return test_match_matrix[self.name]
-        except:
-            return "unknown"
+    def getDisplayName(self):
+        return self.displayName
diff --git a/utils/test/result_collection_api/docker/Dockerfile b/utils/test/result_collection_api/docker/Dockerfile
new file mode 100644 (file)
index 0000000..ffee4c2
--- /dev/null
@@ -0,0 +1,52 @@
+#######################################################
+#   Docker container for OPNFV-TESTAPI
+#######################################################
+# Purpose: run opnfv-testapi for gathering test results
+#
+# Maintained by SerenaFeng
+# Build:
+#    $ docker build -t opnfv/testapi:tag .
+#
+# Execution:
+#    $ docker run -dti -p 8000:8000 \
+#      -e "swagger_url=http://10.63.243.17:8000" \
+#      -e "mongodb_url=mongodb://10.63.243.17:27017/" \
+#      -e "api_port=8000"
+#      opnfv/testapi:tag
+#
+# NOTE: providing swagger_url, api_port, mongodb_url is optional.
+#       If not provided, it will use the default one
+#       configured in config.ini
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+FROM ubuntu:14.04
+MAINTAINER SerenaFeng <feng.xiaowei@zte.com.cn>
+LABEL version="v1" description="OPNFV TestAPI Docker container"
+
+ENV HOME /home
+
+# Packaged dependencies
+RUN apt-get update && apt-get install -y \
+curl \
+git \
+gcc \
+wget \
+python-dev \
+python-pip \
+crudini \
+--no-install-recommends
+
+RUN pip install --upgrade pip
+
+RUN git config --global http.sslVerify false
+RUN git clone https://gerrit.opnfv.org/gerrit/releng /home/releng
+
+WORKDIR /home/releng/utils/test/result_collection_api/
+RUN pip install -r requirements.txt
+RUN python setup.py install
+CMD ["bash", "docker/start-server.sh"]
diff --git a/utils/test/result_collection_api/docker/prepare-env.sh b/utils/test/result_collection_api/docker/prepare-env.sh
new file mode 100755 (executable)
index 0000000..99433cc
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/bash
+FILE=/etc/opnfv_testapi/config.ini
+
+
+if [ "$mongodb_url" != "" ]; then
+    sudo crudini --set --existing $FILE mongo url $mongodb_url
+fi
+
+if [ "$swagger_url" != "" ]; then
+    sudo crudini --set --existing $FILE swagger base_url $swagger_url
+fi
+
+if [ "$api_port" != "" ];then
+    sudo crudini --set --existing $FILE api port $api_port
+fi
+
diff --git a/utils/test/result_collection_api/docker/start-server.sh b/utils/test/result_collection_api/docker/start-server.sh
new file mode 100755 (executable)
index 0000000..8bf6084
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+bash docker/prepare-env.sh
+opnfv-testapi
index 16346bf..0edb73a 100644 (file)
@@ -13,4 +13,4 @@ port = 8000
 debug = True
 
 [swagger]
-base_url = http://testresults.opnfv.org/test
\ No newline at end of file
+base_url = http://localhost:8000
index 8737011..f98c35e 100644 (file)
@@ -198,9 +198,8 @@ class GenericApiHandler(RequestHandler):
         comparing values
         """
         if not (new_value is None):
-            if len(new_value) > 0:
-                if new_value != old_value:
-                    edit_request[key] = new_value
+            if new_value != old_value:
+                edit_request[key] = new_value
 
         return edit_request
 
index 56bed6c..400b84a 100644 (file)
@@ -45,7 +45,7 @@ class GenericResultHandler(GenericApiHandler):
                     obj = {"$gte": str(period)}
                     query['start_date'] = obj
             elif k == 'trust_indicator':
-                query[k] = float(v)
+                query[k + '.current'] = float(v)
             elif k != 'last':
                 query[k] = v
         return query
@@ -112,12 +112,12 @@ class ResultsCLHandler(GenericResultHandler):
             @type period: L{string}
             @in period: query
             @required period: False
-            @param last: last days
+            @param last: last records stored until now
             @type last: L{string}
             @in last: query
             @required last: False
-            @param trust_indicator: must be int/long/float
-            @type trust_indicator: L{string}
+            @param trust_indicator: must be float
+            @type trust_indicator: L{float}
             @in trust_indicator: query
             @required trust_indicator: False
         """
@@ -180,3 +180,19 @@ class ResultsGURHandler(GenericResultHandler):
         query = dict()
         query["_id"] = ObjectId(result_id)
         self._get_one(query)
+
+    @swagger.operation(nickname="update")
+    def put(self, result_id):
+        """
+            @description: update a single result by _id
+            @param body: fields to be updated
+            @type body: L{ResultUpdateRequest}
+            @in body: body
+            @rtype: L{Result}
+            @return 200: update success
+            @raise 404: result not exist
+            @raise 403: nothing to update
+        """
+        query = {'_id': ObjectId(result_id)}
+        db_keys = []
+        self._update(query, db_keys)
index fb6a809..dd1e3dc 100644 (file)
@@ -9,8 +9,70 @@
 from opnfv_testapi.tornado_swagger import swagger
 
 
+@swagger.model()
+class TIHistory(object):
+    """
+        @ptype step: L{float}
+    """
+    def __init__(self, date=None, step=0):
+        self.date = date
+        self.step = step
+
+    def format(self):
+        return {
+            "date": self.date,
+            "step": self.step
+        }
+
+    @staticmethod
+    def from_dict(a_dict):
+        if a_dict is None:
+            return None
+
+        return TIHistory(a_dict.get('date'), a_dict.get('step'))
+
+
+@swagger.model()
+class TI(object):
+    """
+        @property histories: trust_indicator update histories
+        @ptype histories: C{list} of L{TIHistory}
+        @ptype current: L{float}
+    """
+    def __init__(self, current=0):
+        self.current = current
+        self.histories = list()
+
+    def format(self):
+        hs = []
+        for h in self.histories:
+            hs.append(h.format())
+
+        return {
+            "current": self.current,
+            "histories": hs
+        }
+
+    @staticmethod
+    def from_dict(a_dict):
+        if a_dict is None:
+            return None
+        t = TI()
+        t.current = a_dict.get('current')
+        if 'histories' in a_dict.keys():
+            for history in a_dict.get('histories', None):
+                t.histories.append(TIHistory.from_dict(history))
+        else:
+            t.histories = []
+        return t
+
+
 @swagger.model()
 class ResultCreateRequest(object):
+    """
+        @property trust_indicator:
+        @ptype trust_indicator: L{TI}
+    """
     def __init__(self,
                  pod_name=None,
                  project_name=None,
@@ -50,15 +112,30 @@ class ResultCreateRequest(object):
             "build_tag": self.build_tag,
             "scenario": self.scenario,
             "criteria": self.criteria,
-            "trust_indicator": self.trust_indicator
+            "trust_indicator": self.trust_indicator.format()
+        }
+
+
+@swagger.model()
+class ResultUpdateRequest(object):
+    """
+        @property trust_indicator:
+        @ptype trust_indicator: L{TI}
+    """
+    def __init__(self, trust_indicator=None):
+        self.trust_indicator = trust_indicator
+
+    def format(self):
+        return {
+            "trust_indicator": self.trust_indicator.format(),
         }
 
 
 @swagger.model()
 class TestResult(object):
     """
-        @property trust_indicator: must be int/long/float
-        @ptype trust_indicator: L{float}
+        @property trust_indicator: used for long duration test case
+        @ptype trust_indicator: L{TI}
     """
     def __init__(self, _id=None, case_name=None, project_name=None,
                  pod_name=None, installer=None, version=None,
@@ -90,7 +167,6 @@ class TestResult(object):
         t.case_name = a_dict.get('case_name')
         t.pod_name = a_dict.get('pod_name')
         t.project_name = a_dict.get('project_name')
-        t.description = a_dict.get('description')
         t.start_date = str(a_dict.get('start_date'))
         t.stop_date = str(a_dict.get('stop_date'))
         t.details = a_dict.get('details')
@@ -99,19 +175,7 @@ class TestResult(object):
         t.build_tag = a_dict.get('build_tag')
         t.scenario = a_dict.get('scenario')
         t.criteria = a_dict.get('criteria')
-        # 0 < trust indicator < 1
-        # if bad value =>  set this indicator to 0
-        t.trust_indicator = a_dict.get('trust_indicator')
-        if t.trust_indicator is not None:
-            if isinstance(t.trust_indicator, (int, long, float)):
-                if t.trust_indicator < 0:
-                    t.trust_indicator = 0
-                elif t.trust_indicator > 1:
-                    t.trust_indicator = 1
-            else:
-                t.trust_indicator = 0
-        else:
-            t.trust_indicator = 0
+        t.trust_indicator = TI.from_dict(a_dict.get('trust_indicator'))
         return t
 
     def format(self):
@@ -119,7 +183,6 @@ class TestResult(object):
             "case_name": self.case_name,
             "project_name": self.project_name,
             "pod_name": self.pod_name,
-            "description": self.description,
             "start_date": str(self.start_date),
             "stop_date": str(self.stop_date),
             "version": self.version,
@@ -128,7 +191,7 @@ class TestResult(object):
             "build_tag": self.build_tag,
             "scenario": self.scenario,
             "criteria": self.criteria,
-            "trust_indicator": self.trust_indicator
+            "trust_indicator": self.trust_indicator.format()
         }
 
     def format_http(self):
@@ -137,7 +200,6 @@ class TestResult(object):
             "case_name": self.case_name,
             "project_name": self.project_name,
             "pod_name": self.pod_name,
-            "description": self.description,
             "start_date": str(self.start_date),
             "stop_date": str(self.stop_date),
             "version": self.version,
@@ -146,7 +208,7 @@ class TestResult(object):
             "build_tag": self.build_tag,
             "scenario": self.scenario,
             "criteria": self.criteria,
-            "trust_indicator": self.trust_indicator
+            "trust_indicator": self.trust_indicator.format()
         }
 
 
index 6ab98c7..4509692 100644 (file)
@@ -116,8 +116,8 @@ class MemDb(object):
                 if k == 'start_date':
                     if not MemDb._compare_date(v, content.get(k)):
                         return False
-                elif k == 'trust_indicator':
-                    if float(content.get(k)) != float(v):
+                elif k == 'trust_indicator.current':
+                    if content.get('trust_indicator').get('current') != v:
                         return False
                 elif content.get(k, None) != v:
                     return False
@@ -173,7 +173,6 @@ class MemDb(object):
 
     def _check_keys(self, doc):
         for key in doc.keys():
-            print('key', key, 'value', doc.get(key))
             if '.' in key:
                 raise NameError('key {} must not contain .'.format(key))
             if key.startswith('$'):
index 27382f0..9a1253e 100644 (file)
@@ -8,9 +8,9 @@
 ##############################################################################
 import unittest
 
-from tornado.web import Application
 from tornado import gen
 from tornado.testing import AsyncHTTPTestCase, gen_test
+from tornado.web import Application
 
 import fake_pymongo
 
index d473060..327ddf7 100644 (file)
@@ -10,7 +10,7 @@ import unittest
 
 from test_base import TestBase
 from opnfv_testapi.resources.project_models import ProjectCreateRequest, \
-    Project, Projects
+    Project, Projects, ProjectUpdateRequest
 from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
     HTTP_FORBIDDEN, HTTP_NOT_FOUND
 
@@ -112,7 +112,7 @@ class TestProjectUpdate(TestProjectBase):
         code, body = self.get(self.req_d.name)
         _id = body._id
 
-        req = ProjectCreateRequest('newName', 'new description')
+        req = ProjectUpdateRequest('newName', 'new description')
         code, body = self.update(req, self.req_d.name)
         self.assertEqual(code, HTTP_OK)
         self.assertEqual(_id, body._id)
index bba3b22..98ef7c0 100644 (file)
@@ -6,15 +6,16 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import unittest
 import copy
+import unittest
+from datetime import datetime, timedelta
 
 from opnfv_testapi.common.constants import HTTP_OK, HTTP_BAD_REQUEST, \
     HTTP_NOT_FOUND
 from opnfv_testapi.resources.pod_models import PodCreateRequest
 from opnfv_testapi.resources.project_models import ProjectCreateRequest
 from opnfv_testapi.resources.result_models import ResultCreateRequest, \
-    TestResult, TestResults
+    TestResult, TestResults, ResultUpdateRequest, TI, TIHistory
 from opnfv_testapi.resources.testcase_models import TestcaseCreateRequest
 from test_base import TestBase
 
@@ -55,9 +56,11 @@ class TestResultBase(TestBase):
         self.build_tag = 'v3.0'
         self.scenario = 'odl-l2'
         self.criteria = 'passed'
-        self.trust_indicator = 0.7
+        self.trust_indicator = TI(0.7)
         self.start_date = "2016-05-23 07:16:09.477097"
         self.stop_date = "2016-05-23 07:16:19.477097"
+        self.update_date = "2016-05-24 07:16:19.477097"
+        self.update_step = -0.05
         super(TestResultBase, self).setUp()
         self.details = Details(timestart='0', duration='9s', status='OK')
         self.req_d = ResultCreateRequest(pod_name=self.pod,
@@ -74,6 +77,7 @@ class TestResultBase(TestBase):
                                          trust_indicator=self.trust_indicator)
         self.get_res = TestResult
         self.list_res = TestResults
+        self.update_res = TestResult
         self.basePath = '/api/v1/results'
         self.req_pod = PodCreateRequest(self.pod, 'metal', 'zte pod 1')
         self.req_project = ProjectCreateRequest(self.project, 'vping test')
@@ -103,10 +107,19 @@ class TestResultBase(TestBase):
         self.assertEqual(result.build_tag, req.build_tag)
         self.assertEqual(result.scenario, req.scenario)
         self.assertEqual(result.criteria, req.criteria)
-        self.assertEqual(result.trust_indicator, req.trust_indicator)
         self.assertEqual(result.start_date, req.start_date)
         self.assertEqual(result.stop_date, req.stop_date)
         self.assertIsNotNone(result._id)
+        ti = result.trust_indicator
+        self.assertEqual(ti.current, req.trust_indicator.current)
+        if ti.histories:
+            history = ti.histories[0]
+            self.assertEqual(history.date, self.update_date)
+            self.assertEqual(history.step, self.update_step)
+
+    def _create_d(self):
+        _, res = self.create_d()
+        return res.href.split('/')[-1]
 
 
 class TestResultCreate(TestResultBase):
@@ -172,8 +185,7 @@ class TestResultCreate(TestResultBase):
 
 class TestResultGet(TestResultBase):
     def test_getOne(self):
-        _, res = self.create_d()
-        _id = res.href.split('/')[-1]
+        _id = self._create_d()
         code, body = self.get(_id)
         self.assert_res(code, body)
 
@@ -266,8 +278,6 @@ class TestResultGet(TestResultBase):
                 self.assert_res(code, result, req)
 
     def _create_changed_date(self, **kwargs):
-        import copy
-        from datetime import datetime, timedelta
         req = copy.deepcopy(self.req_d)
         req.start_date = datetime.now() + timedelta(**kwargs)
         req.stop_date = str(req.start_date + timedelta(minutes=10))
@@ -276,13 +286,36 @@ class TestResultGet(TestResultBase):
         return req
 
     def _set_query(self, *args):
+        def get_value(arg):
+            return eval('self.' + arg) \
+                if arg != 'trust_indicator' else self.trust_indicator.current
         uri = ''
         for arg in args:
             if '=' in arg:
                 uri += arg + '&'
             else:
-                uri += '{}={}&'.format(arg, eval('self.' + arg))
+                uri += '{}={}&'.format(arg, get_value(arg))
         return uri[0: -1]
 
+
+class TestResultUpdate(TestResultBase):
+    def test_success(self):
+        _id = self._create_d()
+
+        new_ti = copy.deepcopy(self.trust_indicator)
+        new_ti.current += self.update_step
+        new_ti.histories.append(TIHistory(self.update_date, self.update_step))
+        new_data = copy.deepcopy(self.req_d)
+        new_data.trust_indicator = new_ti
+        update = ResultUpdateRequest(trust_indicator=new_ti)
+        code, body = self.update(update, _id)
+        self.assertEqual(_id, body._id)
+        self.assert_res(code, body, new_data)
+
+        code, new_body = self.get(_id)
+        self.assertEqual(_id, new_body._id)
+        self.assert_res(code, new_body, new_data)
+
+
 if __name__ == '__main__':
     unittest.main()