Merge "[compass] use new pod to run compass jobs"
author    Jun Li <matthew.lijun@huawei.com>
          Sun, 18 Sep 2016 03:58:10 +0000 (03:58 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
          Sun, 18 Sep 2016 03:58:10 +0000 (03:58 +0000)
53 files changed:
jjb/doctor/doctor.yml
jjb/infra/infra-daily-jobs.yml
jjb/infra/openstack-bifrost-verify.sh
jjb/kvmfornfv/kvmfornfv-download-artifact.sh
jjb/yardstick/yardstick-ci-jobs.yml
prototypes/bifrost/README.md
prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml
prototypes/bifrost/scripts/destroy-env.sh
prototypes/bifrost/scripts/test-bifrost-deployment.sh
prototypes/puppet-infracloud/hiera/common.yaml
prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp
prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp
utils/lab-reconfiguration/reconfigUcsNet.py
utils/push-test-logs.sh
utils/test/dashboard/js/opnfv_dashboard_tests_conf.js
utils/test/reporting/functest/img/gauge_0.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_100.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_16.7.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_25.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_33.3.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_41.7.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_50.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_58.3.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_66.7.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_75.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_8.3.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_83.3.png [new file with mode: 0644]
utils/test/reporting/functest/img/gauge_91.7.png [new file with mode: 0644]
utils/test/reporting/functest/reporting-status.py
utils/test/reporting/functest/scenarioResult.py
utils/test/reporting/functest/template/index-status-tmpl.html
utils/test/reporting/yardstick/reporting-status.py
utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py
utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py
utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py
utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py
utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py
utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py
utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py
utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py
utils/test/result_collection_api/update/templates/changes_in_mongodb.py
utils/test/result_collection_api/update/templates/update_mongodb.py
utils/test/scripts/conf_utils.py [new file with mode: 0644]
utils/test/scripts/create_kibana_dashboards.py
utils/test/scripts/kibana_cleanup.py
utils/test/scripts/logger_utils.py [new file with mode: 0644]
utils/test/scripts/mongo2elastic_format.py [new file with mode: 0644]
utils/test/scripts/mongo_to_elasticsearch.py
utils/test/scripts/shared_utils.py
utils/test/scripts/testcases.yaml

diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index ead6c00..b53082e 100644 (file)
@@ -92,7 +92,9 @@
             description: 'OpenStack credentials'
         - '{installer}-defaults'
         - '{slave-label}-defaults'
-        - 'functest-suite-parameter'
+        - 'functest-suite-parameter':
+            FUNCTEST_SUITE_NAME: '{project}'
+            TESTCASE_OPTIONS: '-e INSPECTOR_TYPE={inspector} -v $WORKSPACE:$HOME/opnfv/repos/{project}'
         - string:
             name: DEPLOY_SCENARIO
             default: 'os-odl_l2-nofeature-ha'
                 file-paths:
                   - compare-type: ANT
                     pattern: 'tests/**'
+            skip-vote:
+                successful: true
+                failed: true
+                unstable: true
+                notbuilt: true
 
     builders:
-        - 'functest-suite-builder':
-            TESTCASE_OPTIONS: '-e INSPECTOR_TYPE=$INSPECTOR_TYPE -v $WORKSPACE:$HOME/opnfv/repos/doctor'
+        - 'functest-suite-builder'
diff --git a/jjb/infra/infra-daily-jobs.yml b/jjb/infra/infra-daily-jobs.yml
index df90c6d..d779d56 100644 (file)
         - shell: |
             #!/bin/bash
 
-            echo "Not activated!"
+            sudo $WORKSPACE/jjb/infra/infra-deploy.sh
 - builder:
     name: 'infra-smoketest-daily-builder'
     builders:
         - shell: |
             #!/bin/bash
 
-            echo "Not activated!"
+            sudo $WORKSPACE/jjb/infra/infra-smoketest.sh
diff --git a/jjb/infra/openstack-bifrost-verify.sh b/jjb/infra/openstack-bifrost-verify.sh
index a4653f9..c17cb88 100755 (executable)
@@ -11,6 +11,16 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
+trap fix_ownership EXIT
+
+function fix_ownership() {
+    if [ -z "${JOB_URL+x}" ]; then
+        echo "Not running as part of Jenkins. Handle the logs manually."
+    else
+        chown -R jenkins:jenkins $WORKSPACE
+    fi
+}
+
 # check distro to see if we support it
 # we will have centos and suse supported in future
 case "$DISTRO" in
@@ -24,7 +34,7 @@ case "$DISTRO" in
 esac
 
 # remove previously cloned repos
-/bin/rm -rf /opt/bifrost /opt/puppet-infracloud /opt/releng
+/bin/rm -rf /opt/bifrost /opt/puppet-infracloud /opt/stack /opt/releng
 
 # clone upstream bifrost repo and checkout the patch to verify
 git clone https://git.openstack.org/openstack/bifrost /opt/bifrost
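The trap above guarantees fix_ownership runs on every exit path, which matters with errexit enabled: a failing provisioning step would otherwise leave root-owned files that Jenkins cannot archive. For comparison, the same guard expressed in Python via atexit; illustrative only, not part of this change::

    import atexit
    import os
    import subprocess

    def fix_ownership():
        # mirrors the bash trap: runs at interpreter exit, success or failure
        if 'JOB_URL' not in os.environ:
            print("Not running as part of Jenkins. Handle the logs manually.")
        else:
            subprocess.check_call(
                ['chown', '-R', 'jenkins:jenkins', os.environ['WORKSPACE']])

    atexit.register(fix_ownership)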
diff --git a/jjb/kvmfornfv/kvmfornfv-download-artifact.sh b/jjb/kvmfornfv/kvmfornfv-download-artifact.sh
index c8bdb9c..ea37eb2 100755 (executable)
@@ -27,8 +27,10 @@ case "$JOB_TYPE" in
         exit 1
 esac
 
+GS_GUESTIMAGE_LOCATION="gs://artifacts.opnfv.org/$PROJECT/guest-image"
 /bin/mkdir -p $WORKSPACE/build_output
 gsutil cp -r $GS_UPLOAD_LOCATION/* $WORKSPACE/build_output > $WORKSPACE/gsutil.log 2>&1
+gsutil cp $GS_GUESTIMAGE_LOCATION/guest1.sha512 $WORKSPACE/build_output > $WORKSPACE/gsutil.log 2>&1
 
 echo "--------------------------------------------------------"
 ls -al $WORKSPACE/build_output
diff --git a/jjb/yardstick/yardstick-ci-jobs.yml b/jjb/yardstick/yardstick-ci-jobs.yml
index 286ca6d..fb2d8f1 100644 (file)
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *master
+        - arm-pod2:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *colorado
         - orange-pod2:
             slave-label: '{pod}'
             installer: joid
diff --git a/prototypes/bifrost/README.md b/prototypes/bifrost/README.md
index f50ffb2..df34f9c 100644 (file)
@@ -7,29 +7,31 @@ Please follow that steps:
 
 1. Clone bifrost::
 
-    git clone https://git.openstack.org/openstack/bifrost /opt/bifrost
+    sudo git clone https://git.openstack.org/openstack/bifrost /opt/bifrost
 
 2. Clone releng::
 
-    git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
+    sudo git clone https://gerrit.opnfv.org/gerrit/releng /opt/releng
 
 3. Clone infracloud::
 
-    git clone https://git.openstack.org/openstack-infra/puppet-infracloud /opt/puppet-infracloud
+    sudo git clone https://git.openstack.org/openstack-infra/puppet-infracloud /opt/puppet-infracloud
 
 4. Combine releng scripts and playbooks with bifrost::
 
-    cp -R /opt/releng/prototypes/bifrost/* /opt/bifrost/
+    sudo cp -R /opt/releng/prototypes/bifrost/* /opt/bifrost/
 
-5. Run destroy script if you need to cleanup previous environment::
+5. If you are on a RHEL/CentOS box, ensure that selinux is disabled
+
+6. Run destroy script if you need to cleanup previous environment::
 
     cd /opt/bifrost
-    ./scripts/destroy-env.sh
+    sudo ./scripts/destroy-env.sh
 
-6. Run deployment script to spin up 3 vms with bifrost: jumphost, controller and compute::
+7. Run deployment script to spin up 3 vms with bifrost: jumphost, controller and compute::
 
     cd /opt/bifrost
-    ./scripts/test-bifrost-deployment.sh
+    sudo ./scripts/test-bifrost-deployment.sh
 
 It is likely that the script will show some errors due to timeout. Please ignore the errors, and wait until the vms are completely bootstrapped. To verify it you can check with ironic::
 
@@ -39,10 +41,10 @@ It is likely that the script will show some errors due to timeout. Please ignore
 
 And wait until all the vms are in **active** Provisioning State.
 
-7. Check the IPs assigned to each of the VMS. You can check it by looking at inventory:
+8. Check the IPs assigned to each of the VMS. You can check it by looking at inventory:
 
     cat /tmp/baremetal.csv
 
-8. You can enter into the vms with devuser login/pass:
+9. You can enter into the vms with devuser login/pass:
 
     ssh devuser@192.168.122.2
diff --git a/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml b/prototypes/bifrost/playbooks/test-bifrost-infracloud.yaml
index ba548b3..b4dffdc 100644 (file)
     # NOTE(TheJulia): While the next step creates a ramdisk, some elements
     # do not support ramdisk-image-create as they invoke steps to cleanup
     # the ramdisk which causes ramdisk-image-create to believe it failed.
-    - { role: bifrost-create-dib-image, dib_imagename: "{{ http_boot_folder }}/ipa", build_ramdisk: false, dib_os_element: "{{ ipa_dib_os_element|default('debian') }}", dib_os_release: "jessie", dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}", when: create_ipa_image | bool == true }
-    - { role: bifrost-create-dib-image, dib_imagetype: "qcow2", dib_imagename: "{{deploy_image}}", dib_os_element: "ubuntu-minimal", dib_os_release: "trusty", dib_elements: "vm serial-console simple-init devuser infra-cloud-bridge puppet growroot {{ extra_dib_elements|default('') }}", dib_packages: "openssh-server,vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl", when: create_image_via_dib | bool == true and transform_boot_image | bool == false }
+    - role: bifrost-create-dib-image
+      dib_imagename: "{{ http_boot_folder }}/ipa"
+      build_ramdisk: false
+      dib_os_element: "{{ ipa_dib_os_element|default('debian') }}"
+      dib_os_release: "jessie"
+      dib_elements: "ironic-agent {{ ipa_extra_dib_elements | default('') }}"
+      when: create_ipa_image | bool == true
+    - role: bifrost-create-dib-image
+      dib_imagetype: "qcow2"
+      dib_imagename: "{{deploy_image}}"
+      dib_os_element: "{{ lookup('env','DIB_OS_ELEMENT') }}"
+      dib_os_release: "{{ lookup('env', 'DIB_OS_RELEASE') }}"
+      dib_elements: "vm serial-console simple-init devuser infra-cloud-bridge puppet growroot {{ extra_dib_elements|default('') }}"
+      dib_packages: "{{ lookup('env', 'DIB_OS_PACKAGES') }}"
+      when: create_image_via_dib | bool == true and transform_boot_image | bool == false
   environment:
     http_proxy: "{{ lookup('env','http_proxy') }}"
     https_proxy: "{{ lookup('env','https_proxy') }}"
diff --git a/prototypes/bifrost/scripts/destroy-env.sh b/prototypes/bifrost/scripts/destroy-env.sh
index 4dffee6..86d7bc4 100755 (executable)
@@ -7,6 +7,13 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+
+# We need to execute everything as root
+if [[ $(whoami) != "root" ]]; then
+    echo "Error: This script must be run as root!"
+    exit 1
+fi
+
 virsh destroy jumphost.opnfvlocal || true
 virsh destroy controller00.opnfvlocal || true
 virsh destroy compute00.opnfvlocal || true
@@ -22,11 +29,13 @@ mysql -u root ironic --execute "delete from node_tags;"
 mysql -u root ironic --execute "delete from nodes;"
 mysql -u root ironic --execute "delete from conductors;"
 echo "removing leases"
-> /var/lib/dnsmasq/dnsmasq.leases
+[[ -e /var/lib/misc/dnsmasq/dnsmasq.leases ]] && > /var/lib/misc/dnsmasq/dnsmasq.leases
 echo "removing logs"
 rm -rf /var/log/libvirt/baremetal_logs/*.log
 
 # clean up dib images only if requested explicitly
+CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
+
 if [ $CLEAN_DIB_IMAGES = "true" ]; then
     rm -rf /httpboot/*
     rm -rf /tftpboot/*
@@ -36,6 +45,7 @@ fi
 rm -rf /var/lib/libvirt/images/*.qcow2
 
 echo "restarting services"
+service dnsmasq restart
 service libvirtd restart
 service ironic-api restart
 service ironic-conductor start
diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/test-bifrost-deployment.sh
index d796f35..fb49afc 100755 (executable)
@@ -57,6 +57,13 @@ export ELEMENTS_PATH=/usr/share/diskimage-builder/elements:/opt/puppet-infraclou
 export DIB_DEV_USER_PWDLESS_SUDO=yes
 export DIB_DEV_USER_PASSWORD=devuser
 
+# settings for distro: trusty/ubuntu-minimal, 7/centos-minimal
+export DIB_OS_RELEASE=${DIB_OS_RELEASE:-trusty}
+export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
+
+# for centos 7: "openssh-server,vim,less,bridge-utils,iputils,rsyslog,curl"
+export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"openssh-server,vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl"}
+
 # Source Ansible
 # NOTE(TheJulia): Ansible stable-1.9 source method tosses an error deep
 # under the hood which -x will detect, so for this step, we need to suspend
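These `${VAR:-default}` exports keep trusty/ubuntu-minimal as the default image while letting a caller switch to, say, 7/centos-minimal per run; the playbook then reads the values back with lookup('env', ...), which comes back empty for unset variables, hence the defaults here. The same fallback idiom in Python, for illustration::

    import os

    # mirrors: export DIB_OS_RELEASE=${DIB_OS_RELEASE:-trusty}
    # (note bash also substitutes when the variable is set but empty;
    #  os.environ.get only substitutes when it is missing)
    dib_os_release = os.environ.get('DIB_OS_RELEASE', 'trusty')
    dib_os_element = os.environ.get('DIB_OS_ELEMENT', 'ubuntu-minimal')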
diff --git a/prototypes/puppet-infracloud/hiera/common.yaml b/prototypes/puppet-infracloud/hiera/common.yaml
index 7d6b440..1fcde2f 100644 (file)
@@ -76,5 +76,5 @@ hosts:
   compute00.opnfvlocal:
     ip: 192.168.122.4
 
-# br-eth0 for debian, br-ens3 for RHEL
+# br-eth0 for debian, br_ens3 for RHEL
 bridge_name: br-eth0
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/compute.pp
index 77908c0..ca548a5 100644 (file)
@@ -8,14 +8,6 @@ class opnfv::compute (
   $controller_public_address,
   $virt_type = 'kvm',
 ) {
-  # disable selinux if needed
-  if $::osfamily == 'RedHat' {
-    class { 'selinux':
-      mode   => 'permissive',
-      before => Class['::infracloud::compute'],
-    }
-  }
-
   class { '::infracloud::compute':
     nova_rabbit_password          => $nova_rabbit_password,
     neutron_rabbit_password       => $neutron_rabbit_password,
diff --git a/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp b/prototypes/puppet-infracloud/modules/opnfv/manifests/controller.pp
index 4bae42c..7522692 100644 (file)
@@ -30,14 +30,6 @@ class opnfv::controller (
   $opnfv_password,
   $opnfv_email = 'opnfvuser@gmail.com',
 ) {
-  # disable selinux if needed
-  if $::osfamily == 'RedHat' {
-    class { 'selinux':
-      mode   => 'permissive',
-      before => Class['::infracloud::controller'],
-    }
-  }
-
   class { '::infracloud::controller':
     keystone_rabbit_password         => $keystone_rabbit_password,
     neutron_rabbit_password          => $neutron_rabbit_password,
diff --git a/utils/lab-reconfiguration/reconfigUcsNet.py b/utils/lab-reconfiguration/reconfigUcsNet.py
index 45a72a3..4c08f3d 100755 (executable)
@@ -157,7 +157,7 @@ def read_yaml_file(yamlFile):
     """
     # TODO: add check if vnic templates specified in file exist on UCS
     with open(yamlFile, 'r') as stream:
-        return yaml.load(stream)
+        return yaml.safe_load(stream)
 
 
 def set_network(handle=None, yamlFile=None):
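Switching to safe_load closes a code-execution hole: yaml.load will instantiate arbitrary Python objects for tags such as !!python/object/apply, while safe_load only builds plain scalars, lists, and dicts. Illustrative only::

    import yaml

    doc = "vnicTemplates: [eth0, eth1]"
    yaml.safe_load(doc)   # -> {'vnicTemplates': ['eth0', 'eth1']}

    # safe_load rejects object-constructing tags that yaml.load would execute:
    # yaml.safe_load("!!python/object/apply:os.system ['id']")
    #   -> raises yaml.constructor.ConstructorError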
diff --git a/utils/push-test-logs.sh b/utils/push-test-logs.sh
index 0fa882b..f24d884 100644 (file)
@@ -24,7 +24,7 @@ node_list=(\
 'lf-pod1' 'lf-pod2' 'intel-pod2' 'intel-pod3' \
 'intel-pod5' 'intel-pod6' 'intel-pod7' 'intel-pod8' \
 'ericsson-pod2' \
-'arm-pod1' \
+'arm-pod1' 'arm-pod3' \
 'huawei-pod1' 'huawei-pod2' 'huawei-virtual1' 'huawei-virtual2' 'huawei-virtual3' 'huawei-virtual4')
 
 
diff --git a/utils/test/dashboard/js/opnfv_dashboard_tests_conf.js b/utils/test/dashboard/js/opnfv_dashboard_tests_conf.js
index 870357c..a463325 100644 (file)
@@ -56,7 +56,7 @@ var opnfv_dashboard_testcases = {
 var opnfv_dashboard_installers_pods = {};\r
 opnfv_dashboard_installers_pods['apex'] = ['all','intel-pod7','opnfv-jump-1'];\r
 opnfv_dashboard_installers_pods['compass'] = ['all','huawei-us-deploy-bare-1','huawei-us-deploy-vm-1','huawei-us-deploy-vm2','intel-pod8'];\r
-opnfv_dashboard_installers_pods['fuel'] = ['all','ericsson-pod2','opnfv-jump-2','arm-pod1','zte-pod1'];\r
+opnfv_dashboard_installers_pods['fuel'] = ['all','ericsson-pod2','opnfv-jump-2','arm-pod1','arm-pod3','zte-pod1'];
 opnfv_dashboard_installers_pods['joid'] = ['all','intel-pod5','intel-pod6','orange-fr-pod2'];\r
 \r
 var opnfv_dashboard_installers_pods_print = {};\r
diff --git a/utils/test/reporting/functest/img/gauge_0.png b/utils/test/reporting/functest/img/gauge_0.png
new file mode 100644 (file)
index 0000000..ecefc0e
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_0.png differ
diff --git a/utils/test/reporting/functest/img/gauge_100.png b/utils/test/reporting/functest/img/gauge_100.png
new file mode 100644 (file)
index 0000000..e199e15
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_100.png differ
diff --git a/utils/test/reporting/functest/img/gauge_16.7.png b/utils/test/reporting/functest/img/gauge_16.7.png
new file mode 100644 (file)
index 0000000..3e3993c
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_16.7.png differ
diff --git a/utils/test/reporting/functest/img/gauge_25.png b/utils/test/reporting/functest/img/gauge_25.png
new file mode 100644 (file)
index 0000000..4923659
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_25.png differ
diff --git a/utils/test/reporting/functest/img/gauge_33.3.png b/utils/test/reporting/functest/img/gauge_33.3.png
new file mode 100644 (file)
index 0000000..364574b
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_33.3.png differ
diff --git a/utils/test/reporting/functest/img/gauge_41.7.png b/utils/test/reporting/functest/img/gauge_41.7.png
new file mode 100644 (file)
index 0000000..8c3e910
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_41.7.png differ
diff --git a/utils/test/reporting/functest/img/gauge_50.png b/utils/test/reporting/functest/img/gauge_50.png
new file mode 100644 (file)
index 0000000..2874b9f
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_50.png differ
diff --git a/utils/test/reporting/functest/img/gauge_58.3.png b/utils/test/reporting/functest/img/gauge_58.3.png
new file mode 100644 (file)
index 0000000..beedc8a
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_58.3.png differ
diff --git a/utils/test/reporting/functest/img/gauge_66.7.png b/utils/test/reporting/functest/img/gauge_66.7.png
new file mode 100644 (file)
index 0000000..93f44d1
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_66.7.png differ
diff --git a/utils/test/reporting/functest/img/gauge_75.png b/utils/test/reporting/functest/img/gauge_75.png
new file mode 100644 (file)
index 0000000..9fc261f
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_75.png differ
diff --git a/utils/test/reporting/functest/img/gauge_8.3.png b/utils/test/reporting/functest/img/gauge_8.3.png
new file mode 100644 (file)
index 0000000..59f8657
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_8.3.png differ
diff --git a/utils/test/reporting/functest/img/gauge_83.3.png b/utils/test/reporting/functest/img/gauge_83.3.png
new file mode 100644 (file)
index 0000000..27ae4ec
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_83.3.png differ
diff --git a/utils/test/reporting/functest/img/gauge_91.7.png b/utils/test/reporting/functest/img/gauge_91.7.png
new file mode 100644 (file)
index 0000000..2808657
Binary files /dev/null and b/utils/test/reporting/functest/img/gauge_91.7.png differ
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index e9e167d..ef567f1 100755 (executable)
@@ -34,7 +34,7 @@ tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)
 cf = conf.TEST_CONF
 response = requests.get(cf)
 
-functest_yaml_config = yaml.load(response.text)
+functest_yaml_config = yaml.safe_load(response.text)
 
 logger.info("*******************************************")
 logger.info("*   Generating reporting scenario status  *")
@@ -175,6 +175,8 @@ for version in conf.versions:
                 scenario_criteria = conf.MAX_SCENARIO_CRITERIA
 
             s_score = str(scenario_score) + "/" + str(scenario_criteria)
+            s_score_percent = float(
+                scenario_score) / float(scenario_criteria) * 100
             s_status = "KO"
             if scenario_score < scenario_criteria:
                 logger.info(">>>> scenario not OK, score = %s/%s" %
@@ -191,11 +193,13 @@ for version in conf.versions:
                             ";" + installer + ";" + s + "\n")
                     f.write(info)
 
-            scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score)
+            scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score,
+                                                            s_score_percent)
             logger.info("--------------------------")
 
         templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
-        templateEnv = jinja2.Environment(loader=templateLoader, autoescape=True)
+        templateEnv = jinja2.Environment(
+            loader=templateLoader, autoescape=True)
 
         TEMPLATE_FILE = "/template/index-status-tmpl.html"
         template = templateEnv.get_template(TEMPLATE_FILE)
diff --git a/utils/test/reporting/functest/scenarioResult.py b/utils/test/reporting/functest/scenarioResult.py
index 743346a..c6c3373 100644 (file)
@@ -9,12 +9,17 @@
 
 
 class ScenarioResult(object):
-    def __init__(self, status, score=0):
+
+    def __init__(self, status, score=0, score_percent=0):
         self.status = status
         self.score = score
+        self.score_percent = score_percent
 
     def getStatus(self):
         return self.status
 
     def getScore(self):
         return self.score
+
+    def getScorePercent(self):
+        return self.score_percent
\ No newline at end of file
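A short usage sketch of the extended class (numbers made up)::

    from scenarioResult import ScenarioResult

    result = ScenarioResult('OK', score='24/28', score_percent=85.7)
    result.getScore()         # '24/28' -> shown verbatim in the status table
    result.getScorePercent()  # 85.7    -> drives the gauge image choice below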
diff --git a/utils/test/reporting/functest/template/index-status-tmpl.html b/utils/test/reporting/functest/template/index-status-tmpl.html
index da2213b..96240de 100644 (file)
                         {% for scenario,iteration in scenario_stats.iteritems() -%}
                             <tr class="tr-ok">
                                 <td>{{scenario}}</td>
-                                <td>{%if scenario_results[scenario].getStatus() is sameas "OK" -%}
-                                                                       <img src="../../img/icon-ok.png"> 
-                                                                       {%- else -%}
-                                                                       <img src="../../img/icon-nok.png">
-                                                                       {%- endif %}</td>
+                                <td>{%if scenario_results[scenario].getScorePercent() < 8.3 -%}
+                                        <img src="../../img/gauge_0.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 16.7 -%}
+                                        <img src="../../img/gauge_8.3.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 25 -%}
+                                        <img src="../../img/gauge_16.7.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 33.3 -%}
+                                        <img src="../../img/gauge_25.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 41.7 -%}
+                                        <img src="../../img/gauge_33.3.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 50 -%}
+                                        <img src="../../img/gauge_41.7.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 58.3 -%}
+                                        <img src="../../img/gauge_50.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 66.7 -%}
+                                        <img src="../../img/gauge_58.3.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 75 -%}
+                                        <img src="../../img/gauge_66.7.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 83.3 -%}
+                                        <img src="../../img/gauge_75.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 91.7 -%}
+                                        <img src="../../img/gauge_83.3.png">
+                                    {%elif scenario_results[scenario].getScorePercent() < 100 -%}
+                                        <img src="../../img/gauge_91.7.png">
+                                    {%- else -%}
+                                        <img src="../../img/gauge_100.png">
+                                {%- endif %}</td>
                                 <td>{{scenario_results[scenario].getScore()}}</td>
                                 <td>{{iteration}}</td>
                             </tr>
                             {% if test.getCriteria() > -1 -%}
                             {{test.getDisplayName() }}
                             {%- endif %}
-                                                       {% if test.getTier() > 3 -%}
+                            {% if test.getTier() > 3 -%}
                             *
                             {%- endif %}
-                             </th>                           
+                             </th>
                             {%- endfor %}
                         </tr>
                         <tr class="tr-weather-weather">
-                            {% for test in items[scenario] -%}                          
+                            {% for test in items[scenario] -%}
                             {% if test.getCriteria() > 2 -%}
                                 <td><img src="../../img/weather-clear.png"></td>
                             {%- elif test.getCriteria() > 1 -%}
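The elif ladder above rounds the score percentage down to the nearest of the thirteen gauge steps (multiples of one twelfth of 100, matching the new gauge_*.png files). A compact Python rendering of the same selection, for illustration::

    GAUGE_STEPS = ['0', '8.3', '16.7', '25', '33.3', '41.7', '50',
                   '58.3', '66.7', '75', '83.3', '91.7', '100']

    def gauge_image(percent):
        # pick the largest step still <= percent, exactly as the ladder does
        for i, threshold in enumerate(GAUGE_STEPS[1:]):
            if percent < float(threshold):
                return '../../img/gauge_{}.png'.format(GAUGE_STEPS[i])
        return '../../img/gauge_100.png'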
diff --git a/utils/test/reporting/yardstick/reporting-status.py b/utils/test/reporting/yardstick/reporting-status.py
index 546bf08..57a9594 100644 (file)
@@ -32,6 +32,14 @@ for version in conf.versions:
     for installer in conf.installers:
         # get scenarios results data
         scenario_results = utils.getScenarioStatus(installer, version)
+        if 'colorado' == version:
+            stable_result = utils.getScenarioStatus(installer, 'stable/colorado')
+            for k,v in stable_result.items():
+                if not scenario_results.has_key(k):
+                    scenario_results[k] = []
+                scenario_results[k] += stable_result[k]
+        for k,v in scenario_results.items():
+            scenario_results[k] = v[0:conf.LASTEST_TESTS]
         scenario_result_criteria = {}
 
         # From each scenarios get results list
@@ -44,10 +52,10 @@ for version in conf.versions:
             scenario_score = 0
 
             for v in s_result:
-                if v['details'] == 'SUCCESS':
+                if v['criteria'] == 'SUCCESS':
                     scenario_score += 1
 
-            if scenario_score == scenario_criteria:
+            if scenario_score == scenario_criteria and scenario_criteria == 4:
                 s_status = 'OK'
                 logger.info(">>>>> scenario OK, save the information")
             else:
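The new block merges results reported under the stable/colorado branch into the colorado bucket, caps each scenario at the newest conf.LASTEST_TESTS runs, and (second hunk) only declares a scenario OK when all of exactly four runs succeeded. A rough standalone sketch of the merge-and-trim, assuming newest-first lists and LASTEST_TESTS == 4::

    results = {'os-nosdn-nofeature-ha': [{'criteria': 'SUCCESS'}] * 3}
    stable = {'os-nosdn-nofeature-ha': [{'criteria': 'FAILED'}],
              'os-odl_l2-nofeature-ha': [{'criteria': 'SUCCESS'}] * 4}

    for scenario, runs in stable.items():
        results.setdefault(scenario, [])
        results[scenario] += runs
    for scenario, runs in results.items():
        results[scenario] = runs[:4]          # keep at most the 4 newest runs

    score = sum(1 for r in results['os-odl_l2-nofeature-ha']
                if r['criteria'] == 'SUCCESS')
    status = 'OK' if score == 4 else 'KO'     # 'OK' here: 4 of 4 succeeded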
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/bottlenecks2Dashboard.py
index 2e106be..f5e3d9a 100755 (executable)
@@ -48,8 +48,8 @@ def format_bottlenecks_for_dashboard(case, results):
     then build the call to the specific method
     """
     if check_bottlenecks_case_exist(case):
-        cmd = "format_" + case + "_for_dashboard(results)"
-        res = eval(cmd)
+        cmd = "format_" + case + "_for_dashboard"
+        res = globals()[cmd](results)
     else:
         res = []
         print "Test cases not declared"
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/dashboard_utils.py
index 121875d..42c6358 100644 (file)
@@ -14,7 +14,6 @@
 #
 # v0.1: basic example
 #
-import os
 import re
 import sys
 from functest2Dashboard import format_functest_for_dashboard, \
@@ -47,8 +46,8 @@ def check_dashboard_ready_project(test_project):
 
 
 def check_dashboard_ready_case(project, case):
-    cmd = "check_" + project + "_case_exist(case)"
-    return eval(cmd)
+    cmd = "check_" + project + "_case_exist"
+    return globals()[cmd](case)
 
 
 def get_dashboard_projects():
@@ -73,6 +72,5 @@ def get_dashboard_result(project, case, results=None):
     # project: project name
     # results: array of raw results pre-filterded
     # according to the parameters of the request
-    cmd = "format_" + project + "_for_dashboard(case,results)"
-    res = eval(cmd)
-    return res
+    cmd = "format_" + project + "_for_dashboard"
+    return globals()[cmd](case, results)
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/doctor2Dashboard.py
index 38b23ab..5b1f190 100644 (file)
@@ -36,8 +36,8 @@ def format_doctor_for_dashboard(case, results):
         # note we add _case because testcase and project had the same name
         # TODO refactoring...looks fine at the beginning wit only 1 project
         # not very ugly now and clearly not optimized...
-        cmd = "format_" + case.replace('-','_') + "_case_for_dashboard(results)"
-        res = eval(cmd)
+        cmd = "format_" + case.replace('-','_') + "_case_for_dashboard"
+        res = globals()[cmd](results)
     else:
         res = []
     return res
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/functest2Dashboard.py
index 86521b9..01697f7 100644 (file)
@@ -34,8 +34,8 @@ def format_functest_for_dashboard(case, results):
     then build the call to the specific method
     """
     if check_functest_case_exist(case):
-        cmd = "format_" + case + "_for_dashboard(results)"
-        res = eval(cmd)
+        cmd = "format_" + case + "_for_dashboard"
+        res = globals()[cmd](results)
     else:
         res = []
         print "Test cases not declared"
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/promise2Dashboard.py
index 84f43a7..c96341f 100644 (file)
@@ -14,9 +14,6 @@
 # a new method format_<Test_case>_for_dashboard(results)
 # v0.1: basic example with methods for odl, Tempest, Rally and vPing
 #
-import re
-import datetime
-
 
 def get_promise_cases():
     """
@@ -36,8 +33,8 @@ def format_promise_for_dashboard(case, results):
         # note we add _case because testcase and project had the same name
         # TODO refactoring...looks fine at the beginning wit only 1 project
         # not very ugly now and clearly not optimized...
-        cmd = "format_" + case + "_case_for_dashboard(results)"
-        res = eval(cmd)
+        cmd = "format_" + case + "_case_for_dashboard"
+        res = globals()[cmd](results)
     else:
         res = []
         print "Test cases not declared"
diff --git a/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py b/utils/test/result_collection_api/opnfv_testapi/dashboard/yardstick2Dashboard.py
index 4f022d5..4df4b50 100644 (file)
@@ -16,7 +16,6 @@
 #       Fio, Lmbench, Perf, Cyclictest.
 #
 
-
 def get_yardstick_cases():
     """
     get the list of the supported test cases
@@ -33,8 +32,8 @@ def format_yardstick_for_dashboard(case, results):
     then build the call to the specific method
     """
     if check_yardstick_case_exist(case):
-        cmd = "format_" + case + "_for_dashboard(results)"
-        res = eval(cmd)
+        cmd = "format_" + case + "_for_dashboard"
+        res = globals()[cmd](results)
     else:
         res = []
         print "Test cases not declared"
diff --git a/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py b/utils/test/result_collection_api/opnfv_testapi/resources/handlers.py
index f98c35e..5059f5d 100644 (file)
@@ -23,8 +23,8 @@
 import json
 from datetime import datetime
 
-from tornado.web import RequestHandler, asynchronous, HTTPError
 from tornado import gen
+from tornado.web import RequestHandler, asynchronous, HTTPError
 
 from models import CreateResponse
 from opnfv_testapi.common.constants import DEFAULT_REPRESENTATION, \
@@ -217,7 +217,8 @@ class GenericApiHandler(RequestHandler):
         return equal, query
 
     def _eval_db(self, table, method, *args, **kwargs):
-        return eval('self.db.%s.%s(*args, **kwargs)' % (table, method))
+        exec_collection = self.db.__getattr__(table)
+        return exec_collection.__getattribute__(method)(*args, **kwargs)
 
     def _eval_db_find_one(self, query, table=None):
         if table is None:
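Same eval removal on the database path: pymongo's Database and Collection both implement __getattr__, so db.__getattr__(table) is exactly what db.<table> does under the hood, and the rewrite is behavior-preserving while dropping string evaluation. A sketch with stand-in classes::

    class FakeCollection(object):
        def find_one(self, query):
            return {'matched': query}

    class FakeDb(object):
        pods = FakeCollection()

    db, table, method = FakeDb(), 'pods', 'find_one'
    collection = getattr(db, table)   # pymongo reaches the same via __getattr__
    getattr(collection, method)({'name': 'lf-pod1'})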
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/fake_pymongo.py
index 4509692..3dd87e6 100644 (file)
@@ -181,6 +181,10 @@ class MemDb(object):
                 self._check_keys(doc.get(key))
 
 
+def __getattr__(name):
+    return globals()[name]
+
+
 pods = MemDb()
 projects = MemDb()
 testcases = MemDb()
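The fake needs this hook because handlers.py now calls self.db.__getattr__(table) and, in the unit tests, 'self.db' is this module itself; a plain module has no such method, so one is defined that resolves the table name among the module globals (pods, projects, testcases, ...). In effect::

    import fake_pymongo

    # an explicit call to the module-level function, not attribute magic
    table = fake_pymongo.__getattr__('pods')
    assert table is fake_pymongo.pods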
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_dashboard.py
index 8f729c0..27ec763 100644 (file)
@@ -8,9 +8,10 @@
 ##############################################################################
 import unittest
 
-from test_result import TestResultBase
 from opnfv_testapi.common.constants import HTTP_NOT_FOUND, HTTP_OK
 
+from test_result import TestResultBase
+
 
 class TestDashboardBase(TestResultBase):
     def setUp(self):
@@ -63,7 +64,7 @@ class TestDashboardQuery(TestDashboardBase):
             if k == 'self' or k == 'uri':
                 continue
             if v is None:
-                v = eval('self.' + k)
+                v = self.__getattribute__(k)
             if v != 'missing':
                 uri += '{}={}&'.format(k, v)
         uri += 'pod={}&'.format(self.pod)
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_fake_pymongo.py
index 9a1253e..5f50ba8 100644 (file)
@@ -115,7 +115,8 @@ class MyTest(AsyncHTTPTestCase):
             self.assertEqual(name_error, error)
 
     def _eval_pods_db(self, method, *args, **kwargs):
-        return eval('self.db.pods.%s(*args, **kwargs)' % method)
+        table_obj = vars(self.db)['pods']
+        return table_obj.__getattribute__(method)(*args, **kwargs)
 
 
 if __name__ == '__main__':
diff --git a/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py b/utils/test/result_collection_api/opnfv_testapi/tests/unit/test_result.py
index eee06c6..8479b35 100644 (file)
@@ -305,7 +305,7 @@ class TestResultGet(TestResultBase):
 
     def _set_query(self, *args):
         def get_value(arg):
-            return eval('self.' + arg) \
+            return self.__getattribute__(arg) \
                 if arg != 'trust_indicator' else self.trust_indicator.current
         uri = ''
         for arg in args:
diff --git a/utils/test/result_collection_api/update/templates/changes_in_mongodb.py b/utils/test/result_collection_api/update/templates/changes_in_mongodb.py
index 9744dd9..1a4d5a1 100644 (file)
@@ -45,6 +45,7 @@ docs_old2New = {
     #     ({'case_name': 'ovno'}, {'case_name': 'ocl'})
     # ]
     'results': [
-        ({'trust_indicator': 0}, {'trust_indicator': {'current': 0, 'histories': []}})
+        ({'trust_indicator': 0},
+         {'trust_indicator': {'current': 0, 'histories': []}})
     ]
 }
diff --git a/utils/test/result_collection_api/update/templates/update_mongodb.py b/utils/test/result_collection_api/update/templates/update_mongodb.py
index b1e378d..ba4334a 100644 (file)
@@ -10,7 +10,8 @@ import argparse
 
 from pymongo import MongoClient
 
-from changes_in_mongodb import collections_old2New, fields_old2New, docs_old2New
+from changes_in_mongodb import collections_old2New, \
+    fields_old2New, docs_old2New
 from utils import main, parse_mongodb_url
 
 parser = argparse.ArgumentParser(description='Update MongoDBs')
@@ -54,11 +55,13 @@ def change_docs(a_dict):
 
 
 def eval_db(method, *args, **kwargs):
-    return eval('db.%s(*args, **kwargs)' % method)
+    exec_db = db.__getattribute__(method)
+    return exec_db(*args, **kwargs)
 
 
 def eval_collection(collection, method, *args, **kwargs):
-    return eval('db.%s.%s(*args, **kwargs)' % (collection, method))
+    exec_collection = db.__getattr__(collection)
+    return exec_collection.__getattribute__(method)(*args, **kwargs)
 
 
 def collection_update(a_dict, operator):
diff --git a/utils/test/scripts/conf_utils.py b/utils/test/scripts/conf_utils.py
new file mode 100644 (file)
index 0000000..e35d5ed
--- /dev/null
@@ -0,0 +1,20 @@
+import yaml
+
+
+with open('./testcases.yaml') as f:
+    testcases_yaml = yaml.safe_load(f)
+f.close()
+
+
+def get_format(project, case):
+    testcases = testcases_yaml.get(project)
+    if isinstance(testcases, list):
+        for case_dict in testcases:
+            if case_dict['name'] == case:
+                return 'format_' + case_dict['format'].strip()
+    return None
+
+
+if __name__ == '__main__':
+    fmt = get_format('functest', 'vping_ssh')
+    print fmt
\ No newline at end of file
diff --git a/utils/test/scripts/create_kibana_dashboards.py b/utils/test/scripts/create_kibana_dashboards.py
index abb9471..efa6e17 100644 (file)
@@ -1,18 +1,14 @@
 #! /usr/bin/env python
 import json
-import logging
 import urlparse
 
 import argparse
-import yaml
 
+import conf_utils
+import logger_utils
 import shared_utils
 
-logger = logging.getLogger('create_kibana_dashboards')
-logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('./{}.log'.format('create_kibana_dashboards'))
-file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
-logger.addHandler(file_handler)
+logger = logger_utils.KibanaDashboardLogger('elastic2kibana').get
 
 _installers = {'fuel', 'apex', 'compass', 'joid'}
 
@@ -277,7 +273,7 @@ def _get_pods_and_scenarios(project_name, case_name, installer):
         }
     })
 
-    elastic_data = shared_utils.get_elastic_data(urlparse.urljoin(base_elastic_url, '/test_results/mongo2elastic'),
+    elastic_data = shared_utils.get_elastic_docs(urlparse.urljoin(base_elastic_url, '/test_results/mongo2elastic'),
                                                  es_creds, query_json)
 
     pods_and_scenarios = {}
@@ -307,10 +303,7 @@ def construct_dashboards():
     :return: list of KibanaDashboards
     """
     kibana_dashboards = []
-    with open('./testcases.yaml') as f:
-        testcases_yaml = yaml.safe_load(f)
-
-    for project, case_dicts in testcases_yaml.items():
+    for project, case_dicts in conf_utils.testcases_yaml.items():
         for case in case_dicts:
             case_name = case.get('name')
             visualizations = case.get('visualizations')
diff --git a/utils/test/scripts/kibana_cleanup.py b/utils/test/scripts/kibana_cleanup.py
index e8d452a..e699db4 100644 (file)
@@ -13,7 +13,7 @@ logger.addHandler(file_handler)
 
 
 def delete_all(url, es_creds):
-    ids = shared_utils.get_elastic_data(url, es_creds, body=None, field='_id')
+    ids = shared_utils.get_elastic_docs(url, es_creds, body=None, field='_id')
     for id in ids:
         del_url = '/'.join([url, id])
         shared_utils.delete_request(del_url, es_creds)
diff --git a/utils/test/scripts/logger_utils.py b/utils/test/scripts/logger_utils.py
new file mode 100644 (file)
index 0000000..25d28a5
--- /dev/null
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+#
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Logging levels:
+#  Level     Numeric value
+#  CRITICAL  50
+#  ERROR     40
+#  WARNING   30
+#  INFO      20
+#  DEBUG     10
+#  NOTSET    0
+#
+# Usage:
+#  import functest_logger as fl
+#  logger = fl.Logger("script_name").getLogger()
+#  logger.info("message to be shown with - INFO - ")
+#  logger.debug("message to be shown with - DEBUG -")
+
+import logging
+import os
+
+
+class Logger(object):
+    file_path = '/var/log'
+    formatter = logging.Formatter('%(asctime)s - %(name)s - '
+                                  '%(levelname)s - %(message)s')
+
+    def __init__(self, logger_name):
+
+        IF_DEBUG = os.getenv('IF_DEBUG')
+
+        self.logger_name = logger_name
+        self.logger = logging.getLogger(logger_name)
+        self.logger.propagate = 0
+        self.logger.setLevel(logging.DEBUG)
+
+        ch = logging.StreamHandler()
+        ch.setFormatter(self.formatter)
+        if IF_DEBUG is not None and IF_DEBUG.lower() == "true":
+            ch.setLevel(logging.DEBUG)
+        else:
+            ch.setLevel(logging.INFO)
+        self.logger.addHandler(ch)
+
+        hdlr = logging.FileHandler('%s/%s.log' % (self.file_path, logger_name))
+        hdlr.setFormatter(self.formatter)
+        hdlr.setLevel(logging.DEBUG)
+        self.logger.addHandler(hdlr)
+
+    @property
+    def get(self):
+        return self.logger
+
+
+class KibanaDashboardLogger(Logger):
+    file_path = '/var/log/kibana_dashboard'
+
+    def __init__(self, logger_name):
+        super(KibanaDashboardLogger, self).__init__(logger_name)
+
diff --git a/utils/test/scripts/mongo2elastic_format.py b/utils/test/scripts/mongo2elastic_format.py
new file mode 100644 (file)
index 0000000..0b036e3
--- /dev/null
@@ -0,0 +1,179 @@
+#! /usr/bin/env python
+
+
+def _convert_value(value):
+    return value if value != '' else 0
+
+
+def _convert_duration(duration):
+    if (isinstance(duration, str) or isinstance(duration, unicode)) and ':' in duration:
+        hours, minutes, seconds = duration.split(":")
+        hours = _convert_value(hours)
+        minutes = _convert_value(minutes)
+        seconds = _convert_value(seconds)
+        int_duration = 3600 * int(hours) + 60 * int(minutes) + float(seconds)
+    else:
+        int_duration = duration
+    return int_duration
+
+
+def format_normal(testcase):
+    """
+    Look for these and leave any of those:
+        details.duration
+        details.tests
+        details.failures
+
+    If none are present, then return False
+    """
+    found = False
+    testcase_details = testcase['details']
+    fields = ['duration', 'tests', 'failures']
+    if isinstance(testcase_details, dict):
+        for key, value in testcase_details.items():
+            if key in fields:
+                found = True
+                if key == 'duration':
+                    testcase_details[key] = _convert_duration(value)
+            else:
+                del testcase_details[key]
+
+    if 'tests' in testcase_details and 'failures' in testcase_details:
+        testcase_tests = float(testcase_details['tests'])
+        testcase_failures = float(testcase_details['failures'])
+        if testcase_tests != 0:
+            testcase_details['success_percentage'] = 100 * (testcase_tests - testcase_failures) / testcase_tests
+        else:
+            testcase_details['success_percentage'] = 0
+
+
+    return found
+
+
+def format_rally(testcase):
+    """
+    Structure:
+        details.[{summary.duration}]
+        details.[{summary.nb success}]
+        details.[{summary.nb tests}]
+
+    Find data for these fields
+        -> details.duration
+        -> details.tests
+        -> details.success_percentage
+    """
+    summary = testcase['details']['summary']
+
+    testcase['details'] = {
+        'duration': summary['duration'],
+        'tests': summary['nb tests'],
+        'success_percentage': summary['nb success']
+    }
+    return True
+
+
+def _get_statistics(orig_data, stat_fields, stat_values=None):
+    test_results = {}
+    for stat_data in orig_data:
+        for field in stat_fields:
+            stat_value = stat_data[field]
+            if stat_value in test_results:
+                test_results[stat_value] += 1
+            else:
+                test_results[stat_value] = 1
+
+    if stat_values is not None:
+        for stat_value in stat_values:
+            if stat_value not in test_results:
+                test_results[stat_value] = 0
+
+    return test_results
+
+
+def format_onos(testcase):
+    """
+    Structure:
+        details.FUNCvirNet.duration
+        details.FUNCvirNet.status.[{Case result}]
+        details.FUNCvirNetL3.duration
+        details.FUNCvirNetL3.status.[{Case result}]
+
+    Find data for these fields
+        -> details.FUNCvirNet.duration
+        -> details.FUNCvirNet.tests
+        -> details.FUNCvirNet.failures
+        -> details.FUNCvirNetL3.duration
+        -> details.FUNCvirNetL3.tests
+        -> details.FUNCvirNetL3.failures
+    """
+    testcase_details = testcase['details']
+
+    if 'FUNCvirNet' not in testcase_details or 'FUNCvirNetL3' not in testcase_details:
+        return False
+
+    funcvirnet_details = testcase_details['FUNCvirNet']['status']
+    funcvirnet_stats = _get_statistics(funcvirnet_details, ('Case result',), ('PASS', 'FAIL'))
+    funcvirnet_passed = funcvirnet_stats['PASS']
+    funcvirnet_failed = funcvirnet_stats['FAIL']
+    funcvirnet_all = funcvirnet_passed + funcvirnet_failed
+
+    funcvirnetl3_details = testcase_details['FUNCvirNetL3']['status']
+    funcvirnetl3_stats = _get_statistics(funcvirnetl3_details, ('Case result',), ('PASS', 'FAIL'))
+    funcvirnetl3_passed = funcvirnetl3_stats['PASS']
+    funcvirnetl3_failed = funcvirnetl3_stats['FAIL']
+    funcvirnetl3_all = funcvirnetl3_passed + funcvirnetl3_failed
+
+    testcase_details['FUNCvirNet'] = {
+        'duration': _convert_duration(testcase_details['FUNCvirNet']['duration']),
+        'tests': funcvirnet_all,
+        'failures': funcvirnet_failed
+    }
+    testcase_details['FUNCvirNetL3'] = {
+        'duration': _convert_duration(testcase_details['FUNCvirNetL3']['duration']),
+        'tests': funcvirnetl3_all,
+        'failures': funcvirnetl3_failed
+    }
+    return True
+
+
+def format_vims(testcase):
+    """
+    Structure:
+        details.sig_test.result.[{result}]
+        details.sig_test.duration
+        details.vIMS.duration
+        details.orchestrator.duration
+
+    Find data for these fields
+        -> details.sig_test.duration
+        -> details.sig_test.tests
+        -> details.sig_test.failures
+        -> details.sig_test.passed
+        -> details.sig_test.skipped
+        -> details.vIMS.duration
+        -> details.orchestrator.duration
+    """
+    testcase_details = testcase['details']
+    test_results = _get_statistics(testcase_details['sig_test']['result'],
+                                   ('result',),
+                                   ('Passed', 'Skipped', 'Failed'))
+    passed = test_results['Passed']
+    skipped = test_results['Skipped']
+    failures = test_results['Failed']
+    all_tests = passed + skipped + failures
+    testcase['details'] = {
+        'sig_test': {
+            'duration': testcase_details['sig_test']['duration'],
+            'tests': all_tests,
+            'failures': failures,
+            'passed': passed,
+            'skipped': skipped
+        },
+        'vIMS': {
+            'duration': testcase_details['vIMS']['duration']
+        },
+        'orchestrator': {
+            'duration': testcase_details['orchestrator']['duration']
+        }
+    }
+    return True
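All formatters funnel durations through _convert_duration so elasticsearch indexes a number rather than an 'H:MM:SS' string; a few illustrative values::

    _convert_duration('1:02:03.5')   # -> 3723.5 (1h + 2min + 3.5s)
    _convert_duration(':10:')        # -> 600.0  (empty fields count as 0)
    _convert_duration(42)            # -> 42     (non-string passed through)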
diff --git a/utils/test/scripts/mongo_to_elasticsearch.py b/utils/test/scripts/mongo_to_elasticsearch.py
index ded58ef..b722793 100644 (file)
@@ -1,7 +1,6 @@
 #! /usr/bin/env python
 import datetime
 import json
-import logging
 import os
 import subprocess
 import traceback
@@ -10,266 +9,33 @@ import uuid
 
 import argparse
 
+import conf_utils
+import logger_utils
+import mongo2elastic_format
 import shared_utils
 
-logger = logging.getLogger('mongo_to_elasticsearch')
-logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('/var/log/{}.log'.format('mongo_to_elasticsearch'))
-file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
-logger.addHandler(file_handler)
+logger = logger_utils.KibanaDashboardLogger('mongo2elastic').get
 
+parser = argparse.ArgumentParser(description='Modify and filter mongo json data for elasticsearch')
+parser.add_argument('-od', '--output-destination',
+                    default='elasticsearch',
+                    choices=('elasticsearch', 'stdout'),
+                    help='defaults to elasticsearch')
 
-def _get_dicts_from_list(testcase, dict_list, keys):
-    dicts = []
-    for dictionary in dict_list:
-        # iterate over dictionaries in input list
-        if not isinstance(dictionary, dict):
-            logger.info("Skipping non-dict details testcase '{}'".format(testcase))
-            continue
-        if keys == set(dictionary.keys()):
-            # check the dictionary structure
-            dicts.append(dictionary)
-    return dicts
+parser.add_argument('-ml', '--merge-latest', default=0, type=int, metavar='N',
+                    help='get entries old at most N days from mongodb and'
+                         ' parse those that are not already in elasticsearch.'
+                         ' If not present, will get everything from mongodb, which is the default')
 
+parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
+                    help='the url of elasticsearch, defaults to http://localhost:9200')
 
-def _get_results_from_list_of_dicts(list_of_dict_statuses, dict_indexes, expected_results=None):
-    test_results = {}
-    for test_status in list_of_dict_statuses:
-        status = test_status
-        for index in dict_indexes:
-            status = status[index]
-        if status in test_results:
-            test_results[status] += 1
-        else:
-            test_results[status] = 1
-
-    if expected_results is not None:
-        for expected_result in expected_results:
-            if expected_result not in test_results:
-                test_results[expected_result] = 0
-
-    return test_results
-
-
-def _convert_value(value):
-    return value if value != '' else 0
-
-
-def _convert_duration(duration):
-    if (isinstance(duration, str) or isinstance(duration, unicode)) and ':' in duration:
-        hours, minutes, seconds = duration.split(":")
-        hours = _convert_value(hours)
-        minutes = _convert_value(minutes)
-        seconds = _convert_value(seconds)
-        int_duration = 3600 * int(hours) + 60 * int(minutes) + float(seconds)
-    else:
-        int_duration = duration
-    return int_duration
-
-
-def modify_functest_tempest(testcase):
-    if modify_default_entry(testcase):
-        testcase_details = testcase['details']
-        testcase_tests = float(testcase_details['tests'])
-        testcase_failures = float(testcase_details['failures'])
-        if testcase_tests != 0:
-            testcase_details['success_percentage'] = 100 * (testcase_tests - testcase_failures) / testcase_tests
-        else:
-            testcase_details['success_percentage'] = 0
-        return True
-    else:
-        return False
-
-
-def modify_functest_vims(testcase):
-    """
-    Structure:
-        details.sig_test.result.[{result}]
-        details.sig_test.duration
-        details.vIMS.duration
-        details.orchestrator.duration
-
-    Find data for these fields
-        -> details.sig_test.duration
-        -> details.sig_test.tests
-        -> details.sig_test.failures
-        -> details.sig_test.passed
-        -> details.sig_test.skipped
-        -> details.vIMS.duration
-        -> details.orchestrator.duration
-    """
-    testcase_details = testcase['details']
-    sig_test_results = _get_dicts_from_list(testcase, testcase_details['sig_test']['result'],
-                                            {'duration', 'result', 'name', 'error'})
-    if len(sig_test_results) < 1:
-        logger.info("No 'result' from 'sig_test' found in vIMS details, skipping")
-        return False
-    else:
-        test_results = _get_results_from_list_of_dicts(sig_test_results, ('result',), ('Passed', 'Skipped', 'Failed'))
-        passed = test_results['Passed']
-        skipped = test_results['Skipped']
-        failures = test_results['Failed']
-        all_tests = passed + skipped + failures
-        testcase['details'] = {
-            'sig_test': {
-                'duration': testcase_details['sig_test']['duration'],
-                'tests': all_tests,
-                'failures': failures,
-                'passed': passed,
-                'skipped': skipped
-            },
-            'vIMS': {
-                'duration': testcase_details['vIMS']['duration']
-            },
-            'orchestrator': {
-                'duration': testcase_details['orchestrator']['duration']
-            }
-        }
-        return True
-
-
-def modify_functest_onos(testcase):
-    """
-    Structure:
-        details.FUNCvirNet.duration
-        details.FUNCvirNet.status.[{Case result}]
-        details.FUNCvirNetL3.duration
-        details.FUNCvirNetL3.status.[{Case result}]
-
-    Find data for these fields
-        -> details.FUNCvirNet.duration
-        -> details.FUNCvirNet.tests
-        -> details.FUNCvirNet.failures
-        -> details.FUNCvirNetL3.duration
-        -> details.FUNCvirNetL3.tests
-        -> details.FUNCvirNetL3.failures
-    """
-    testcase_details = testcase['details']
-
-    if 'FUNCvirNet' not in testcase_details:
-        return modify_default_entry(testcase)
-
-    funcvirnet_details = testcase_details['FUNCvirNet']['status']
-    funcvirnet_statuses = _get_dicts_from_list(testcase, funcvirnet_details, {'Case result', 'Case name:'})
-
-    funcvirnetl3_details = testcase_details['FUNCvirNetL3']['status']
-    funcvirnetl3_statuses = _get_dicts_from_list(testcase, funcvirnetl3_details, {'Case result', 'Case name:'})
-
-    if len(funcvirnet_statuses) < 0:
-        logger.info("No results found in 'FUNCvirNet' part of ONOS results")
-        return False
-    elif len(funcvirnetl3_statuses) < 0:
-        logger.info("No results found in 'FUNCvirNetL3' part of ONOS results")
-        return False
-    else:
-        funcvirnet_results = _get_results_from_list_of_dicts(funcvirnet_statuses,
-                                                             ('Case result',), ('PASS', 'FAIL'))
-        funcvirnetl3_results = _get_results_from_list_of_dicts(funcvirnetl3_statuses,
-                                                               ('Case result',), ('PASS', 'FAIL'))
-
-        funcvirnet_passed = funcvirnet_results['PASS']
-        funcvirnet_failed = funcvirnet_results['FAIL']
-        funcvirnet_all = funcvirnet_passed + funcvirnet_failed
-
-        funcvirnetl3_passed = funcvirnetl3_results['PASS']
-        funcvirnetl3_failed = funcvirnetl3_results['FAIL']
-        funcvirnetl3_all = funcvirnetl3_passed + funcvirnetl3_failed
-
-        testcase_details['FUNCvirNet'] = {
-            'duration': _convert_duration(testcase_details['FUNCvirNet']['duration']),
-            'tests': funcvirnet_all,
-            'failures': funcvirnet_failed
-        }
-
-        testcase_details['FUNCvirNetL3'] = {
-            'duration': _convert_duration(testcase_details['FUNCvirNetL3']['duration']),
-            'tests': funcvirnetl3_all,
-            'failures': funcvirnetl3_failed
-        }
-
-        return True
-
-
-def modify_functest_rally(testcase):
-    """
-    Structure:
-        details.[{summary.duration}]
-        details.[{summary.nb success}]
-        details.[{summary.nb tests}]
-
-    Find data for these fields
-        -> details.duration
-        -> details.tests
-        -> details.success_percentage
-    """
-    summaries = _get_dicts_from_list(testcase, testcase['details'], {'summary'})
-
-    if len(summaries) != 1:
-        logger.info("Found zero or more than one 'summaries' in Rally details, skipping")
-        return False
-    else:
-        summary = summaries[0]['summary']
-        testcase['details'] = {
-            'duration': summary['duration'],
-            'tests': summary['nb tests'],
-            'success_percentage': summary['nb success']
-        }
-        return True
+parser.add_argument('-u', '--elasticsearch-username', default=None,
+                    help='The username with password for elasticsearch in format username:password')
 
+args = parser.parse_args()
 
-def modify_functest_odl(testcase):
-    """
-    Structure:
-        details.details.[{test_status.@status}]
-
-    Find data for these fields
-        -> details.tests
-        -> details.failures
-        -> details.success_percentage?
-    """
-    test_statuses = _get_dicts_from_list(testcase, testcase['details']['details'],
-                                         {'test_status', 'test_doc', 'test_name'})
-    if len(test_statuses) < 1:
-        logger.info("No 'test_status' found in ODL details, skipping")
-        return False
-    else:
-        test_results = _get_results_from_list_of_dicts(test_statuses, ('test_status', '@status'), ('PASS', 'FAIL'))
-
-        passed_tests = test_results['PASS']
-        failed_tests = test_results['FAIL']
-        all_tests = passed_tests + failed_tests
-
-        testcase['details'] = {
-            'tests': all_tests,
-            'failures': failed_tests,
-            'success_percentage': 100 * passed_tests / float(all_tests)
-        }
-        logger.debug("Modified odl testcase: '{}'".format(json.dumps(testcase, indent=2)))
-        return True
-
-
-def modify_default_entry(testcase):
-    """
-    Look for these and leave any of those:
-        details.duration
-        details.tests
-        details.failures
-
-    If none are present, then return False
-    """
-    found = False
-    testcase_details = testcase['details']
-    fields = ['duration', 'tests', 'failures']
-    if isinstance(testcase_details, dict):
-        for key, value in testcase_details.items():
-            if key in fields:
-                found = True
-                if key == 'duration':
-                    testcase_details[key] = _convert_duration(value)
-            else:
-                del testcase_details[key]
-
-    return found
+tmp_docs_file = './mongo-{}.json'.format(uuid.uuid4())
 
 
 def _fix_date(date_string):
@@ -279,7 +45,7 @@ def _fix_date(date_string):
         return date_string[:-3].replace(' ', 'T') + 'Z'
 
 
-def verify_mongo_entry(testcase):
+def verify_document(testcase):
     """
     Mandatory fields:
         installer
@@ -364,124 +130,70 @@ def verify_mongo_entry(testcase):
         return True
 
 
-def modify_mongo_entry(testcase):
+def format_document(testcase):
     # 1. verify and identify the testcase
     # 2. if modification is implemented, then use that
     # 3. if not, try to use default
     # 4. if 2 or 3 is successful, return True, otherwise return False
-    if verify_mongo_entry(testcase):
+    if verify_document(testcase):
         project = testcase['project_name']
         case_name = testcase['case_name']
-        logger.info("Processing mongo test case '{}'".format(case_name))
-        try:
-            if project == 'functest':
-                if case_name == 'rally_sanity':
-                    return modify_functest_rally(testcase)
-                elif case_name.lower() == 'odl':
-                    return modify_functest_odl(testcase)
-                elif case_name.lower() == 'onos':
-                    return modify_functest_onos(testcase)
-                elif case_name.lower() == 'vims':
-                    return modify_functest_vims(testcase)
-                elif case_name == 'tempest_smoke_serial':
-                    return modify_functest_tempest(testcase)
-            return modify_default_entry(testcase)
-        except Exception:
-            logger.error("Fail in modify testcase[%s]\nerror message: %s" % (testcase, traceback.format_exc()))
+        fmt = conf_utils.get_format(project, case_name)
+        if fmt:
+            try:
+                logger.info("Processing %s/%s using format %s" % (project, case_name, fmt))
+                return vars(mongo2elastic_format)[fmt](testcase)
+            except Exception:
+                logger.error("Fail in format testcase[%s]\nerror message: %s" % (testcase, traceback.format_exc()))
+                return False
     else:
         return False
 
 
-def publish_mongo_data(output_destination):
-    tmp_filename = 'mongo-{}.log'.format(uuid.uuid4())
-    try:
-        subprocess.check_call(['mongoexport', '--db', 'test_results_collection', '-c', 'results', '--out',
-                               tmp_filename])
-        with open(tmp_filename) as fobj:
-            for mongo_json_line in fobj:
-                test_result = json.loads(mongo_json_line)
-                if modify_mongo_entry(test_result):
-                    status, data = shared_utils.publish_json(test_result, es_creds, output_destination)
-                    if status > 300:
-                        project = test_result['project_name']
-                        case_name = test_result['case_name']
-                        logger.info('project {} case {} publish failed, due to [{}]'
-                                    .format(project, case_name, json.loads(data)['error']['reason']))
-    finally:
-        if os.path.exists(tmp_filename):
-            os.remove(tmp_filename)
-
-
-def get_mongo_data(days):
-    past_time = datetime.datetime.today() - datetime.timedelta(days=days)
-    mongo_json_lines = subprocess.check_output(['mongoexport', '--db', 'test_results_collection', '-c', 'results',
-                                                '--query', '{{"start_date":{{$gt:"{}"}}}}'
-                                               .format(past_time)]).splitlines()
+def export_documents(days):
+    cmd = ['mongoexport', '--db', 'test_results_collection', '-c', 'results']
+    if days > 0:
+        past_time = datetime.datetime.today() - datetime.timedelta(days=days)
+        cmd += ['--query', '{{"start_date":{{$gt:"{}"}}}}'.format(past_time)]
+    cmd += ['--out', tmp_docs_file]
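+    # e.g. for days=3 the assembled command is roughly:
+    #   mongoexport --db test_results_collection -c results \
+    #     --query '{"start_date":{$gt:"<today minus 3 days>"}}' --out ./mongo-<uuid>.json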
 
-    mongo_data = []
-    for mongo_json_line in mongo_json_lines:
-        test_result = json.loads(mongo_json_line)
-        if modify_mongo_entry(test_result):
-            # if the modification could be applied, append the modified result
-            mongo_data.append(test_result)
-    return mongo_data
+    try:
+        subprocess.check_call(cmd)
+    except Exception as err:
+        logger.error("mongoexport failed: %s" % err)
+        exit(-1)
 
 
-def publish_difference(mongo_data, elastic_data, output_destination, es_creds):
-    for elastic_entry in elastic_data:
-        if elastic_entry in mongo_data:
-            mongo_data.remove(elastic_entry)
+def publish_document(document, es_creds, to):
+    status, data = shared_utils.publish_json(document, es_creds, to)
+    if status > 300:
+        logger.error('Publish record [{}] failed due to [{}]'
+                     .format(document, json.loads(data)['error']['reason']))
 
-    logger.info('number of parsed test results: {}'.format(len(mongo_data)))
 
-    for parsed_test_result in mongo_data:
-        shared_utils.publish_json(parsed_test_result, es_creds, output_destination)
+def publish_nonexist_documents(elastic_docs, es_creds, to):
+    try:
+        with open(tmp_docs_file) as fdocs:
+            for doc_line in fdocs:
+                doc = json.loads(doc_line)
+                if format_document(doc) and doc not in elastic_docs:
+                    publish_document(doc, es_creds, to)
+    finally:
+        if os.path.exists(tmp_docs_file):
+            os.remove(tmp_docs_file)
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Modify and filter mongo json data for elasticsearch')
-    parser.add_argument('-od', '--output-destination',
-                        default='elasticsearch',
-                        choices=('elasticsearch', 'stdout'),
-                        help='defaults to elasticsearch')
-
-    parser.add_argument('-ml', '--merge-latest', default=0, type=int, metavar='N',
-                        help='get entries old at most N days from mongodb and'
-                             ' parse those that are not already in elasticsearch.'
-                             ' If not present, will get everything from mongodb, which is the default')
-
-    parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
-                        help='the url of elasticsearch, defaults to http://localhost:9200')
-
-    parser.add_argument('-u', '--elasticsearch-username', default=None,
-                        help='The username with password for elasticsearch in format username:password')
-
-    args = parser.parse_args()
     base_elastic_url = urlparse.urljoin(args.elasticsearch_url, '/test_results/mongo2elastic')
-    output_destination = args.output_destination
+    to = args.output_destination
     days = args.merge_latest
     es_creds = args.elasticsearch_username
 
-    if output_destination == 'elasticsearch':
-        output_destination = base_elastic_url
-
-    # parsed_test_results will be printed/sent to elasticsearch
-    if days == 0:
-        publish_mongo_data(output_destination)
-    elif days > 0:
-        body = '''{{
-    "query" : {{
-        "range" : {{
-            "start_date" : {{
-                "gte" : "now-{}d"
-            }}
-        }}
-    }}
-}}'''.format(days)
-        elastic_data = shared_utils.get_elastic_data(base_elastic_url, es_creds, body)
-        logger.info('number of hits in elasticsearch for now-{}d: {}'.format(days, len(elastic_data)))
-        mongo_data = get_mongo_data(days)
-        publish_difference(mongo_data, elastic_data, output_destination, es_creds)
-    else:
-        raise Exception('Update must be non-negative')
+    if to == 'elasticsearch':
+        to = base_elastic_url
 
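+    # dump mongo to a temp file, fetch what elasticsearch already holds for
+    # the same window, then publish only the documents that are missing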
+    export_documents(days)
+    elastic_docs = shared_utils.get_elastic_docs_by_days(base_elastic_url, es_creds, days)
+    logger.info('number of hits in elasticsearch for now-{}d: {}'.format(days, len(elastic_docs)))
+    publish_nonexist_documents(elastic_docs, es_creds, to)
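+    # e.g. running with '-ml 7 -u user:password' publishes only the last
+    # 7 days of documents that elasticsearch does not already have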
index 8bbbdbe..aa8a65d 100644 (file)
@@ -1,5 +1,7 @@
-import urllib3
 import json
+
+import urllib3
+
 http = urllib3.PoolManager()
 
 
@@ -8,14 +10,14 @@ def delete_request(url, creds, body=None):
     http.request('DELETE', url, headers=headers, body=body)
 
 
-def publish_json(json_ojb, creds, output_destination):
+def publish_json(json_ojb, creds, to):
     json_dump = json.dumps(json_ojb)
-    if output_destination == 'stdout':
+    if to == 'stdout':
         print json_dump
         return 200, None
     else:
         headers = urllib3.make_headers(basic_auth=creds)
-        result = http.request('POST', output_destination, headers=headers, body=json_dump)
+        result = http.request('POST', to, headers=headers, body=json_dump)
         return result.status, result.data
 
 
@@ -23,17 +25,39 @@ def _get_nr_of_hits(elastic_json):
     return elastic_json['hits']['total']
 
 
-def get_elastic_data(elastic_url, creds, body, field='_source'):
+def get_elastic_docs(elastic_url, creds, body=None, field='_source'):
     # 1. get the number of results
     headers = urllib3.make_headers(basic_auth=creds)
     elastic_json = json.loads(http.request('GET', elastic_url + '/_search?size=0', headers=headers, body=body).data)
     nr_of_hits = _get_nr_of_hits(elastic_json)
 
     # 2. get all results
     elastic_json = json.loads(http.request('GET', elastic_url + '/_search?size={}'.format(nr_of_hits), headers=headers, body=body).data)
 
-    elastic_data = []
+    elastic_docs = []
     for hit in elastic_json['hits']['hits']:
-        elastic_data.append(hit[field])
-    return elastic_data
-
+        elastic_docs.append(hit[field])
+    return elastic_docs
+
+
+def get_elastic_docs_by_days(elastic_url, creds, days):
+    if days == 0:
+        body = '''{
+            "query": {
+                "match_all": {}
+            }
+        }'''
+    elif days > 0:
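+        # doubled braces are escaped literal braces for str.format()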
+        body = '''{{
+            "query" : {{
+                "range" : {{
+                    "start_date" : {{
+                        "gte" : "now-{}d"
+                    }}
+                }}
+            }}
+        }}'''.format(days)
+    else:
+        raise Exception('Update days must be non-negative')
+    return get_elastic_docs(elastic_url, creds, body)
index 12031ef..9c33d2e 100644 (file)
@@ -1,6 +1,7 @@
 functest:
     -
         name: tempest_smoke_serial
+        format: normal
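+        # 'format' names the formatter the mongo-to-elasticsearch script applies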
         test_family: VIM
         visualizations:
             -
@@ -19,6 +20,7 @@ functest:
     -
         name: rally_sanity
         test_family: VIM
+        format: rally
         visualizations:
             -
                 name: duration
@@ -34,6 +36,7 @@ functest:
                     - field: details.success_percentage
     -
         name: vping_ssh
+        format: normal
         test_family: VIM
         visualizations:
             -
@@ -42,6 +45,7 @@ functest:
                     - field: details.duration
     -
         name: vping_userdata
+        format: normal
         test_family: VIM
         visualizations:
             -
@@ -51,6 +55,7 @@ functest:
     -
         name: odl
         test_family: Controller
+        format: odl
         visualizations:
             -
                 name: tests_failures
@@ -63,6 +68,7 @@ functest:
                     - field: details.success_percentage
     -
         name: onos
+        format: onos
         test_family: Controller
         visualizations:
             -
@@ -89,6 +95,7 @@ functest:
                     - field: details.FUNCvirNetL3.failures
     -
         name: vims
+        format: vims
         test_family: Features
         visualizations:
             -
@@ -107,6 +114,7 @@ functest:
 promise:
     -
         name: promise
+        format: normal
         test_family: Features
         visualizations:
             -
@@ -122,6 +130,7 @@ doctor:
     -
         name: doctor-notification
         test_family: Features
+        format: normal
         visualizations:
             -
                 name: duration