add vIMS for dashboard 55/3255/2
author Morgan Richomme <morgan.richomme@orange.com>
Thu, 12 Nov 2015 16:36:27 +0000 (17:36 +0100)
committer Morgan Richomme <morgan.richomme@orange.com>
Thu, 19 Nov 2015 11:26:55 +0000 (12:26 +0100)
JIRA: FUNCTEST-64

Change-Id: I344c3a882d64acdcd310decf015f234964faeb5a
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
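
For context, the vIMS formatter added below reads a handful of fields from each result record fetched from MongoDB. Here is a sketch of the record shape it assumes, reconstructed from the field accesses in the diff; the values are illustrative only, and the durations are assumed to be in seconds:

    # Hypothetical vIMS result record; only the fields read by
    # format_vIMS_for_dashboard() are shown, values are made up.
    sample_result = {
        'creation_date': '2015-11-12 16:36:27',
        'details': {
            'orchestrator': {'duration': 900},   # assumed seconds
            'vIMS': {'duration': 1200},          # assumed seconds
            'sig_test': {
                'duration': 600,                 # assumed seconds
                # one entry per signaling test; 'result' is one of
                # "Passed", "Failed", "Skipped"
                'result': [{'result': 'Passed'},
                           {'result': 'Failed'},
                           {'result': 'Skipped'}],
            },
        },
    }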
utils/test/result_collection_api/dashboard/functest2Dashboard.py
utils/test/result_collection_api/resources/handlers.py

index 688f0c2..bfb7c87 100644
@@ -21,7 +21,7 @@ def get_functest_cases():
     get the list of the supported test cases
     TODO: update the list when adding a new test case for the dashboard
     """
-    return ["vPing", "Tempest", "odl", "Rally"]
+    return ["status", "vPing", "vIMS", "Tempest", "odl", "Rally"]
 
 
 def format_functest_for_dashboard(case, results):
@@ -53,6 +53,107 @@ def check_functest_case_exist(case):
         return True
 
 
+def format_status_for_dashboard(results):
+    test_data = [{'description': 'Functest status'}]
+
+    # Define the magic equation for the status:
+    # 5 suites: vPing, odl, Tempest, vIMS, Rally.
+    # Which overall KPIs make sense?
+
+    # TODO: to be discussed and implemented
+    testcases = get_functest_cases()
+    test_data.append({'nb test suite(s) run': len(testcases) - 1})  # "status" is not a suite
+    # test_data.append({'nb test suite(s) failed':1})
+    # test_data.append({'test suite run': ['vPing', 'tempest', 'vIMS' ]})
+    # test_data.append({'average Openstack Tempest failure rate (%)': 10})
+    # test_data.append({'average odl failure rate (%)': 10})
+
+    return test_data
+
+
+def format_vIMS_for_dashboard(results):
+    """
+    Post processing for the vIMS test case
+    """
+    test_data = [{'description': 'vIMS results for Dashboard'}]
+
+    # Graph 1: (duration_deployment_orchestrator,
+    #            duration_deployment_vnf,
+    #             duration_test) = f(time)
+    # ********************************
+    new_element = []
+
+    for data in results:
+        new_element.append({'x': data['creation_date'],
+                            'y1': data['details']['orchestrator']['duration'],
+                            'y2': data['details']['vIMS']['duration'],
+                            'y3': data['details']['sig_test']['duration']})
+
+    test_data.append({'name': "Tempest nb tests/nb failures",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'orchestation deployment duration',
+                               'y2label': 'vIMS deployment duration',
+                               'y3label': 'vIMS test duration'},
+                      'data_set': new_element})
+
+    # Graph 2: (Nb test, nb failure, nb skipped)=f(time)
+    # **************************************************
+    new_element = []
+
+    for data in results:
+        # Retrieve all the tests
+        nbTests = 0
+        nbFailures = 0
+        nbSkipped = 0
+        vIMS_test = data['details']['sig_test']['result']
+
+        for data_test in vIMS_test:
+            # Calculate nb of tests run and nb of tests failed
+            if data_test['result'] == "Passed":
+                nbTests += 1
+            elif data_test['result'] == "Failed":
+                nbFailures += 1
+            elif data_test['result'] == "Skipped":
+                nbSkipped += 1
+
+        new_element.append({'x': data['creation_date'],
+                            'y1': nbTests,
+                            'y2': nbFailures,
+                            'y3': nbSkipped})
+
+    test_data.append({'name': "vIMS nb tests passed/failed/skipped",
+                      'info': {'type': "graph",
+                               'xlabel': 'time',
+                               'y1label': 'Number of tests passed',
+                               'y2label': 'Number of tests failed',
+                               'y3label': 'Number of tests skipped'},
+                      'data_set': new_element})
+
+    # Graph 3: bar graph Sum(nb tests run), Sum(nb tests failed)
+    # ********************************************************
+    nbTests = 0
+    nbFailures = 0
+
+    for data in results:
+        vIMS_test = data['details']['sig_test']['result']
+
+        for data_test in vIMS_test:
+            # a test is counted as run only if it passed or failed
+            if data_test['result'] == "Passed":
+                nbTests += 1
+            elif data_test['result'] == "Failed":
+                nbTests += 1
+                nbFailures += 1
+
+    test_data.append({'name': "Total number of tests run/failure tests",
+                      'info': {"type": "bar"},
+                      'data_set': [{'Run': nbTests,
+                                    'Failed': nbFailures}]})
+
+    return test_data
+
+
 def format_Tempest_for_dashboard(results):
     """
     Post processing for the Tempest test case
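
The three graph blocks above each append one entry to test_data, so a single record should yield the description plus three graph entries. A minimal smoke test, assuming the module is importable as dashboard.functest2Dashboard (per the path above) and reusing the hypothetical record shape sketched after the commit message:

    from dashboard.functest2Dashboard import format_vIMS_for_dashboard

    # hypothetical record, matching only the fields the function reads
    sample = {'creation_date': '2015-11-19 11:26:55',
              'details': {'orchestrator': {'duration': 900},
                          'vIMS': {'duration': 1200},
                          'sig_test': {'duration': 600,
                                       'result': [{'result': 'Passed'},
                                                  {'result': 'Failed'},
                                                  {'result': 'Skipped'}]}}}

    test_data = format_vIMS_for_dashboard([sample])
    assert test_data[0] == {'description': 'vIMS results for Dashboard'}
    assert len(test_data) == 4  # description + graphs 1, 2 and 3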
index 85c6172..be08c97 100644
@@ -719,6 +719,21 @@ class DashboardHandler(GenericApiHandler):
                             "error:Project name missing")
         elif check_dashboard_ready_project(project_arg, "./dashboard"):
             res = []
+
+            if case_arg is None:
+                raise HTTPError(
+                    HTTP_NOT_FOUND,
+                    "error:Test case missing for project " + project_arg)
+
+            # special case of status for project
+            if case_arg == "status":
+                del get_request["case_name"]
+                # retention time to be agreed
+                # last five days by default?
+                # TODO move to DB
+                period = datetime.now() - timedelta(days=5)
+                get_request["creation_date"] = {"$gte": period}
+
             # fetching results
             cursor = self.db.test_results.find(get_request)
             while (yield cursor.fetch_next):
@@ -726,11 +741,7 @@ class DashboardHandler(GenericApiHandler):
                     cursor.next_object())
                 res.append(test_result.format_http())
 
-            if case_arg is None:
-                raise HTTPError(
-                    HTTP_NOT_FOUND,
-                    "error:Test case missing for project " + project_arg)
-            elif check_dashboard_ready_case(project_arg, case_arg):
+            if check_dashboard_ready_case(project_arg, case_arg):
                 dashboard = get_dashboard_result(project_arg, case_arg, res)
             else:
                 raise HTTPError(
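
The status branch added above only narrows the MongoDB filter before the usual fetch. A standalone sketch of the equivalent query, assuming a pymongo/motor-style collection and that get_request already carries the project filter built earlier in the handler (the 'project_name' key here is an assumption):

    from datetime import datetime, timedelta

    get_request = {'project_name': 'functest'}  # assumed upstream filter
    # keep only the last five days of results, as in the handler
    period = datetime.now() - timedelta(days=5)
    get_request['creation_date'] = {'$gte': period}
    # results = list(db.test_results.find(get_request))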