Merge "[compass] use specific network.yml in deploying onos scenario"
[releng.git] / utils / test / reporting / functest / reporting-tempest.py
index 563e530..e3f4e33 100644 (file)
@@ -1,25 +1,44 @@
 from urllib2 import Request, urlopen, URLError
 import json
 import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
 
-installers = ["apex", "compass", "fuel", "joid"]
+installers = conf.installers
 items = ["tests", "Success rate", "duration"]
 
-for installer in installers:
-    # we consider the Tempest results of the last 7 days
-    url = "http://testresults.opnfv.org/testapi/results?case=Tempest"
-    request = Request(url + '&period=7&installer=' + installer)
+PERIOD = conf.PERIOD
+criteria_nb_test = 165
+criteria_duration = 1800
+criteria_success_rate = 90
+
+logger = utils.getLogger("Tempest")
+logger.info("************************************************")
+logger.info("*   Generating reporting Tempest_smoke_serial  *")
+logger.info("*   Data retention = %s days                   *" % PERIOD)
+logger.info("*                                              *")
+logger.info("************************************************")
+
+logger.info("Success criteria:")
+logger.info("nb tests executed > %s s " % criteria_nb_test)
+logger.info("test duration < %s s " % criteria_duration)
+logger.info("success rate > %s " % criteria_success_rate)
 
+for installer in installers:
+    # we consider the Tempest results of the last PERIOD days
+    url = conf.URL_BASE + "?case=tempest_smoke_serial"
+    request = Request(url + '&period=' + str(PERIOD) +
+                      '&installer=' + installer + '&version=master')
+    logger.info("Search tempest_smoke_serial results for installer %s"
+                % installer)
     try:
         response = urlopen(request)
         k = response.read()
         results = json.loads(k)
     except URLError, e:
-        print 'No kittez. Got an error code:', e
+        logger.error("Error code: %s" % e)
 
-    test_results = results['test_results']
-    test_results.reverse()
+    test_results = results['results']
 
     scenario_results = {}
     criteria = {}
@@ -27,22 +46,26 @@ for installer in installers:
 
     for r in test_results:
         # Retrieve all the scenarios per installer
-        if not r['version'] in scenario_results.keys():
-            scenario_results[r['version']] = []
-        scenario_results[r['version']].append(r)
+        # Brahmaputra results used the 'version' field;
+        # since Colorado the 'scenario' field is used instead
+        if not r['scenario'] in scenario_results.keys():
+            scenario_results[r['scenario']] = []
+        scenario_results[r['scenario']].append(r)
 
     for s, s_result in scenario_results.items():
         scenario_results[s] = s_result[0:5]
         # For each scenario, we build a result object to deal with
         # results, criteria and error handling
         for result in scenario_results[s]:
-            result["creation_date"] = result["creation_date"].split(".")[0]
+            result["start_date"] = result["start_date"].split(".")[0]
 
             # retrieve results
             # ****************
             nb_tests_run = result['details']['tests']
+            nb_tests_failed = result['details']['failures']
             if nb_tests_run != 0:
-                success_rate = 100*(int(result['details']['tests']) - int(result['details']['failures']))/int(result['details']['tests'])
+                success_rate = 100*(int(nb_tests_run) -
+                                    int(nb_tests_failed)) / int(nb_tests_run)
             else:
                 success_rate = 0
 
@@ -55,45 +78,50 @@ for installer in installers:
             crit_rate = False
             crit_time = False
 
-            # Expect that at least 200 tests are run
-            if nb_tests_run >= 200:
+            # Expect that at least 165 tests are run
+            if nb_tests_run >= criteria_nb_test:
                 crit_tests = True
 
             # Expect a success rate of at least 90%
-            if success_rate >= 90:
+            if success_rate >= criteria_success_rate:
                 crit_rate = True
 
-            # Expect that the suite duration is inferior to 45m
-            if result['details']['duration'] < 2700:
+            # Expect that the suite duration is under 30m
+            if result['details']['duration'] < criteria_duration:
                 crit_time = True
 
             result['criteria'] = {'tests': crit_tests,
                                   'Success rate': crit_rate,
                                   'duration': crit_time}
-
-            # error management
+            try:
+                logger.debug("Scenario %s, Installer %s" % (s, installer))
+                logger.debug("Nb Test run: %s" % nb_tests_run)
+                logger.debug("Test duration: %s"
+                             % result['details']['duration'])
+                logger.debug("Success rate: %s" % success_rate)
+            except Exception:
+                logger.error("Data format error")
+
+            # Error management
             # ****************
+            try:
+                errors = result['details']['errors']
+                result['errors'] = errors.replace('{0}', '')
+            except Exception:
+                logger.error("Error field not present (Brahmaputra runs?)")
 
-            # TODO get information from artefact based on build tag
-            # to identify errors of the associated run
-            # build tag needed to wget errors on the artifacts
-            # the idea is to list the tests in errors and provide the link
-            # towards complete artifact
-            # another option will be to put the errors in the DB
-            # (in the detail section)...
-            result['errors'] = {'tests': "",
-                                'Success rate': "",
-                                'duration': ""}
-
-    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
+    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
     templateEnv = jinja2.Environment(loader=templateLoader)
 
-    TEMPLATE_FILE = "./template/index-tempest-tmpl.html"
+    TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
     template = templateEnv.get_template(TEMPLATE_FILE)
 
     outputText = template.render(scenario_results=scenario_results,
                                  items=items,
                                  installer=installer)
 
-    with open("./release/index-tempest-" + installer + ".html", "wb") as fh:
+    with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
+              installer + ".html", "wb") as fh:
         fh.write(outputText)
+logger.info("Tempest automatic reporting succesfully generated.")