Fix security issues reported by the security audit
[releng.git] / utils / test / reporting / functest / reporting-tempest.py
old mode 100644 (file)
new mode 100755 (executable)
index 563e530..363f123
 from urllib2 import Request, urlopen, URLError
 import json
 import jinja2
-import os
+import reportingConf as conf
+import reportingUtils as utils
 
-installers = ["apex", "compass", "fuel", "joid"]
+installers = conf.installers
 items = ["tests", "Success rate", "duration"]
 
-for installer in installers:
-    # we consider the Tempest results of the last 7 days
-    url = "http://testresults.opnfv.org/testapi/results?case=Tempest"
-    request = Request(url + '&period=7&installer=' + installer)
-
-    try:
-        response = urlopen(request)
-        k = response.read()
-        results = json.loads(k)
-    except URLError, e:
-        print 'No kittez. Got an error code:', e
-
-    test_results = results['test_results']
-    test_results.reverse()
-
-    scenario_results = {}
-    criteria = {}
-    errors = {}
-
-    for r in test_results:
-        # Retrieve all the scenarios per installer
-        if not r['version'] in scenario_results.keys():
-            scenario_results[r['version']] = []
-        scenario_results[r['version']].append(r)
-
-    for s, s_result in scenario_results.items():
-        scenario_results[s] = s_result[0:5]
-        # For each scenario, we build a result object to deal with
-        # results, criteria and error handling
-        for result in scenario_results[s]:
-            result["creation_date"] = result["creation_date"].split(".")[0]
-
-            # retrieve results
-            # ****************
-            nb_tests_run = result['details']['tests']
-            if nb_tests_run != 0:
-                success_rate = 100*(int(result['details']['tests']) - int(result['details']['failures']))/int(result['details']['tests'])
-            else:
-                success_rate = 0
-
-            result['details']["tests"] = nb_tests_run
-            result['details']["Success rate"] = str(success_rate) + "%"
-
-            # Criteria management
-            # *******************
-            crit_tests = False
-            crit_rate = False
-            crit_time = False
-
-            # Expect that at least 200 tests are run
-            if nb_tests_run >= 200:
-                crit_tests = True
-
-            # Expect that at least 90% of success
-            if success_rate >= 90:
-                crit_rate = True
-
-            # Expect that the suite duration is inferior to 45m
-            if result['details']['duration'] < 2700:
-                crit_time = True
-
-            result['criteria'] = {'tests': crit_tests,
-                                  'Success rate': crit_rate,
-                                  'duration': crit_time}
-
-            # error management
-            # ****************
-
-            # TODO get information from artefact based on build tag
-            # to identify errors of the associated run
-            # build tag needed to wget errors on the artifacts
-            # the idea is to list the tests in errors and provide the link
-            # towards complete artifact
-            # another option will be to put the errors in the DB
-            # (in the detail section)...
-            result['errors'] = {'tests': "",
-                                'Success rate': "",
-                                'duration': ""}
-
-    templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
-    templateEnv = jinja2.Environment(loader=templateLoader)
-
-    TEMPLATE_FILE = "./template/index-tempest-tmpl.html"
-    template = templateEnv.get_template(TEMPLATE_FILE)
-
-    outputText = template.render(scenario_results=scenario_results,
-                                 items=items,
-                                 installer=installer)
-
-    with open("./release/index-tempest-" + installer + ".html", "wb") as fh:
-        fh.write(outputText)
+PERIOD = conf.PERIOD
+criteria_nb_test = 165
+criteria_duration = 1800
+criteria_success_rate = 90
+
+logger = utils.getLogger("Tempest")
+logger.info("************************************************")
+logger.info("*   Generating reporting Tempest_smoke_serial  *")
+logger.info("*   Data retention = %s days                   *" % PERIOD)
+logger.info("*                                              *")
+logger.info("************************************************")
+
+logger.info("Success criteria:")
+logger.info("nb tests executed > %s " % criteria_nb_test)
+logger.info("test duration < %s s " % criteria_duration)
+logger.info("success rate > %s " % criteria_success_rate)
+
+# For all the versions
+for version in conf.versions:
+    for installer in conf.installers:
+        # we consider the Tempest results of the last PERIOD days
+        url = 'http://' + conf.URL_BASE + "?case=tempest_smoke_serial"
+        request = Request(url + '&period=' + str(PERIOD) +
+                          '&installer=' + installer +
+                          '&version=' + version)
+        logger.info("Search tempest_smoke_serial results for installer %s"
+                    " for version %s"
+                    % (installer, version))
+        try:
+            response = urlopen(request)
+            k = response.read()
+            results = json.loads(k)
+        except URLError, e:
+            logger.error("Error code: %s" % e)
+
+        test_results = results['results']
+
+        scenario_results = {}
+        criteria = {}
+        errors = {}
+
+        for r in test_results:
+            # Retrieve all the scenarios per installer
+            # In Brahmaputra use version
+            # Since Colorado use scenario
+            if not r['scenario'] in scenario_results.keys():
+                scenario_results[r['scenario']] = []
+            scenario_results[r['scenario']].append(r)
+
+        for s, s_result in scenario_results.items():
+            scenario_results[s] = s_result[0:5]
+            # For each scenario, we build a result object to deal with
+            # results, criteria and error handling
+            for result in scenario_results[s]:
+                result["start_date"] = result["start_date"].split(".")[0]
+
+                # retrieve results
+                # ****************
+                nb_tests_run = result['details']['tests']
+                nb_tests_failed = result['details']['failures']
+                if nb_tests_run != 0:
+                    success_rate = 100*(int(nb_tests_run) -
+                                        int(nb_tests_failed)) / int(nb_tests_run)
+                else:
+                    success_rate = 0
+
+                result['details']["tests"] = nb_tests_run
+                result['details']["Success rate"] = str(success_rate) + "%"
+
+                # Criteria management
+                # *******************
+                crit_tests = False
+                crit_rate = False
+                crit_time = False
+
+                # Expect that at least 165 tests are run
+                if nb_tests_run >= criteria_nb_test:
+                    crit_tests = True
+
+                # Expect that at least 90% of success
+                if success_rate >= criteria_success_rate:
+                    crit_rate = True
+
+                # Expect that the suite duration is inferior to 30m
+                if result['details']['duration'] < criteria_duration:
+                    crit_time = True
+
+                result['criteria'] = {'tests': crit_tests,
+                                      'Success rate': crit_rate,
+                                      'duration': crit_time}
+                try:
+                    logger.debug("Scenario %s, Installer %s"
+                                 % (s_result[1]['scenario'], installer))
+                    logger.debug("Nb Test run: %s" % nb_tests_run)
+                    logger.debug("Test duration: %s"
+                                 % result['details']['duration'])
+                    logger.debug("Success rate: %s" % success_rate)
+                except:
+                    logger.error("Data format error")
+
+                # Error management
+                # ****************
+                try:
+                    errors = result['details']['errors']
+                    result['errors'] = errors.replace('{0}', '')
+                except:
+                    logger.error("Error field not present (Brahmaputra runs?)")
+
+        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
+        templateEnv = jinja2.Environment(loader=templateLoader, autoescape=True)
+
+        TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
+        template = templateEnv.get_template(TEMPLATE_FILE)
+
+        outputText = template.render(scenario_results=scenario_results,
+                                     items=items,
+                                     installer=installer)
+
+        with open(conf.REPORTING_PATH + "/release/" + version +
+                  "/index-tempest-" + installer + ".html", "wb") as fh:
+            fh.write(outputText)
+logger.info("Tempest automatic reporting successfully generated.")