bug fix: reporting Tempest
author Morgan Richomme <morgan.richomme@orange.com>
Thu, 31 Aug 2017 07:07:06 +0000 (09:07 +0200)
committer Morgan Richomme <morgan.richomme@orange.com>
Thu, 31 Aug 2017 07:07:06 +0000 (09:07 +0200)
The script had not been updated since Colorado and the data model has
changed since then:

 - minor fixes to match the current data model
 - pep8/pylint corrections (illustrated in the sketch below)
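
As a minimal sketch of the pylint corrections applied throughout the
patch (lazy logger formatting instead of eager %-interpolation, and a
ZeroDivisionError handler instead of an if/else around the division);
the values and the logger name here are assumed for illustration, not
taken from the patch:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("Tempest")  # assumed name for this sketch

    nb_tests_run = 100
    nb_tests_failed = 0

    # Before: eager %-formatting builds the string even when the log
    # level is disabled; pylint flags this as logging-not-lazy.
    logger.info("nb tests executed > %s" % nb_tests_run)

    # After: arguments are handed to the logger, which interpolates them
    # only if the record is actually emitted.
    logger.info("nb tests executed > %s", nb_tests_run)

    # The success-rate computation now relies on catching
    # ZeroDivisionError instead of testing nb_tests_run != 0 beforehand.
    try:
        success_rate = (100 * (int(nb_tests_run) - int(nb_tests_failed)) /
                        int(nb_tests_run))
    except ZeroDivisionError:
        success_rate = 0
    logger.info("success rate: %s %%", success_rate)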

Change-Id: I3a0a9ea689d25b3cfff35cb1ec39329981627bc6
Signed-off-by: Morgan Richomme <morgan.richomme@orange.com>
utils/test/reporting/reporting/functest/reporting-tempest.py

index bc28856..d78d9a1 100755 (executable)
@@ -8,58 +8,59 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 # SPDX-license-identifier: Apache-2.0
 
-from urllib2 import Request, urlopen, URLError
 from datetime import datetime
 import json
-import jinja2
 import os
 
-# manage conf
-import utils.reporting_utils as rp_utils
+from urllib2 import Request, urlopen, URLError
+import jinja2
+
+import reporting.utils.reporting_utils as rp_utils
 
-installers = rp_utils.get_config('general.installers')
-items = ["tests", "Success rate", "duration"]
+INSTALLERS = rp_utils.get_config('general.installers')
+ITEMS = ["tests", "Success rate", "duration"]
 
 CURRENT_DIR = os.getcwd()
 
 PERIOD = rp_utils.get_config('general.period')
-criteria_nb_test = 165
-criteria_duration = 1800
-criteria_success_rate = 90
+CRITERIA_NB_TEST = 100
+CRITERIA_DURATION = 1800
+CRITERIA_SUCCESS_RATE = 100
 
 logger = rp_utils.getLogger("Tempest")
 logger.info("************************************************")
 logger.info("*   Generating reporting Tempest_smoke_serial  *")
-logger.info("*   Data retention = %s days                   *" % PERIOD)
+logger.info("*   Data retention = %s days                   *", PERIOD)
 logger.info("*                                              *")
 logger.info("************************************************")
 
 logger.info("Success criteria:")
-logger.info("nb tests executed > %s s " % criteria_nb_test)
-logger.info("test duration < %s s " % criteria_duration)
-logger.info("success rate > %s " % criteria_success_rate)
+logger.info("nb tests executed > %s s ", CRITERIA_NB_TEST)
+logger.info("test duration < %s s ", CRITERIA_DURATION)
+logger.info("success rate > %s ", CRITERIA_SUCCESS_RATE)
 
 # For all the versions
 for version in rp_utils.get_config('general.versions'):
-    for installer in installers:
+    for installer in INSTALLERS:
         # we consider the Tempest results of the last PERIOD days
         url = ("http://" + rp_utils.get_config('testapi.url') +
-               "?case=tempest_smoke_serial")
-        request = Request(url + '&period=' + str(PERIOD) +
-                          '&installer=' + installer +
-                          '&version=' + version)
-        logger.info("Search tempest_smoke_serial results for installer %s"
-                    " for version %s"
-                    % (installer, version))
+               "?case=tempest_smoke_serial&period=" + str(PERIOD) +
+               "&installer=" + installer + "&version=" + version)
+        request = Request(url)
+        logger.info("Search tempest_smoke_serial results for installer %s"
+                    " for version %s", installer, version)
         try:
             response = urlopen(request)
             k = response.read()
             results = json.loads(k)
-        except URLError as e:
-            logger.error("Error code: %s" % e)
-
+        except URLError as err:
+            logger.error("Error code: %s", err)
+            # no data retrieved: skip this installer/version combination
+            continue
+        logger.debug("request sent: %s", url)
+        logger.debug("Results from API: %s", results)
         test_results = results['results']
-
+        logger.debug("Test results: %s", test_results)
         scenario_results = {}
         criteria = {}
         errors = {}
@@ -72,27 +71,37 @@ for version in rp_utils.get_config('general.versions'):
                 scenario_results[r['scenario']] = []
             scenario_results[r['scenario']].append(r)
 
+        logger.debug("Scenario results: %s", scenario_results)
+
         for s, s_result in scenario_results.items():
             scenario_results[s] = s_result[0:5]
             # For each scenario, we build a result object to deal with
             # results, criteria and error handling
             for result in scenario_results[s]:
                 result["start_date"] = result["start_date"].split(".")[0]
+                logger.debug("start_date= %s", result["start_date"])
 
                 # retrieve results
                 # ****************
                 nb_tests_run = result['details']['tests']
                 nb_tests_failed = result['details']['failures']
-                if nb_tests_run != 0:
-                    success_rate = 100 * ((int(nb_tests_run) -
+                logger.debug("nb_tests_run= %s", nb_tests_run)
+                logger.debug("nb_tests_failed= %s", nb_tests_failed)
+
+                try:
+                    success_rate = (100 * (int(nb_tests_run) -
                                            int(nb_tests_failed)) /
-                                          int(nb_tests_run))
-                else:
+                                    int(nb_tests_run))
+                except ZeroDivisionError:
                     success_rate = 0
 
                 result['details']["tests"] = nb_tests_run
                 result['details']["Success rate"] = str(success_rate) + "%"
 
+                logger.info("nb_tests_run= %s", result['details']["tests"])
+                logger.info("test rate = %s",
+                            result['details']["Success rate"])
+
                 # Criteria management
                 # *******************
                 crit_tests = False
@@ -100,11 +109,11 @@ for version in rp_utils.get_config('general.versions'):
                 crit_time = False
 
-                # Expect that at least 165 tests are run
-                if nb_tests_run >= criteria_nb_test:
+                # Expect that at least CRITERIA_NB_TEST tests are run
+                if nb_tests_run >= CRITERIA_NB_TEST:
                     crit_tests = True
 
-                # Expect that at least 90% of success
-                if success_rate >= criteria_success_rate:
+                # Expect a success rate of at least CRITERIA_SUCCESS_RATE %
+                if success_rate >= CRITERIA_SUCCESS_RATE:
                     crit_rate = True
 
-                # Expect that the suite duration is inferior to 30m
+                # Expect the suite duration to be under 30 minutes
@@ -114,28 +123,27 @@ for version in rp_utils.get_config('general.versions'):
                                                '%Y-%m-%d %H:%M:%S')
 
                 delta = stop_date - start_date
-                if (delta.total_seconds() < criteria_duration):
+
+                if delta.total_seconds() < CRITERIA_DURATION:
                     crit_time = True
 
                 result['criteria'] = {'tests': crit_tests,
                                       'Success rate': crit_rate,
                                       'duration': crit_time}
                 try:
-                    logger.debug("Scenario %s, Installer %s"
-                                 % (s_result[1]['scenario'], installer))
-                    logger.debug("Nb Test run: %s" % nb_tests_run)
-                    logger.debug("Test duration: %s"
-                                 % result['details']['duration'])
-                    logger.debug("Success rate: %s" % success_rate)
-                except:
+                    logger.debug("Nb Test run: %s", nb_tests_run)
+                    logger.debug("Test duration: %s", delta)
+                    logger.debug("Success rate: %s", success_rate)
+                except Exception:  # pylint: disable=broad-except
                     logger.error("Data format error")
 
                 # Error management
                 # ****************
                 try:
                     errors = result['details']['errors']
-                    result['errors'] = errors.replace('{0}', '')
-                except:
+                    logger.info("errors: %s", errors)
+                    result['errors'] = errors
+                except Exception:  # pylint: disable=broad-except
                     logger.error("Error field not present (Brahamputra runs?)")
 
         templateLoader = jinja2.FileSystemLoader(".")
@@ -146,7 +154,7 @@ for version in rp_utils.get_config('general.versions'):
         template = templateEnv.get_template(TEMPLATE_FILE)
 
         outputText = template.render(scenario_results=scenario_results,
-                                     items=items,
+                                     items=ITEMS,
                                      installer=installer)
 
         with open("./display/" + version +