bug fix: reporting Tempest
[releng.git] / utils / test / reporting / reporting / functest / reporting-tempest.py
#!/usr/bin/env python

# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# SPDX-license-identifier: Apache-2.0

from datetime import datetime
import json
import os

from urllib2 import Request, urlopen, URLError
import jinja2

import reporting.utils.reporting_utils as rp_utils

INSTALLERS = rp_utils.get_config('general.installers')
ITEMS = ["tests", "Success rate", "duration"]

CURRENT_DIR = os.getcwd()

PERIOD = rp_utils.get_config('general.period')
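# Success criteria thresholds; units are inferred from the checks below:
# a minimum number of executed tests, a maximum suite duration in seconds
# (i.e. 30 minutes) and a minimum success rate in percent.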
CRITERIA_NB_TEST = 100
CRITERIA_DURATION = 1800
CRITERIA_SUCCESS_RATE = 100

logger = rp_utils.getLogger("Tempest")
logger.info("************************************************")
logger.info("*   Generating Tempest_smoke_serial reporting  *")
logger.info("*   Data retention = %s days                   *", PERIOD)
logger.info("*                                              *")
logger.info("************************************************")

logger.info("Success criteria:")
logger.info("nb tests executed > %s", CRITERIA_NB_TEST)
logger.info("test duration < %s s", CRITERIA_DURATION)
logger.info("success rate > %s %%", CRITERIA_SUCCESS_RATE)

# For all the versions
for version in rp_utils.get_config('general.versions'):
    for installer in INSTALLERS:
        # we consider the Tempest results of the last PERIOD days
        url = ("http://" + rp_utils.get_config('testapi.url') +
               "?case=tempest_smoke_serial&period=" + str(PERIOD) +
               "&installer=" + installer + "&version=" + version)
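        # Illustrative query (example values; the host comes from the
        # testapi.url setting), e.g.:
        # http://<testapi.url>?case=tempest_smoke_serial&period=10
        #   &installer=fuel&version=master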
        request = Request(url)
        logger.info(("Search tempest_smoke_serial results for installer %s"
                     " for version %s"), installer, version)
        try:
            response = urlopen(request)
            k = response.read()
            results = json.loads(k)
        except URLError as err:
            logger.error("Error code: %s", err)
            # No response means no results to process: skip this
            # installer/version combination
            continue
        logger.debug("request sent: %s", url)
        logger.debug("Results from API: %s", results)
        test_results = results['results']
        logger.debug("Test results: %s", test_results)
        scenario_results = {}
        criteria = {}
        errors = {}

        for r in test_results:
            # Retrieve all the scenarios per installer
            # In Brahmaputra use version
            # Since Colorado use scenario
            if r['scenario'] not in scenario_results:
                scenario_results[r['scenario']] = []
            scenario_results[r['scenario']].append(r)

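        # scenario_results now maps each scenario name to its list of raw
        # TestAPI records, e.g. {'os-nosdn-nofeature-ha': [result, ...]}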
        logger.debug("Scenario results: %s", scenario_results)

        for s, s_result in scenario_results.items():
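            # Only the first five results returned for each scenario are kept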
            scenario_results[s] = s_result[0:5]
            # For each scenario, we build a result object to deal with
            # results, criteria and error handling
            for result in scenario_results[s]:
                result["start_date"] = result["start_date"].split(".")[0]
                logger.debug("start_date= %s", result["start_date"])

                # retrieve results
                # ****************
                nb_tests_run = result['details']['tests']
                nb_tests_failed = result['details']['failures']
                logger.debug("nb_tests_run= %s", nb_tests_run)
                logger.debug("nb_tests_failed= %s", nb_tests_failed)

                try:
                    success_rate = (100 * (int(nb_tests_run) -
                                           int(nb_tests_failed)) /
                                    int(nb_tests_run))
                except ZeroDivisionError:
                    success_rate = 0
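                # Note: under Python 2 the division above is an integer
                # division, so the rate is truncated to a whole percent.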

                result['details']["tests"] = nb_tests_run
                result['details']["Success rate"] = str(success_rate) + "%"

                logger.info("nb_tests_run= %s", result['details']["tests"])
                logger.info("success rate = %s",
                            result['details']["Success rate"])

                # Criteria management
                # *******************
                crit_tests = False
                crit_rate = False
                crit_time = False

                # Expect that at least CRITERIA_NB_TEST tests are run
                if nb_tests_run >= CRITERIA_NB_TEST:
                    crit_tests = True

                # Expect a success rate of at least CRITERIA_SUCCESS_RATE %
                if success_rate >= CRITERIA_SUCCESS_RATE:
                    crit_rate = True

                # Expect that the suite duration is less than 30 minutes
                stop_date = datetime.strptime(result['stop_date'],
                                              '%Y-%m-%d %H:%M:%S')
                start_date = datetime.strptime(result['start_date'],
                                               '%Y-%m-%d %H:%M:%S')

                delta = stop_date - start_date

                if delta.total_seconds() < CRITERIA_DURATION:
                    crit_time = True

                result['criteria'] = {'tests': crit_tests,
                                      'Success rate': crit_rate,
                                      'duration': crit_time}
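                # The criteria keys match ITEMS, which is passed to the
                # template below for rendering.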
                try:
                    logger.debug("Nb Test run: %s", nb_tests_run)
                    logger.debug("Test duration: %s", delta)
                    logger.debug("Success rate: %s", success_rate)
                except Exception:  # pylint: disable=broad-except
                    logger.error("Data format error")

                # Error management
                # ****************
                try:
                    errors = result['details']['errors']
                    logger.info("errors: %s", errors)
                    result['errors'] = errors
                except Exception:  # pylint: disable=broad-except
                    logger.error("Error field not present (Brahmaputra runs?)")

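        # Render one HTML page per (version, installer) pair from the
        # Jinja2 template.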
        templateLoader = jinja2.FileSystemLoader(".")
        templateEnv = jinja2.Environment(loader=templateLoader,
                                         autoescape=True)

        TEMPLATE_FILE = "./reporting/functest/template/index-tempest-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_results=scenario_results,
                                     items=ITEMS,
                                     installer=installer)

        with open("./display/" + version +
                  "/functest/tempest-" + installer + ".html", "wb") as fh:
            # the file is opened in binary mode and the template renders
            # unicode, so encode explicitly
            fh.write(outputText.encode("utf-8"))
logger.info("Tempest automatic reporting successfully generated.")