from urllib2 import Request, urlopen, URLError
import json
import jinja2
import os

# manage conf
import utils.reporting_utils as rp_utils
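# utils.reporting_utils provides config lookup (get_config) and logger
# creation (getLogger) for the reporting scripts.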

installers = rp_utils.get_config('general.installers')
items = ["tests", "Success rate", "duration"]

CURRENT_DIR = os.getcwd()

PERIOD = rp_utils.get_config('general.period')
criteria_nb_test = 165
criteria_duration = 1800
criteria_success_rate = 90
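# Reporting thresholds: at least 165 tests run, suite duration under
# 1800 s (30 minutes) and a success rate of at least 90%.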

logger = rp_utils.getLogger("Tempest")
logger.info("************************************************")
logger.info("*   Generating reporting Tempest_smoke_serial  *")
logger.info("*   Data retention = %s days                   *" % PERIOD)
logger.info("*                                              *")
logger.info("************************************************")

logger.info("Success criteria:")
logger.info("nb tests executed >= %s" % criteria_nb_test)
logger.info("test duration < %s s" % criteria_duration)
logger.info("success rate >= %s %%" % criteria_success_rate)

# For all the versions
for version in rp_utils.get_config('general.versions'):
    for installer in installers:
        # we consider the Tempest results of the last PERIOD days
        url = ("http://" + rp_utils.get_config('testapi.url') +
               "?case=tempest_smoke_serial")
        request = Request(url + '&period=' + str(PERIOD) +
                          '&installer=' + installer +
                          '&version=' + version)
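        # The query sent to the TestAPI looks like (host taken from
        # testapi.url in the config, other values illustrative):
        # http://<testapi.url>?case=tempest_smoke_serial&period=10
        #     &installer=apex&version=master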
        logger.info("Search tempest_smoke_serial results for installer %s"
                    " for version %s"
                    % (installer, version))
        try:
            response = urlopen(request)
            k = response.read()
            results = json.loads(k)
        except URLError as e:
            logger.error("Error code: %s" % e)
            # No answer from the API: skip this installer/version rather
            # than reusing 'results' from a previous iteration.
            continue

        test_results = results['results']

        scenario_results = {}
        criteria = {}
        errors = {}

        for r in test_results:
            # Retrieve all the scenarios per installer
            # In Brahmaputra use version
            # Since Colorado use scenario
            if r['scenario'] not in scenario_results:
                scenario_results[r['scenario']] = []
            scenario_results[r['scenario']].append(r)
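        # 'scenario_results' now maps each scenario name to the list of
        # runs collected for this installer/version.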

        for s, s_result in scenario_results.items():
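            # Keep only the 5 most recent runs per scenario; the TestAPI
            # is assumed to return results ordered newest first.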
            scenario_results[s] = s_result[0:5]
            # For each scenario, we build a result object to deal with
            # results, criteria and error handling
            for result in scenario_results[s]:
                result["start_date"] = result["start_date"].split(".")[0]

                # retrieve results
                # ****************
                nb_tests_run = result['details']['tests']
                nb_tests_failed = result['details']['failures']
                if nb_tests_run != 0:
                    success_rate = 100 * ((int(nb_tests_run) -
                                           int(nb_tests_failed)) /
                                          float(nb_tests_run))
                else:
                    success_rate = 0
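                # e.g. 165 tests run with 3 failures gives
                # 100 * (165 - 3) / 165.0 ~= 98.2%; float() matters, as
                # Python 2 integer division would floor the ratio to 0.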

                result['details']["tests"] = nb_tests_run
                result['details']["Success rate"] = str(success_rate) + "%"

                # Criteria management
                # *******************
                crit_tests = False
                crit_rate = False
                crit_time = False

                # Expect that at least 165 tests are run
                if nb_tests_run >= criteria_nb_test:
                    crit_tests = True

                # Expect a success rate of at least 90%
                if success_rate >= criteria_success_rate:
                    crit_rate = True

                # Expect the suite duration to stay under 30 minutes
                if result['details']['duration'] < criteria_duration:
                    crit_time = True

                result['criteria'] = {'tests': crit_tests,
                                      'Success rate': crit_rate,
                                      'duration': crit_time}
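                # These keys mirror the 'items' list defined at the top
                # of the file, which is also passed to the template.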
                try:
                    logger.debug("Scenario %s, Installer %s"
                                 % (s, installer))
                    logger.debug("Nb Test run: %s" % nb_tests_run)
                    logger.debug("Test duration: %s"
                                 % result['details']['duration'])
                    logger.debug("Success rate: %s" % success_rate)
                except Exception:
                    logger.error("Data format error")

                # Error management
                # ****************
                try:
                    errors = result['details']['errors']
                    result['errors'] = errors.replace('{0}', '')
                except KeyError:
                    logger.error("Error field not present "
                                 "(Brahmaputra runs?)")

        templateLoader = jinja2.FileSystemLoader(".")
        templateEnv = jinja2.Environment(loader=templateLoader,
                                         autoescape=True)
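        # autoescape=True makes Jinja2 HTML-escape the rendered values,
        # so raw error strings from the API cannot break the markup.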

        TEMPLATE_FILE = "./functest/template/index-tempest-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_results=scenario_results,
                                     items=items,
                                     installer=installer)

        with open("./display/" + version +
                  "/functest/tempest-" + installer + ".html", "wb") as fh:
            fh.write(outputText.encode('utf-8'))
logger.info("Tempest automatic reporting successfully generated.")