flake8 fix + add logger
[releng.git] utils/test/reporting/functest/reporting-tempest.py
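# Builds the Tempest smoke-serial reporting pages: for each installer the
# script pulls the last PERIOD days of results from the test API
# (conf.URL_BASE), checks them against the success criteria below and
# renders one HTML page per installer with a Jinja2 template.
# reportingConf/reportingUtils are project-local configuration and
# logging helpers.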
from urllib2 import Request, urlopen, URLError
import json
import jinja2
import reportingConf as conf
import reportingUtils as utils

installers = conf.installers
items = ["tests", "Success rate", "duration"]

PERIOD = conf.PERIOD
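# Pass/fail thresholds: minimum number of tests executed, maximum suite
# duration in seconds and minimum success rate in percent.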
criteria_nb_test = 165
criteria_duration = 1800
criteria_success_rate = 90

logger = utils.getLogger("Tempest")
logger.info("************************************************")
logger.info("*   Generating reporting Tempest_smoke_serial  *")
logger.info("*   Data retention = %s days                   *" % PERIOD)
logger.info("*                                              *")
logger.info("************************************************")

22 logger.info("Success criteria: nb tests executed > %s s," +
23             "test duration < %s s," +
24             "success rate > %s " % (criteria_nb_test,
25                                     criteria_duration,
26                                     criteria_success_rate))

for installer in installers:
    # we consider the Tempest results of the last PERIOD days
    url = conf.URL_BASE + "?case=tempest_smoke_serial"
    request = Request(url + '&period=' + str(PERIOD) +
                      '&installer=' + installer + '&version=master')
    logger.info("Search tempest_smoke_serial results for installer %s"
                % installer)
    try:
        response = urlopen(request)
        k = response.read()
        results = json.loads(k)
    except URLError as e:
        logger.error("Error code: %s" % e)
        # without results we cannot build the report for this installer
        continue

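    # Reverse so that the most recent runs come first (assumption: the API
    # returns results oldest-first); the per-scenario slice below then keeps
    # only the latest five runs.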
    test_results = results['results']
    test_results.reverse()

    scenario_results = {}
    criteria = {}
    errors = {}

    for r in test_results:
        # Retrieve all the scenarios per installer
        # In Brahmaputra use version
        # Since Colorado use scenario
        if r['scenario'] not in scenario_results:
            scenario_results[r['scenario']] = []
        scenario_results[r['scenario']].append(r)

    for s, s_result in scenario_results.items():
        scenario_results[s] = s_result[0:5]
        # For each scenario, we build a result object to deal with
        # results, criteria and error handling
        for result in scenario_results[s]:
            result["start_date"] = result["start_date"].split(".")[0]

            # retrieve results
            # ****************
            nb_tests_run = result['details']['tests']
            nb_tests_failed = result['details']['failures']
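            # Success rate in whole percent; under Python 2 the division
            # below is integer division, so the value is truncated.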
            if nb_tests_run != 0:
                success_rate = 100*(int(nb_tests_run) -
                                    int(nb_tests_failed)) / int(nb_tests_run)
            else:
                success_rate = 0

            result['details']["tests"] = nb_tests_run
            result['details']["Success rate"] = str(success_rate) + "%"

            # Criteria management
            # *******************
            crit_tests = False
            crit_rate = False
            crit_time = False

            # Expect that at least 165 tests are run
            if nb_tests_run >= criteria_nb_test:
                crit_tests = True

            # Expect a success rate of at least 90%
            if success_rate >= criteria_success_rate:
                crit_rate = True

            # Expect that the suite duration is less than 30 minutes
            if result['details']['duration'] < criteria_duration:
                crit_time = True

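            # The criteria keys mirror the 'items' list at the top so the
            # template can pair each metric with its pass/fail flag
            # (assumption about how the template consumes them).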
            result['criteria'] = {'tests': crit_tests,
                                  'Success rate': crit_rate,
                                  'duration': crit_time}
            try:
                logger.debug("Scenario %s, Installer %s" % (s, installer))
                logger.debug("Nb Test run: %s" % nb_tests_run)
                logger.debug("Test duration: %s"
                             % result['details']['duration'])
                logger.debug("Success rate: %s" % success_rate)
            except Exception:
                logger.error("Data format error")

            # Error management
            # ****************
            try:
                errors = result['details']['errors']
                result['errors'] = errors.replace('{0}', '')
            except Exception:
                logger.error("Error field not present (Brahmaputra runs?)")

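    # Render one HTML report page per installer with the shared Jinja2
    # template stored under REPORTING_PATH.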
    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
    templateEnv = jinja2.Environment(loader=templateLoader)

    TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
    template = templateEnv.get_template(TEMPLATE_FILE)

    outputText = template.render(scenario_results=scenario_results,
                                 items=items,
                                 installer=installer)

    with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
              installer + ".html", "wb") as fh:
        fh.write(outputText)
logger.info("Tempest automatic reporting successfully generated.")