Merge "fuel: Enable triggers of Colorado on zte-pod1 and zte-pod3"
[releng.git] / utils / test / reporting / functest / reporting-tempest.py
from urllib2 import Request, urlopen, URLError
import json
import jinja2
import reportingConf as conf
import reportingUtils as utils

installers = conf.installers
items = ["tests", "Success rate", "duration"]

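# Success criteria for a tempest_smoke_serial run: minimum number of
# executed tests, maximum suite duration (in seconds) and minimum
# success rate (in %)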
PERIOD = conf.PERIOD
criteria_nb_test = 165
criteria_duration = 1800
criteria_success_rate = 90

logger = utils.getLogger("Tempest")
logger.info("************************************************")
logger.info("*   Generating reporting Tempest_smoke_serial  *")
logger.info("*   Data retention = %s days                   *" % PERIOD)
logger.info("*                                              *")
logger.info("************************************************")

22 logger.info("Success criteria:")
23 logger.info("nb tests executed > %s s " % criteria_nb_test)
24 logger.info("test duration < %s s " % criteria_duration)
25 logger.info("success rate > %s " % criteria_success_rate)
26
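# Main loop: for each (version, installer) pair, fetch the Tempest
# results of the last PERIOD days from the test API, check them against
# the success criteria per scenario and render one HTML status page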
# For all the versions
for version in conf.versions:
    for installer in conf.installers:
        # we consider the Tempest results of the last PERIOD days
        url = conf.URL_BASE + "?case=tempest_smoke_serial"
        request = Request(url + '&period=' + str(PERIOD) +
                          '&installer=' + installer +
                          '&version=' + version)
        logger.info("Search tempest_smoke_serial results for installer %s"
                    " for version %s"
                    % (installer, version))
        try:
            response = urlopen(request)
            k = response.read()
            results = json.loads(k)
        except URLError as e:
            logger.error("Error code: %s" % e)
            # no data retrieved, skip this installer/version combination
            continue

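        # Each entry of results['results'] is expected to carry the
        # fields used below: 'scenario', 'start_date' and a 'details'
        # dict with 'tests', 'failures', 'duration' and 'errors'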
        test_results = results['results']

        scenario_results = {}
        criteria = {}
        errors = {}

        for r in test_results:
            # Group the results by scenario
            # (in Brahmaputra the scenario was stored in 'version',
            # since Colorado it is stored in 'scenario')
            if r['scenario'] not in scenario_results:
                scenario_results[r['scenario']] = []
            scenario_results[r['scenario']].append(r)

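        # Evaluate each kept run against the three criteria and prepare
        # the fields consumed by the HTML template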
        for s, s_result in scenario_results.items():
            # keep at most 5 results per scenario
            scenario_results[s] = s_result[0:5]
            # For each scenario, we build a result object to deal with
            # results, criteria and error handling
            for result in scenario_results[s]:
                # drop the microseconds from the start date
                result["start_date"] = result["start_date"].split(".")[0]

                # retrieve results
                # ****************
                nb_tests_run = result['details']['tests']
                nb_tests_failed = result['details']['failures']
                if nb_tests_run != 0:
                    success_rate = 100*(int(nb_tests_run) -
                                        int(nb_tests_failed)) / int(nb_tests_run)
                else:
                    success_rate = 0

                result['details']["tests"] = nb_tests_run
                result['details']["Success rate"] = str(success_rate) + "%"

                # Criteria management
                # *******************
                crit_tests = False
                crit_rate = False
                crit_time = False

                # Expect that at least 165 tests are run
                if nb_tests_run >= criteria_nb_test:
                    crit_tests = True

                # Expect a success rate of at least 90%
                if success_rate >= criteria_success_rate:
                    crit_rate = True

                # Expect that the suite duration is less than 30 minutes
                if result['details']['duration'] < criteria_duration:
                    crit_time = True

                result['criteria'] = {'tests': crit_tests,
                                      'Success rate': crit_rate,
                                      'duration': crit_time}
                try:
                    logger.debug("Scenario %s, Installer %s"
                                 % (s, installer))
                    logger.debug("Nb Test run: %s" % nb_tests_run)
                    logger.debug("Test duration: %s"
                                 % result['details']['duration'])
                    logger.debug("Success rate: %s" % success_rate)
                except Exception:
                    logger.error("Data format error")

                # Error management
                # ****************
                try:
                    errors = result['details']['errors']
                    result['errors'] = errors.replace('{0}', '')
                except KeyError:
                    logger.error("Error field not present (Brahmaputra runs?)")

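        # Render the report for this (version, installer) pair from the
        # Jinja2 template stored under REPORTING_PATH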
        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
        templateEnv = jinja2.Environment(loader=templateLoader)

        TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_results=scenario_results,
                                     items=items,
                                     installer=installer)

        with open(conf.REPORTING_PATH + "/release/" + version +
                  "/index-tempest-" + installer + ".html", "wb") as fh:
            fh.write(outputText)
logger.info("Tempest automatic reporting successfully generated.")