Merge "apex, cperf: Adds cperf job to Apex"
[releng.git] / utils / test / reporting / functest / reporting-tempest.py
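#
# Generates one HTML reporting page per installer summarising the
# tempest_smoke_serial results of the last PERIOD days, as collected
# from the test results API (conf.URL_BASE).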
from urllib2 import Request, urlopen, URLError
import json
import jinja2
import reportingConf as conf
import reportingUtils as utils

installers = conf.installers
items = ["tests", "Success rate", "duration"]

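# Success criteria: a run is considered compliant when it executed at
# least criteria_nb_test tests, lasted less than criteria_duration
# seconds and reached at least criteria_success_rate % of success.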
PERIOD = conf.PERIOD
criteria_nb_test = 165
criteria_duration = 1800
criteria_success_rate = 90

logger = utils.getLogger("Tempest")
logger.info("************************************************")
logger.info("*   Generating reporting Tempest_smoke_serial  *")
logger.info("*   Data retention = %s days                   *" % PERIOD)
logger.info("*                                              *")
logger.info("************************************************")

logger.info("Success criteria:")
logger.info("nb tests executed > %s" % criteria_nb_test)
logger.info("test duration < %s s" % criteria_duration)
logger.info("success rate > %s %%" % criteria_success_rate)

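# Build one reporting page per installer.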
for installer in installers:
    # we consider the Tempest results of the last PERIOD days
    url = conf.URL_BASE + "?case=tempest_smoke_serial"
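    # the query is further filtered on period (in days), installer
    # and version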
    request = Request(url + '&period=' + str(PERIOD) +
                      '&installer=' + installer + '&version=master')
    logger.info("Search tempest_smoke_serial results for installer %s"
                % installer)
    try:
        response = urlopen(request)
        k = response.read()
        results = json.loads(k)
    except URLError as e:
        logger.error("Error code: %s" % e)
        # without a response there is nothing to report for this installer
        continue

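    # Expected shape of the API answer (a sketch inferred from the
    # fields accessed below, not a contract):
    # {"results": [{"scenario": "...", "start_date": "...",
    #               "details": {"tests": 165, "failures": 0,
    #                           "duration": 1200, "errors": "..."}},
    #              ...]}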
    test_results = results['results']

    scenario_results = {}
    criteria = {}
    errors = {}

    for r in test_results:
        # Retrieve all the scenarios for this installer.
        # Brahmaputra stored the scenario under 'version';
        # since Colorado it is stored under 'scenario'.
        if r['scenario'] not in scenario_results:
            scenario_results[r['scenario']] = []
        scenario_results[r['scenario']].append(r)

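    # Only the five most recent runs of each scenario are reported.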
    for s, s_result in scenario_results.items():
        scenario_results[s] = s_result[0:5]
        # For each scenario, we build a result object to deal with
        # results, criteria and error handling
        for result in scenario_results[s]:
            # strip the fractional seconds from the start date
            result["start_date"] = result["start_date"].split(".")[0]

            # retrieve results
            # ****************
            nb_tests_run = result['details']['tests']
            nb_tests_failed = result['details']['failures']
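            # integer percentage of passed tests; 0 when no test ran
            # (note: floor division under Python 2)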
            if nb_tests_run != 0:
                success_rate = 100*(int(nb_tests_run) -
                                    int(nb_tests_failed)) / int(nb_tests_run)
            else:
                success_rate = 0

            result['details']["tests"] = nb_tests_run
            result['details']["Success rate"] = str(success_rate) + "%"

            # Criteria management
            # *******************
            crit_tests = False
            crit_rate = False
            crit_time = False

            # Expect that at least criteria_nb_test tests are run
            if nb_tests_run >= criteria_nb_test:
                crit_tests = True

            # Expect a success rate of at least criteria_success_rate %
            if success_rate >= criteria_success_rate:
                crit_rate = True

            # Expect the suite to last less than criteria_duration seconds
            if result['details']['duration'] < criteria_duration:
                crit_time = True

            result['criteria'] = {'tests': crit_tests,
                                  'Success rate': crit_rate,
                                  'duration': crit_time}
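            # the criteria keys mirror 'items', presumably so the
            # template can flag each reported column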
            try:
                logger.debug("Scenario %s, Installer %s"
                             % (s, installer))
                logger.debug("Nb Test run: %s" % nb_tests_run)
                logger.debug("Test duration: %s"
                             % result['details']['duration'])
                logger.debug("Success rate: %s" % success_rate)
            except KeyError:
                logger.error("Data format error")

            # Error management
            # ****************
            try:
                errors = result['details']['errors']
                result['errors'] = errors.replace('{0}', '')
            except KeyError:
                logger.error("Error field not present (Brahmaputra runs?)")

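    # Render the per-installer page with Jinja2.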
    templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
    templateEnv = jinja2.Environment(loader=templateLoader)

    TEMPLATE_FILE = "/template/index-tempest-tmpl.html"
    template = templateEnv.get_template(TEMPLATE_FILE)

    outputText = template.render(scenario_results=scenario_results,
                                 items=items,
                                 installer=installer)

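    # one page per installer, published under release/master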
    with open(conf.REPORTING_PATH + "/release/master/index-tempest-" +
              installer + ".html", "wb") as fh:
        fh.write(outputText)
logger.info("Tempest automatic reporting successfully generated.")