# Functest reporting refactoring
# [releng.git] / utils / test / reporting / functest / reporting-tempest.py
1 from urllib2 import Request, urlopen, URLError
2 import json
3 import jinja2
4 import os
5
6 installers = ["apex", "compass", "fuel", "joid"]
7 items = ["tests", "Success rate", "duration"]
8
9 for installer in installers:
10     # we consider the Tempest results of the last 7 days
11     url = "http://testresults.opnfv.org/testapi/results?case=Tempest"
12     request = Request(url + '&period=7&installer=' + installer)
13
14     try:
15         response = urlopen(request)
16         k = response.read()
17         results = json.loads(k)
18     except URLError, e:
19         print 'No kittez. Got an error code:', e
20
21     test_results = results['test_results']
22     test_results.reverse()
23
24     scenario_results = {}
25     criteria = {}
26     errors = {}
27
28     for r in test_results:
29         # Retrieve all the scenarios per installer
30         if not r['version'] in scenario_results.keys():
31             scenario_results[r['version']] = []
32         scenario_results[r['version']].append(r)
33
34     for s, s_result in scenario_results.items():
35         scenario_results[s] = s_result[0:5]
36         # For each scenario, we build a result object to deal with
37         # results, criteria and error handling
38         for result in scenario_results[s]:
39             result["creation_date"] = result["creation_date"].split(".")[0]
40
41             # retrieve results
42             # ****************
43             nb_tests_run = result['details']['tests']
44             if nb_tests_run != 0:
45                 success_rate = 100*(int(result['details']['tests']) - int(result['details']['failures']))/int(result['details']['tests'])
46             else:
47                 success_rate = 0
48
49             result['details']["tests"] = nb_tests_run
50             result['details']["Success rate"] = str(success_rate) + "%"
51
52             # Criteria management
53             # *******************
54             crit_tests = False
55             crit_rate = False
56             crit_time = False
57
58             # Expect that at least 200 tests are run
59             if nb_tests_run >= 200:
60                 crit_tests = True
61
62             # Expect that at least 90% of success
63             if success_rate >= 90:
64                 crit_rate = True
65
66             # Expect that the suite duration is inferior to 45m
67             if result['details']['duration'] < 2700:
68                 crit_time = True
69
70             result['criteria'] = {'tests': crit_tests,
71                                   'Success rate': crit_rate,
72                                   'duration': crit_time}
73
74             # error management
75             # ****************
76
77             # TODO get information from artefact based on build tag
78             # to identify errors of the associated run
79             # build tag needed to wget errors on the artifacts
80             # the idea is to list the tests in errors and provide the link
81             # towards complete artifact
82             # another option will be to put the errors in the DB
83             # (in the detail section)...
84             result['errors'] = {'tests': "",
85                                 'Success rate': "",
86                                 'duration': ""}
87
88     templateLoader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
89     templateEnv = jinja2.Environment(loader=templateLoader)
90
91     TEMPLATE_FILE = "./template/index-tempest-tmpl.html"
92     template = templateEnv.get_template(TEMPLATE_FILE)
93
94     outputText = template.render(scenario_results=scenario_results,
95                                  items=items,
96                                  installer=installer)
97
98     with open("./release/index-tempest-" + installer + ".html", "wb") as fh:
99         fh.write(outputText)