#!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
import jinja2
import os
import requests
import sys
import time
import yaml

import reportingUtils as utils
import reportingConf as conf
import testCase as tc
import scenarioResult as sr

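# testCases4Validation: Tier 1-3 test cases used to compute the scenario score
# otherTestCases: Tier > 3 test cases, displayed in the report but not scored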
testCases4Validation = []
otherTestCases = []

# Init just Tempest to get the list of scenarios,
# as all the scenarios run Tempest
tempest = tc.TestCase("Tempest", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer and scenario
# cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
cf = "https://git.opnfv.org/cgit/functest/plain/ci/testcases.yaml"
response = requests.get(cf)
functest_yaml_config = yaml.safe_load(response.text)
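# The relevant layout of testcases.yaml (simplified; inferred from the fields
# accessed below) is expected to be roughly:
#   tiers:
#     - order: 1
#       testcases:
#         - name: <test case name>
#           dependencies: <installer/scenario constraints>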

print "****************************************"
print "*   Generating reporting.....          *"
print ("*   Data retention = %s days           *" % conf.PERIOD)
print "*                                      *"
print "****************************************"

# Retrieve the tiers and their test cases
config_tiers = functest_yaml_config.get("tiers")

# Tiers 1 (smoke), 2 (sdn suites) and 3 (features) are used
# to validate scenarios.
# Tiers > 3 are not used to validate scenarios, but their results are
# displayed anyway.
# This is tricky for the API, as some tests are Functest tests while
# others are declared directly in the feature projects.
for tier in config_tiers:
    if tier['order'] > 0 and tier['order'] < 3:
        for case in tier['testcases']:
            testCases4Validation.append(tc.TestCase(case['name'],
                                                    "functest",
                                                    case['dependencies']))
    elif tier['order'] == 3:
        for case in tier['testcases']:
            testCases4Validation.append(tc.TestCase(case['name'],
                                                    case['name'],
                                                    case['dependencies']))
    elif tier['order'] > 3:
        for case in tier['testcases']:
            otherTestCases.append(tc.TestCase(case['name'],
                                              "functest",
                                              case['dependencies']))

# For all the versions
for version in conf.versions:
    # For all the installers
    for installer in conf.installers:
        # get scenarios
        scenario_results = utils.getScenarios(tempest, installer, version)
        scenario_stats = utils.getScenarioStats(scenario_results)
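        # items: scenario name -> list of test cases to display
        # scenario_result_criteria: scenario name -> ScenarioResult (status, score)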
        items = {}
        scenario_result_criteria = {}

        # For all the scenarios get results
        for s, s_result in scenario_results.items():
            # Green or Red light for a given scenario
            nb_test_runnable_for_this_scenario = 0
            scenario_score = 0

            testCases2BeDisplayed = []
            # Check whether each test case is runnable for this
            # installer/scenario, starting with the test cases used
            # for scenario validation
            try:
                print ("---------------------------------")
                print ("installer %s, version %s, scenario %s:" %
                       (installer, version, s))

                # 1) Manage the test cases for the scenario validation
                # concretely Tiers 1-3
                for test_case in testCases4Validation:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    print ("testcase %s is %s" % (test_case.getName(),
                                                  test_case.isRunnable))
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        project = test_case.getProject()
                        nb_test_runnable_for_this_scenario += 1
                        print (" Searching results for case %s " %
                               (dbName))
                        result = utils.getResult(dbName, installer, s, version)
                        print " >>>> Test result=" + str(result)
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 "",
                                                                 result,
                                                                 True,
                                                                 1))
                        scenario_score = scenario_score + result

                # 2) Manage the test cases for the scenario qualification
                # concretely Tiers > 3
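                # (their results are displayed but do not count towards
                # the scenario score)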
                for test_case in otherTestCases:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    print ("testcase %s is %s" % (test_case.getName(),
                                                  test_case.isRunnable))
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        project = test_case.getProject()
                        print (" Searching results for case %s " %
                               (dbName))
                        result = utils.getResult(dbName, installer, s, version)
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 "",
                                                                 result,
                                                                 True,
                                                                 4))

                items[s] = testCases2BeDisplayed
            except:
                print ("Error: installer %s, version %s, scenario %s" %
                       (installer, version, s))
                print "No data available, error %s" % (sys.exc_info()[0])

            # **********************************************
            # Evaluate the results for scenario validation
            # **********************************************
            # the validation criteria = nb of runnable tests x 3
            # because each test case scores 0, 1, 2 or 3
            scenario_criteria = nb_test_runnable_for_this_scenario * 3
            # if there are no runnable tests, set the criteria to a high
            # value so the scenario cannot be validated
            if scenario_criteria < 1:
                scenario_criteria = conf.MAX_SCENARIO_CRITERIA

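            # a scenario is validated only if every runnable test case
            # reached the maximum score of 3
            # (i.e. scenario_score == scenario_criteria)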
            s_score = str(scenario_score) + "/" + str(scenario_criteria)
            s_status = "KO"
            if scenario_score < scenario_criteria:
                print (">>>> scenario not OK, score = %s/%s" %
                       (scenario_score, scenario_criteria))
                s_status = "KO"
            else:
                print ">>>>> scenario OK, save the information"
                s_status = "OK"
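                # record the validated scenario in the per-version history
                # file (timestamp;installer;scenario)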
                path_validation_file = ("./release/" + version +
                                        "/validated_scenario_history.txt")
                with open(path_validation_file, "a") as f:
                    time_format = "%Y-%m-%d %H:%M"
                    info = (datetime.datetime.now().strftime(time_format) +
                            ";" + installer + ";" + s + "\n")
                    f.write(info)

            scenario_result_criteria[s] = sr.ScenarioResult(s_status, s_score)
            print "--------------------------"

        templateLoader = jinja2.FileSystemLoader(
            os.path.dirname(os.path.abspath(__file__)))
        templateEnv = jinja2.Environment(loader=templateLoader)

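        # Render the status page for this installer/version; the result is
        # written to ./release/<version>/index-status-<installer>.html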
        TEMPLATE_FILE = "./template/index-status-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_stats=scenario_stats,
                                     scenario_results=scenario_result_criteria,
                                     items=items,
                                     installer=installer,
                                     period=conf.PERIOD,
                                     version=version)

        with open("./release/" + version +
                  "/index-status-" + installer + ".html", "wb") as fh:
            fh.write(outputText)