#!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
import jinja2
import os
import sys
import time

import testCase as tc
import scenarioResult as sr

# manage conf
import utils.reporting_utils as rp_utils

# Logger
logger = rp_utils.getLogger("Functest-Status")

# Initialization
testValid = []
otherTestCases = []
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

# init just connection_check to get the list of scenarios
# as all the scenarios run connection_check
healthcheck = tc.TestCase("connection_check", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
cf = rp_utils.get_config('functest.test_conf')
period = rp_utils.get_config('general.period')
versions = rp_utils.get_config('general.versions')
installers = rp_utils.get_config('general.installers')
blacklist = rp_utils.get_config('functest.blacklist')
log_level = rp_utils.get_config('general.log.log_level')
exclude_noha = rp_utils.get_config('functest.exclude_noha')
exclude_virtual = rp_utils.get_config('functest.exclude_virtual')

functest_yaml_config = rp_utils.getFunctestConfig()

logger.info("*******************************************")
logger.info("*                                         *")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention: %s days               *" % period)
logger.info("*   Log level: %s                         *" % log_level)
logger.info("*                                         *")
logger.info("*   Virtual PODs excluded: %s             *" % exclude_virtual)
logger.info("*   NOHA scenarios excluded: %s           *" % exclude_noha)
logger.info("*                                         *")
logger.info("*******************************************")

# Retrieve test cases of Tier 1 (smoke)
config_tiers = functest_yaml_config.get("tiers")

# we consider Tier 0 (Healthcheck), Tier 1 (smoke) and Tier 2 (features)
# to validate scenarios
# Tiers > 2 are not used to validate scenarios but results are displayed anyway
# tricky thing for the API as some tests are Functest tests
# while other tests are declared directly in the feature projects
for tier in config_tiers:
    if tier['order'] >= 0 and tier['order'] < 2:
        for case in tier['testcases']:
            if case['case_name'] not in blacklist:
                testValid.append(tc.TestCase(case['case_name'],
                                             "functest",
                                             case['dependencies']))
    elif tier['order'] == 2:
        for case in tier['testcases']:
            if case['case_name'] not in blacklist:
                testValid.append(tc.TestCase(case['case_name'],
                                             case['case_name'],
                                             case['dependencies']))
    elif tier['order'] > 2:
        for case in tier['testcases']:
            if case['case_name'] not in blacklist:
                otherTestCases.append(tc.TestCase(case['case_name'],
                                                  "functest",
                                                  case['dependencies']))

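# testValid cases (Tiers 0-2) contribute to the scenario score below,
# otherTestCases (Tiers > 2) are reported for information only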
logger.debug("Functest reporting start")

# For all the versions
for version in versions:
    # For all the installers
    scenario_directory = "./display/" + version + "/functest/"
    scenario_file_name = scenario_directory + "scenario_history.txt"

    # check that the directory exists, if not create it
    # (first run on new version)
    if not os.path.exists(scenario_directory):
        os.makedirs(scenario_directory)

    # initiate scenario file if it does not exist
    if not os.path.isfile(scenario_file_name):
        with open(scenario_file_name, "a") as my_file:
            logger.debug("Create scenario file: %s" % scenario_file_name)
            my_file.write("date,scenario,installer,detail,score\n")
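    # each reporting run appends one line per scenario, e.g. (illustrative):
    # 2017-01-01 12:00,os-nosdn-nofeature-ha,apex,12/15,80.0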

    for installer in installers:

        # get scenarios
        scenario_results = rp_utils.getScenarios(healthcheck,
                                                 installer,
                                                 version)

        # get nb of supported architectures (x86, aarch64)
        architectures = rp_utils.getArchitectures(scenario_results)
        logger.info("Supported architectures: {}".format(architectures))

        for architecture in architectures:
            logger.info("architecture: {}".format(architecture))
            # Consider only the results for the selected architecture
            # i.e. drop x86 for aarch64 and vice versa
            filter_results = rp_utils.filterArchitecture(scenario_results,
                                                         architecture)
            scenario_stats = rp_utils.getScenarioStats(filter_results)
            items = {}
            scenario_result_criteria = {}

            # in case more than 1 architecture is supported
            # append the architecture to the installer name
            installer_display = installer
            if len(architectures) > 1:
                installer_display = installer + "@" + architecture
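                # e.g. "apex@aarch64" (illustrative), so that each
                # architecture gets its own status page below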

            # For all the scenarios get results
            for s, s_result in filter_results.items():
                logger.info("---------------------------------")
                logger.info("installer %s, version %s, scenario %s:" %
                            (installer, version, s))
                logger.debug("Scenario results: %s" % s_result)

                # Green or Red light for a given scenario
                nb_test_runnable_for_this_scenario = 0
                scenario_score = 0
                # url of the last jenkins log corresponding to a given
                # scenario
                s_url = ""
                if len(s_result) > 0:
                    build_tag = s_result[-1]['build_tag']
                    logger.debug("Build tag: %s" % build_tag)
                    s_url = rp_utils.getJenkinsUrl(build_tag)
                    if s_url is None:
                        s_url = "http://testresults.opnfv.org/reporting"
                    logger.info("last jenkins url: %s" % s_url)
                testCases2BeDisplayed = []
                # Check if the test case is runnable for this installer
                # and scenario, starting with the cases used for validation
                try:
                    # 1) Manage the test cases for the scenario validation
                    # concretely Tiers 0-2
                    for test_case in testValid:
                        test_case.checkRunnable(installer, s,
                                                test_case.getConstraints())
                        logger.debug("testcase %s (%s) is %s" %
                                     (test_case.getDisplayName(),
                                      test_case.getName(),
                                      test_case.isRunnable))
                        time.sleep(1)
                        if test_case.isRunnable:
                            dbName = test_case.getDbName()
                            name = test_case.getName()
                            displayName = test_case.getDisplayName()
                            project = test_case.getProject()
                            nb_test_runnable_for_this_scenario += 1
                            logger.info(" Searching results for case %s " %
                                        (displayName))
                            result = rp_utils.getResult(dbName, installer,
                                                        s, version)
                            # if no result set the value to 0
                            if result < 0:
                                result = 0
                            logger.info(" >>>> Test score = " + str(result))
                            test_case.setCriteria(result)
                            test_case.setIsRunnable(True)
                            testCases2BeDisplayed.append(tc.TestCase(name,
                                                                     project,
                                                                     "",
                                                                     result,
                                                                     True,
                                                                     1))
                            scenario_score = scenario_score + result

                    # 2) Manage the test cases for the scenario qualification
                    # concretely Tiers > 2
                    for test_case in otherTestCases:
                        test_case.checkRunnable(installer, s,
                                                test_case.getConstraints())
                        logger.debug("testcase %s (%s) is %s" %
                                     (test_case.getDisplayName(),
                                      test_case.getName(),
                                      test_case.isRunnable))
                        time.sleep(1)
                        if test_case.isRunnable:
                            dbName = test_case.getDbName()
                            name = test_case.getName()
                            displayName = test_case.getDisplayName()
                            project = test_case.getProject()
                            logger.info(" Searching results for case %s " %
                                        (displayName))
                            result = rp_utils.getResult(dbName, installer,
                                                        s, version)
                            # at least 1 result for the test
                            if result > -1:
                                test_case.setCriteria(result)
                                test_case.setIsRunnable(True)
                                testCases2BeDisplayed.append(tc.TestCase(
                                    name,
                                    project,
                                    "",
                                    result,
                                    True,
                                    4))
                            else:
                                logger.debug("No results found")

                        items[s] = testCases2BeDisplayed
                except Exception:
                    logger.error("Error: installer %s, version %s, scenario %s"
                                 % (installer, version, s))
                    logger.error("No data available: %s" % (sys.exc_info()[0]))

                # **********************************************
                # Evaluate the results for scenario validation
                # **********************************************
                # the validation criteria = nb runnable tests x 3
                # because each test case = 0,1,2 or 3
                scenario_criteria = nb_test_runnable_for_this_scenario * 3
                # if 0 runnable tests set criteria at a high value
                if scenario_criteria < 1:
                    scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
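                    # a scenario with no runnable tests then scores 0/50
                    # and is always reported as KO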

                s_score = str(scenario_score) + "/" + str(scenario_criteria)
                s_score_percent = rp_utils.getScenarioPercent(
                    scenario_score,
                    scenario_criteria)

                s_status = "KO"
                if scenario_score < scenario_criteria:
                    logger.info(">>>> scenario not OK, score = %s/%s" %
                                (scenario_score, scenario_criteria))
                    s_status = "KO"
                else:
                    logger.info(">>>>> scenario OK, save the information")
                    s_status = "OK"
                    path_validation_file = ("./display/" + version +
                                            "/functest/" +
                                            "validated_scenario_history.txt")
                    with open(path_validation_file, "a") as f:
                        time_format = "%Y-%m-%d %H:%M"
                        info = (datetime.datetime.now().strftime(time_format) +
                                ";" + installer_display + ";" + s + "\n")
                        f.write(info)
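                        # one line per validated scenario, e.g. (illustrative):
                        # 2017-01-01 12:00;apex;os-nosdn-nofeature-ha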

                # Save daily results in a file
                with open(scenario_file_name, "a") as f:
                    info = (reportingDate + "," + s + "," + installer_display +
                            "," + s_score + "," +
                            str(round(s_score_percent)) + "\n")
                    f.write(info)

                scenario_result_criteria[s] = sr.ScenarioResult(
                    s_status,
                    s_score,
                    s_score_percent,
                    s_url)
                logger.info("--------------------------")

            templateLoader = jinja2.FileSystemLoader(".")
            templateEnv = jinja2.Environment(
                loader=templateLoader, autoescape=True)

            TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
            template = templateEnv.get_template(TEMPLATE_FILE)

            outputText = template.render(
                            scenario_stats=scenario_stats,
                            scenario_results=scenario_result_criteria,
                            items=items,
                            installer=installer_display,
                            period=period,
                            version=version,
                            date=reportingDate)

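            # the rendered page goes to
            # ./display/<version>/functest/status-<installer_display>.html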
            with open("./display/" + version +
                      "/functest/status-" +
                      installer_display + ".html", "wb") as fh:
                fh.write(outputText)

            logger.info("Manage export CSV & PDF")
            rp_utils.export_csv(scenario_file_name, installer_display, version)
            logger.info("CSV generated...")

            # Generate outputs for export
            # pdf
            # TODO Change once web site updated...use the current one
            # to test pdf production
            url_pdf = rp_utils.get_config('general.url')
            pdf_path = ("./display/" + version +
                        "/functest/status-" + installer_display + ".html")
            pdf_doc_name = ("./display/" + version +
                            "/functest/status-" + installer_display + ".pdf")
            rp_utils.export_pdf(pdf_path, pdf_doc_name)
            logger.info("PDF generated...")