02bf67d0e96996da13af09678547edeee068c0b8
[releng.git] / utils / test / reporting / reporting / functest / reporting-status.py
#!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
import os
import sys
import time

import jinja2

import testCase as tc
import scenarioResult as sr
import reporting.utils.reporting_utils as rp_utils

"""
Functest reporting status
"""

# Logger
logger = rp_utils.getLogger("Functest-Status")

# Initialization
testValid = []
otherTestCases = []
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

# init just connection_check to get the list of scenarios
# as all the scenarios run connection_check
healthcheck = tc.TestCase("connection_check", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer and scenario
cf = rp_utils.get_config('functest.test_conf')
period = rp_utils.get_config('general.period')
versions = rp_utils.get_config('general.versions')
installers = rp_utils.get_config('general.installers')
blacklist = rp_utils.get_config('functest.blacklist')
log_level = rp_utils.get_config('general.log.log_level')
exclude_noha = rp_utils.get_config('functest.exclude_noha')
exclude_virtual = rp_utils.get_config('functest.exclude_virtual')

functest_yaml_config = rp_utils.getFunctestConfig()
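# the parsed Functest test configuration; its "tiers" section is walked
# below to build the per-tier lists of test cases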

logger.info("*******************************************")
logger.info("*                                         *")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention: %s days               *" % period)
logger.info("*   Log level: %s                         *" % log_level)
logger.info("*                                         *")
logger.info("*   Virtual PODs excluded: %s             *" % exclude_virtual)
logger.info("*   NOHA scenarios excluded: %s           *" % exclude_noha)
logger.info("*                                         *")
logger.info("*******************************************")

# Retrieve the test cases defined for each tier
config_tiers = functest_yaml_config.get("tiers")

# we consider Tier 0 (Healthcheck), Tier 1 (smoke) and Tier 2 (features)
# to validate scenarios
# Tiers > 2 are not used to validate scenarios, but their results are
# displayed anyway
# this is tricky for the API as some tests belong to the functest project
# while others are declared directly in the feature projects
for tier in config_tiers:
    if tier['order'] >= 0 and tier['order'] < 2:
        for case in tier['testcases']:
            if case['case_name'] not in blacklist:
                testValid.append(tc.TestCase(case['case_name'],
                                             "functest",
                                             case['dependencies']))
    elif tier['order'] == 2:
        for case in tier['testcases']:
            if case['case_name'] not in blacklist:
                testValid.append(tc.TestCase(case['case_name'],
                                             case['case_name'],
                                             case['dependencies']))
    elif tier['order'] > 2:
        for case in tier['testcases']:
            if case['case_name'] not in blacklist:
                otherTestCases.append(tc.TestCase(case['case_name'],
                                                  "functest",
                                                  case['dependencies']))
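# testValid now holds the Tier 0-2 cases used to validate a scenario,
# otherTestCases the Tier > 2 cases whose results are only displayed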

logger.debug("Functest reporting start")

# For all the versions
for version in versions:
    scenario_directory = "./display/" + version + "/functest/"
    scenario_file_name = scenario_directory + "scenario_history.txt"

    # check that the directory exists, if not create it
    # (first run on a new version)
    if not os.path.exists(scenario_directory):
        os.makedirs(scenario_directory)

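    # the scenario history file accumulates one CSV line per scenario and
    # run: date, scenario, installer (suffixed with the architecture when
    # relevant), score as 'obtained/criteria' and the rounded percentage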
    # initialize the scenario file if it does not exist
    if not os.path.isfile(scenario_file_name):
        with open(scenario_file_name, "a") as my_file:
            logger.debug("Create scenario file: %s" % scenario_file_name)
            my_file.write("date,scenario,installer,detail,score\n")

    # For all the installers
    for installer in installers:

        # get scenarios
        scenario_results = rp_utils.getScenarios("functest",
                                                 "connection_check",
                                                 installer,
                                                 version)
        # get the number of supported architectures (x86, aarch64)
        architectures = rp_utils.getArchitectures(scenario_results)
        logger.info("Supported architectures: {}".format(architectures))

        for architecture in architectures:
            logger.info("architecture: {}".format(architecture))
            # Consider only the results for the selected architecture
            # i.e. drop x86 for aarch64 and vice versa
            filter_results = rp_utils.filterArchitecture(scenario_results,
                                                         architecture)
            scenario_stats = rp_utils.getScenarioStats(filter_results)
            items = {}
            scenario_result_criteria = {}

            # when more than one architecture is supported,
            # make the architecture explicit in the installer name
            installer_display = installer
            if "fuel" in installer:
                installer_display = installer + "@" + architecture
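                # e.g. fuel@<architecture>, so that x86 and aarch64
                # results are reported separately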

            # For all the scenarios get results
            for s, s_result in filter_results.items():
                logger.info("---------------------------------")
                logger.info("installer %s, version %s, scenario %s:" %
                            (installer, version, s))
                logger.debug("Scenario results: %s" % s_result)

                # Green or Red light for a given scenario
                nb_test_runnable_for_this_scenario = 0
                scenario_score = 0
                # url of the last Jenkins log corresponding to a given
                # scenario
                s_url = ""
                if len(s_result) > 0:
                    build_tag = s_result[len(s_result)-1]['build_tag']
                    logger.debug("Build tag: %s" % build_tag)
                    s_url = rp_utils.getJenkinsUrl(build_tag)
                    if s_url is None:
                        s_url = "http://testresults.opnfv.org/reporting"
                    logger.info("last jenkins url: %s" % s_url)
                testCases2BeDisplayed = []
                # Check if the test case is runnable for this installer and
                # scenario (test cases used for scenario validation)
                try:
                    # 1) Manage the test cases for the scenario validation
                    # concretely Tiers 0-2
                    for test_case in testValid:
                        test_case.checkRunnable(installer, s,
                                                test_case.getConstraints())
                        logger.debug("testcase %s (%s) is %s" %
                                     (test_case.getDisplayName(),
                                      test_case.getName(),
                                      test_case.isRunnable))
                        time.sleep(1)
                        if test_case.isRunnable:
                            name = test_case.getName()
                            displayName = test_case.getDisplayName()
                            project = test_case.getProject()
                            nb_test_runnable_for_this_scenario += 1
                            logger.info(" Searching results for case %s " %
                                        (displayName))
                            result = rp_utils.getResult(name, installer,
                                                        s, version)
                            # if no result, set the value to 0
                            if result < 0:
                                result = 0
                            logger.info(" >>>> Test score = " + str(result))
                            test_case.setCriteria(result)
                            test_case.setIsRunnable(True)
                            testCases2BeDisplayed.append(tc.TestCase(name,
                                                                     project,
                                                                     "",
                                                                     result,
                                                                     True,
                                                                     1))
                            scenario_score = scenario_score + result

                    # 2) Manage the test cases for the scenario qualification
                    # concretely Tiers > 2
                    for test_case in otherTestCases:
                        test_case.checkRunnable(installer, s,
                                                test_case.getConstraints())
                        logger.debug("testcase %s (%s) is %s" %
                                     (test_case.getDisplayName(),
                                      test_case.getName(),
                                      test_case.isRunnable))
                        time.sleep(1)
                        if test_case.isRunnable:
                            name = test_case.getName()
                            displayName = test_case.getDisplayName()
                            project = test_case.getProject()
                            logger.info(" Searching results for case %s " %
                                        (displayName))
                            result = rp_utils.getResult(name, installer,
                                                        s, version)
                            # at least 1 result for the test
                            if result > -1:
                                test_case.setCriteria(result)
                                test_case.setIsRunnable(True)
                                testCases2BeDisplayed.append(tc.TestCase(
                                    name,
                                    project,
                                    "",
                                    result,
                                    True,
                                    4))
                            else:
                                logger.debug("No results found")

                        items[s] = testCases2BeDisplayed
                except Exception:
                    logger.error("Error: installer %s, version %s, scenario %s"
                                 % (installer, version, s))
                    logger.error("No data available: %s" % (sys.exc_info()[0]))

                # **********************************************
                # Evaluate the results for scenario validation
                # **********************************************
                # the validation criteria = nb runnable tests x 3
                # because each test case = 0, 1, 2 or 3
                scenario_criteria = nb_test_runnable_for_this_scenario * 3
                # if no test is runnable, set the criteria to a high value
                if scenario_criteria < 1:
                    scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA
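                # a scenario is declared OK only when scenario_score reaches
                # scenario_criteria, i.e. with the 0-3 scoring above every
                # runnable validation test must get the maximum score
                # (e.g. 10 runnable tests -> 30 points required)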

                s_score = str(scenario_score) + "/" + str(scenario_criteria)
                s_score_percent = rp_utils.getScenarioPercent(
                    scenario_score,
                    scenario_criteria)

                s_status = "KO"
                if scenario_score < scenario_criteria:
                    logger.info(">>>> scenario not OK, score = %s/%s" %
                                (scenario_score, scenario_criteria))
                    s_status = "KO"
                else:
                    logger.info(">>>>> scenario OK, save the information")
                    s_status = "OK"
                    path_validation_file = ("./display/" + version +
                                            "/functest/" +
                                            "validated_scenario_history.txt")
                    with open(path_validation_file, "a") as f:
                        time_format = "%Y-%m-%d %H:%M"
                        info = (datetime.datetime.now().strftime(time_format) +
                                ";" + installer_display + ";" + s + "\n")
                        f.write(info)

                # Save daily results in a file
                with open(scenario_file_name, "a") as f:
                    info = (reportingDate + "," + s + "," + installer_display +
                            "," + s_score + "," +
                            str(round(s_score_percent)) + "\n")
                    f.write(info)

                scenario_result_criteria[s] = sr.ScenarioResult(
                    s_status,
                    s_score,
                    s_score_percent,
                    s_url)
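                # keep the consolidated status / score / url per scenario
                # for the template rendering below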
                logger.info("--------------------------")

            templateLoader = jinja2.FileSystemLoader(".")
            templateEnv = jinja2.Environment(
                loader=templateLoader, autoescape=True)

            TEMPLATE_FILE = ("./reporting/functest/template"
                             "/index-status-tmpl.html")
            template = templateEnv.get_template(TEMPLATE_FILE)

            outputText = template.render(
                scenario_stats=scenario_stats,
                scenario_results=scenario_result_criteria,
                items=items,
                installer=installer_display,
                period=period,
                version=version,
                date=reportingDate)

            with open("./display/" + version +
                      "/functest/status-" +
                      installer_display + ".html", "wb") as fh:
                fh.write(outputText)

            logger.info("Manage export CSV & PDF")
            rp_utils.export_csv(scenario_file_name, installer_display, version)
            logger.info("CSV generated...")

            # Generate outputs for export
            # pdf
            url_pdf = rp_utils.get_config('general.url')
            pdf_path = ("./display/" + version +
                        "/functest/status-" + installer_display + ".html")
            pdf_doc_name = ("./display/" + version +
                            "/functest/status-" + installer_display + ".pdf")
            rp_utils.export_pdf(pdf_path, pdf_doc_name)
            logger.info("PDF generated...")