Merge "add openrc fetching for daisy installer"
[releng.git] / utils / test / reporting / functest / reporting-status.py
1 #!/usr/bin/python
2 #
3 # This program and the accompanying materials
4 # are made available under the terms of the Apache License, Version 2.0
5 # which accompanies this distribution, and is available at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 import datetime
10 import jinja2
11 import os
12 import requests
13 import sys
14 import time
15 import yaml
16
17 import testCase as tc
18 import scenarioResult as sr
19
20 # manage conf
21 import utils.reporting_utils as rp_utils
22
23 # Logger
24 logger = rp_utils.getLogger("Functest-Status")
25
# Initialization
# testValid: test cases whose scores count toward scenario validation
testValid = []
# otherTestCases: extra test cases displayed but not used for validation
otherTestCases = []
# timestamp stamped on every generated report row/page
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

# init just connection_check to get the list of scenarios
# as all the scenarios run connection_check
healthcheck = tc.TestCase("connection_check", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
cf = rp_utils.get_config('functest.test_conf')  # URL of the testcases yaml
period = rp_utils.get_config('general.period')  # data retention in days
versions = rp_utils.get_config('general.versions')
installers = rp_utils.get_config('general.installers')
blacklist = rp_utils.get_config('functest.blacklist')
log_level = rp_utils.get_config('general.log.log_level')
exclude_noha = rp_utils.get_config('functest.exclude_noha')
exclude_virtual = rp_utils.get_config('functest.exclude_virtual')

# fetch the functest testcases description (yaml) over HTTP
response = requests.get(cf)

functest_yaml_config = yaml.safe_load(response.text)
49
# Startup banner summarizing the active reporting configuration.
# fix: "exluded" -> "excluded" (padding adjusted to keep the box aligned)
logger.info("*******************************************")
logger.info("*                                         *")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention: %s days               *" % period)
logger.info("*   Log level: %s                         *" % log_level)
logger.info("*                                         *")
logger.info("*   Virtual PODs excluded: %s             *" % exclude_virtual)
logger.info("*   NOHA scenarios excluded: %s           *" % exclude_noha)
logger.info("*                                         *")
logger.info("*******************************************")
60
# Retrieve test cases of Tier 1 (smoke)
config_tiers = functest_yaml_config.get("tiers")

# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features)
# to validate scenarios
# Tier > 2 are not used to validate scenarios but we display the results anyway
# tricky thing for the API as some tests are Functest tests
# other tests are declared directly in the feature projects
for tier in config_tiers:
    tier_order = tier['order']
    # negative orders are not reported at all
    if tier_order < 0:
        continue
    for case in tier['testcases']:
        case_name = case['name']
        if case_name in blacklist:
            continue
        deps = case['dependencies']
        if tier_order < 2:
            # Tiers 0/1: functest-owned cases, stored under "functest"
            testValid.append(tc.TestCase(case_name, "functest", deps))
        elif tier_order == 2:
            # Tier 2: feature cases, stored under their own project name
            testValid.append(tc.TestCase(case_name, case_name, deps))
        else:
            # Tier > 2: displayed for information, not used for validation
            otherTestCases.append(tc.TestCase(case_name, "functest", deps))
88
logger.debug("Functest reporting start")
# Main reporting loop: for each (version, installer) pair, collect the
# scenario results from the test API, score each scenario, and render the
# HTML/CSV/PDF status pages under ./display/<version>/functest/.
# For all the versions
for version in versions:
    # For all the installers
    for installer in installers:
        # get scenarios (all scenarios run connection_check)
        scenario_results = rp_utils.getScenarios(healthcheck,
                                                 installer,
                                                 version)
        scenario_stats = rp_utils.getScenarioStats(scenario_results)
        items = {}
        scenario_result_criteria = {}
        scenario_file_name = ("./display/" + version +
                              "/functest/scenario_history.txt")
        # initiate scenario file if it does not exist
        if not os.path.isfile(scenario_file_name):
            with open(scenario_file_name, "a") as my_file:
                logger.debug("Create scenario file: %s" % scenario_file_name)
                my_file.write("date,scenario,installer,detail,score\n")

        # For all the scenarios get results
        for s, s_result in scenario_results.items():
            logger.info("---------------------------------")
            logger.info("installer %s, version %s, scenario %s:" %
                        (installer, version, s))
            logger.debug("Scenario results: %s" % s_result)

            # Green or Red light for a given scenario
            nb_test_runnable_for_this_scenario = 0
            scenario_score = 0
            # url of the last jenkins log corresponding to a given
            # scenario
            s_url = ""
            if len(s_result) > 0:
                build_tag = s_result[len(s_result)-1]['build_tag']
                logger.debug("Build tag: %s" % build_tag)
                # fix: collapsed duplicated "s_url = s_url = ..." assignment
                s_url = rp_utils.getJenkinsUrl(build_tag)
                logger.info("last jenkins url: %s" % s_url)
            testCases2BeDisplayed = []
            # Check if test case is runnable / installer, scenario
            # for the test case used for Scenario validation
            try:
                # 1) Manage the test cases for the scenario validation
                # concretely Tiers 0-3
                for test_case in testValid:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    # throttle calls towards the test API
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        nb_test_runnable_for_this_scenario += 1
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = rp_utils.getResult(dbName, installer,
                                                    s, version)
                        # if no result set the value to 0
                        if result < 0:
                            result = 0
                        logger.info(" >>>> Test score = " + str(result))
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 "",
                                                                 result,
                                                                 True,
                                                                 1))
                        scenario_score = scenario_score + result

                # 2) Manage the test cases for the scenario qualification
                # concretely Tiers > 3
                for test_case in otherTestCases:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = rp_utils.getResult(dbName, installer,
                                                    s, version)
                        # at least 1 result for the test
                        if result > -1:
                            test_case.setCriteria(result)
                            test_case.setIsRunnable(True)
                            testCases2BeDisplayed.append(tc.TestCase(name,
                                                                     project,
                                                                     "",
                                                                     result,
                                                                     True,
                                                                     4))
                        else:
                            logger.debug("No results found")

                # fix: record the displayable cases once, after both loops.
                # This assignment used to live inside the loop above, so a
                # scenario with no runnable Tier > 2 case was never added
                # to items at all.
                items[s] = testCases2BeDisplayed
            # fix: narrowed bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed
            except Exception:
                logger.error("Error: installer %s, version %s, scenario %s" %
                             (installer, version, s))
                logger.error("No data available: %s " % (sys.exc_info()[0]))

            # **********************************************
            # Evaluate the results for scenario validation
            # **********************************************
            # the validation criteria = nb runnable tests x 3
            # because each test case = 0,1,2 or 3
            scenario_criteria = nb_test_runnable_for_this_scenario * 3
            # if 0 runnable tests set criteria at a high value
            if scenario_criteria < 1:
                scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA

            s_score = str(scenario_score) + "/" + str(scenario_criteria)
            s_score_percent = rp_utils.getScenarioPercent(scenario_score,
                                                          scenario_criteria)

            s_status = "KO"
            if scenario_score < scenario_criteria:
                logger.info(">>>> scenario not OK, score = %s/%s" %
                            (scenario_score, scenario_criteria))
                s_status = "KO"
            else:
                logger.info(">>>>> scenario OK, save the information")
                s_status = "OK"
                path_validation_file = ("./display/" + version +
                                        "/functest/" +
                                        "validated_scenario_history.txt")
                with open(path_validation_file, "a") as f:
                    time_format = "%Y-%m-%d %H:%M"
                    info = (datetime.datetime.now().strftime(time_format) +
                            ";" + installer + ";" + s + "\n")
                    f.write(info)

            # Save daily results in a file
            # NOTE(review): the row written here is
            # date,scenario,installer,score,percent while the header created
            # above says date,scenario,installer,detail,score -- confirm the
            # intended column layout before changing either side
            with open(scenario_file_name, "a") as f:
                info = (reportingDate + "," + s + "," + installer +
                        "," + s_score + "," +
                        str(round(s_score_percent)) + "\n")
                f.write(info)

            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
                                                            s_score,
                                                            s_score_percent,
                                                            s_url)
            logger.info("--------------------------")

        # Render the per-installer HTML status page from the jinja2 template
        templateLoader = jinja2.FileSystemLoader(".")
        templateEnv = jinja2.Environment(
            loader=templateLoader, autoescape=True)

        TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_stats=scenario_stats,
                                     scenario_results=scenario_result_criteria,
                                     items=items,
                                     installer=installer,
                                     period=period,
                                     version=version,
                                     date=reportingDate)

        with open("./display/" + version +
                  "/functest/status-" + installer + ".html", "wb") as fh:
            fh.write(outputText)

        logger.info("Manage export CSV & PDF")
        rp_utils.export_csv(scenario_file_name, installer, version)
        # fix: this is an informational message, not an error
        logger.info("CSV generated...")

        # Generate outputs for export
        # pdf
        # TODO Change once web site updated...use the current one
        # to test pdf production
        url_pdf = rp_utils.get_config('general.url')
        pdf_path = ("./display/" + version +
                    "/functest/status-" + installer + ".html")
        pdf_doc_name = ("./display/" + version +
                        "/functest/status-" + installer + ".pdf")
        rp_utils.export_pdf(pdf_path, pdf_doc_name)
        logger.info("PDF generated...")