Merge "Add CI job for traffic stress test in POSCA testsuite"
[releng.git] / utils / test / reporting / functest / reporting-status.py
1 #!/usr/bin/python
2 #
3 # This program and the accompanying materials
4 # are made available under the terms of the Apache License, Version 2.0
5 # which accompanies this distribution, and is available at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 import datetime
10 import jinja2
11 import os
12 import requests
13 import sys
14 import time
15 import yaml
16
17 import testCase as tc
18 import scenarioResult as sr
19
20 # manage conf
21 import utils.reporting_utils as rp_utils
22
# Logger used for the whole reporting run
logger = rp_utils.getLogger("Functest-Status")

# Module-level state shared by the reporting loop below
testValid = []          # test cases that count for scenario validation
otherTestCases = []     # test cases displayed but not used for validation
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

# Instantiate tempest only to retrieve the list of scenarios:
# every scenario runs Tempest, so it is a convenient probe.
tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)

# Reporting configuration: which tests are relevant for a given
# installer/scenario, plus the general reporting knobs.
cf = rp_utils.get_config('functest.test_conf')
period = rp_utils.get_config('general.period')
versions = rp_utils.get_config('general.versions')
installers = rp_utils.get_config('general.installers')
blacklist = rp_utils.get_config('functest.blacklist')
log_level = rp_utils.get_config('general.log.log_level')
exclude_noha = rp_utils.get_config('functest.exclude_noha')
exclude_virtual = rp_utils.get_config('functest.exclude_virtual')

# Download and parse the Functest testcases description (YAML)
response = requests.get(cf)
functest_yaml_config = yaml.safe_load(response.text)
49
# Startup banner summarising the active reporting configuration.
# NOTE: fixed typo in the log text ("exluded" -> "excluded"); padding
# adjusted by one space so the banner box stays aligned.
logger.info("*******************************************")
logger.info("*                                         *")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention: %s days               *" % period)
logger.info("*   Log level: %s                         *" % log_level)
logger.info("*                                         *")
logger.info("*   Virtual PODs excluded: %s             *" % exclude_virtual)
logger.info("*   NOHA scenarios excluded: %s           *" % exclude_noha)
logger.info("*                                         *")
logger.info("*******************************************")
60
# Build the two test-case lists from the Functest tier description:
#  - tiers 0 (healthcheck) and 1 (smoke) validate scenarios, results are
#    stored under the "functest" project in the API
#  - tier 2 (features) also validates scenarios, but results are declared
#    directly under each feature project (dbName == case name)
#  - tiers > 2 do not validate scenarios; results are displayed anyway
config_tiers = functest_yaml_config.get("tiers")

for tier in config_tiers:
    order = tier['order']
    for case in tier['testcases']:
        # blacklisted cases are ignored everywhere
        if case['name'] in blacklist:
            continue
        if 0 <= order < 2:
            testValid.append(tc.TestCase(case['name'],
                                         "functest",
                                         case['dependencies']))
        elif order == 2:
            testValid.append(tc.TestCase(case['name'],
                                         case['name'],
                                         case['dependencies']))
        elif order > 2:
            otherTestCases.append(tc.TestCase(case['name'],
                                              "functest",
                                              case['dependencies']))
88
logger.debug("Functest reporting start")

# Produce one status page (HTML + CSV + PDF) per (version, installer) couple.
for version in versions:
    for installer in installers:
        # All the scenarios that ran Tempest for this installer/version
        scenario_results = rp_utils.getScenarios(tempest, installer, version)
        scenario_stats = rp_utils.getScenarioStats(scenario_results)
        items = {}                      # scenario -> displayed test cases
        scenario_result_criteria = {}   # scenario -> ScenarioResult
        scenario_file_name = ("./display/" + version +
                              "/functest/scenario_history.txt")
        # initiate scenario file (with its CSV header) if it does not exist
        if not os.path.isfile(scenario_file_name):
            with open(scenario_file_name, "a") as my_file:
                logger.debug("Create scenario file: %s" % scenario_file_name)
                my_file.write("date,scenario,installer,detail,score\n")

        # For all the scenarios get results
        for s, s_result in scenario_results.items():
            logger.info("---------------------------------")
            logger.info("installer %s, version %s, scenario %s:" %
                        (installer, version, s))
            logger.debug("Scenario results: %s" % s_result)

            # Green or Red light for a given scenario
            nb_test_runnable_for_this_scenario = 0
            scenario_score = 0
            # url of the last jenkins log corresponding to a given scenario
            s_url = ""
            if len(s_result) > 0:
                build_tag = s_result[len(s_result)-1]['build_tag']
                logger.debug("Build tag: %s" % build_tag)
                # BUGFIX: was a duplicated assignment (s_url = s_url = ...)
                s_url = rp_utils.getJenkinsUrl(build_tag)
                logger.info("last jenkins url: %s" % s_url)
            testCases2BeDisplayed = []
            # Check if test case is runnable / installer, scenario
            # for the test case used for Scenario validation
            try:
                # 1) Manage the test cases for the scenario validation
                # concretely Tiers 0-3
                for test_case in testValid:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    # throttle the calls towards the results API
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        nb_test_runnable_for_this_scenario += 1
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = rp_utils.getResult(dbName, installer,
                                                    s, version)
                        # if no result set the value to 0
                        if result < 0:
                            result = 0
                        logger.info(" >>>> Test score = " + str(result))
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 "",
                                                                 result,
                                                                 True,
                                                                 1))
                        scenario_score = scenario_score + result

                # 2) Manage the test cases for the scenario qualification
                # concretely Tiers > 3
                for test_case in otherTestCases:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = rp_utils.getResult(dbName, installer,
                                                    s, version)
                        # at least 1 result for the test
                        if result > -1:
                            test_case.setCriteria(result)
                            test_case.setIsRunnable(True)
                            testCases2BeDisplayed.append(tc.TestCase(name,
                                                                     project,
                                                                     "",
                                                                     result,
                                                                     True,
                                                                     4))
                        else:
                            logger.debug("No results found")

                # BUGFIX: store the display list once, after both loops
                # (previously re-assigned on every otherTestCases iteration)
                items[s] = testCases2BeDisplayed
            # BUGFIX: narrowed from a bare except (which also swallowed
            # SystemExit/KeyboardInterrupt); a failing scenario is logged
            # and reporting continues with the next one.
            except Exception:
                logger.error("Error: installer %s, version %s, scenario %s" %
                             (installer, version, s))
                logger.error("No data available: %s " % (sys.exc_info()[0]))

            # **********************************************
            # Evaluate the results for scenario validation
            # **********************************************
            # the validation criteria = nb runnable tests x 3
            # because each test case = 0,1,2 or 3
            scenario_criteria = nb_test_runnable_for_this_scenario * 3
            # if 0 runnable tests set criteria at a high value
            if scenario_criteria < 1:
                scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA

            s_score = str(scenario_score) + "/" + str(scenario_criteria)
            s_score_percent = rp_utils.getScenarioPercent(scenario_score,
                                                          scenario_criteria)

            s_status = "KO"
            if scenario_score < scenario_criteria:
                logger.info(">>>> scenario not OK, score = %s/%s" %
                            (scenario_score, scenario_criteria))
                s_status = "KO"
            else:
                logger.info(">>>>> scenario OK, save the information")
                s_status = "OK"
                path_validation_file = ("./display/" + version +
                                        "/functest/" +
                                        "validated_scenario_history.txt")
                with open(path_validation_file, "a") as f:
                    time_format = "%Y-%m-%d %H:%M"
                    info = (datetime.datetime.now().strftime(time_format) +
                            ";" + installer + ";" + s + "\n")
                    f.write(info)

            # Save daily results in a file
            with open(scenario_file_name, "a") as f:
                info = (reportingDate + "," + s + "," + installer +
                        "," + s_score + "," +
                        str(round(s_score_percent)) + "\n")
                f.write(info)

            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
                                                            s_score,
                                                            s_score_percent,
                                                            s_url)
            logger.info("--------------------------")

        # Render the HTML status page for this installer/version
        templateLoader = jinja2.FileSystemLoader(".")
        templateEnv = jinja2.Environment(
            loader=templateLoader, autoescape=True)

        TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_stats=scenario_stats,
                                     scenario_results=scenario_result_criteria,
                                     items=items,
                                     installer=installer,
                                     period=period,
                                     version=version,
                                     date=reportingDate)

        with open("./display/" + version +
                  "/functest/status-" + installer + ".html", "wb") as fh:
            fh.write(outputText)

        logger.info("Manage export CSV & PDF")
        rp_utils.export_csv(scenario_file_name, installer, version)
        # BUGFIX: success message was logged at ERROR level
        logger.info("CSV generated...")

        # Generate outputs for export
        # pdf
        # TODO Change once web site updated...use the current one
        # to test pdf production
        url_pdf = rp_utils.get_config('general.url')
        pdf_path = ("./display/" + version +
                    "/functest/status-" + installer + ".html")
        pdf_doc_name = ("./display/" + version +
                        "/functest/status-" + installer + ".pdf")
        rp_utils.export_pdf(pdf_path, pdf_doc_name)
        logger.info("PDF generated...")