Merge "Correct SFC testcase match in the reporting Dashboard"
[releng.git] / utils / test / reporting / functest / reporting-status.py
1 #!/usr/bin/python
2 #
3 # This program and the accompanying materials
4 # are made available under the terms of the Apache License, Version 2.0
5 # which accompanies this distribution, and is available at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 import datetime
10 import jinja2
11 import os
12 import requests
13 import sys
14 import time
15 import yaml
16
17 import testCase as tc
18 import scenarioResult as sr
19
20 # manage conf
21 import utils.reporting_utils as rp_utils
22
# Logger used throughout the reporting run
logger = rp_utils.getLogger("Functest-Status")

# Collected test case descriptors, split by role:
# - testValid: cases used to validate a scenario (tiers 1-3)
# - otherTestCases: informational cases (tiers > 3)
testValid = []
otherTestCases = []
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

# Tempest runs on every scenario, so this single test case is used
# as the reference to enumerate the scenarios later on.
tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)

# Reporting configuration: which tests/installers/versions are in scope
# according to the installer, scenario
cf = rp_utils.get_config('functest.test_conf')
period = rp_utils.get_config('general.period')
versions = rp_utils.get_config('general.versions')
installers = rp_utils.get_config('general.installers')
blacklist = rp_utils.get_config('functest.blacklist')
log_level = rp_utils.get_config('general.log.log_level')

# Fetch the Functest test configuration (testcases yaml) over HTTP.
# NOTE(review): no timeout is set on this request, so it can block
# indefinitely if the server is unreachable — consider
# requests.get(cf, timeout=...).
response = requests.get(cf)
functest_yaml_config = yaml.safe_load(response.text)

# Start-up banner (same lines as before, emitted from a tuple)
for banner_line in ("*******************************************",
                    "*                                         *",
                    "*   Generating reporting scenario status  *",
                    "*   Data retention: %s days               *" % period,
                    "*   Log level: %s                       *" % log_level,
                    "*                                         *",
                    "*******************************************"):
    logger.info(banner_line)
54
# Retrieve the test cases declared per tier in the Functest config.
config_tiers = functest_yaml_config.get("tiers")

# Tiers 1 (smoke), 2 (sdn suites) and 3 (features) validate scenarios;
# tiers > 3 are displayed but not used for validation.
# Tricky thing for the API: some tests are Functest tests, others are
# declared directly in the feature projects (hence the db name switch
# for tier 3 below).
for tier in config_tiers:
    order = tier['order']
    if order <= 0:
        # tier 0 (healthcheck) is not used for reporting at all
        continue
    for case in tier['testcases']:
        name = case['name']
        if name in blacklist:
            continue
        if order < 3:
            # tiers 1-2: results are stored under the functest project
            testValid.append(tc.TestCase(name,
                                         "functest",
                                         case['dependencies']))
        elif order == 3:
            # tier 3: feature projects store results under their own name
            testValid.append(tc.TestCase(name,
                                         name,
                                         case['dependencies']))
        else:
            # tier > 3: informational only
            otherTestCases.append(tc.TestCase(name,
                                              "functest",
                                              case['dependencies']))
82
logger.debug("Functest reporting start")

# Build one status page (HTML + history CSV + PDF) per
# (version, installer) combination.
for version in versions:
    # For all the installers
    for installer in installers:
        # Scenarios are discovered through the tempest results, as all
        # scenarios run tempest.
        scenario_results = rp_utils.getScenarios(tempest, installer, version)
        scenario_stats = rp_utils.getScenarioStats(scenario_results)
        items = {}
        scenario_result_criteria = {}

        scenario_file_name = ("./display/" + version +
                              "/functest/scenario_history.txt")
        # initiate scenario history file (CSV header) if it does not exist
        if not os.path.isfile(scenario_file_name):
            with open(scenario_file_name, "a") as my_file:
                logger.debug("Create scenario file: %s" % scenario_file_name)
                my_file.write("date,scenario,installer,detail,score\n")

        # For all the scenarios get results
        for s, s_result in scenario_results.items():
            logger.info("---------------------------------")
            logger.info("installer %s, version %s, scenario %s:" %
                        (installer, version, s))
            logger.debug("Scenario results: %s" % s_result)

            # Green or Red light for a given scenario
            nb_test_runnable_for_this_scenario = 0
            scenario_score = 0
            # url of the last jenkins log corresponding to a given
            # scenario
            s_url = ""
            if len(s_result) > 0:
                build_tag = s_result[len(s_result) - 1]['build_tag']
                logger.debug("Build tag: %s" % build_tag)
                # BUGFIX: was the duplicated "s_url = s_url = ..." assignment
                s_url = rp_utils.getJenkinsUrl(build_tag)
                logger.info("last jenkins url: %s" % s_url)
            testCases2BeDisplayed = []
            # Check if test case is runnable / installer, scenario
            # for the test case used for Scenario validation
            try:
                # 1) Manage the test cases for the scenario validation
                # concretely Tiers 0-3
                for test_case in testValid:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    # throttle the queries against the test API
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        nb_test_runnable_for_this_scenario += 1
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = rp_utils.getResult(dbName, installer,
                                                    s, version)
                        # if no result set the value to 0
                        if result < 0:
                            result = 0
                        logger.info(" >>>> Test score = " + str(result))
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 "",
                                                                 result,
                                                                 True,
                                                                 1))
                        scenario_score = scenario_score + result

                # 2) Manage the test cases for the scenario qualification
                # concretely Tiers > 3
                for test_case in otherTestCases:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    # throttle the queries against the test API
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = rp_utils.getResult(dbName, installer,
                                                    s, version)
                        # at least 1 result for the test
                        if result > -1:
                            test_case.setCriteria(result)
                            test_case.setIsRunnable(True)
                            testCases2BeDisplayed.append(tc.TestCase(name,
                                                                     project,
                                                                     "",
                                                                     result,
                                                                     True,
                                                                     4))
                        else:
                            logger.debug("No results found")

                # BUGFIX: moved out of the loop above. It used to be
                # re-assigned on every tier > 3 iteration and, worse, was
                # never set at all when otherTestCases was empty, which
                # hid the validation (tier 1-3) results from the page.
                items[s] = testCases2BeDisplayed
            # BUGFIX: narrowed from a bare "except:" which also swallowed
            # SystemExit and KeyboardInterrupt
            except Exception:
                logger.error("Error: installer %s, version %s, scenario %s" %
                             (installer, version, s))
                logger.error("No data available: %s " % (sys.exc_info()[0]))

            # **********************************************
            # Evaluate the results for scenario validation
            # **********************************************
            # the validation criteria = nb runnable tests x 3
            # because each test case = 0,1,2 or 3
            scenario_criteria = nb_test_runnable_for_this_scenario * 3
            # if 0 runnable tests set criteria at a high value
            if scenario_criteria < 1:
                scenario_criteria = 50  # conf.MAX_SCENARIO_CRITERIA

            s_score = str(scenario_score) + "/" + str(scenario_criteria)
            s_score_percent = rp_utils.getScenarioPercent(scenario_score,
                                                          scenario_criteria)

            s_status = "KO"
            if scenario_score < scenario_criteria:
                logger.info(">>>> scenario not OK, score = %s/%s" %
                            (scenario_score, scenario_criteria))
                s_status = "KO"
            else:
                logger.info(">>>>> scenario OK, save the information")
                s_status = "OK"
                path_validation_file = ("./display/" + version +
                                        "/functest/" +
                                        "validated_scenario_history.txt")
                with open(path_validation_file, "a") as f:
                    time_format = "%Y-%m-%d %H:%M"
                    info = (datetime.datetime.now().strftime(time_format) +
                            ";" + installer + ";" + s + "\n")
                    f.write(info)

            # Save daily results in a file
            with open(scenario_file_name, "a") as f:
                info = (reportingDate + "," + s + "," + installer +
                        "," + s_score + "," +
                        str(round(s_score_percent)) + "\n")
                f.write(info)

            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
                                                            s_score,
                                                            s_score_percent,
                                                            s_url)
            logger.info("--------------------------")

        # Render the HTML status page for this installer/version
        templateLoader = jinja2.FileSystemLoader(".")
        templateEnv = jinja2.Environment(
            loader=templateLoader, autoescape=True)

        TEMPLATE_FILE = "./functest/template/index-status-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_stats=scenario_stats,
                                     scenario_results=scenario_result_criteria,
                                     items=items,
                                     installer=installer,
                                     period=period,
                                     version=version,
                                     date=reportingDate)

        with open("./display/" + version +
                  "/functest/status-" + installer + ".html", "wb") as fh:
            fh.write(outputText)

        logger.info("Manage export CSV & PDF")
        rp_utils.export_csv(scenario_file_name, installer, version)
        # BUGFIX: was logged at ERROR level although this is the normal flow
        logger.info("CSV generated...")

        # Generate outputs for export
        # pdf
        # TODO Change once web site updated...use the current one
        # to test pdf production
        # NOTE(review): url_pdf is currently unused (pdf_path is used
        # instead) — kept pending the TODO above.
        url_pdf = rp_utils.get_config('general.url')
        pdf_path = ("./display/" + version +
                    "/functest/status-" + installer + ".html")
        pdf_doc_name = ("./display/" + version +
                        "/functest/status-" + installer + ".pdf")
        rp_utils.export_pdf(pdf_path, pdf_doc_name)
        logger.info("PDF generated...")