Merge "resolve issue where verify jobs cannot be triggered"
[releng.git] / utils / test / reporting / functest / reporting-status.py
1 #!/usr/bin/python
2 #
3 # This program and the accompanying materials
4 # are made available under the terms of the Apache License, Version 2.0
5 # which accompanies this distribution, and is available at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 import datetime
10 import jinja2
11 import pdfkit
12 import requests
13 import sys
14 import time
15 import yaml
16
17 import reportingUtils as utils
18 import reportingConf as conf
19 import testCase as tc
20 import scenarioResult as sr
21
# Logger
logger = utils.getLogger("Status")

# Initialization
# testValid: test cases used to validate a scenario (tiers 0-3)
testValid = []
# otherTestCases: test cases only reported, not used for validation (tiers > 3)
otherTestCases = []
# timestamp reused in every CSV line written by this run
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

# init just tempest to get the list of scenarios
# as all the scenarios run Tempest
tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer, scenario
# NOTE(review): conf.TEST_CONF is fetched over HTTP with no error handling —
# a network failure here aborts the whole report; confirm that is acceptable.
cf = conf.TEST_CONF
response = requests.get(cf)

# safe_load: the remote YAML is treated as untrusted input
functest_yaml_config = yaml.safe_load(response.text)

logger.info("*******************************************")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention = %s days              *" % conf.PERIOD)
logger.info("*                                         *")
logger.info("*******************************************")
46
# Retrieve test cases of Tier 1 (smoke)
config_tiers = functest_yaml_config.get("tiers")

# we consider Tier 1 (smoke), 2 (sdn suites) and 3 (features)
# to validate scenarios
# Tier > 4 are not used to validate scenarios but we display the results anyway
# tricky thing for the API as some tests are Functest tests
# other tests are declared directly in the feature projects
for tier in config_tiers:
    order = tier['order']
    if order < 1:
        # Tier 0 (healthcheck) is never reported
        continue
    for case in tier['testcases']:
        name = case['name']
        if name in conf.blacklist:
            continue
        deps = case['dependencies']
        if order < 3:
            # tiers 1-2: results live under the "functest" project in the DB
            testValid.append(tc.TestCase(name, "functest", deps))
        elif order == 3:
            # tier 3 (features): results are stored under the feature
            # project, which has the same name as the case
            testValid.append(tc.TestCase(name, name, deps))
        else:
            # tiers > 3: displayed only, not used for scenario validation
            otherTestCases.append(tc.TestCase(name, "functest", deps))
74
# Main reporting loop: for every (version, installer) pair, score each
# scenario from the DB results, append CSV history lines, render the HTML
# status page and try to export it as PDF.
# For all the versions
for version in conf.versions:
    # For all the installers
    for installer in conf.installers:
        # get scenarios
        scenario_results = utils.getScenarios(tempest, installer, version)
        scenario_stats = utils.getScenarioStats(scenario_results)
        items = {}
        scenario_result_criteria = {}

        # history file shared by all installers of this version
        scenario_file_name = (conf.REPORTING_PATH +
                              "/functest/release/" + version +
                              "/scenario_history.txt")

        # For all the scenarios get results
        for s, s_result in scenario_results.items():
            logger.info("---------------------------------")
            logger.info("installer %s, version %s, scenario %s:" %
                        (installer, version, s))
            logger.debug("Scenario results: %s" % s_result)

            # Green or Red light for a given scenario
            nb_test_runnable_for_this_scenario = 0
            scenario_score = 0
            # url of the last jenkins log corresponding to a given
            # scenario
            s_url = ""
            if len(s_result) > 0:
                build_tag = s_result[len(s_result)-1]['build_tag']
                logger.debug("Build tag: %s" % build_tag)
                # fix: original had a duplicated assignment
                # (s_url = s_url = utils.getJenkinsUrl(...))
                s_url = utils.getJenkinsUrl(build_tag)
                logger.info("last jenkins url: %s" % s_url)
            testCases2BeDisplayed = []
            # Check if test case is runnable / installer, scenario
            # for the test case used for Scenario validation
            try:
                # 1) Manage the test cases for the scenario validation
                # concretely Tiers 0-3
                for test_case in testValid:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    # throttle the test API — one query per second
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        nb_test_runnable_for_this_scenario += 1
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = utils.getResult(dbName, installer, s, version)
                        # if no result set the value to 0
                        if result < 0:
                            result = 0
                        logger.info(" >>>> Test score = " + str(result))
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 "",
                                                                 result,
                                                                 True,
                                                                 1))
                        scenario_score = scenario_score + result

                # 2) Manage the test cases for the scenario qualification
                # concretely Tiers > 3
                for test_case in otherTestCases:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = utils.getResult(dbName, installer, s, version)
                        # at least 1 result for the test
                        if result > -1:
                            test_case.setCriteria(result)
                            test_case.setIsRunnable(True)
                            testCases2BeDisplayed.append(tc.TestCase(name,
                                                                     project,
                                                                     "",
                                                                     result,
                                                                     True,
                                                                     4))
                        else:
                            logger.debug("No results found")

                    # NOTE(review): kept inside the loop as in the original —
                    # items[s] is therefore never set when otherTestCases is
                    # empty; confirm whether that is intended
                    items[s] = testCases2BeDisplayed
            # fix: narrowed from a bare except; still best-effort (the
            # scenario is skipped and reported as an error) but no longer
            # swallows SystemExit/KeyboardInterrupt
            except Exception:
                logger.error("Error: installer %s, version %s, scenario %s" %
                             (installer, version, s))
                logger.error("No data available: %s " % (sys.exc_info()[0]))

            # **********************************************
            # Evaluate the results for scenario validation
            # **********************************************
            # the validation criteria = nb runnable tests x 3
            # because each test case = 0,1,2 or 3
            scenario_criteria = nb_test_runnable_for_this_scenario * 3
            # if 0 runnable tests set criteria at a high value
            # so the scenario can never be marked OK by 0 >= 0
            if scenario_criteria < 1:
                scenario_criteria = conf.MAX_SCENARIO_CRITERIA

            s_score = str(scenario_score) + "/" + str(scenario_criteria)
            s_score_percent = utils.getScenarioPercent(scenario_score,
                                                       scenario_criteria)

            s_status = "KO"
            if scenario_score < scenario_criteria:
                logger.info(">>>> scenario not OK, score = %s/%s" %
                            (scenario_score, scenario_criteria))
                s_status = "KO"
            else:
                logger.info(">>>>> scenario OK, save the information")
                s_status = "OK"
                path_validation_file = (conf.REPORTING_PATH +
                                        "/functest/release/" + version +
                                        "/validated_scenario_history.txt")
                with open(path_validation_file, "a") as f:
                    time_format = "%Y-%m-%d %H:%M"
                    info = (datetime.datetime.now().strftime(time_format) +
                            ";" + installer + ";" + s + "\n")
                    f.write(info)

            # Save daily results in a file
            with open(scenario_file_name, "a") as f:
                info = (reportingDate + "," + s + "," + installer +
                        "," + s_score + "," +
                        str(round(s_score_percent)) + "\n")
                f.write(info)

            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
                                                            s_score,
                                                            s_score_percent,
                                                            s_url)
            logger.info("--------------------------")

        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
        templateEnv = jinja2.Environment(
            loader=templateLoader, autoescape=True)

        TEMPLATE_FILE = "/functest/template/index-status-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_stats=scenario_stats,
                                     scenario_results=scenario_result_criteria,
                                     items=items,
                                     installer=installer,
                                     period=conf.PERIOD,
                                     version=version,
                                     date=reportingDate)

        # csv
        # generate sub files based on scenario_history.txt
        scenario_installer_file_name = (conf.REPORTING_PATH +
                                        "/functest/release/" + version +
                                        "/scenario_history_" + installer +
                                        ".txt")
        logger.info("Generate CSV...")
        # fix: the original called scenario_installer_file.close without
        # parentheses, so the handle was never closed; the context manager
        # guarantees it is
        with open(scenario_installer_file_name, "a") as scenario_installer_file:
            with open(scenario_file_name, "r") as f:
                for line in f:
                    if installer in line:
                        logger.debug("Add new line... %s" % line)
                        scenario_installer_file.write(line)

        with open(conf.REPORTING_PATH + "/functest/release/" + version +
                  "/index-status-" + installer + ".html", "wb") as fh:
            fh.write(outputText)
        logger.info("CSV generated...")

        # Generate outputs for export
        # pdf
        logger.info("Generate PDF...")
        try:
            pdf_path = ("http://testresults.opnfv.org/reporting/" +
                        "functest/release/" + version +
                        "/index-status-" + installer + ".html")
            pdf_doc_name = (conf.REPORTING_PATH +
                            "/functest/release/" + version +
                            "/status-" + installer + ".pdf")
            pdfkit.from_url(pdf_path, pdf_doc_name)
            logger.info("PDF generated...")
        except IOError:
            # pdfkit raises IOError on wkhtmltopdf warnings even when the
            # PDF was actually produced
            logger.info("pdf generated anyway...")
        # fix: narrowed from a bare except — PDF export stays best-effort
        except Exception:
            logger.error("impossible to generate PDF")