Merge "Add export functions to reporting"
[releng.git] / utils / test / reporting / functest / reporting-status.py
#!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
import jinja2
import pdfkit
import requests
import sys
import time
import yaml

import reportingUtils as utils
import reportingConf as conf
import testCase as tc
import scenarioResult as sr

# Logger
logger = utils.getLogger("Status")

# Initialization
testValid = []
otherTestCases = []
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

# init just tempest to get the list of scenarios
# as all the scenarios run Tempest
tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer and scenario
cf = conf.TEST_CONF
response = requests.get(cf)

functest_yaml_config = yaml.safe_load(response.text)
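# The retrieved config is expected to expose a "tiers" list; only the keys
# actually read below are sketched here (illustrative layout, not the full
# schema):
#   tiers:
#     - order: 1
#       testcases:
#         - name: tempest_smoke_serial
#           dependencies: {installer: '', scenario: ''}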

logger.info("*******************************************")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention = %s days              *" % conf.PERIOD)
logger.info("*                                         *")
logger.info("*******************************************")

# Retrieve the test cases for each tier
config_tiers = functest_yaml_config.get("tiers")

# We consider Tier 1 (smoke), 2 (sdn suites) and 3 (features)
# to validate scenarios.
# Tiers > 3 are not used to validate scenarios, but their results are
# displayed anyway.
# Tricky thing for the API: some tests are Functest tests, while others
# are declared directly in the feature projects.
for tier in config_tiers:
    if tier['order'] > 0 and tier['order'] < 3:
        for case in tier['testcases']:
            if case['name'] not in conf.blacklist:
                testValid.append(tc.TestCase(case['name'],
                                             "functest",
                                             case['dependencies']))
    elif tier['order'] == 3:
        for case in tier['testcases']:
            if case['name'] not in conf.blacklist:
                testValid.append(tc.TestCase(case['name'],
                                             case['name'],
                                             case['dependencies']))
    elif tier['order'] > 3:
        for case in tier['testcases']:
            if case['name'] not in conf.blacklist:
                otherTestCases.append(tc.TestCase(case['name'],
                                                  "functest",
                                                  case['dependencies']))
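# testValid now holds the Tier 1-3 cases used to validate scenarios;
# otherTestCases holds the Tier >3 cases whose results are only displayed.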

# For all the versions
for version in conf.versions:
    # For all the installers
    for installer in conf.installers:
        # get scenarios
        scenario_results = utils.getScenarios(tempest, installer, version)
        scenario_stats = utils.getScenarioStats(scenario_results)
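        # scenario_results is assumed to map each scenario name to its list
        # of result records (each carrying at least a 'build_tag'), which is
        # how it is consumed below.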
        items = {}
        scenario_result_criteria = {}

        # For all the scenarios get results
        for s, s_result in scenario_results.items():
            logger.info("---------------------------------")
            logger.info("installer %s, version %s, scenario %s:" %
                        (installer, version, s))
            logger.debug("Scenario results: %s" % s_result)

            # Green or Red light for a given scenario
            nb_test_runnable_for_this_scenario = 0
            scenario_score = 0
            # url of the last jenkins log corresponding to a given
            # scenario
            s_url = ""
            if len(s_result) > 0:
                build_tag = s_result[-1]['build_tag']
                logger.debug("Build tag: %s" % build_tag)
                s_url = utils.getJenkinsUrl(build_tag)
                logger.info("last jenkins url: %s" % s_url)
            testCases2BeDisplayed = []
            # Check whether each test case is runnable for this installer and
            # scenario, starting with the cases used for scenario validation
            try:
                # 1) Manage the test cases for the scenario validation
                # concretely Tiers 0-3
                for test_case in testValid:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        nb_test_runnable_for_this_scenario += 1
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = utils.getResult(dbName, installer, s, version)
                        # if no result set the value to 0
                        if result < 0:
                            result = 0
                        logger.info(" >>>> Test score = " + str(result))
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 "",
                                                                 result,
                                                                 True,
                                                                 1))
                        scenario_score = scenario_score + result

                # 2) Manage the test cases for the scenario qualification
                # concretely Tiers > 3
                for test_case in otherTestCases:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = utils.getResult(dbName, installer, s, version)
                        # at least 1 result for the test
                        if result > -1:
                            test_case.setCriteria(result)
                            test_case.setIsRunnable(True)
                            testCases2BeDisplayed.append(tc.TestCase(name,
                                                                     project,
                                                                     "",
                                                                     result,
                                                                     True,
                                                                     4))
                        else:
                            logger.debug("No results found")

                items[s] = testCases2BeDisplayed
            except Exception:
                logger.error("Error: installer %s, version %s, scenario %s" %
                             (installer, version, s))
                logger.error("No data available: %s " % (sys.exc_info()[0]))

            # **********************************************
            # Evaluate the results for scenario validation
            # **********************************************
            # the validation criteria = number of runnable tests x 3,
            # because each test case scores 0, 1, 2 or 3
            scenario_criteria = nb_test_runnable_for_this_scenario * 3
            # if there are no runnable tests, set the criteria to a high value
            if scenario_criteria < 1:
                scenario_criteria = conf.MAX_SCENARIO_CRITERIA

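            # Example: 4 runnable validation tests give a ceiling of 12;
            # only a scenario scoring 12/12 is flagged OK below.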
            s_score = str(scenario_score) + "/" + str(scenario_criteria)
            s_score_percent = 0.0
            try:
                s_score_percent = (float(scenario_score) /
                                   float(scenario_criteria) * 100)
            except ZeroDivisionError:
                logger.error("cannot calculate the score percent")

            s_status = "KO"
            if scenario_score < scenario_criteria:
                logger.info(">>>> scenario not OK, score = %s/%s" %
                            (scenario_score, scenario_criteria))
                s_status = "KO"
            else:
                logger.info(">>>>> scenario OK, save the information")
                s_status = "OK"
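                # Append to validated_scenario_history.txt; each line reads
                # "<YYYY-MM-DD HH:MM>;<installer>;<scenario>"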
                path_validation_file = (conf.REPORTING_PATH +
                                        "/functest/release/" + version +
                                        "/validated_scenario_history.txt")
                with open(path_validation_file, "a") as f:
                    time_format = "%Y-%m-%d %H:%M"
                    info = (datetime.datetime.now().strftime(time_format) +
                            ";" + installer + ";" + s + "\n")
                    f.write(info)

            # Save daily results in a file
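            # Each line of scenario_history.txt reads
            # "<date>,<scenario>,<installer>,<score>,<rounded percent>"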
            path_validation_file = (conf.REPORTING_PATH +
                                    "/functest/release/" + version +
                                    "/scenario_history.txt")
            with open(path_validation_file, "a") as f:
                info = (reportingDate + "," + s + "," + installer +
                        "," + s_score + "," +
                        str(round(s_score_percent)) + "\n")
                f.write(info)

            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
                                                            s_score,
                                                            s_score_percent,
                                                            s_url)
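            # One ScenarioResult per scenario; the dict is passed to the
            # template below as scenario_results.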
            logger.info("--------------------------")

        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
        templateEnv = jinja2.Environment(
            loader=templateLoader, autoescape=True)

        TEMPLATE_FILE = "/functest/template/index-status-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_stats=scenario_stats,
                                     scenario_results=scenario_result_criteria,
                                     items=items,
                                     installer=installer,
                                     period=conf.PERIOD,
                                     version=version,
                                     date=reportingDate)

    with open(conf.REPORTING_PATH + "/functest/release/" + version +
              "/index-status-" + installer + ".html", "wb") as fh:
        fh.write(outputText)

    # Generate outputs for export
    # pdf
    try:
        pdf_path = ("http://testresults.opnfv.org/reporting/" +
                    "functest/release/" + version +
                    "/index-status-" + installer + ".html")
        pdf_doc_name = (conf.REPORTING_PATH +
                        "/functest/release/" + version +
                        "/status-" + installer + ".pdf")
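        # pdfkit delegates the conversion to the wkhtmltopdf binary, which
        # must be installed on the reporting host.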
        pdfkit.from_url(pdf_path, pdf_doc_name)
    except IOError:
        logger.info("pdf generated anyway...")
    except Exception:
        logger.error("impossible to generate PDF")
    # csv
    # generate sub files based on scenario_history.txt
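    # Note: path_validation_file still points at the scenario_history.txt
    # written in the loop above, so the filter below reads the daily results.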
    scenario_installer_file_name = (conf.REPORTING_PATH +
                                    "/functest/release/" + version +
                                    "/scenario_history_" +
                                    installer + ".txt")
    with open(scenario_installer_file_name, "w") as scenario_installer_file:
        with open(path_validation_file, "r") as f:
            for line in f:
                if installer in line:
                    scenario_installer_file.write(line)