#!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import datetime
import jinja2
import requests
import sys
import time
import yaml

import reportingUtils as utils
import reportingConf as conf
import testCase as tc
import scenarioResult as sr

# Logger
logger = utils.getLogger("Status")

# Initialization
testValid = []
otherTestCases = []
reportingDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

# init just tempest to get the list of scenarios
# as all the scenarios run Tempest
tempest = tc.TestCase("tempest_smoke_serial", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer and scenario
cf = conf.TEST_CONF
response = requests.get(cf)

functest_yaml_config = yaml.safe_load(response.text)
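# the test cases are grouped by tier in this configuration
# (cf. the 'tiers' section used below)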

logger.info("*******************************************")
logger.info("*   Generating reporting scenario status  *")
logger.info("*   Data retention = %s days              *" % conf.PERIOD)
logger.info("*                                         *")
logger.info("*******************************************")

# Retrieve the test cases grouped by tier
config_tiers = functest_yaml_config.get("tiers")

# we consider Tier 1 (smoke), Tier 2 (sdn suites) and Tier 3 (features)
# to validate the scenarios
# Tiers > 3 are not used to validate scenarios, but their results are
# displayed anyway
# tricky thing for the API: some tests are Functest test cases,
# others are declared directly in the feature projects
for tier in config_tiers:
    if tier['order'] > 0 and tier['order'] < 3:
        for case in tier['testcases']:
            if case['name'] not in conf.blacklist:
                testValid.append(tc.TestCase(case['name'],
                                             "functest",
                                             case['dependencies']))
    elif tier['order'] == 3:
        for case in tier['testcases']:
            if case['name'] not in conf.blacklist:
                testValid.append(tc.TestCase(case['name'],
                                             case['name'],
                                             case['dependencies']))
    elif tier['order'] > 3:
        for case in tier['testcases']:
            if case['name'] not in conf.blacklist:
                otherTestCases.append(tc.TestCase(case['name'],
                                                  "functest",
                                                  case['dependencies']))
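# testValid now holds the test cases used to validate the scenarios
# (Tiers 1 to 3); otherTestCases holds the Tier > 3 cases that are
# only displayed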

# For all the versions
for version in conf.versions:
    # For all the installers
    for installer in conf.installers:
        # get scenarios
        scenario_results = utils.getScenarios(tempest, installer, version)
        scenario_stats = utils.getScenarioStats(scenario_results)
        items = {}
        scenario_result_criteria = {}
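        # items: per-scenario list of test cases to display
        # scenario_result_criteria: per-scenario consolidated result
        # both are passed to the HTML template at the end of the loop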

        # For all the scenarios get results
        for s, s_result in scenario_results.items():
            logger.info("---------------------------------")
            logger.info("installer %s, version %s, scenario %s:" %
                        (installer, version, s))
            logger.debug("Scenario results: %s" % s_result)

            # Green or Red light for a given scenario
            nb_test_runnable_for_this_scenario = 0
            scenario_score = 0
            # url of the last jenkins log corresponding to a given
            # scenario
            s_url = ""
            if len(s_result) > 0:
                build_tag = s_result[len(s_result)-1]['build_tag']
                logger.debug("Build tag: %s" % build_tag)
                s_url = utils.getJenkinsUrl(build_tag)
                logger.info("last jenkins url: %s" % s_url)
            testCases2BeDisplayed = []
            # Check whether each test case is runnable for this
            # installer/scenario, starting with the test cases used for
            # scenario validation
            try:
                # 1) Manage the test cases for the scenario validation
                # concretely Tiers 1 to 3
                for test_case in testValid:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        nb_test_runnable_for_this_scenario += 1
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = utils.getResult(dbName, installer, s, version)
                        # if no result, set the score to 0
                        if result < 0:
                            result = 0
                        logger.info(" >>>> Test score = " + str(result))
                        test_case.setCriteria(result)
                        test_case.setIsRunnable(True)
                        testCases2BeDisplayed.append(tc.TestCase(name,
                                                                 project,
                                                                 "",
                                                                 result,
                                                                 True,
                                                                 1))
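                        # add the test score (0 to 3) to the scenario score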
                        scenario_score = scenario_score + result

                # 2) Manage the test cases for the scenario qualification
                # concretely Tiers > 3
                for test_case in otherTestCases:
                    test_case.checkRunnable(installer, s,
                                            test_case.getConstraints())
                    logger.debug("testcase %s (%s) is %s" %
                                 (test_case.getDisplayName(),
                                  test_case.getName(),
                                  test_case.isRunnable))
                    time.sleep(1)
                    if test_case.isRunnable:
                        dbName = test_case.getDbName()
                        name = test_case.getName()
                        displayName = test_case.getDisplayName()
                        project = test_case.getProject()
                        logger.info(" Searching results for case %s " %
                                    (displayName))
                        result = utils.getResult(dbName, installer, s, version)
                        # at least 1 result for the test
                        if result > -1:
                            test_case.setCriteria(result)
                            test_case.setIsRunnable(True)
                            testCases2BeDisplayed.append(tc.TestCase(name,
                                                                     project,
                                                                     "",
                                                                     result,
                                                                     True,
                                                                     4))
                        else:
                            logger.debug("No results found")

                    items[s] = testCases2BeDisplayed
            except Exception:
                logger.error("Error: installer %s, version %s, scenario %s" %
                             (installer, version, s))
                logger.error("No data available: %s " % (sys.exc_info()[0]))

            # **********************************************
            # Evaluate the results for scenario validation
            # **********************************************
            # the validation criteria = number of runnable tests x 3
            # because each test case scores 0, 1, 2 or 3
            scenario_criteria = nb_test_runnable_for_this_scenario * 3
            # if there is no runnable test, set the criteria to a high value
            if scenario_criteria < 1:
                scenario_criteria = conf.MAX_SCENARIO_CRITERIA
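                # a scenario without runnable tests therefore stays KO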

            s_score = str(scenario_score) + "/" + str(scenario_criteria)
            s_score_percent = float(
                scenario_score) / float(scenario_criteria) * 100
            s_status = "KO"
            if scenario_score < scenario_criteria:
                logger.info(">>>> scenario not OK, score = %s/%s" %
                            (scenario_score, scenario_criteria))
                s_status = "KO"
            else:
                logger.info(">>>>> scenario OK, save the information")
                s_status = "OK"
                path_validation_file = (conf.REPORTING_PATH +
                                        "/release/" + version +
                                        "/validated_scenario_history.txt")
                with open(path_validation_file, "a") as f:
                    time_format = "%Y-%m-%d %H:%M"
                    info = (datetime.datetime.now().strftime(time_format) +
                            ";" + installer + ";" + s + "\n")
                    f.write(info)

            # Save daily results in a file
            path_validation_file = (conf.REPORTING_PATH +
                                    "/release/" + version +
                                    "/scenario_history.txt")
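            # one line per scenario and per run:
            # date, scenario, installer, score, score in percent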
            with open(path_validation_file, "a") as f:
                info = (reportingDate + "," + s + "," + installer +
                        "," + s_score + "," +
                        str(round(s_score_percent)) + "\n")
                f.write(info)

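            # store the consolidated result used by the HTML template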
            scenario_result_criteria[s] = sr.ScenarioResult(s_status,
                                                            s_score,
                                                            s_score_percent,
                                                            s_url)
            logger.info("--------------------------")

        templateLoader = jinja2.FileSystemLoader(conf.REPORTING_PATH)
        templateEnv = jinja2.Environment(
            loader=templateLoader, autoescape=True)
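        # autoescape=True: values injected into the template are HTML-escaped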

        TEMPLATE_FILE = "/template/index-status-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        outputText = template.render(scenario_stats=scenario_stats,
                                     scenario_results=scenario_result_criteria,
                                     items=items,
                                     installer=installer,
                                     period=conf.PERIOD,
                                     version=version,
                                     date=reportingDate)

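        # one status page per installer and per version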
        with open(conf.REPORTING_PATH + "/release/" + version +
                  "/index-status-" + installer + ".html", "wb") as fh:
            fh.write(outputText)