from urllib2 import Request, urlopen, URLError
import datetime
import json
import os
import re
import sys

import jinja2
import requests
import yaml

# Declaration of the variables
functest_test_list = ['vPing', 'vPing_userdata',
                      'ODL', 'ONOS', 'vIMS']
# functest_test_list = ['vPing']
companion_test_list = ['doctor/doctor-notification', 'promise/promise']
# companion_test_list = []
installers = ["apex", "compass", "fuel", "joid"]
# installers = ["fuel"]
versions = ["brahmaputra", "master"]
# versions = ["master"]
# Data retention period (in days) used for the test API queries;
# 10 is an assumed default, tune as needed
PERIOD = 10
MAX_SCENARIO_CRITERIA = 18
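
# Scoring: each runnable case yields an indicator between 0 and 3 (see
# getResult); a scenario is validated when the sum of the indicators
# reaches 3 x the number of runnable tests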

# Correspondence between the name of the test case and the name in the DB;
# ideally the DB should be modified to avoid this indirection.
# '<name in the DB>': '<name in the config>'
# NB: apart from 'healthcheck', 'vping_userdata' and 'doctor', the entries
# are assumed to follow the same lowercase convention
test_match_matrix = {'healthcheck': 'healthcheck',
                     'vPing': 'vping',
                     'vPing_userdata': 'vping_userdata',
                     'ODL': 'odl',
                     'ONOS': 'onos',
                     'Tempest': 'tempest',
                     'vIMS': 'vims',
                     'doctor-notification': 'doctor',
                     'promise': 'promise'}


class TestCase(object):
    def __init__(self, name, project, criteria=-1, isRunnable=True):
        self.name = name
        self.project = project
        self.criteria = criteria
        self.isRunnable = isRunnable

    def getName(self):
        return self.name

    def getCriteria(self):
        return self.criteria

    def setCriteria(self, criteria):
        self.criteria = criteria

    def setIsRunnable(self, isRunnable):
        self.isRunnable = isRunnable
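
    # checkRunnable matches the regex constraints declared for this case in
    # the Functest configuration ("test-dependencies") against the current
    # installer/scenario and updates isRunnable accordingly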
    def checkRunnable(self, installer, scenario, config):
        # Re-use the Functest declaration: the test dependencies come from
        # the Functest configuration file (config_functest.yaml)
        is_runnable = True
        config_test = ""
        TEST_ENV = config.get("test-dependencies")

        # Retrieve the test constraints
        case_name_formated = test_match_matrix[self.name]
        try:
            config_test = TEST_ENV[self.project][case_name_formated]
        except KeyError:
            # if not defined under the project, look it up at the top
            # level; if absent there too => no dependencies
            config_test = TEST_ENV.get(case_name_formated)
        except Exception as e:
            print "Error [getTestEnv]:", e

        # Retrieve the test execution context
        test_execution_context = {"installer": installer,
                                  "scenario": scenario}

        # By default we assume that all the tests are runnable;
        # if config_test is not empty => dependencies to be checked
        if config_test is not None and len(config_test) > 0:
            # possible criteria = ["installer", "scenario"]
            # compare each criterion from the config file with the CI
            # execution context
            for criteria in config_test:
                if re.search(config_test[criteria],
                             test_execution_context[criteria]) is None:
                    # this test cannot be run in this environment
                    is_runnable = False
        self.isRunnable = is_runnable


class ScenarioResult(object):
    # simple holder for the status ("OK"/"KO") and the score of a scenario
    def __init__(self, status, score=0):
        self.status = status
        self.score = score

    def getStatus(self):
        return self.status

    def getScore(self):
        return self.score


# *****************************************************************************
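
# getApiResults fetches the raw results of one case from the OPNFV test API
# for the given installer/scenario/version over the last PERIOD days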
def getApiResults(case, installer, scenario, version):
    case = case.getName()
    results = {}
    url = ("http://testresults.opnfv.org/testapi/results?case=" + case +
           "&period=" + str(PERIOD) + "&installer=" + installer +
           "&scenario=" + scenario + "&version=" + version)
    request = Request(url)

    try:
        response = urlopen(request)
        k = response.read()
        results = json.loads(k)
    except URLError as e:
        print 'Got an error code:', e

    return results
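

# getScenarios groups all the results of a case per scenario; since every
# scenario runs Tempest, querying Tempest gives the full list of scenarios
# exercised by an installer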
def getScenarios(case, installer, version):

    case = case.getName()
    url = ("http://testresults.opnfv.org/testapi/results?case=" + case +
           "&period=" + str(PERIOD) + "&installer=" + installer +
           "&version=" + version)
    request = Request(url)
    results = {}

    try:
        response = urlopen(request)
        k = response.read()
        results = json.loads(k)
    except URLError as e:
        print 'Got an error code:', e

    test_results = results.get('test_results')
    scenario_results = {}

    if test_results is not None:
        test_results.reverse()

        for r in test_results:
            # Retrieve all the scenarios per installer
            if r['scenario'] not in scenario_results:
                scenario_results[r['scenario']] = []
            scenario_results[r['scenario']].append(r)

    return scenario_results
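

# getScenarioStats counts how many results are available per scenario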
def getScenarioStats(scenario_results):
    scenario_stats = {}
    for k, v in scenario_results.iteritems():
        scenario_stats[k] = len(v)

    return scenario_stats
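

# getNbtestOk counts the passed runs in a list of {creation_date: criteria}
# entries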
def getNbtestOk(results):
    nb_test_ok = 0
    for r in results:
        for k, v in r.iteritems():
            try:
                # assumption: the DB reports a passed run with "SUCCESS"
                # in the criteria field
                if "SUCCESS" in v:
                    nb_test_ok += 1
            except Exception:
                print "Cannot retrieve test status"
    return nb_test_ok
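

# getResult computes the 0-3 indicator of one case from its runs over the
# reporting period (see the level definitions below)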
def getResult(testCase, installer, scenario, version):

    # retrieve the raw results
    results = getApiResults(testCase, installer, scenario, version)
    # let's concentrate on the test results only
    test_results = results.get('test_results')

    test_result_indicator = 0

    # if results are found, analyze them
    if test_results is not None:
        test_results.reverse()

        scenario_results = []
        for r in test_results:
            scenario_results.append({r["creation_date"]: r["criteria"]})
        scenario_results.sort()

        # 4 levels for the results
        # 3: 4+ consecutive runs passing the success criteria
        # 2: <4 successful consecutive runs but passing the criteria
        # 1: close to passing the success criteria
        # 0: 0% success, not passing
        nbTestOk = getNbtestOk(scenario_results)
        if nbTestOk < 1:
            test_result_indicator = 0
        elif nbTestOk < 2:
            test_result_indicator = 1
        else:
            # check the last 4 runs: all of them must pass to reach level 3
            if len(scenario_results) > 3:
                last4runResults = scenario_results[-4:]
                if getNbtestOk(last4runResults) > 3:
                    test_result_indicator = 3
                else:
                    test_result_indicator = 2
            else:
                test_result_indicator = 2
    print " >>>> Test indicator:" + str(test_result_indicator)
    return test_result_indicator


# *****************************************************************************
# Main
# *****************************************************************************
# init just Tempest to get the list of scenarios,
# as all the scenarios run Tempest
tempest = TestCase("Tempest", "functest", -1)

# Retrieve the Functest configuration to detect which tests are relevant
# according to the installer and the scenario
cf = "https://git.opnfv.org/cgit/functest/plain/ci/config_functest.yaml"
response = requests.get(cf)
functest_yaml_config = yaml.safe_load(response.text)

print "****************************************"
print "*   Generating reporting.....          *"
print ("*   Data retention = %s days           *" % PERIOD)
print "****************************************"

# For all the versions
for version in versions:
    # For all the installers
    for installer in installers:
        # get the results grouped per scenario
        scenario_results = getScenarios(tempest, installer, version)
        scenario_stats = getScenarioStats(scenario_results)

        scenario_result_criteria = {}

        # For all the scenarios, get the results
        for s, s_result in scenario_results.items():
            testCases = []
            scenario_score = 0
            # Green or red light for a given scenario
            nb_test_runnable_for_this_scenario = 0

            # For each scenario declare the test cases
            # Functest cases
            for test_case in functest_test_list:
                testCases.append(TestCase(test_case, "functest"))

            # companion cases, declared as '<project>/<case>'
            for test_case in companion_test_list:
                test_split = test_case.split("/")
                test_project = test_split[0]
                test_case = test_split[1]
                testCases.append(TestCase(test_case, test_project))
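
            # Only the runnable cases are scored: each one adds its 0-3
            # indicator to scenario_score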
            # Check whether each test case is runnable for this
            # installer/scenario
            try:
                for test_case in testCases:
                    test_case.checkRunnable(installer, s,
                                            functest_yaml_config)
                print ("installer %s, version %s, scenario %s:" %
                       (installer, version, s))
                for testCase in testCases:
                    if testCase.isRunnable:
                        nb_test_runnable_for_this_scenario += 1
                        print (" Searching results for case %s " %
                               (testCase.getName()))
                        result = getResult(testCase, installer, s, version)
                        testCase.setCriteria(result)
                        scenario_score = scenario_score + result
            except Exception:
                print ("installer %s, version %s, scenario %s" %
                       (installer, version, s))
                print "No data available, error %s" % (sys.exc_info()[0])

            # the validation criteria = nb of runnable tests x 3
            scenario_criteria = nb_test_runnable_for_this_scenario * 3
            # if there are no runnable tests, set the criteria to a high
            # value so the scenario cannot be validated by accident
            if scenario_criteria < 1:
                scenario_criteria = MAX_SCENARIO_CRITERIA

            s_score = str(scenario_score) + "/" + str(scenario_criteria)
            if scenario_score < scenario_criteria:
                s_status = "KO"
                print (">>>> scenario not OK, score = %s/%s" %
                       (scenario_score, scenario_criteria))
            else:
                s_status = "OK"
                print ">>>>> scenario OK, save the information"
                with open("./release/" + version +
                          "/validated_scenario_history.txt", "a") as f:
                    time_format = "%Y-%m-%d %H:%M"
                    info = (datetime.datetime.now().strftime(time_format) +
                            ";" + installer + ";" + s + "\n")
                    f.write(info)

            scenario_result_criteria[s] = ScenarioResult(s_status, s_score)
            print "--------------------------"
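
        # Render the HTML status page for this installer/version; the
        # template is resolved relative to this script's directory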
        templateLoader = jinja2.FileSystemLoader(
            os.path.dirname(os.path.abspath(__file__)))
        templateEnv = jinja2.Environment(loader=templateLoader)

        TEMPLATE_FILE = "./template/index-status-tmpl.html"
        template = templateEnv.get_template(TEMPLATE_FILE)

        # the extra context keys (installer, period, version) are assumed
        # to be what index-status-tmpl.html expects
        outputText = template.render(scenario_stats=scenario_stats,
                                     scenario_results=scenario_result_criteria,
                                     installer=installer,
                                     period=PERIOD,
                                     version=version)

        with open("./release/" + version +
                  "/index-status-" + installer + ".html", "wb") as fh:
            fh.write(outputText)